[split-required] Split 500-850 LOC files (batch 2)
backend-lehrer (10 files): - game/database.py (785 → 5), correction_api.py (683 → 4) - classroom_engine/antizipation.py (676 → 5) - llm_gateway schools/edu_search already done in prior batch klausur-service (12 files): - orientation_crop_api.py (694 → 5), pdf_export.py (677 → 4) - zeugnis_crawler.py (676 → 5), grid_editor_api.py (671 → 5) - eh_templates.py (658 → 5), mail/api.py (651 → 5) - qdrant_service.py (638 → 5), training_api.py (625 → 4) website (6 pages): - middleware (696 → 8), mail (733 → 6), consent (628 → 8) - compliance/risks (622 → 5), export (502 → 5), brandbook (629 → 7) studio-v2 (3 components): - B2BMigrationWizard (848 → 3), CleanupPanel (765 → 2) - dashboard-experimental (739 → 2) admin-lehrer (4 files): - uebersetzungen (769 → 4), manager (670 → 2) - ChunkBrowserQA (675 → 6), dsfa/page (674 → 5) Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -1,694 +1,16 @@
|
||||
"""
|
||||
Orientation & Crop API - Steps 1 and 4 of the OCR Pipeline.
|
||||
|
||||
Step 1: Orientation detection (fix 90/180/270 degree rotations)
|
||||
Step 4 (UI index 3): Page cropping (after deskew + dewarp, so the image is straight)
|
||||
|
||||
These endpoints were extracted from the main pipeline to keep files manageable.
|
||||
Barrel re-export: merges routers from orientation_api and crop_api,
|
||||
and re-exports set_cache_ref for main.py.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
import uuid as uuid_mod
|
||||
from typing import Any, Dict, List, Optional
|
||||
from fastapi import APIRouter
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from pydantic import BaseModel
|
||||
from orientation_crop_helpers import set_cache_ref # noqa: F401
|
||||
from orientation_api import router as _orientation_router
|
||||
from crop_api import router as _crop_router
|
||||
|
||||
from cv_vocab_pipeline import detect_and_fix_orientation
|
||||
from page_crop import detect_and_crop_page, detect_page_splits
|
||||
from ocr_pipeline_session_store import (
|
||||
create_session_db,
|
||||
get_session_db,
|
||||
get_session_image,
|
||||
get_sub_sessions,
|
||||
update_session_db,
|
||||
)
|
||||
|
||||
# Module-level logger (named after this module for filterable log output).
logger = logging.getLogger(__name__)

# All endpoints in this file share the OCR-pipeline URL prefix and tag.
router = APIRouter(prefix="/api/v1/ocr-pipeline", tags=["ocr-pipeline"])


# Reference to the shared cache from ocr_pipeline_api (set in main.py)
# Starts as an empty placeholder; main.py replaces it via set_cache_ref() so
# this module and ocr_pipeline_api operate on the same in-memory session cache.
_cache: Dict[str, Dict[str, Any]] = {}
|
||||
|
||||
|
||||
def set_cache_ref(cache: Dict[str, Any]) -> None:
    """Point this module's ``_cache`` at the dict shared with ocr_pipeline_api.

    Called once from main.py during startup so both routers read and write
    the same in-memory session cache.
    """
    global _cache
    _cache = cache
|
||||
|
||||
|
||||
async def _ensure_cached(session_id: str) -> Dict[str, Any]:
    """Return the cache entry for *session_id*, hydrating from the DB on a miss.

    On a miss the session row is loaded (404 if absent) and every stored
    stage PNG is decoded back into a BGR ndarray so later pipeline steps
    can work on pixel data directly.

    Raises:
        HTTPException: 404 if the session does not exist in the DB.
    """
    # EAFP: the common case is a warm cache.
    try:
        return _cache[session_id]
    except KeyError:
        pass

    session = await get_session_db(session_id)
    if not session:
        raise HTTPException(status_code=404, detail=f"Session {session_id} not found")

    # Start with every pixel buffer empty; fill in whatever the DB holds.
    cache_entry: Dict[str, Any] = {
        "id": session_id,
        **session,
        "original_bgr": None,
        "oriented_bgr": None,
        "cropped_bgr": None,
        "deskewed_bgr": None,
        "dewarped_bgr": None,
    }

    for stage in ("original", "oriented", "cropped", "deskewed", "dewarped"):
        png_data = await get_session_image(session_id, stage)
        if png_data:
            buf = np.frombuffer(png_data, dtype=np.uint8)
            cache_entry[f"{stage}_bgr"] = cv2.imdecode(buf, cv2.IMREAD_COLOR)

    _cache[session_id] = cache_entry
    return cache_entry
|
||||
|
||||
|
||||
async def _append_pipeline_log(session_id: str, step: str, metrics: dict, duration_ms: int):
    """Append a completed-step record to the session's persistent pipeline log.

    Best effort: returns silently when the session no longer exists, so
    logging can never fail the pipeline step that triggered it.
    """
    from datetime import datetime

    session = await get_session_db(session_id)
    if not session:
        return

    log = session.get("pipeline_log") or {"steps": []}
    entry = {
        "step": step,
        "completed_at": datetime.utcnow().isoformat(),
        "success": True,
        "duration_ms": duration_ms,
        "metrics": metrics,
    }
    log["steps"].append(entry)
    await update_session_db(session_id, pipeline_log=log)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Step 1: Orientation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@router.post("/sessions/{session_id}/orientation")
async def detect_orientation(session_id: str):
    """Step 1: detect a 90/180/270 degree scanner rotation and undo it.

    Loads the original image from the session, runs orientation detection,
    persists the corrected image as ``oriented_png`` and advances the
    session to step 2.
    """
    cached = await _ensure_cached(session_id)

    original = cached.get("original_bgr")
    if original is None:
        raise HTTPException(status_code=400, detail="Original image not available")

    started = time.time()
    oriented_bgr, orientation_deg = detect_and_fix_orientation(original.copy())
    elapsed = time.time() - started

    orientation_result = {
        "orientation_degrees": orientation_deg,
        "corrected": orientation_deg != 0,
        "duration_seconds": round(elapsed, 2),
    }

    # Serialize the corrected image for DB persistence.
    ok, buf = cv2.imencode(".png", oriented_bgr)

    # Keep the in-memory cache in sync before writing through to the DB.
    cached["oriented_bgr"] = oriented_bgr
    cached["orientation_result"] = orientation_result

    await update_session_db(
        session_id,
        oriented_png=buf.tobytes() if ok else b"",
        orientation_result=orientation_result,
        current_step=2,
    )

    logger.info(
        "OCR Pipeline: orientation session %s: %d° (%s) in %.2fs",
        session_id, orientation_deg,
        "corrected" if orientation_deg else "no change",
        elapsed,
    )

    await _append_pipeline_log(
        session_id,
        "orientation",
        {"orientation_degrees": orientation_deg, "corrected": orientation_deg != 0},
        duration_ms=int(elapsed * 1000),
    )

    height, width = oriented_bgr.shape[:2]
    return {
        "session_id": session_id,
        **orientation_result,
        "image_width": width,
        "image_height": height,
        "oriented_image_url": f"/api/v1/ocr-pipeline/sessions/{session_id}/image/oriented",
    }
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Step 1b: Page-split detection — runs AFTER orientation, BEFORE deskew
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@router.post("/sessions/{session_id}/page-split")
async def detect_page_split(session_id: str):
    """Detect if the image is a double-page book spread and split into sub-sessions.

    Must be called **after orientation** (step 1) and **before deskew** (step 2).
    Each sub-session receives the raw page region and goes through the full
    pipeline (deskew → dewarp → crop → columns → rows → words → grid)
    independently, so each page gets its own deskew correction.

    Returns ``{"multi_page": false}`` if only one page is detected.

    Raises:
        HTTPException: 404 if the session is unknown, 400 if no image exists.
    """
    cached = await _ensure_cached(session_id)

    # Use oriented (preferred), fall back to original
    img_bgr = next(
        (v for k in ("oriented_bgr", "original_bgr")
         if (v := cached.get(k)) is not None),
        None,
    )
    if img_bgr is None:
        raise HTTPException(status_code=400, detail="No image available for page-split detection")

    t0 = time.time()
    page_splits = detect_page_splits(img_bgr)
    # Tracks whether the split was found on the pre-orientation image; the
    # sub-sessions then need their own orientation pass (start_step=1).
    used_original = False

    if not page_splits or len(page_splits) < 2:
        # Orientation may have rotated a landscape double-page spread to
        # portrait. Try the original (pre-orientation) image as fallback.
        orig_bgr = cached.get("original_bgr")
        if orig_bgr is not None and orig_bgr is not img_bgr:
            page_splits_orig = detect_page_splits(orig_bgr)
            if page_splits_orig and len(page_splits_orig) >= 2:
                logger.info(
                    "OCR Pipeline: page-split session %s: spread detected on "
                    "ORIGINAL (orientation rotated it away)",
                    session_id,
                )
                img_bgr = orig_bgr
                page_splits = page_splits_orig
                used_original = True

    if not page_splits or len(page_splits) < 2:
        # Single page on both candidates — nothing to split, session unchanged.
        duration = time.time() - t0
        logger.info(
            "OCR Pipeline: page-split session %s: single page (%.2fs)",
            session_id, duration,
        )
        return {
            "session_id": session_id,
            "multi_page": False,
            "duration_seconds": round(duration, 2),
        }

    # Multi-page spread detected — create sub-sessions for full pipeline.
    # start_step=2 means "ready for deskew" (orientation already applied).
    # start_step=1 means "needs orientation too" (split from original image).
    start_step = 1 if used_original else 2
    sub_sessions = await _create_page_sub_sessions_full(
        session_id, cached, img_bgr, page_splits, start_step=start_step,
    )
    duration = time.time() - t0

    split_info: Dict[str, Any] = {
        "multi_page": True,
        "page_count": len(page_splits),
        "page_splits": page_splits,
        "used_original": used_original,
        "duration_seconds": round(duration, 2),
    }

    # Mark parent session as split and hidden from session list
    await update_session_db(session_id, crop_result=split_info, status='split')
    cached["crop_result"] = split_info

    await _append_pipeline_log(session_id, "page_split", {
        "multi_page": True,
        "page_count": len(page_splits),
    }, duration_ms=int(duration * 1000))

    logger.info(
        "OCR Pipeline: page-split session %s: %d pages detected in %.2fs",
        session_id, len(page_splits), duration,
    )

    h, w = img_bgr.shape[:2]
    return {
        "session_id": session_id,
        **split_info,
        "image_width": w,
        "image_height": h,
        "sub_sessions": sub_sessions,
    }
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Step 4 (UI index 3): Crop — runs after deskew + dewarp
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@router.post("/sessions/{session_id}/crop")
async def auto_crop(session_id: str):
    """Auto-detect and crop scanner/book borders.

    Reads the dewarped image (post-deskew + dewarp, so the page is straight).
    Falls back to oriented → original if earlier steps were skipped.

    If the image is a multi-page spread (e.g. book on scanner), it will
    automatically split into separate sub-sessions per page, crop each
    individually, and return the split info.

    Raises:
        HTTPException: 404 if the session is unknown, 400 if no image exists.
    """
    cached = await _ensure_cached(session_id)

    # Use dewarped (preferred), fall back to oriented, then original
    img_bgr = next(
        (v for k in ("dewarped_bgr", "oriented_bgr", "original_bgr")
         if (v := cached.get(k)) is not None),
        None,
    )
    if img_bgr is None:
        raise HTTPException(status_code=400, detail="No image available for cropping")

    t0 = time.time()

    # --- Check for existing sub-sessions (from page-split step) ---
    # If page-split already created sub-sessions, skip multi-page detection
    # in the crop step. Each sub-session runs its own crop independently.
    existing_subs = await get_sub_sessions(session_id)
    if existing_subs:
        crop_result = cached.get("crop_result") or {}
        if crop_result.get("multi_page"):
            # Already split — just return the existing info
            duration = time.time() - t0
            h, w = img_bgr.shape[:2]
            return {
                "session_id": session_id,
                **crop_result,
                "image_width": w,
                "image_height": h,
                "sub_sessions": [
                    {"id": s["id"], "name": s.get("name"), "page_index": s.get("box_index", i)}
                    for i, s in enumerate(existing_subs)
                ],
                "note": "Page split was already performed; each sub-session runs its own crop.",
            }

    # --- Multi-page detection (fallback for sessions that skipped page-split) ---
    page_splits = detect_page_splits(img_bgr)

    if page_splits and len(page_splits) >= 2:
        # Multi-page spread detected — create sub-sessions
        sub_sessions = await _create_page_sub_sessions(
            session_id, cached, img_bgr, page_splits,
        )
        duration = time.time() - t0

        crop_info: Dict[str, Any] = {
            "crop_applied": True,
            "multi_page": True,
            "page_count": len(page_splits),
            "page_splits": page_splits,
            "duration_seconds": round(duration, 2),
        }
        cached["crop_result"] = crop_info

        # Store the first page as the main cropped image for backward compat
        first_page = page_splits[0]
        first_bgr = img_bgr[
            first_page["y"]:first_page["y"] + first_page["height"],
            first_page["x"]:first_page["x"] + first_page["width"],
        ].copy()
        first_cropped, _ = detect_and_crop_page(first_bgr)
        cached["cropped_bgr"] = first_cropped

        # Parent is marked status='split' so it is hidden from the session
        # list; only the sub-sessions continue through the pipeline.
        ok, png_buf = cv2.imencode(".png", first_cropped)
        await update_session_db(
            session_id,
            cropped_png=png_buf.tobytes() if ok else b"",
            crop_result=crop_info,
            current_step=5,
            status='split',
        )

        logger.info(
            "OCR Pipeline: crop session %s: multi-page split into %d pages in %.2fs",
            session_id, len(page_splits), duration,
        )

        await _append_pipeline_log(session_id, "crop", {
            "multi_page": True,
            "page_count": len(page_splits),
        }, duration_ms=int(duration * 1000))

        h, w = first_cropped.shape[:2]
        return {
            "session_id": session_id,
            **crop_info,
            "image_width": w,
            "image_height": h,
            "cropped_image_url": f"/api/v1/ocr-pipeline/sessions/{session_id}/image/cropped",
            "sub_sessions": sub_sessions,
        }

    # --- Single page (normal) ---
    cropped_bgr, crop_info = detect_and_crop_page(img_bgr)

    duration = time.time() - t0
    crop_info["duration_seconds"] = round(duration, 2)
    crop_info["multi_page"] = False

    # Encode cropped image
    success, png_buf = cv2.imencode(".png", cropped_bgr)
    cropped_png = png_buf.tobytes() if success else b""

    # Update cache
    cached["cropped_bgr"] = cropped_bgr
    cached["crop_result"] = crop_info

    # Persist to DB
    await update_session_db(
        session_id,
        cropped_png=cropped_png,
        crop_result=crop_info,
        current_step=5,
    )

    logger.info(
        "OCR Pipeline: crop session %s: applied=%s format=%s in %.2fs",
        session_id, crop_info["crop_applied"],
        crop_info.get("detected_format", "?"),
        duration,
    )

    await _append_pipeline_log(session_id, "crop", {
        "crop_applied": crop_info["crop_applied"],
        "detected_format": crop_info.get("detected_format"),
        "format_confidence": crop_info.get("format_confidence"),
    }, duration_ms=int(duration * 1000))

    h, w = cropped_bgr.shape[:2]
    return {
        "session_id": session_id,
        **crop_info,
        "image_width": w,
        "image_height": h,
        "cropped_image_url": f"/api/v1/ocr-pipeline/sessions/{session_id}/image/cropped",
    }
|
||||
|
||||
|
||||
async def _create_page_sub_sessions(
    parent_session_id: str,
    parent_cached: dict,
    full_img_bgr: np.ndarray,
    page_splits: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    """Create one sub-session per detected page of a multi-page spread.

    Each page region is cropped on its own and stored as a new session whose
    cropped image is ready for the remaining pipeline steps. Calling this
    again is safe: existing sub-sessions are reused instead of recreated.
    """
    # Idempotency: if sub-sessions already exist, report them unchanged.
    existing = await get_sub_sessions(parent_session_id)
    if existing:
        return [
            {"id": s["id"], "name": s["name"], "page_index": s.get("box_index", i)}
            for i, s in enumerate(existing)
        ]

    parent_name = parent_cached.get("name", "Scan")
    parent_filename = parent_cached.get("filename", "scan.png")

    created: List[Dict[str, Any]] = []

    for page in page_splits:
        idx = page["page_index"]
        x0, y0 = page["x"], page["y"]
        region_w, region_h = page["width"], page["height"]

        # Cut the page out of the spread, then strip its own borders.
        region = full_img_bgr[y0:y0 + region_h, x0:x0 + region_w].copy()
        cropped_page, page_crop_info = detect_and_crop_page(region)

        encoded, buf = cv2.imencode(".png", cropped_page)
        page_png = buf.tobytes() if encoded else b""

        sub_id = str(uuid_mod.uuid4())
        sub_name = f"{parent_name} — Seite {idx + 1}"

        await create_session_db(
            session_id=sub_id,
            name=sub_name,
            filename=parent_filename,
            original_png=page_png,
        )

        # The page is already cropped, so seed the crop step's output too.
        await update_session_db(
            sub_id,
            cropped_png=page_png,
            crop_result=page_crop_info,
            current_step=5,
        )

        ch, cw = cropped_page.shape[:2]
        created.append({
            "id": sub_id,
            "name": sub_name,
            "page_index": idx,
            "source_rect": page,
            "cropped_size": {"width": cw, "height": ch},
            "detected_format": page_crop_info.get("detected_format"),
        })

        logger.info(
            "Page sub-session %s: page %d, region x=%d w=%d -> cropped %dx%d",
            sub_id, idx + 1, x0, region_w, cw, ch,
        )

    return created
|
||||
|
||||
|
||||
async def _create_page_sub_sessions_full(
    parent_session_id: str,
    parent_cached: dict,
    full_img_bgr: np.ndarray,
    page_splits: List[Dict[str, Any]],
    start_step: int = 2,
) -> List[Dict[str, Any]]:
    """Create sub-sessions for each page with RAW regions for full pipeline processing.

    Unlike ``_create_page_sub_sessions`` (used by the crop step), these
    sub-sessions store the *uncropped* page region and start at
    ``start_step`` (default 2 = ready for deskew; 1 if orientation still
    needed). Each page goes through its own pipeline independently,
    which is essential for book spreads where each page has a different tilt.

    Returns:
        One summary dict per created (or reused) sub-session.
    """
    # Idempotent: reuse existing sub-sessions
    existing = await get_sub_sessions(parent_session_id)
    if existing:
        return [
            {"id": s["id"], "name": s["name"], "page_index": s.get("box_index", i)}
            for i, s in enumerate(existing)
        ]

    parent_name = parent_cached.get("name", "Scan")
    parent_filename = parent_cached.get("filename", "scan.png")

    sub_sessions: List[Dict[str, Any]] = []

    for page in page_splits:
        pi = page["page_index"]
        px, py = page["x"], page["y"]
        pw, ph = page["width"], page["height"]

        # Extract RAW page region — NO individual cropping here; each
        # sub-session will run its own crop step after deskew + dewarp.
        page_bgr = full_img_bgr[py:py + ph, px:px + pw].copy()

        # Encode as PNG (empty bytes on encoder failure, matching the
        # best-effort convention used elsewhere in this module)
        ok, png_buf = cv2.imencode(".png", page_bgr)
        page_png = png_buf.tobytes() if ok else b""

        sub_id = str(uuid_mod.uuid4())
        sub_name = f"{parent_name} — Seite {pi + 1}"

        await create_session_db(
            session_id=sub_id,
            name=sub_name,
            filename=parent_filename,
            original_png=page_png,
        )

        # start_step=2 → ready for deskew (orientation already done on spread)
        # start_step=1 → needs its own orientation (split from original image)
        await update_session_db(sub_id, current_step=start_step)

        # Cache the BGR so the pipeline can start immediately
        # (same key layout as the entries built by _ensure_cached)
        _cache[sub_id] = {
            "id": sub_id,
            "filename": parent_filename,
            "name": sub_name,
            "original_bgr": page_bgr,
            "oriented_bgr": None,
            "cropped_bgr": None,
            "deskewed_bgr": None,
            "dewarped_bgr": None,
            "orientation_result": None,
            "crop_result": None,
            "deskew_result": None,
            "dewarp_result": None,
            "ground_truth": {},
            "current_step": start_step,
        }

        rh, rw = page_bgr.shape[:2]
        sub_sessions.append({
            "id": sub_id,
            "name": sub_name,
            "page_index": pi,
            "source_rect": page,
            "image_size": {"width": rw, "height": rh},
        })

        logger.info(
            "Page sub-session %s (full pipeline): page %d, region x=%d w=%d → %dx%d",
            sub_id, pi + 1, px, pw, rw, rh,
        )

    return sub_sessions
|
||||
|
||||
|
||||
class ManualCropRequest(BaseModel):
    """Crop rectangle for the manual-crop endpoint.

    All four values are percentages (0-100) of the source image dimensions,
    so the same rectangle works regardless of image resolution.
    """

    x: float  # percentage 0-100
    y: float  # percentage 0-100
    width: float  # percentage 0-100
    height: float  # percentage 0-100
|
||||
|
||||
|
||||
@router.post("/sessions/{session_id}/crop/manual")
async def manual_crop(session_id: str, req: ManualCropRequest):
    """Crop the current image to a user-supplied rectangle (percent coordinates)."""
    cached = await _ensure_cached(session_id)

    img_bgr = next(
        (v for k in ("dewarped_bgr", "oriented_bgr", "original_bgr")
         if (v := cached.get(k)) is not None),
        None,
    )
    if img_bgr is None:
        raise HTTPException(status_code=400, detail="No image available for cropping")

    h, w = img_bgr.shape[:2]

    # Percentages -> pixels, each value clamped into the image bounds:
    # origin stays inside the image, size stays at least 1px and inside it.
    left = max(0, min(int(w * req.x / 100.0), w - 1))
    top = max(0, min(int(h * req.y / 100.0), h - 1))
    box_w = max(1, min(int(w * req.width / 100.0), w - left))
    box_h = max(1, min(int(h * req.height / 100.0), h - top))

    cropped_bgr = img_bgr[top:top + box_h, left:left + box_w].copy()

    ok, buf = cv2.imencode(".png", cropped_bgr)

    crop_result = {
        "crop_applied": True,
        "crop_rect": {"x": left, "y": top, "width": box_w, "height": box_h},
        "crop_rect_pct": {"x": round(req.x, 2), "y": round(req.y, 2),
                          "width": round(req.width, 2), "height": round(req.height, 2)},
        "original_size": {"width": w, "height": h},
        "cropped_size": {"width": box_w, "height": box_h},
        "method": "manual",
    }

    cached["cropped_bgr"] = cropped_bgr
    cached["crop_result"] = crop_result

    await update_session_db(
        session_id,
        cropped_png=buf.tobytes() if ok else b"",
        crop_result=crop_result,
        current_step=5,
    )

    out_h, out_w = cropped_bgr.shape[:2]
    return {
        "session_id": session_id,
        **crop_result,
        "image_width": out_w,
        "image_height": out_h,
        "cropped_image_url": f"/api/v1/ocr-pipeline/sessions/{session_id}/image/cropped",
    }
|
||||
|
||||
|
||||
@router.post("/sessions/{session_id}/crop/skip")
async def skip_crop(session_id: str):
    """Pass the current image through the crop step unchanged (identity crop)."""
    cached = await _ensure_cached(session_id)

    # Prefer the most-processed image available: dewarped > oriented > original.
    source = None
    for key in ("dewarped_bgr", "oriented_bgr", "original_bgr"):
        candidate = cached.get(key)
        if candidate is not None:
            source = candidate
            break
    if source is None:
        raise HTTPException(status_code=400, detail="No image available")

    h, w = source.shape[:2]

    # Re-encode the unmodified image as the crop step's output.
    ok, buf = cv2.imencode(".png", source)

    crop_result = {
        "crop_applied": False,
        "skipped": True,
        "original_size": {"width": w, "height": h},
        "cropped_size": {"width": w, "height": h},
    }

    cached["cropped_bgr"] = source
    cached["crop_result"] = crop_result

    await update_session_db(
        session_id,
        cropped_png=buf.tobytes() if ok else b"",
        crop_result=crop_result,
        current_step=5,
    )

    return {
        "session_id": session_id,
        **crop_result,
        "image_width": w,
        "image_height": h,
        "cropped_image_url": f"/api/v1/ocr-pipeline/sessions/{session_id}/image/cropped",
    }
|
||||
# Barrel re-export: merge the routers from orientation_api and crop_api into
# a single router for main.py (set_cache_ref is re-exported via the import
# at the top of the file).
# NOTE(review): this rebinds `router`, discarding any routes registered on an
# earlier APIRouter in this module — confirm those legacy endpoints are meant
# to be superseded by orientation_api / crop_api.
router = APIRouter()
router.include_router(_orientation_router)
router.include_router(_crop_router)
|
||||
|
||||
Reference in New Issue
Block a user