Files
breakpilot-lehrer/admin-lehrer/components/ocr-overlay/useSlideWordPositions.ts
Benjamin Admin 2010cab894
Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-school (push) Successful in 36s
CI / test-go-edu-search (push) Successful in 31s
CI / test-python-klausur (push) Failing after 2m11s
CI / test-python-agent-core (push) Successful in 24s
CI / test-nodejs-website (push) Successful in 31s
fix: Slide-Modus Scale-Berechnung auf Ink-Span statt Ink-Count
totalInk zaehlte nur dunkle Pixel-Spalten (Striche), ignorierte
Luecken zwischen Buchstaben. Scale war dadurch viel zu klein,
Schrift unlesbar. Jetzt wird der Ink-Span (erstes bis letztes
dunkles Pixel) als Referenz fuer die Textbreite verwendet.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-11 16:41:38 +01:00

232 lines
8.1 KiB
TypeScript

import { useEffect, useState } from 'react'
import type { GridCell } from '@/app/(admin)/ai/ocr-overlay/types'
/**
 * Position and sizing of a single recognised token, expressed relative to
 * the full page image (not the cell).
 */
export interface WordPosition {
  // Left edge of the token, as a percentage (0–100) of the image width.
  xPct: number
  // Token width, as a percentage (0–100) of the image width.
  wPct: number
  // The token's text content (one word/symbol).
  text: string
  // Font size divided by cell height, in (0, 1]; normalised to the mode
  // ratio across all cells after positioning.
  fontRatio: number
}
/**
* Alternative positioning algorithm: "slide from left".
*
* Instead of matching text groups to pixel clusters (which can lose words),
* this algorithm takes ALL recognised words and slides them left-to-right
* across the row's dark-pixel projection until each word "locks" onto its
* ink coverage.
*
* Algorithm per cell:
* 1. Build horizontal dark-pixel projection (same as cluster approach).
* 2. Split the cell text into individual tokens (words/symbols).
* 3. Measure each token's expected pixel width (canvas measureText).
* 4. Slide a cursor from x=0 rightward. For each token, find the first
* x position where the projection has enough dark pixels under the
* token's width span (≥ coverageThreshold of the span is "inked").
* 5. Lock the token at that x, advance cursor past it + a small gap.
*
* This guarantees:
* - ALL words appear (nothing is dropped)
* - Original spacing is roughly preserved (words land on their ink)
* - Box borders/lines are naturally covered by "|" / "l" tokens
* - No complex cluster-matching or artifact-merging rules needed
*
* Returns Map<cell_id, WordPosition[]>.
*/
/**
 * React hook implementing the "slide from left" positioning algorithm
 * described in the TSDoc above.
 *
 * @param imageUrl - URL of the scanned page image (must be CORS-readable,
 *   otherwise `getImageData` throws on a tainted canvas).
 * @param cells - OCR grid cells with `bbox_pct` (percent coordinates on the
 *   unrotated image) and recognised `text`.
 * @param active - when false the hook does nothing and keeps its last result.
 * @param rotation - 0 or 180; at 180 the image is rotated in-canvas and the
 *   cell boxes are mirrored before sampling.
 * @returns Map from `cell_id` to the positioned tokens of that cell.
 */
export function useSlideWordPositions(
  imageUrl: string,
  cells: GridCell[],
  active: boolean,
  rotation: 0 | 180 = 0,
): Map<string, WordPosition[]> {
  const [result, setResult] = useState<Map<string, WordPosition[]>>(new Map())

  useEffect(() => {
    if (!active || cells.length === 0 || !imageUrl) return

    // Stale-closure guard: the image loads asynchronously. If the deps change
    // (or the component unmounts) before onload fires, the outdated
    // computation must not clobber the result for the current inputs.
    let cancelled = false

    const img = new Image()
    img.crossOrigin = 'anonymous' // needed so getImageData() does not taint the canvas
    img.onload = () => {
      if (cancelled) return

      const imgW = img.naturalWidth
      const imgH = img.naturalHeight
      const canvas = document.createElement('canvas')
      canvas.width = imgW
      canvas.height = imgH
      const ctx = canvas.getContext('2d')
      if (!ctx) return

      // Draw the page; rotate 180° in-canvas when requested so the pixel
      // projection below is sampled in display orientation.
      if (rotation === 180) {
        ctx.translate(imgW, imgH)
        ctx.rotate(Math.PI)
        ctx.drawImage(img, 0, 0)
        ctx.setTransform(1, 0, 0, 1, 0, 0)
      } else {
        ctx.drawImage(img, 0, 0)
      }

      // Reference font for measureText(); only ratios of measured widths are
      // used downstream, so the absolute size is arbitrary.
      const refFontSize = 40
      const fontFam = "'Liberation Sans', Arial, sans-serif"
      ctx.font = `${refFontSize}px ${fontFam}`

      const positions = new Map<string, WordPosition[]>()

      for (const cell of cells) {
        if (!cell.bbox_pct || !cell.text) continue

        // --- Cell rectangle in image pixels ---
        let cx: number, cy: number
        const cw = Math.round(cell.bbox_pct.w / 100 * imgW)
        const ch = Math.round(cell.bbox_pct.h / 100 * imgH)
        if (rotation === 180) {
          // bbox percentages refer to the unrotated image; mirror them into
          // the rotated canvas coordinate system.
          cx = Math.round((100 - cell.bbox_pct.x - cell.bbox_pct.w) / 100 * imgW)
          cy = Math.round((100 - cell.bbox_pct.y - cell.bbox_pct.h) / 100 * imgH)
        } else {
          cx = Math.round(cell.bbox_pct.x / 100 * imgW)
          cy = Math.round(cell.bbox_pct.y / 100 * imgH)
        }
        if (cw <= 0 || ch <= 0) continue
        if (cx < 0) cx = 0
        if (cy < 0) cy = 0
        if (cx + cw > imgW || cy + ch > imgH) continue

        // --- Horizontal dark-pixel projection ---
        // proj[x] = number of dark pixels (Rec. 601 luminance < 128) in column x.
        const imageData = ctx.getImageData(cx, cy, cw, ch)
        const proj = new Float32Array(cw)
        for (let y = 0; y < ch; y++) {
          for (let x = 0; x < cw; x++) {
            const idx = (y * cw + x) * 4
            const lum = 0.299 * imageData.data[idx] + 0.587 * imageData.data[idx + 1] + 0.114 * imageData.data[idx + 2]
            if (lum < 128) proj[x]++
          }
        }

        // Minimum dark pixels per column to count as "inked"
        // (3% of cell height, at least 1 px — filters scanner noise).
        const threshold = Math.max(1, ch * 0.03)
        const ink = new Uint8Array(cw)
        for (let x = 0; x < cw; x++) {
          ink[x] = proj[x] >= threshold ? 1 : 0
        }
        // For 180° rotation, flip the mask back into cell-local
        // left-to-right reading order.
        if (rotation === 180) {
          ink.reverse()
        }

        // --- Split cell text into individual tokens (words/symbols) ---
        const tokens = cell.text.split(/\s+/).filter(Boolean)
        if (tokens.length === 0) continue

        // Measure each token's width at the reference font size.
        const tokenWidths = tokens.map(t => ctx.measureText(t).width)
        const spaceWidth = ctx.measureText(' ').width
        const totalTextW = tokenWidths.reduce((a, b) => a + b, 0) + (tokens.length - 1) * spaceWidth
        // Guard: zero measured width (e.g. glyphs the font cannot render)
        // would make the scale Infinity and produce unusable positions.
        if (totalTextW <= 0) continue

        // Scale factor: map measured text width → pixel width on image.
        // Use the total INK SPAN (first dark column to last dark column),
        // not the count of dark columns — characters have gaps between
        // strokes, so counting only dark columns gives a much-too-small scale.
        let firstInk = -1, lastInk = -1
        for (let x = 0; x < cw; x++) {
          if (ink[x]) {
            if (firstInk < 0) firstInk = x
            lastInk = x
          }
        }
        // Skip cells with (almost) no ink at all.
        if (firstInk < 0 || lastInk <= firstInk) continue
        const inkSpan = lastInk - firstInk + 1
        const scale = inkSpan / totalTextW

        // --- Slide each token from left to right ---
        const wordPos: WordPosition[] = []
        let cursor = 0 // current search position in cell pixels
        const minGapPx = Math.max(2, Math.round(cw * 0.005)) // minimum gap between tokens

        for (let ti = 0; ti < tokens.length; ti++) {
          const tokenW = Math.round(tokenWidths[ti] * scale)
          if (tokenW <= 0) continue

          // Find the first position from the cursor where the token has
          // enough ink coverage: at least 15% of its width span is inked.
          const coverageNeeded = Math.max(1, Math.round(tokenW * 0.15))
          let bestX = cursor
          for (let x = cursor; x <= cw - tokenW; x++) {
            let inkCount = 0
            for (let dx = 0; dx < tokenW; dx++) {
              inkCount += ink[x + dx]
            }
            if (inkCount >= coverageNeeded) {
              bestX = x
              break
            }
            // If we've scanned way past where ink should be, fall back to
            // the cursor so later tokens aren't pushed off the cell.
            if (x > cursor + cw * 0.3 && ti > 0) {
              bestX = cursor
              break
            }
          }

          // Derive the rendered font size from the token's on-image width
          // relative to its measured width; fontRatio is capped at 1 (cell height).
          const autoFontPx = refFontSize * (tokenW / tokenWidths[ti])
          const fontRatio = Math.min(autoFontPx / ch, 1.0)

          // Convert pixel position to a fraction of the cell, then to image %.
          const xInCellPct = bestX / cw
          const wInCellPct = tokenW / cw
          wordPos.push({
            xPct: cell.bbox_pct.x + xInCellPct * cell.bbox_pct.w,
            wPct: wInCellPct * cell.bbox_pct.w,
            text: tokens[ti],
            fontRatio,
          })

          // Advance cursor past this token plus the inter-token gap.
          cursor = bestX + tokenW + minGapPx
        }

        if (wordPos.length > 0) {
          positions.set(cell.cell_id, wordPos)
        }
      }

      // --- Normalise font size: snap every word to the mode fontRatio ---
      // so overlay text does not jitter in size between cells.
      const allRatios: number[] = []
      for (const wps of positions.values()) {
        for (const wp of wps) allRatios.push(wp.fontRatio)
      }
      if (allRatios.length > 0) {
        // Bucket ratios at 0.02 granularity and pick the most common bucket.
        const buckets = new Map<number, number>()
        for (const r of allRatios) {
          const key = Math.round(r * 50) / 50
          buckets.set(key, (buckets.get(key) || 0) + 1)
        }
        let modeRatio = allRatios[0]
        let modeCount = 0
        for (const [ratio, count] of buckets) {
          if (count > modeCount) { modeRatio = ratio; modeCount = count }
        }
        for (const wps of positions.values()) {
          for (const wp of wps) wp.fontRatio = modeRatio
        }
      }

      if (cancelled) return
      setResult(positions)
    }
    img.src = imageUrl

    // Cleanup: invalidate any in-flight load belonging to the previous deps.
    return () => {
      cancelled = true
    }
  }, [active, cells, imageUrl, rotation])

  return result
}