Compare commits: 96ea23164d...main

5 commits:

| Author | SHA1 | Date |
|---|---|---|
| | 21b69e06be | |
| | 0168ab1a67 | |
| | 925f4356ce | |
| | cc4cb3bc2f | |
| | 0685fb12da | |
@@ -2,7 +2,6 @@
import { Suspense } from 'react'
import { PagePurpose } from '@/components/common/PagePurpose'
import { BoxSessionTabs } from '@/components/ocr-pipeline/BoxSessionTabs'
import { KombiStepper } from '@/components/ocr-kombi/KombiStepper'
import { SessionList } from '@/components/ocr-kombi/SessionList'
import { SessionHeader } from '@/components/ocr-kombi/SessionHeader'
@@ -27,8 +26,6 @@ function OcrKombiContent() {
    loadingSessions,
    activeCategory,
    isGroundTruth,
    subSessions,
    parentSessionId,
    steps,
    gridSaveRef,
    groupedSessions,
@@ -40,11 +37,8 @@ function OcrKombiContent() {
    deleteSession,
    renameSession,
    updateCategory,
    handleSessionChange,
    setSessionId,
    setSessionName,
    setSubSessions,
    setParentSessionId,
    setIsGroundTruth,
  } = useKombiPipeline()

@@ -75,17 +69,11 @@ function OcrKombiContent() {
        <StepPageSplit
          sessionId={sessionId}
          sessionName={sessionName}
          onNext={() => {
            // If sub-sessions were created, switch to the first one
            if (subSessions.length > 0) {
              setSessionId(subSessions[0].id)
              setSessionName(subSessions[0].name)
            }
            handleNext()
          }}
          onSubSessionsCreated={(subs) => {
            setSubSessions(subs)
            if (sessionId) setParentSessionId(sessionId)
          onNext={handleNext}
          onSplitComplete={(childId, childName) => {
            // Switch to the first child session and refresh the list
            setSessionId(childId)
            setSessionName(childName)
            loadSessions()
          }}
        />
@@ -161,15 +149,6 @@ function OcrKombiContent() {
        onStepClick={handleStepClick}
      />

      {subSessions.length > 0 && parentSessionId && sessionId && (
        <BoxSessionTabs
          parentSessionId={parentSessionId}
          subSessions={subSessions}
          activeSessionId={sessionId}
          onSessionChange={handleSessionChange}
        />
      )}

      <div className="min-h-[400px]">{renderStep()}</div>
    </div>
  )

@@ -8,7 +8,6 @@ export { DOCUMENT_CATEGORIES } from '../ocr-pipeline/types'
export type {
  SessionListItem,
  SessionInfo,
  SubSession,
  OrientationResult,
  CropResult,
  DeskewResult,

@@ -4,7 +4,7 @@ import { useCallback, useEffect, useState, useRef } from 'react'
import { useSearchParams } from 'next/navigation'
import type { PipelineStep, DocumentCategory } from './types'
import { KOMBI_V2_STEPS, dbStepToKombiV2Ui } from './types'
import type { SubSession, SessionListItem } from '../ocr-pipeline/types'
import type { SessionListItem } from '../ocr-pipeline/types'

export type { SessionListItem }

@@ -33,8 +33,6 @@ export function useKombiPipeline() {
  const [loadingSessions, setLoadingSessions] = useState(true)
  const [activeCategory, setActiveCategory] = useState<DocumentCategory | undefined>(undefined)
  const [isGroundTruth, setIsGroundTruth] = useState(false)
  const [subSessions, setSubSessions] = useState<SubSession[]>([])
  const [parentSessionId, setParentSessionId] = useState<string | null>(null)
  const [steps, setSteps] = useState<PipelineStep[]>(initSteps())

  const searchParams = useSearchParams()
@@ -115,7 +113,7 @@ export function useKombiPipeline() {

  // ---- Open session ----

  const openSession = useCallback(async (sid: string, keepSubSessions?: boolean) => {
  const openSession = useCallback(async (sid: string) => {
    try {
      const res = await fetch(`${KLAUSUR_API}/api/v1/ocr-pipeline/sessions/${sid}`)
      if (!res.ok) return
@@ -126,17 +124,6 @@ export function useKombiPipeline() {
      setActiveCategory(data.document_category || undefined)
      setIsGroundTruth(!!data.ground_truth?.build_grid_reference)

      // Sub-session handling
      if (data.sub_sessions?.length > 0) {
        setSubSessions(data.sub_sessions)
        setParentSessionId(sid)
      } else if (data.parent_session_id) {
        setParentSessionId(data.parent_session_id)
      } else if (!keepSubSessions) {
        setSubSessions([])
        setParentSessionId(null)
      }

      // Determine UI step from DB state
      const dbStep = data.current_step || 1
      const hasGrid = !!data.grid_editor_result
@@ -159,22 +146,10 @@ export function useKombiPipeline() {
        uiStep = 1
      }

      const skipIds: string[] = []
      const isSubSession = !!data.parent_session_id
      if (isSubSession && dbStep >= 5) {
        skipIds.push('upload', 'orientation', 'page-split', 'deskew', 'dewarp', 'content-crop')
        if (uiStep < 6) uiStep = 6
      } else if (isSubSession && dbStep >= 2) {
        skipIds.push('upload', 'orientation')
        if (uiStep < 2) uiStep = 2
      }

      setSteps(
        KOMBI_V2_STEPS.map((s, i) => ({
          ...s,
          status: skipIds.includes(s.id)
            ? 'skipped'
            : i < uiStep ? 'completed' : i === uiStep ? 'active' : 'pending',
          status: i < uiStep ? 'completed' : i === uiStep ? 'active' : 'pending',
        })),
      )
      setCurrentStep(uiStep)
@@ -226,8 +201,6 @@ export function useKombiPipeline() {
      setSteps(initSteps())
      setCurrentStep(0)
      setSessionId(null)
      setSubSessions([])
      setParentSessionId(null)
      loadSessions()
      return
    }
@@ -249,8 +222,6 @@ export function useKombiPipeline() {
    setSessionId(null)
    setSessionName('')
    setCurrentStep(0)
    setSubSessions([])
    setParentSessionId(null)
    setSteps(initSteps())
  }, [])

@@ -292,40 +263,6 @@ export function useKombiPipeline() {
    }
  }, [sessionId])

  // ---- Orientation completion (checks for page-split sub-sessions) ----

  const handleOrientationComplete = useCallback(async (sid: string) => {
    setSessionId(sid)
    loadSessions()

    try {
      const res = await fetch(`${KLAUSUR_API}/api/v1/ocr-pipeline/sessions/${sid}`)
      if (res.ok) {
        const data = await res.json()
        if (data.sub_sessions?.length > 0) {
          const subs: SubSession[] = data.sub_sessions.map((s: SubSession) => ({
            id: s.id,
            name: s.name,
            box_index: s.box_index,
            current_step: s.current_step,
          }))
          setSubSessions(subs)
          setParentSessionId(sid)
          openSession(subs[0].id, true)
          return
        }
      }
    } catch (e) {
      console.error('Failed to check for sub-sessions:', e)
    }

    handleNext()
  }, [loadSessions, openSession, handleNext])

  const handleSessionChange = useCallback((newSessionId: string) => {
    openSession(newSessionId, true)
  }, [openSession])

  return {
    // State
    currentStep,
@@ -335,8 +272,6 @@ export function useKombiPipeline() {
    loadingSessions,
    activeCategory,
    isGroundTruth,
    subSessions,
    parentSessionId,
    steps,
    gridSaveRef,
    // Computed
@@ -351,11 +286,7 @@ export function useKombiPipeline() {
    deleteSession,
    renameSession,
    updateCategory,
    handleOrientationComplete,
    handleSessionChange,
    setSessionId,
    setSubSessions,
    setParentSessionId,
    setSessionName,
    setIsGroundTruth,
  }

@@ -1,8 +1,6 @@
'use client'

import { useState, useEffect, useRef } from 'react'
import type { SubSession } from '@/app/(admin)/ai/ocr-pipeline/types'

const KLAUSUR_API = '/klausur-api'

interface PageSplitResult {
@@ -18,10 +16,10 @@ interface StepPageSplitProps {
  sessionId: string | null
  sessionName: string
  onNext: () => void
  onSubSessionsCreated: (subs: SubSession[]) => void
  onSplitComplete: (firstChildId: string, firstChildName: string) => void
}

export function StepPageSplit({ sessionId, sessionName, onNext, onSubSessionsCreated }: StepPageSplitProps) {
export function StepPageSplit({ sessionId, sessionName, onNext, onSplitComplete }: StepPageSplitProps) {
  const [detecting, setDetecting] = useState(false)
  const [splitResult, setSplitResult] = useState<PageSplitResult | null>(null)
  const [error, setError] = useState('')
@@ -40,30 +38,33 @@ export function StepPageSplit({ sessionId, sessionName, onNext, onSubSessionsCre
    setDetecting(true)
    setError('')
    try {
      // First check if sub-sessions already exist
      // First check if this session was already split (status='split')
      const sessionRes = await fetch(`${KLAUSUR_API}/api/v1/ocr-pipeline/sessions/${sessionId}`)
      if (sessionRes.ok) {
        const sessionData = await sessionRes.json()
        if (sessionData.sub_sessions?.length > 0) {
          // Already split — show existing sub-sessions
          const subs = sessionData.sub_sessions as { id: string; name: string; page_index?: number; box_index?: number; current_step?: number }[]
          setSplitResult({
            multi_page: true,
            page_count: subs.length,
            sub_sessions: subs.map((s: { id: string; name: string; page_index?: number; box_index?: number }) => ({
              id: s.id,
              name: s.name,
              page_index: s.page_index ?? s.box_index ?? 0,
            })),
          })
          onSubSessionsCreated(subs.map((s: { id: string; name: string; page_index?: number; box_index?: number; current_step?: number }) => ({
            id: s.id,
            name: s.name,
            box_index: s.page_index ?? s.box_index ?? 0,
            current_step: s.current_step ?? 2,
          })))
          setDetecting(false)
          return
        if (sessionData.status === 'split' && sessionData.crop_result?.multi_page) {
          // Already split — find the child sessions in the session list
          const listRes = await fetch(`${KLAUSUR_API}/api/v1/ocr-pipeline/sessions`)
          if (listRes.ok) {
            const listData = await listRes.json()
            // Child sessions have names like "ParentName — Seite N"
            const baseName = sessionName || sessionData.name || ''
            const children = (listData.sessions || [])
              .filter((s: { name?: string }) => s.name?.startsWith(baseName + ' — '))
              .sort((a: { name: string }, b: { name: string }) => a.name.localeCompare(b.name))
            if (children.length > 0) {
              setSplitResult({
                multi_page: true,
                page_count: children.length,
                sub_sessions: children.map((s: { id: string; name: string }, i: number) => ({
                  id: s.id, name: s.name, page_index: i,
                })),
              })
              onSplitComplete(children[0].id, children[0].name)
              setDetecting(false)
              return
            }
          }
        }
      }
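The already-split branch above recovers child sessions purely from the naming convention. A minimal sketch of that discovery heuristic, written in Python for consistency with the backend examples later on this page (session names and IDs are hypothetical):

```python
# Children of "Scan A" follow the "Parent — Seite N" naming convention.
sessions = [
    {"id": "p1", "name": "Scan A"},
    {"id": "c2", "name": "Scan A — Seite 2"},
    {"id": "c1", "name": "Scan A — Seite 1"},
]

base_name = "Scan A"
children = sorted(
    (s for s in sessions if s["name"].startswith(base_name + " — ")),
    key=lambda s: s["name"],  # plain lexicographic order, like the localeCompare above
)
print([c["id"] for c in children])  # ['c1', 'c2']
```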

@@ -92,12 +93,8 @@ export function StepPageSplit({ sessionId, sessionName, onNext, onSubSessionsCre
          sub.name = newName
        }

        onSubSessionsCreated(data.sub_sessions.map(s => ({
          id: s.id,
          name: s.name,
          box_index: s.page_index,
          current_step: 2,
        })))
        // Signal parent to switch to the first child session
        onSplitComplete(data.sub_sessions[0].id, data.sub_sessions[0].name)
      }
    } catch (e) {
      setError(e instanceof Error ? e.message : String(e))

@@ -55,6 +55,9 @@ _STOP_WORDS = frozenset([
_hyph_de = None
_hyph_en = None

# Cached spellchecker (for autocorrect_pipe_artifacts)
_spell_de = None


def _get_hyphenators():
    """Lazy-load pyphen hyphenators (cached across calls)."""
@@ -70,6 +73,35 @@ def _get_hyphenators():
    return _hyph_de, _hyph_en


def _get_spellchecker():
    """Lazy-load German spellchecker (cached across calls)."""
    global _spell_de
    if _spell_de is not None:
        return _spell_de
    try:
        from spellchecker import SpellChecker
    except ImportError:
        return None
    _spell_de = SpellChecker(language='de')
    return _spell_de


def _is_known_word(word: str, hyph_de, hyph_en) -> bool:
    """Check whether pyphen recognises a word (DE or EN)."""
    if len(word) < 2:
        return False
    return ('|' in hyph_de.inserted(word, hyphen='|')
            or '|' in hyph_en.inserted(word, hyphen='|'))


def _is_real_word(word: str) -> bool:
    """Check whether spellchecker knows this word (case-insensitive)."""
    spell = _get_spellchecker()
    if spell is None:
        return False
    return word.lower() in spell


def _hyphenate_word(word: str, hyph_de, hyph_en) -> Optional[str]:
    """Try to hyphenate a word using DE then EN dictionary.

@@ -84,6 +116,139 @@ def _hyphenate_word(word: str, hyph_de, hyph_en) -> Optional[str]:
    return None


def _autocorrect_piped_word(word_with_pipes: str) -> Optional[str]:
    """Try to correct a word that has OCR pipe artifacts.

    Printed syllable divider lines on dictionary pages confuse OCR:
    the vertical stroke is often read as an extra character (commonly
    ``l``, ``I``, ``1``, ``i``) adjacent to where the pipe appears.
    Sometimes OCR reads one divider as ``|`` and another as a letter,
    so the garbled character may be far from any detected pipe.

    Uses ``spellchecker`` (frequency-based word list) for validation —
    unlike pyphen which is a pattern-based hyphenator and accepts
    nonsense strings like "Zeplpelin".

    Strategy:
    1. Strip ``|`` — if spellchecker knows the result, done.
    2. Try deleting each pipe-like character (l, I, 1, i, t).
       OCR inserts extra chars that resemble vertical strokes.
    3. Fall back to spellchecker's own ``correction()`` method.
    4. Preserve the original casing of the first letter.
    """
    stripped = word_with_pipes.replace('|', '')
    if not stripped or len(stripped) < 3:
        return stripped  # too short to validate

    # Step 1: if the stripped word is already a real word, done
    if _is_real_word(stripped):
        return stripped

    # Step 2: try deleting pipe-like characters (most likely artifacts)
    _PIPE_LIKE = frozenset('lI1it')
    for idx in range(len(stripped)):
        if stripped[idx] not in _PIPE_LIKE:
            continue
        candidate = stripped[:idx] + stripped[idx + 1:]
        if len(candidate) >= 3 and _is_real_word(candidate):
            return candidate

    # Step 3: use spellchecker's built-in correction
    spell = _get_spellchecker()
    if spell is not None:
        suggestion = spell.correction(stripped.lower())
        if suggestion and suggestion != stripped.lower():
            # Preserve original first-letter case
            if stripped[0].isupper():
                suggestion = suggestion[0].upper() + suggestion[1:]
            return suggestion

    return None  # could not fix
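For illustration, here is how the strategy plays out on the two examples from the docstring (a sketch; actual output depends on the installed pyspellchecker German word list):

```python
# Illustrative driver for _autocorrect_piped_word (results assume the
# pyspellchecker German dictionary contains "Zelle" and "Zeppelin").
for raw in ["Zel|le", "Ze|plpe|lin"]:
    print(raw, "->", _autocorrect_piped_word(raw))
# "Zel|le"      -> "Zelle"     (step 1: stripping '|' already yields a real word)
# "Ze|plpe|lin" -> "Zeppelin"  (step 2: deleting the pipe-like extra 'l' fixes it)
```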


def autocorrect_pipe_artifacts(
    zones_data: List[Dict], session_id: str,
) -> int:
    """Strip OCR pipe artifacts and correct garbled words in-place.

    Printed syllable divider lines on dictionary scans are read by OCR
    as ``|`` characters embedded in words (e.g. ``Zel|le``, ``Ze|plpe|lin``).
    This function:

    1. Strips ``|`` from every word in content cells.
    2. Validates with spellchecker (real dictionary lookup).
    3. If not recognised, tries deleting pipe-like characters or uses
       spellchecker's correction (e.g. ``Zeplpelin`` → ``Zeppelin``).
    4. Updates both word-box texts and cell text.

    Returns the number of cells modified.
    """
    spell = _get_spellchecker()
    if spell is None:
        logger.warning("spellchecker not available — pipe autocorrect limited")
        # Fall back: still strip pipes even without spellchecker
        pass

    modified = 0
    for z in zones_data:
        for cell in z.get("cells", []):
            ct = cell.get("col_type", "")
            if not ct.startswith("column_"):
                continue

            cell_changed = False

            # --- Fix word boxes ---
            for wb in cell.get("word_boxes", []):
                wb_text = wb.get("text", "")
                if "|" not in wb_text:
                    continue

                # Separate trailing punctuation
                m = re.match(
                    r'^([^a-zA-ZäöüÄÖÜßẞ]*)'
                    r'(.*?)'
                    r'([^a-zA-ZäöüÄÖÜßẞ]*)$',
                    wb_text,
                )
                if not m:
                    continue
                lead, core, trail = m.group(1), m.group(2), m.group(3)
                if "|" not in core:
                    continue

                corrected = _autocorrect_piped_word(core)
                if corrected is not None and corrected != core:
                    wb["text"] = lead + corrected + trail
                    cell_changed = True

            # --- Rebuild cell text from word boxes ---
            if cell_changed:
                wbs = cell.get("word_boxes", [])
                if wbs:
                    cell["text"] = " ".join(
                        (wb.get("text") or "") for wb in wbs
                    )
                modified += 1

            # --- Fallback: strip residual | from cell text ---
            # (covers cases where word_boxes don't exist or weren't fixed)
            text = cell.get("text", "")
            if "|" in text:
                clean = text.replace("|", "")
                if clean != text:
                    cell["text"] = clean
                    if not cell_changed:
                        modified += 1

    if modified:
        logger.info(
            "build-grid session %s: autocorrected pipe artifacts in %d cells",
            session_id, modified,
        )
    return modified
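A minimal end-to-end sketch of the in-place contract (the zone/cell shape here is reduced to the keys the function reads; real payloads carry more fields):

```python
# Hypothetical toy payload: one zone, one content cell, one piped word box.
zones = [{
    "cells": [{
        "col_type": "column_1",           # only content columns are touched
        "text": "Zel|le",
        "word_boxes": [{"text": "Zel|le", "conf": 90}],
    }],
}]

n = autocorrect_pipe_artifacts(zones, session_id="demo")
# Afterwards the word box and the rebuilt cell text both read "Zelle",
# and n == 1 (assuming the spellchecker knows "Zelle").
```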


def _try_merge_pipe_gaps(text: str, hyph_de) -> str:
    """Merge fragments separated by single spaces where OCR split at a pipe.

@@ -185,7 +350,7 @@ def merge_word_gaps_in_zones(zones_data: List[Dict], session_id: str) -> int:


def _try_merge_word_gaps(text: str, hyph_de) -> str:
    """Merge OCR word fragments with relaxed threshold (max_short=6).
    """Merge OCR word fragments with relaxed threshold (max_short=5).

    Similar to ``_try_merge_pipe_gaps`` but allows slightly longer fragments
    (max_short=5 instead of 3). Still requires pyphen to recognize the
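The core idea behind these gap-merging helpers is join-then-validate: concatenate a short fragment with its neighbour and keep the result only if pyphen can hyphenate it. A standalone sketch, not the repo's exact implementation (`max_short` mirrors the threshold named in the docstring):

```python
import pyphen

hyph_de = pyphen.Pyphen(lang="de_DE")

def merge_if_plausible(frag1: str, frag2: str, max_short: int = 5):
    """Return frag1+frag2 if at least one piece is short and pyphen
    treats the joined string as a hyphenatable (plausible) word."""
    if min(len(frag1), len(frag2)) > max_short:
        return None  # both pieces too long to be OCR fragments
    merged = frag1 + frag2
    # pyphen inserts '|' at syllable breaks when its patterns match
    if "|" in hyph_de.inserted(merged, hyphen="|"):
        return merged
    return None

print(merge_if_plausible("Zei", "tung"))  # Zeitung
```

Note the caveat from ``_autocorrect_piped_word`` applies here too: pyphen is pattern-based and will also accept some nonsense strings, which is why the pipe-autocorrect path validates against a real dictionary instead.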

@@ -1323,6 +1323,14 @@ async def _build_grid_core(
                and wb.get("conf", 100) < 85):
            to_remove.add(i)

    # Rule (a2): isolated non-alphanumeric symbols (graphic OCR artifacts)
    # Small images/icons next to words get OCR'd as ">", "<", "~", etc.
    # Remove word boxes that contain NO letters or digits.
    for i, wb in enumerate(wbs):
        t = (wb.get("text") or "").strip()
        if t and not re.search(r'[a-zA-Z0-9äöüÄÖÜß]', t) and len(t) <= 2:
            to_remove.add(i)

    # Rule (b) + (c): overlap and duplicate detection
    # Sort by x for pairwise comparison
    _ALPHA_WORD_RE = re.compile(r'^[A-Za-z\u00c0-\u024f\-]+[.,;:!?]*$')
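Rule (a2) is self-contained enough to test in isolation; a quick sketch of the predicate it applies (hypothetical helper name):

```python
import re

def is_symbol_artifact(t: str) -> bool:
    """True for 1-2 character tokens with no letters or digits."""
    t = t.strip()
    return bool(t) and not re.search(r'[a-zA-Z0-9äöüÄÖÜß]', t) and len(t) <= 2

for tok in [">", "<", "~", "a)", "12"]:
    print(tok, is_symbol_artifact(tok))
# ">", "<", "~" are flagged; "a)" and "12" contain alphanumerics and survive.
```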
@@ -1353,6 +1361,19 @@ async def _build_grid_core(
            to_merge.append((i1, i2))
            continue

        # High overlap (>75%) with different alphabetic text:
        # OCR merge can expand a prefix box (e.g. "zer" w=42 → w=104)
        # causing it to heavily overlap with the next fragment ("brech").
        # Merge instead of removing when one is a short prefix (≤4 chars)
        # and the texts are different.
        if (overlap_pct > 0.75
                and _ALPHA_WORD_RE.match(t1)
                and _ALPHA_WORD_RE.match(t2)
                and t1.rstrip(".,;:!?").lower() != t2.rstrip(".,;:!?").lower()
                and min(len(t1.rstrip(".,;:!?")), len(t2.rstrip(".,;:!?"))) <= 4):
            to_merge.append((i1, i2))
            continue

        if overlap_pct <= 0.40:
            continue  # too little overlap and not alphabetic merge

@@ -1393,15 +1414,22 @@ async def _build_grid_core(
        c2 = w2.get("conf", 50)
        to_remove.add(i1 if c1 <= c2 else i2)

    # Execute merges first (syllable-split words)
    # Execute merges first (syllable-split words).
    # Use merge_parent to support chain merging: if "zer" absorbed
    # "brech" and then "brech"+"lich" is a merge pair, redirect to
    # merge "lich" into "zer" → "zerbrechlich".
    if to_merge:
        merged_indices: set = set()
        merge_parent: Dict[int, int] = {}  # absorbed → absorber
        for mi1, mi2 in to_merge:
            if mi1 in to_remove or mi2 in to_remove:
                continue  # don't merge if one is being removed
            if mi1 in merged_indices or mi2 in merged_indices:
                continue  # already merged
            mw1, mw2 = wbs[mi1], wbs[mi2]
            # Follow chain: if mi1 was absorbed, find root absorber
            actual_mi1 = mi1
            while actual_mi1 in merge_parent:
                actual_mi1 = merge_parent[actual_mi1]
            if actual_mi1 in to_remove or mi2 in to_remove:
                continue
            if mi2 in merge_parent:
                continue  # mi2 already absorbed
            mw1, mw2 = wbs[actual_mi1], wbs[mi2]
            # Concatenate text (no space — they're parts of one word)
            mt1 = (mw1.get("text") or "").rstrip(".,;:!?")
            mt2 = (mw2.get("text") or "").strip()
@@ -1419,9 +1447,8 @@ async def _build_grid_core(
            mw1["width"] = mr - mx
            mw1["height"] = mb - my
            mw1["conf"] = (mw1.get("conf", 50) + mw2.get("conf", 50)) / 2
            to_remove.add(mi2)  # remove the second one
            merged_indices.add(mi1)
            merged_indices.add(mi2)
            to_remove.add(mi2)
            merge_parent[mi2] = actual_mi1
            bullet_removed -= 1  # net: merge, not removal

    if to_remove:
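The chain-merge bookkeeping can be seen in isolation using the comment's own example; a standalone sketch of the `merge_parent` redirection (toy word boxes, no geometry):

```python
# "zer" absorbs "brech"; the later pair (1, 2) is redirected through
# merge_parent so "lich" lands in the root box, giving "zerbrechlich".
wbs = [{"text": "zer"}, {"text": "brech"}, {"text": "lich"}]
to_merge = [(0, 1), (1, 2)]
merge_parent = {}  # absorbed index -> absorber index

for i1, i2 in to_merge:
    root = i1
    while root in merge_parent:  # follow the chain to the surviving box
        root = merge_parent[root]
    if i2 in merge_parent:
        continue  # i2 was already absorbed elsewhere
    wbs[root]["text"] += wbs[i2]["text"]
    merge_parent[i2] = root

print(wbs[0]["text"])  # zerbrechlich
```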
@@ -1600,6 +1627,15 @@ async def _build_grid_core(
    except Exception as e:
        logger.warning("Word-gap merge failed: %s", e)

    # --- Pipe auto-correction: fix OCR artifacts from printed syllable dividers ---
    # Strips | from words, validates with spellchecker, tries char-deletion for
    # garbled words like "Ze|plpe|lin" → "Zeppelin".
    try:
        from cv_syllable_detect import autocorrect_pipe_artifacts
        autocorrect_pipe_artifacts(zones_data, session_id)
    except Exception as e:
        logger.warning("Pipe autocorrect failed: %s", e)

    # --- Syllable divider insertion for dictionary pages ---
    # syllable_mode: "auto" = only when original has pipe dividers (1% threshold),
    # "all" = force on all content words, "en" = English column only,

@@ -22,6 +22,148 @@ from cv_ocr_engines import _text_has_garbled_ipa
logger = logging.getLogger(__name__)


# ---------------------------------------------------------------------------
# Cross-column word splitting
# ---------------------------------------------------------------------------

_spell_cache: Optional[Any] = None
_spell_loaded = False


def _is_recognized_word(text: str) -> bool:
    """Check if *text* is a recognized German or English word.

    Uses the spellchecker library (same as cv_syllable_detect.py).
    Returns True for real words like "oder", "Kabel", "Zeitung".
    Returns False for OCR merge artifacts like "sichzie", "dasZimmer".
    """
    global _spell_cache, _spell_loaded
    if not text or len(text) < 2:
        return False

    if not _spell_loaded:
        _spell_loaded = True
        try:
            from spellchecker import SpellChecker
            _spell_cache = SpellChecker(language="de")
        except Exception:
            pass

    if _spell_cache is None:
        return False

    return text.lower() in _spell_cache

def _split_cross_column_words(
    words: List[Dict],
    columns: List[Dict],
) -> List[Dict]:
    """Split word boxes that span across column boundaries.

    When OCR merges adjacent words from different columns (e.g. "sichzie"
    spanning Col 1 and Col 2, or "dasZimmer" crossing the boundary),
    split the word box at the column boundary so each piece is assigned
    to the correct column.

    Only splits when:
    - The word has significant overlap (>15% of its width) on both sides
    - AND the word is not a recognized real word (OCR merge artifact), OR
      the word contains a case transition (lowercase→uppercase) near the
      boundary indicating two merged words like "dasZimmer".
    """
    if len(columns) < 2:
        return words

    # Column boundaries = midpoints between adjacent column edges
    boundaries = []
    for i in range(len(columns) - 1):
        boundary = (columns[i]["x_max"] + columns[i + 1]["x_min"]) / 2
        boundaries.append(boundary)

    new_words: List[Dict] = []
    split_count = 0
    for w in words:
        w_left = w["left"]
        w_width = w["width"]
        w_right = w_left + w_width
        text = (w.get("text") or "").strip()

        if not text or len(text) < 4 or w_width < 10:
            new_words.append(w)
            continue

        # Find the first boundary this word straddles significantly
        split_boundary = None
        for b in boundaries:
            if w_left < b < w_right:
                left_part = b - w_left
                right_part = w_right - b
                # Both sides must have at least 15% of the word width
                if left_part > w_width * 0.15 and right_part > w_width * 0.15:
                    split_boundary = b
                    break

        if split_boundary is None:
            new_words.append(w)
            continue

        # Compute approximate split position in the text.
        left_width = split_boundary - w_left
        split_ratio = left_width / w_width
        approx_pos = len(text) * split_ratio

        # Strategy 1: look for a case transition (lowercase→uppercase) near
        # the approximate split point — e.g. "dasZimmer" splits at 'Z'.
        split_char = None
        search_lo = max(1, int(approx_pos) - 3)
        search_hi = min(len(text), int(approx_pos) + 2)
        for i in range(search_lo, search_hi):
            if text[i - 1].islower() and text[i].isupper():
                split_char = i
                break

        # Strategy 2: if no case transition, only split if the whole word
        # is NOT a real word (i.e. it's an OCR merge artifact like "sichzie").
        # Real words like "oder", "Kabel", "Zeitung" must not be split.
        if split_char is None:
            clean = re.sub(r"[,;:.!?]+$", "", text)  # strip trailing punct
            if _is_recognized_word(clean):
                new_words.append(w)
                continue
            # Not a real word — use floor of proportional position
            split_char = max(1, min(len(text) - 1, int(approx_pos)))

        left_text = text[:split_char].rstrip()
        right_text = text[split_char:].lstrip()

        if len(left_text) < 2 or len(right_text) < 2:
            new_words.append(w)
            continue

        right_width = w_width - round(left_width)
        new_words.append({
            **w,
            "text": left_text,
            "width": round(left_width),
        })
        new_words.append({
            **w,
            "text": right_text,
            "left": round(split_boundary),
            "width": right_width,
        })
        split_count += 1
        logger.info(
            "split cross-column word %r → %r + %r at boundary %.0f",
            text, left_text, right_text, split_boundary,
        )

    if split_count:
        logger.info("split %d cross-column word(s)", split_count)
    return new_words
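A toy invocation (hypothetical coordinates) showing both the boundary-midpoint computation and the case-transition split:

```python
# Two columns with edges at x=100 and x=120 put the boundary at 110.
columns = [{"x_min": 0, "x_max": 100}, {"x_min": 120, "x_max": 220}]
# "dasZimmer" straddles the boundary; the 's'->'Z' case transition
# near the proportional split point decides where to cut.
words = [{"text": "dasZimmer", "left": 70, "width": 90}]

for w in _split_cross_column_words(words, columns):
    print(w["text"], w["left"], w["width"])
# das 70 40
# Zimmer 110 50
```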


def _filter_border_strip_words(words: List[Dict]) -> Tuple[List[Dict], int]:
    """Remove page-border decoration strip words BEFORE column detection.

@@ -1111,6 +1253,12 @@ def _build_zone_grid(
        "header_rows": [],
    }

    # Split word boxes that straddle column boundaries (e.g. "sichzie"
    # spanning Col 1 + Col 2). Must happen after column detection and
    # before cell assignment.
    if len(columns) >= 2:
        zone_words = _split_cross_column_words(zone_words, columns)

    # Build cells
    cells = _build_cells(zone_words, columns, rows, img_w, img_h)