fix: rewrite Kombi merge with row-based sequence alignment
Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-school (push) Successful in 28s
CI / test-go-edu-search (push) Successful in 29s
CI / test-python-klausur (push) Failing after 1m59s
CI / test-python-agent-core (push) Successful in 18s
CI / test-nodejs-website (push) Successful in 19s
Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-school (push) Successful in 28s
CI / test-go-edu-search (push) Successful in 29s
CI / test-python-klausur (push) Failing after 1m59s
CI / test-python-agent-core (push) Successful in 18s
CI / test-nodejs-website (push) Successful in 19s
Replaces position-based word matching with row-based sequence alignment to fix doubled words and cross-line averaging in Kombi-Modus. New algorithm: 1. Group words into rows by Y-position clustering 2. Match rows between engines by vertical center proximity 3. Within each row: walk both sequences left-to-right, deduplicating 4. Unmatched rows kept as-is Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -2599,175 +2599,189 @@ async def paddle_direct(session_id: str):
|
||||
return {"session_id": session_id, **word_result}
|
||||
|
||||
|
||||
def _box_iou(a: dict, b: dict) -> float:
|
||||
"""Compute IoU between two word boxes (each has left, top, width, height)."""
|
||||
ax1, ay1 = a["left"], a["top"]
|
||||
ax2, ay2 = ax1 + a["width"], ay1 + a["height"]
|
||||
bx1, by1 = b["left"], b["top"]
|
||||
bx2, by2 = bx1 + b["width"], by1 + b["height"]
|
||||
def _group_words_into_rows(words: list, row_gap: int = 12) -> list:
|
||||
"""Group words into rows by Y-position clustering.
|
||||
|
||||
ix1, iy1 = max(ax1, bx1), max(ay1, by1)
|
||||
ix2, iy2 = min(ax2, bx2), min(ay2, by2)
|
||||
inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
|
||||
if inter == 0:
|
||||
return 0.0
|
||||
area_a = (ax2 - ax1) * (ay2 - ay1)
|
||||
area_b = (bx2 - bx1) * (by2 - by1)
|
||||
return inter / (area_a + area_b - inter) if (area_a + area_b - inter) > 0 else 0.0
|
||||
|
||||
|
||||
def _box_center_dist(a: dict, b: dict) -> float:
|
||||
"""Euclidean distance between box centers."""
|
||||
acx = a["left"] + a["width"] / 2
|
||||
acy = a["top"] + a["height"] / 2
|
||||
bcx = b["left"] + b["width"] / 2
|
||||
bcy = b["top"] + b["height"] / 2
|
||||
return ((acx - bcx) ** 2 + (acy - bcy) ** 2) ** 0.5
|
||||
|
||||
|
||||
def _text_similarity(a: str, b: str) -> float:
|
||||
"""Simple text similarity (0-1). Handles stripped punctuation."""
|
||||
if not a or not b:
|
||||
return 0.0
|
||||
a_lower = a.lower().strip()
|
||||
b_lower = b.lower().strip()
|
||||
if a_lower == b_lower:
|
||||
return 1.0
|
||||
# One might be substring of the other (e.g. "!Betonung" vs "Betonung")
|
||||
if a_lower in b_lower or b_lower in a_lower:
|
||||
return 0.8
|
||||
# Check if they share most characters
|
||||
shorter, longer = (a_lower, b_lower) if len(a_lower) <= len(b_lower) else (b_lower, a_lower)
|
||||
if len(shorter) == 0:
|
||||
return 0.0
|
||||
matches = sum(1 for c in shorter if c in longer)
|
||||
return matches / max(len(shorter), len(longer))
|
||||
|
||||
|
||||
def _words_match(pw: dict, tw: dict) -> bool:
|
||||
"""Determine if a Paddle word and a Tesseract word represent the same word.
|
||||
|
||||
Uses three criteria (any one is sufficient):
|
||||
1. IoU > 0.15 (relaxed from 0.3 — engines produce different-sized boxes)
|
||||
2. Center distance < max(word height, 20px) AND on same row (vertical overlap)
|
||||
3. Text similarity > 0.7 AND on same row
|
||||
Words whose vertical centers are within `row_gap` pixels are on the same row.
|
||||
Returns list of rows, each row is a list of words sorted left-to-right.
|
||||
"""
|
||||
iou = _box_iou(pw, tw)
|
||||
if iou > 0.15:
|
||||
return True
|
||||
if not words:
|
||||
return []
|
||||
# Sort by vertical center
|
||||
sorted_words = sorted(words, key=lambda w: w["top"] + w.get("height", 0) / 2)
|
||||
rows: list = []
|
||||
current_row: list = [sorted_words[0]]
|
||||
current_cy = sorted_words[0]["top"] + sorted_words[0].get("height", 0) / 2
|
||||
|
||||
# Same row check: vertical overlap > 50% of smaller height
|
||||
py1, py2 = pw["top"], pw["top"] + pw["height"]
|
||||
ty1, ty2 = tw["top"], tw["top"] + tw["height"]
|
||||
v_overlap = max(0, min(py2, ty2) - max(py1, ty1))
|
||||
min_h = max(min(pw["height"], tw["height"]), 1)
|
||||
same_row = v_overlap > 0.5 * min_h
|
||||
|
||||
if not same_row:
|
||||
return False
|
||||
|
||||
# Center proximity on same row
|
||||
cdist = _box_center_dist(pw, tw)
|
||||
h_threshold = max(pw["height"], tw["height"], 20)
|
||||
if cdist < h_threshold:
|
||||
return True
|
||||
|
||||
# Text similarity on same row
|
||||
if _text_similarity(pw["text"], tw["text"]) > 0.7:
|
||||
return True
|
||||
|
||||
return False
|
||||
for w in sorted_words[1:]:
|
||||
cy = w["top"] + w.get("height", 0) / 2
|
||||
if abs(cy - current_cy) <= row_gap:
|
||||
current_row.append(w)
|
||||
else:
|
||||
# Sort current row left-to-right before saving
|
||||
rows.append(sorted(current_row, key=lambda w: w["left"]))
|
||||
current_row = [w]
|
||||
current_cy = cy
|
||||
if current_row:
|
||||
rows.append(sorted(current_row, key=lambda w: w["left"]))
|
||||
return rows
|
||||
|
||||
|
||||
def _merge_paddle_tesseract(paddle_words: list, tess_words: list) -> list:
|
||||
"""Merge word boxes from PaddleOCR and Tesseract.
|
||||
def _row_center_y(row: list) -> float:
|
||||
"""Average vertical center of a row of words."""
|
||||
if not row:
|
||||
return 0.0
|
||||
return sum(w["top"] + w.get("height", 0) / 2 for w in row) / len(row)
|
||||
|
||||
Strategy:
|
||||
- For each Paddle word, find the best matching Tesseract word
|
||||
- Match criteria: IoU, center proximity, or text similarity (see _words_match)
|
||||
- Matched pairs: keep Paddle text, average coordinates weighted by confidence
|
||||
- Unmatched Paddle words: keep as-is
|
||||
- Unmatched Tesseract words (conf >= 40): add (bullet points, symbols, etc.)
|
||||
|
||||
def _merge_row_sequences(paddle_row: list, tess_row: list) -> list:
|
||||
"""Merge two word sequences from the same row using sequence alignment.
|
||||
|
||||
Both sequences are sorted left-to-right. Walk through both simultaneously:
|
||||
- If words match (same/similar text): take Paddle text with averaged coords
|
||||
- If they don't match: the extra word is unique to one engine, include it
|
||||
|
||||
This prevents duplicates because both engines produce words in the same order.
|
||||
"""
|
||||
merged = []
|
||||
used_tess: set = set()
|
||||
pi, ti = 0, 0
|
||||
|
||||
for pw in paddle_words:
|
||||
best_score, best_ti = 0.0, -1
|
||||
for ti, tw in enumerate(tess_words):
|
||||
if ti in used_tess:
|
||||
continue
|
||||
if not _words_match(pw, tw):
|
||||
continue
|
||||
# Score: IoU + text_similarity to pick best match
|
||||
score = _box_iou(pw, tw) + _text_similarity(pw["text"], tw["text"])
|
||||
if score > best_score:
|
||||
best_score, best_ti = score, ti
|
||||
while pi < len(paddle_row) and ti < len(tess_row):
|
||||
pw = paddle_row[pi]
|
||||
tw = tess_row[ti]
|
||||
|
||||
if best_ti >= 0:
|
||||
tw = tess_words[best_ti]
|
||||
used_tess.add(best_ti)
|
||||
# Check if these are the same word
|
||||
pt = pw.get("text", "").lower().strip()
|
||||
tt = tw.get("text", "").lower().strip()
|
||||
|
||||
# Same text or one contains the other
|
||||
is_same = (pt == tt) or (len(pt) > 1 and len(tt) > 1 and (pt in tt or tt in pt))
|
||||
|
||||
if is_same:
|
||||
# Matched — average coordinates weighted by confidence
|
||||
pc = pw.get("conf", 80)
|
||||
tc = tw.get("conf", 50)
|
||||
total = pc + tc
|
||||
if total == 0:
|
||||
total = 1
|
||||
merged.append({
|
||||
"text": pw["text"], # Paddle text usually better
|
||||
"text": pw["text"], # Paddle text preferred
|
||||
"left": round((pw["left"] * pc + tw["left"] * tc) / total),
|
||||
"top": round((pw["top"] * pc + tw["top"] * tc) / total),
|
||||
"width": round((pw["width"] * pc + tw["width"] * tc) / total),
|
||||
"height": round((pw["height"] * pc + tw["height"] * tc) / total),
|
||||
"conf": max(pc, tc),
|
||||
})
|
||||
pi += 1
|
||||
ti += 1
|
||||
else:
|
||||
# No Tesseract match — keep Paddle word as-is
|
||||
merged.append(pw)
|
||||
# Different text — one engine found something extra
|
||||
# Look ahead: is the current Paddle word somewhere in Tesseract ahead?
|
||||
paddle_ahead = any(
|
||||
tess_row[t].get("text", "").lower().strip() == pt
|
||||
for t in range(ti + 1, min(ti + 4, len(tess_row)))
|
||||
)
|
||||
# Is the current Tesseract word somewhere in Paddle ahead?
|
||||
tess_ahead = any(
|
||||
paddle_row[p].get("text", "").lower().strip() == tt
|
||||
for p in range(pi + 1, min(pi + 4, len(paddle_row)))
|
||||
)
|
||||
|
||||
# Add unmatched Tesseract words (bullet points, symbols, etc.)
|
||||
for ti, tw in enumerate(tess_words):
|
||||
if ti not in used_tess and tw.get("conf", 0) >= 40:
|
||||
merged.append(tw)
|
||||
|
||||
# Safety net: deduplicate any remaining near-duplicate words
|
||||
return _deduplicate_words(merged)
|
||||
|
||||
|
||||
def _deduplicate_words(words: list) -> list:
|
||||
"""Remove near-duplicate words that slipped through matching.
|
||||
|
||||
Two words are considered duplicates if:
|
||||
- Same text (case-insensitive)
|
||||
- Centers within 30px horizontally and 15px vertically
|
||||
The word with higher confidence is kept.
|
||||
"""
|
||||
if len(words) <= 1:
|
||||
return words
|
||||
keep = [True] * len(words)
|
||||
for i in range(len(words)):
|
||||
if not keep[i]:
|
||||
continue
|
||||
w1 = words[i]
|
||||
cx1 = w1["left"] + w1.get("width", 0) / 2
|
||||
cy1 = w1["top"] + w1.get("height", 0) / 2
|
||||
t1 = w1.get("text", "").lower().strip()
|
||||
for j in range(i + 1, len(words)):
|
||||
if not keep[j]:
|
||||
continue
|
||||
w2 = words[j]
|
||||
t2 = w2.get("text", "").lower().strip()
|
||||
if t1 != t2:
|
||||
continue
|
||||
cx2 = w2["left"] + w2.get("width", 0) / 2
|
||||
cy2 = w2["top"] + w2.get("height", 0) / 2
|
||||
if abs(cx1 - cx2) < 30 and abs(cy1 - cy2) < 15:
|
||||
# Drop the one with lower confidence
|
||||
if w1.get("conf", 0) >= w2.get("conf", 0):
|
||||
keep[j] = False
|
||||
if paddle_ahead and not tess_ahead:
|
||||
# Tesseract has an extra word (e.g. "!" or bullet) → include it
|
||||
if tw.get("conf", 0) >= 30:
|
||||
merged.append(tw)
|
||||
ti += 1
|
||||
elif tess_ahead and not paddle_ahead:
|
||||
# Paddle has an extra word → include it
|
||||
merged.append(pw)
|
||||
pi += 1
|
||||
else:
|
||||
# Both have unique words or neither found ahead → take leftmost first
|
||||
if pw["left"] <= tw["left"]:
|
||||
merged.append(pw)
|
||||
pi += 1
|
||||
else:
|
||||
keep[i] = False
|
||||
break # w1 is dropped, stop comparing
|
||||
return [w for w, k in zip(words, keep) if k]
|
||||
if tw.get("conf", 0) >= 30:
|
||||
merged.append(tw)
|
||||
ti += 1
|
||||
|
||||
# Remaining words from either engine
|
||||
while pi < len(paddle_row):
|
||||
merged.append(paddle_row[pi])
|
||||
pi += 1
|
||||
while ti < len(tess_row):
|
||||
tw = tess_row[ti]
|
||||
if tw.get("conf", 0) >= 30:
|
||||
merged.append(tw)
|
||||
ti += 1
|
||||
|
||||
return merged
|
||||
|
||||
|
||||
def _merge_paddle_tesseract(paddle_words: list, tess_words: list) -> list:
    """Merge word boxes from PaddleOCR and Tesseract using row-based sequence alignment.

    Strategy:
    1. Group each engine's words into rows (by Y-position clustering)
    2. Match rows between engines (by vertical center proximity)
    3. Within each matched row: merge sequences left-to-right, deduplicating
       words that appear in both engines at the same sequence position
    4. Unmatched rows from either engine: keep as-is

    This prevents:
    - Cross-line averaging (words from different lines being merged)
    - Duplicate words (same word from both engines shown twice)
    """
    # Trivial cases: one or both engines produced nothing.
    if not paddle_words and not tess_words:
        return []
    if not paddle_words:
        return [w for w in tess_words if w.get("conf", 0) >= 40]
    if not tess_words:
        return list(paddle_words)

    # Step 1: cluster each engine's output into rows.
    p_rows = _group_words_into_rows(paddle_words)
    t_rows = _group_words_into_rows(tess_words)

    # Step 2: pair each Paddle row with the closest still-unused Tesseract row.
    consumed: set = set()
    result: list = []

    for p_row in p_rows:
        p_cy = _row_center_y(p_row)
        candidates = [
            (abs(p_cy - _row_center_y(t_row)), idx)
            for idx, t_row in enumerate(t_rows)
            if idx not in consumed
        ]
        # min() over (distance, index) tuples: ties resolve to the earliest
        # index, matching a strict "<" scan over the rows in order.
        best = min(candidates, default=None)

        # Rows further apart than the tallest word in this Paddle row
        # (at least 15px) are considered different lines.
        limit = max(max((w.get("height", 20) for w in p_row), default=20), 15)

        if best is not None and best[0] <= limit:
            # Matched row — merge the two left-to-right sequences.
            _, idx = best
            consumed.add(idx)
            result.extend(_merge_row_sequences(p_row, t_rows[idx]))
        else:
            # No matching Tesseract row — keep the Paddle row untouched.
            result.extend(p_row)

    # Step 3: append Tesseract-only rows, dropping low-confidence words.
    for idx, t_row in enumerate(t_rows):
        if idx in consumed:
            continue
        result.extend(w for w in t_row if w.get("conf", 0) >= 40)

    return result
|
||||
|
||||
|
||||
@router.post("/sessions/{session_id}/paddle-kombi")
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
"""Tests for the Kombi-Modus merge algorithm.
|
||||
"""Tests for the Kombi-Modus row-based sequence merge algorithm.
|
||||
|
||||
Functions under test (ocr_pipeline_api.py):
|
||||
- _box_iou: IoU between two word boxes
|
||||
- _box_center_dist: Euclidean distance between box centers
|
||||
- _text_similarity: Simple text similarity (0-1)
|
||||
- _words_match: Multi-criteria match (IoU + center + text)
|
||||
- _merge_paddle_tesseract: Merge PaddleOCR + Tesseract word lists
|
||||
- _group_words_into_rows: Cluster words by Y-position into rows
|
||||
- _merge_row_sequences: Merge two word sequences within the same row
|
||||
- _merge_paddle_tesseract: Full merge with row matching + sequence dedup
|
||||
"""
|
||||
|
||||
import pytest
|
||||
@@ -15,11 +13,8 @@ import os
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
|
||||
|
||||
from ocr_pipeline_api import (
|
||||
_box_iou,
|
||||
_box_center_dist,
|
||||
_text_similarity,
|
||||
_words_match,
|
||||
_deduplicate_words,
|
||||
_group_words_into_rows,
|
||||
_merge_row_sequences,
|
||||
_merge_paddle_tesseract,
|
||||
)
|
||||
|
||||
@@ -41,182 +36,218 @@ def _word(text: str, left: int, top: int, width: int = 60, height: int = 20, con
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _box_iou
|
||||
# _group_words_into_rows
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestBoxIoU:
|
||||
class TestGroupWordsIntoRows:
    """Tests for Y-position clustering of words into rows."""

    def test_single_row(self):
        words = [_word("a", 10, 50), _word("b", 100, 52), _word("c", 200, 48)]
        rows = _group_words_into_rows(words)
        assert len(rows) == 1
        assert len(rows[0]) == 3
        # Sorted left-to-right
        assert rows[0][0]["text"] == "a"
        assert rows[0][2]["text"] == "c"

    def test_two_rows(self):
        words = [
            _word("a", 10, 50), _word("b", 100, 52),
            _word("c", 10, 100), _word("d", 100, 102),
        ]
        rows = _group_words_into_rows(words)
        assert len(rows) == 2
        assert [w["text"] for w in rows[0]] == ["a", "b"]
        assert [w["text"] for w in rows[1]] == ["c", "d"]

    def test_empty(self):
        assert _group_words_into_rows([]) == []

    def test_different_heights_same_row(self):
        """Paddle (h=29) and Tesseract (h=21) words at similar Y → same row."""
        words = [
            _word("take", 100, 287, 47, 29),  # center_y = 301.5
            _word("take", 103, 289, 52, 21),  # center_y = 299.5
        ]
        rows = _group_words_into_rows(words)
        assert len(rows) == 1  # Same row, not two rows

    def test_close_rows_separated(self):
        """Two rows ~30px apart should be separate rows."""
        words = [
            _word("a", 10, 50, height=20),  # center_y = 60
            _word("b", 10, 85, height=20),  # center_y = 95
        ]
        rows = _group_words_into_rows(words)
        assert len(rows) == 2
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _box_center_dist
|
||||
# _merge_row_sequences
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestBoxCenterDist:
|
||||
class TestMergeRowSequences:
    """Tests for the within-row left-to-right sequence merge."""

    def test_identical_sequences_deduplicated(self):
        """Same words from both engines → only one copy each."""
        paddle = [_word("apple", 50, 10), _word("Apfel", 200, 10)]
        tess = [_word("apple", 52, 12), _word("Apfel", 198, 11)]
        merged = _merge_row_sequences(paddle, tess)
        assert len(merged) == 2
        assert merged[0]["text"] == "apple"
        assert merged[1]["text"] == "Apfel"

    def test_tesseract_extra_symbol(self):
        """Tesseract finds '!' that Paddle missed → included."""
        paddle = [_word("Betonung", 60, 10)]
        tess = [_word("!", 20, 10, 12, 20, conf=70), _word("Betonung", 60, 10)]
        merged = _merge_row_sequences(paddle, tess)
        texts = [w["text"] for w in merged]
        assert "!" in texts
        assert "Betonung" in texts
        assert len(merged) == 2

    def test_paddle_extra_word(self):
        """Paddle finds word that Tesseract missed → included."""
        paddle = [_word("!", 20, 10, 12, 20), _word("word", 60, 10)]
        tess = [_word("word", 62, 12)]
        merged = _merge_row_sequences(paddle, tess)
        assert len(merged) == 2

    def test_coordinates_averaged(self):
        """Matched words have coordinates averaged by confidence."""
        paddle = [_word("hello", 100, 50, 80, 20, conf=90)]
        tess = [_word("hello", 110, 55, 70, 18, conf=60)]
        merged = _merge_row_sequences(paddle, tess)
        assert len(merged) == 1
        m = merged[0]
        assert m["text"] == "hello"
        # (100*90 + 110*60) / 150 = 104
        assert m["left"] == 104
        assert m["conf"] == 90

    def test_empty_paddle_row(self):
        tess = [_word("a", 10, 10, conf=80)]
        merged = _merge_row_sequences([], tess)
        assert len(merged) == 1

    def test_empty_tess_row(self):
        paddle = [_word("a", 10, 10)]
        merged = _merge_row_sequences(paddle, [])
        assert len(merged) == 1

    def test_both_empty(self):
        assert _merge_row_sequences([], []) == []

    def test_substring_match(self):
        """'part(in)' from Paddle matches 'part' from Tesseract (substring)."""
        paddle = [_word("part(in)", 100, 10, 90, 20)]
        tess = [_word("part", 100, 12, 50, 18), _word("(in)", 155, 12, 40, 18)]
        merged = _merge_row_sequences(paddle, tess)
        # part(in) matches part, then (in) is extra from Tesseract
        assert len(merged) == 2

    def test_low_conf_tesseract_dropped(self):
        """Unmatched Tesseract words with conf < 30 are dropped."""
        paddle = [_word("hello", 100, 10)]
        tess = [_word("noise", 10, 10, conf=15), _word("hello", 100, 12)]
        merged = _merge_row_sequences(paddle, tess)
        texts = [w["text"] for w in merged]
        assert "noise" not in texts
        assert len(merged) == 1

    def test_real_world_row(self):
        """Reproduce real data: both engines find 'take part teilnehmen More than'."""
        paddle = [
            _word("take", 185, 287, 47, 29, conf=90),
            _word("part(in)", 238, 287, 94, 29, conf=90),
            _word("teilnehmen", 526, 282, 140, 35, conf=93),
            _word("More", 944, 287, 50, 29, conf=96),
            _word("than", 1003, 287, 50, 29, conf=96),
        ]
        tess = [
            _word("take", 188, 289, 52, 21, conf=96),
            _word("part", 249, 292, 48, 24, conf=96),
            _word("(in)", 305, 290, 38, 24, conf=93),
            _word("[teık", 352, 292, 47, 21, conf=90),
            _word("teilnehmen", 534, 290, 127, 21, conf=95),
            _word("More", 948, 292, 60, 20, conf=90),
            _word("than", 1017, 291, 49, 21, conf=96),
        ]
        merged = _merge_row_sequences(paddle, tess)
        texts = [w["text"] for w in merged]
        # No duplicates
        assert texts.count("take") == 1
        assert texts.count("More") == 1
        assert texts.count("than") == 1
        assert texts.count("teilnehmen") == 1
        # Tesseract-only phonetic kept
        assert "[teık" in texts
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _text_similarity
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestTextSimilarity:
    """Tests for the 0-1 text similarity score."""

    def test_identical(self):
        assert _text_similarity("hello", "hello") == 1.0

    def test_case_insensitive(self):
        # Comparison is done on lowercased input.
        assert _text_similarity("Hello", "hello") == 1.0

    def test_substring(self):
        """One is substring of other (e.g. '!Betonung' vs 'Betonung')."""
        assert _text_similarity("!Betonung", "Betonung") == 0.8

    def test_completely_different(self):
        # No shared characters at all.
        assert _text_similarity("abc", "xyz") == 0.0

    def test_empty_strings(self):
        assert _text_similarity("", "hello") == 0.0
        assert _text_similarity("", "") == 0.0

    def test_partial_overlap(self):
        """Some shared characters."""
        score = _text_similarity("apple", "ape")
        assert 0.0 < score < 1.0
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _words_match
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestWordsMatch:
    """Tests for the multi-criteria Paddle/Tesseract word matcher."""

    def test_high_iou_matches(self):
        """IoU > 0.15 is sufficient for a match."""
        first = _word("hello", 100, 50, 80, 20)
        second = _word("hello", 105, 50, 80, 20)
        assert _words_match(first, second) is True

    def test_same_text_same_row_matches(self):
        """Same text on same row matches even with low IoU."""
        first = _word("Betonung", 100, 50, 80, 20)
        second = _word("Betonung", 130, 52, 70, 18)  # shifted but same row
        assert _words_match(first, second) is True

    def test_close_centers_same_row_matches(self):
        """Nearby centers on same row match."""
        first = _word("x", 100, 50, 40, 20)
        second = _word("y", 110, 52, 50, 22)  # close, same row
        assert _words_match(first, second) is True

    def test_different_rows_no_match(self):
        """Words on different rows don't match even with same text."""
        first = _word("hello", 100, 50, 80, 20)
        second = _word("hello", 100, 200, 80, 20)  # far away vertically
        assert _words_match(first, second) is False

    def test_far_apart_same_row_different_text(self):
        """Different text far apart on same row: no match."""
        first = _word("cat", 10, 50, 40, 20)
        second = _word("dog", 400, 50, 40, 20)
        assert _words_match(first, second) is False

    def test_no_overlap_no_proximity_no_text(self):
        """Completely different words far apart: no match."""
        first = _word("abc", 0, 0, 50, 20)
        second = _word("xyz", 500, 500, 50, 20)
        assert _words_match(first, second) is False
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _merge_paddle_tesseract
|
||||
# _merge_paddle_tesseract (full pipeline)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestMergePaddleTesseract:
|
||||
|
||||
def test_perfect_match_averages_coords(self):
|
||||
"""Same word at same position: coordinates averaged by confidence."""
|
||||
pw = [_word("hello", 100, 50, 80, 20, conf=90)]
|
||||
tw = [_word("hello", 110, 55, 70, 18, conf=60)]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
assert len(merged) == 1
|
||||
m = merged[0]
|
||||
assert m["text"] == "hello"
|
||||
assert m["left"] == 104 # (100*90 + 110*60) / 150
|
||||
assert m["conf"] == 90
|
||||
|
||||
def test_same_word_slightly_offset_merges(self):
|
||||
"""Same word with slight offset still merges (center proximity)."""
|
||||
pw = [_word("Betonung", 100, 50, 90, 22, conf=85)]
|
||||
tw = [_word("Betonung", 115, 52, 80, 20, conf=60)]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
assert len(merged) == 1
|
||||
assert merged[0]["text"] == "Betonung"
|
||||
|
||||
def test_truly_different_words_kept_separate(self):
|
||||
"""Non-overlapping different words: both kept."""
|
||||
pw = [_word("hello", 10, 10)]
|
||||
tw = [_word("bullet", 500, 500, conf=50)]
|
||||
def test_same_words_deduplicated(self):
|
||||
"""Both engines find same words → no duplicates."""
|
||||
pw = [
|
||||
_word("apple", 50, 10, 70, 20, conf=90),
|
||||
_word("Apfel", 300, 10, 60, 20, conf=85),
|
||||
]
|
||||
tw = [
|
||||
_word("apple", 52, 11, 68, 19, conf=75),
|
||||
_word("Apfel", 298, 12, 62, 18, conf=70),
|
||||
]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
assert len(merged) == 2
|
||||
texts = {m["text"] for m in merged}
|
||||
assert texts == {"hello", "bullet"}
|
||||
texts = sorted(w["text"] for w in merged)
|
||||
assert texts == ["Apfel", "apple"]
|
||||
|
||||
def test_low_conf_tesseract_dropped(self):
|
||||
"""Unmatched Tesseract words with conf < 40 are dropped."""
|
||||
pw = [_word("hello", 10, 10)]
|
||||
tw = [_word("noise", 500, 500, conf=20)]
|
||||
def test_different_rows_not_cross_merged(self):
|
||||
"""Words from different rows must NOT be averaged together."""
|
||||
pw = [
|
||||
_word("row1word", 50, 50, 80, 20, conf=90),
|
||||
_word("row2word", 50, 100, 80, 20, conf=90),
|
||||
]
|
||||
tw = [
|
||||
_word("row1word", 52, 52, 78, 18, conf=80),
|
||||
_word("row2word", 52, 102, 78, 18, conf=80),
|
||||
]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
assert len(merged) == 1
|
||||
assert len(merged) == 2
|
||||
# Row 1 word should stay near y=50, not averaged with y=100
|
||||
row1 = [w for w in merged if w["text"] == "row1word"][0]
|
||||
row2 = [w for w in merged if w["text"] == "row2word"][0]
|
||||
assert row1["top"] < 60 # stays near row 1
|
||||
assert row2["top"] > 90 # stays near row 2
|
||||
|
||||
def test_tesseract_extra_symbols_added(self):
|
||||
"""Symbols only found by Tesseract are included."""
|
||||
pw = [_word("Betonung", 60, 10, 80, 20)]
|
||||
tw = [
|
||||
_word("!", 20, 10, 12, 20, conf=65),
|
||||
_word("Betonung", 60, 10, 80, 20, conf=50),
|
||||
]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
texts = [w["text"] for w in merged]
|
||||
assert "!" in texts
|
||||
assert "Betonung" in texts
|
||||
assert len(merged) == 2
|
||||
|
||||
def test_paddle_extra_words_added(self):
|
||||
"""Words only found by Paddle are included."""
|
||||
pw = [_word("extra", 10, 10), _word("word", 100, 10)]
|
||||
tw = [_word("word", 102, 12)]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
assert len(merged) == 2
|
||||
|
||||
def test_empty_paddle(self):
|
||||
pw = []
|
||||
tw = [_word("bullet", 10, 10, conf=80), _word("noise", 200, 200, conf=10)]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
assert len(merged) == 1
|
||||
assert merged[0]["text"] == "bullet"
|
||||
tw = [_word("a", 10, 10, conf=80), _word("b", 200, 200, conf=10)]
|
||||
merged = _merge_paddle_tesseract([], tw)
|
||||
assert len(merged) == 1 # only conf >= 40
|
||||
|
||||
def test_empty_tesseract(self):
|
||||
pw = [_word("a", 10, 10), _word("b", 200, 10)]
|
||||
@@ -226,188 +257,32 @@ class TestMergePaddleTesseract:
|
||||
def test_both_empty(self):
|
||||
assert _merge_paddle_tesseract([], []) == []
|
||||
|
||||
def test_one_to_one_matching(self):
|
||||
"""Each Tesseract word matches at most one Paddle word."""
|
||||
def test_multi_row_deduplication(self):
|
||||
"""Multiple rows with words from both engines, all deduplicated."""
|
||||
pw = [
|
||||
_word("cat", 10, 10, 60, 20, conf=80),
|
||||
_word("dog", 200, 10, 60, 20, conf=80),
|
||||
]
|
||||
tw = [_word("cat", 15, 12, 55, 18, conf=70)]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
assert len(merged) == 2 # cat (merged) + dog (unmatched paddle)
|
||||
|
||||
def test_far_apart_different_text_not_merged(self):
|
||||
"""Different words far apart stay separate."""
|
||||
pw = [_word("hello", 0, 0, 100, 20, conf=80)]
|
||||
tw = [_word("world", 500, 300, 100, 20, conf=70)]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
assert len(merged) == 2
|
||||
|
||||
def test_paddle_text_preferred(self):
|
||||
"""Merged word uses Paddle's text."""
|
||||
pw = [_word("Betonung", 100, 50, 80, 20, conf=85)]
|
||||
tw = [_word("Betonung!", 100, 50, 80, 20, conf=60)]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
assert len(merged) == 1
|
||||
assert merged[0]["text"] == "Betonung"
|
||||
|
||||
def test_confidence_weighted_positions(self):
|
||||
"""Equal confidence → simple average of coordinates."""
|
||||
pw = [_word("x", 100, 200, 60, 20, conf=50)]
|
||||
tw = [_word("x", 110, 200, 60, 20, conf=50)]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
assert len(merged) == 1
|
||||
m = merged[0]
|
||||
assert m["left"] == 105
|
||||
assert m["top"] == 200
|
||||
|
||||
def test_zero_confidence_no_division_error(self):
|
||||
"""Words with conf=0 don't cause division by zero."""
|
||||
pw = [_word("a", 100, 50, 80, 20, conf=0)]
|
||||
tw = [_word("a", 100, 50, 80, 20, conf=0)]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
assert len(merged) == 1
|
||||
|
||||
def test_duplicate_words_same_position_deduplicated(self):
|
||||
"""The core bug fix: same word at same position from both engines
|
||||
should appear only once, not doubled."""
|
||||
# Simulate typical case: both engines find same words
|
||||
pw = [
|
||||
_word("apple", 50, 10, 70, 20, conf=90),
|
||||
_word("Apfel", 300, 10, 60, 20, conf=85),
|
||||
_word("dog", 50, 50, 50, 20, conf=88),
|
||||
_word("Hund", 300, 50, 60, 20, conf=82),
|
||||
_word("cat", 50, 50, conf=90),
|
||||
_word("Katze", 200, 50, conf=85),
|
||||
_word("dog", 50, 100, conf=88),
|
||||
_word("Hund", 200, 100, conf=82),
|
||||
]
|
||||
tw = [
|
||||
_word("apple", 52, 11, 68, 19, conf=75),
|
||||
_word("Apfel", 298, 12, 62, 18, conf=70),
|
||||
_word("dog", 48, 49, 52, 21, conf=72),
|
||||
_word("Hund", 302, 51, 58, 19, conf=68),
|
||||
_word("cat", 52, 52, conf=75),
|
||||
_word("Katze", 198, 51, conf=70),
|
||||
_word("dog", 48, 101, conf=72),
|
||||
_word("Hund", 202, 102, conf=68),
|
||||
]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
# Each word should appear exactly once
|
||||
assert len(merged) == 4
|
||||
texts = [m["text"] for m in merged]
|
||||
assert sorted(texts) == ["Apfel", "Hund", "apple", "dog"]
|
||||
|
||||
|
||||
class TestMergePaddleTesseractBulletPoints:
|
||||
"""Tesseract catches bullet points / symbols that PaddleOCR misses."""
|
||||
|
||||
def test_bullet_added_from_tesseract(self):
|
||||
"""Bullet character from Tesseract is added."""
|
||||
pw = [_word("Betonung", 60, 10, 80, 20)]
|
||||
tw = [
|
||||
_word("•", 10, 10, 15, 15, conf=65),
|
||||
_word("Betonung", 60, 10, 80, 20, conf=50),
|
||||
]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
texts = [m["text"] for m in merged]
|
||||
assert "•" in texts
|
||||
assert "Betonung" in texts
|
||||
assert len(merged) == 2
|
||||
|
||||
def test_exclamation_added_from_tesseract(self):
|
||||
"""Exclamation mark from Tesseract is added."""
|
||||
pw = [_word("important", 60, 10, 100, 20)]
|
||||
tw = [
|
||||
_word("!", 40, 10, 12, 20, conf=70),
|
||||
_word("important", 60, 10, 100, 20, conf=55),
|
||||
]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
texts = [m["text"] for m in merged]
|
||||
assert "!" in texts
|
||||
assert len(merged) == 2
|
||||
|
||||
def test_multiple_unique_tesseract_symbols(self):
|
||||
"""Multiple symbols only found by Tesseract are all added."""
|
||||
pw = [_word("word", 100, 10, 60, 20)]
|
||||
tw = [
|
||||
_word("!", 20, 10, 10, 20, conf=70),
|
||||
_word("•", 40, 10, 10, 15, conf=65),
|
||||
_word("word", 100, 10, 60, 20, conf=50),
|
||||
]
|
||||
merged = _merge_paddle_tesseract(pw, tw)
|
||||
texts = [m["text"] for m in merged]
|
||||
assert "!" in texts
|
||||
assert "•" in texts
|
||||
assert "word" in texts
|
||||
assert len(merged) == 3
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _deduplicate_words
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestDeduplicateWords:
|
||||
|
||||
def test_no_duplicates(self):
|
||||
"""Different words at different positions: all kept."""
|
||||
words = [_word("a", 10, 10), _word("b", 200, 10), _word("c", 10, 100)]
|
||||
result = _deduplicate_words(words)
|
||||
assert len(result) == 3
|
||||
|
||||
def test_exact_duplicate_removed(self):
|
||||
"""Same text at same position: only one kept."""
|
||||
words = [
|
||||
_word("take", 185, 287, 47, 29, conf=90),
|
||||
_word("take", 188, 289, 52, 21, conf=96),
|
||||
]
|
||||
result = _deduplicate_words(words)
|
||||
assert len(result) == 1
|
||||
assert result[0]["conf"] == 96 # higher confidence kept
|
||||
|
||||
def test_same_text_far_apart_kept(self):
|
||||
"""Same word at very different positions (e.g. repeated in text): both kept."""
|
||||
words = [
|
||||
_word("the", 100, 10),
|
||||
_word("the", 500, 10),
|
||||
]
|
||||
result = _deduplicate_words(words)
|
||||
assert len(result) == 2
|
||||
|
||||
def test_different_text_same_position_kept(self):
|
||||
"""Different words at same position: both kept (not duplicates)."""
|
||||
words = [
|
||||
_word("apple", 100, 50),
|
||||
_word("Apfel", 105, 52),
|
||||
]
|
||||
result = _deduplicate_words(words)
|
||||
assert len(result) == 2
|
||||
|
||||
def test_empty_list(self):
|
||||
assert _deduplicate_words([]) == []
|
||||
|
||||
def test_single_word(self):
|
||||
words = [_word("hello", 10, 10)]
|
||||
assert len(_deduplicate_words(words)) == 1
|
||||
|
||||
def test_real_world_near_duplicates(self):
|
||||
"""Simulate real-world: Paddle (height=29) + Tesseract (height=21) near-dupes."""
|
||||
words = [
|
||||
_word("take", 185, 287, 47, 29, conf=90),
|
||||
_word("part", 249, 292, 48, 24, conf=96),
|
||||
_word("More", 944, 287, 50, 29, conf=96),
|
||||
_word("than", 1003, 287, 50, 29, conf=96),
|
||||
# near-dupes from other engine
|
||||
_word("take", 188, 289, 52, 21, conf=96),
|
||||
_word("part", 249, 294, 47, 25, conf=96),
|
||||
_word("More", 948, 292, 60, 20, conf=90),
|
||||
_word("than", 1017, 291, 49, 21, conf=96),
|
||||
]
|
||||
result = _deduplicate_words(words)
|
||||
# Each word should appear only once
|
||||
assert len(result) == 4
|
||||
texts = sorted(w["text"] for w in result)
|
||||
assert texts == ["More", "part", "take", "than"]
|
||||
texts = sorted(w["text"] for w in merged)
|
||||
assert texts == ["Hund", "Katze", "cat", "dog"]
|
||||
|
||||
|
||||
class TestMergeRealWorldRegression:
|
||||
"""Regression test with actual data from the doubled-words bug."""
|
||||
|
||||
def test_row2_no_duplicates(self):
|
||||
"""Reproduce the row-2 bug: both engines return the same words at
|
||||
slightly different positions. Merge should produce no duplicates."""
|
||||
def test_full_page_no_duplicates(self):
|
||||
"""Both engines find same words at slightly different positions.
|
||||
Merge should produce no near-duplicate words."""
|
||||
paddle = [
|
||||
_word("teilnehmen", 526, 282, 140, 35, conf=93),
|
||||
_word("take", 185, 287, 47, 29, conf=90),
|
||||
@@ -420,14 +295,17 @@ class TestMergeRealWorldRegression:
|
||||
_word("part", 1266, 287, 50, 29, conf=96),
|
||||
_word("in", 1326, 287, 25, 29, conf=96),
|
||||
_word("the", 1360, 287, 38, 29, conf=96),
|
||||
# Second row
|
||||
_word("be", 185, 365, 30, 29, conf=90),
|
||||
_word("good", 216, 365, 50, 29, conf=90),
|
||||
_word("at", 275, 365, 25, 29, conf=90),
|
||||
_word("sth.", 306, 365, 45, 29, conf=90),
|
||||
]
|
||||
tess = [
|
||||
_word("take", 188, 289, 52, 21, conf=96),
|
||||
_word("part", 249, 292, 48, 24, conf=96),
|
||||
_word("(in)", 305, 290, 38, 24, conf=93),
|
||||
_word("teilnehmen", 534, 290, 127, 21, conf=95),
|
||||
_word("(an),", 671, 291, 48, 23, conf=96),
|
||||
_word("mitmachen", 730, 290, 123, 22, conf=96),
|
||||
_word("More", 948, 292, 60, 20, conf=90),
|
||||
_word("than", 1017, 291, 49, 21, conf=96),
|
||||
_word("200", 1076, 292, 43, 20, conf=93),
|
||||
@@ -436,31 +314,36 @@ class TestMergeRealWorldRegression:
|
||||
_word("part", 1276, 294, 47, 25, conf=96),
|
||||
_word("in", 1332, 292, 20, 20, conf=95),
|
||||
_word("the", 1361, 292, 36, 21, conf=95),
|
||||
# Tesseract-only: phonetic transcriptions
|
||||
_word("[teık", 352, 292, 47, 21, conf=90),
|
||||
_word("'pa:t]", 407, 292, 55, 23, conf=89),
|
||||
# Second row
|
||||
_word("be", 189, 369, 28, 21, conf=96),
|
||||
_word("good", 225, 369, 50, 21, conf=96),
|
||||
_word("at", 292, 371, 22, 21, conf=96),
|
||||
_word("sth.", 324, 369, 42, 21, conf=96),
|
||||
]
|
||||
merged = _merge_paddle_tesseract(paddle, tess)
|
||||
|
||||
# Check no near-duplicates remain
|
||||
# Check no near-duplicates: same text within 30px horizontal / 15px vertical
|
||||
for i, w1 in enumerate(merged):
|
||||
for j, w2 in enumerate(merged):
|
||||
if j <= i:
|
||||
continue
|
||||
for j in range(i + 1, len(merged)):
|
||||
w2 = merged[j]
|
||||
if w1["text"].lower() == w2["text"].lower():
|
||||
cx1 = w1["left"] + w1.get("width", 0) / 2
|
||||
cx2 = w2["left"] + w2.get("width", 0) / 2
|
||||
cy1 = w1["top"] + w1.get("height", 0) / 2
|
||||
cy2 = w2["top"] + w2.get("height", 0) / 2
|
||||
assert abs(cx1 - cx2) >= 30 or abs(cy1 - cy2) >= 15, (
|
||||
f"Near-duplicate found: '{w1['text']}' at ({w1['left']},{w1['top']}) "
|
||||
f"Near-duplicate: '{w1['text']}' at ({w1['left']},{w1['top']}) "
|
||||
f"vs ({w2['left']},{w2['top']})"
|
||||
)
|
||||
|
||||
# Tesseract-only words should be present
|
||||
# Tesseract-only phonetic words should be present
|
||||
texts = [w["text"] for w in merged]
|
||||
assert "(in)" in texts # Tesseract split "part(in)" differently
|
||||
assert "(an)," in texts
|
||||
assert "mitmachen" in texts
|
||||
assert "[teık" in texts # phonetic from Tesseract
|
||||
assert "[teık" in texts
|
||||
assert "'pa:t]" in texts
|
||||
|
||||
# Row 1 and Row 2 words should not be merged to same Y position
|
||||
be_word = [w for w in merged if w["text"] == "be"][0]
|
||||
take_word = [w for w in merged if w["text"] == "take"][0]
|
||||
assert abs(be_word["top"] - take_word["top"]) > 30, "Rows should stay separate"
|
||||
|
||||
Reference in New Issue
Block a user