Add merged-word splitting to OCR spell review
Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-school (push) Successful in 43s
CI / test-go-edu-search (push) Successful in 38s
CI / test-nodejs-website (push) Has been cancelled
CI / test-python-agent-core (push) Has been cancelled
CI / test-python-klausur (push) Has been cancelled
Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-school (push) Successful in 43s
CI / test-go-edu-search (push) Successful in 38s
CI / test-nodejs-website (push) Has been cancelled
CI / test-python-agent-core (push) Has been cancelled
CI / test-python-klausur (push) Has been cancelled
OCR often merges adjacent words when spacing is tight, e.g. "atmyschool" → "at my school", "goodidea" → "good idea". New _try_split_merged_word() uses dynamic programming to find the shortest sequence of dictionary words covering the token. Integrated as step 5 in _spell_fix_token() after general spell correction. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -720,6 +720,58 @@ def _spell_dict_knows(word: str) -> bool:
|
||||
return bool(_en_spell.known([w])) or bool(_de_spell.known([w]))
|
||||
|
||||
|
||||
def _try_split_merged_word(token: str) -> Optional[str]:
    """Try to split a merged word like 'atmyschool' into 'at my school'.

    Uses dynamic programming to find the shortest sequence of dictionary
    words that covers the entire token. Only returns a result when the
    split produces at least 2 words and ALL parts are known dictionary words.

    Preserves original capitalisation by mapping the split lengths back
    onto the input string.

    Args:
        token: The candidate merged word (any casing).

    Returns:
        The space-separated split with original casing preserved, or
        ``None`` when spellcheckers are unavailable, the token is too
        short, or no full dictionary cover with >= 2 words exists.
    """
    if not _SPELL_AVAILABLE or len(token) < 5:
        return None

    lower = token.lower()
    n = len(lower)

    # dp[i] = shortest list of word lengths that covers lower[:i], or None
    # when no full dictionary cover of that prefix exists.
    dp: list = [None] * (n + 1)
    dp[0] = []

    for i in range(1, n + 1):
        # Try all possible last-word lengths, capped at 20 characters.
        # Single-character words are allowed only for 'a' and 'i'.
        for j in range(max(0, i - 20), i):
            if dp[j] is None:
                continue
            word_len = i - j
            candidate = lower[j:i]
            if word_len == 1 and candidate not in ('a', 'i'):
                continue
            if _spell_dict_knows(candidate):
                new_split = dp[j] + [word_len]
                # Prefer fewer words (shorter split).
                if dp[i] is None or len(new_split) < len(dp[i]):
                    dp[i] = new_split

    # Require a full cover of at least two words; a single-word "split"
    # means the token was already a dictionary word.
    if dp[n] is None or len(dp[n]) < 2:
        return None

    # Reconstruct with original casing by slicing the untouched token.
    result = []
    pos = 0
    for wlen in dp[n]:
        result.append(token[pos:pos + wlen])
        pos += wlen

    logger.debug("Split merged word: %r → %r", token, " ".join(result))
    return " ".join(result)
|
||||
|
||||
|
||||
def _spell_fix_token(token: str, field: str = "") -> Optional[str]:
|
||||
"""Return corrected form of token, or None if no fix needed/possible.
|
||||
|
||||
@@ -777,6 +829,14 @@ def _spell_fix_token(token: str, field: str = "") -> Optional[str]:
|
||||
correction = correction[0].upper() + correction[1:]
|
||||
if _spell_dict_knows(correction):
|
||||
return correction
|
||||
|
||||
# 5. Merged-word split: OCR often merges adjacent words when spacing
|
||||
# is too tight, e.g. "atmyschool" → "at my school"
|
||||
if len(token) >= 5 and token.isalpha():
|
||||
split = _try_split_merged_word(token)
|
||||
if split:
|
||||
return split
|
||||
|
||||
return None
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user