Fix: Update all old-style imports inside packages to new paths
Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-school (push) Successful in 1m7s
CI / test-go-edu-search (push) Successful in 46s
CI / test-python-klausur (push) Failing after 2m32s
CI / test-python-agent-core (push) Successful in 33s
CI / test-nodejs-website (push) Successful in 34s
Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-school (push) Successful in 1m7s
CI / test-go-edu-search (push) Successful in 46s
CI / test-python-klausur (push) Failing after 2m32s
CI / test-python-agent-core (push) Successful in 33s
CI / test-nodejs-website (push) Successful in 34s
65 files in klausur-service packages + 3 in backend-lehrer packages had stale imports referencing deleted shim modules. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -9,7 +9,7 @@ import logging
 import re
 from typing import Any, Dict, List, Tuple
 
-from cv_ocr_engines import (
+from ocr.engines.engines import (
     _words_to_reading_order_text, _group_words_into_lines, _lookup_ipa,
 )
 
@@ -10,7 +10,7 @@ import logging
 import re
 from typing import Any, Dict, List
 
-from cv_ocr_engines import _words_to_reading_order_text
+from ocr.engines.engines import _words_to_reading_order_text
 
 logger = logging.getLogger(__name__)
 
@@ -69,14 +69,14 @@ def _finalize_grid(
 
     # --- Word-gap merge ---
     try:
-        from cv_syllable_detect import merge_word_gaps_in_zones
+        from ocr.detect.syllable.detect import merge_word_gaps_in_zones
         merge_word_gaps_in_zones(zones_data, session_id)
     except Exception as e:
         logger.warning("Word-gap merge failed: %s", e)
 
     # --- Pipe auto-correction ---
     try:
-        from cv_syllable_detect import autocorrect_pipe_artifacts
+        from ocr.detect.syllable.detect import autocorrect_pipe_artifacts
         autocorrect_pipe_artifacts(zones_data, session_id)
     except Exception as e:
         logger.warning("Pipe autocorrect failed: %s", e)
@@ -132,10 +132,10 @@ def _detect_dictionary(
     margin_strip_detected: bool,
 ) -> Dict[str, Any]:
     """Run dictionary detection on the assembled grid."""
-    from cv_layout import _score_dictionary_signals
+    from ocr.layout.layout import _score_dictionary_signals
     dict_detection: Dict[str, Any] = {"is_dictionary": False, "confidence": 0.0}
     try:
-        from cv_vocab_types import ColumnGeometry
+        from ocr.types import ColumnGeometry
         for z in zones_data:
             zone_cells = z.get("cells", [])
             zone_cols = z.get("columns", [])
@@ -222,7 +222,7 @@ def _insert_syllable_dividers(
 
     if _syllable_eligible:
         try:
-            from cv_syllable_detect import insert_syllable_dividers
+            from ocr.detect.syllable.detect import insert_syllable_dividers
             force_syllables = (syllable_mode in ("all", "de", "en"))
             syllable_insertions = insert_syllable_dividers(
                 zones_data, img_bgr, session_id,
@@ -241,7 +241,7 @@ def _split_merged_words(
 ) -> None:
     """Split merged words using dictionary lookup."""
     try:
-        from cv_review import _try_split_merged_word, _SPELL_AVAILABLE
+        from ocr.review.review import _try_split_merged_word, _SPELL_AVAILABLE
         if not _SPELL_AVAILABLE:
             return
         split_count = 0
@@ -307,7 +307,7 @@ def _run_spell_checker(
 ) -> None:
     """Run SmartSpellChecker on all cells."""
     try:
-        from smart_spell import SmartSpellChecker
+        from ocr.spell.smart_spell import SmartSpellChecker
         _ssc = SmartSpellChecker()
         spell_fix_count = 0
 
@@ -10,8 +10,8 @@ import logging
 import re
 from typing import Any, Dict, List, Optional, Set, Tuple
 
-from cv_color_detect import detect_word_colors
-from cv_ocr_engines import (
+from ocr.detect.color_detect import detect_word_colors
+from ocr.engines.engines import (
     fix_cell_phonetics, fix_ipa_continuation_cell, _text_has_garbled_ipa,
     _lookup_ipa,
 )
@@ -207,7 +207,7 @@ def _run_ipa_correction(
 
     # --- German IPA (wiki-pronunciation-dict + epitran) ---
     if de_ipa_target_cols:
-        from cv_ipa_german import insert_german_ipa
+        from ocr.ipa_german import insert_german_ipa
         insert_german_ipa(all_cells, de_ipa_target_cols)
 
     ipa_target_cols = en_ipa_target_cols | de_ipa_target_cols
@@ -11,11 +11,11 @@ from typing import Any, Dict, List, Optional
 import cv2
 import numpy as np
 
-from cv_box_detect import detect_boxes, split_page_into_zones
-from cv_graphic_detect import detect_graphic_elements
-from cv_color_detect import recover_colored_text
-from cv_vocab_types import PageZone
-from ocr_pipeline_session_store import get_session_image
+from ocr.detect.box_detect import detect_boxes, split_page_into_zones
+from ocr.detect.graphic_detect import detect_graphic_elements
+from ocr.detect.color_detect import recover_colored_text
+from ocr.types import PageZone
+from ocr.pipeline.session_store import get_session_image
 
 from grid.editor.filters import (
     _filter_border_strip_words,
Reference in New Issue
Block a user