Cleanup: Delete ALL 242 shims, update ALL consumer imports
Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-school (push) Successful in 41s
CI / test-go-edu-search (push) Successful in 32s
CI / test-python-klausur (push) Failing after 2m41s
CI / test-python-agent-core (push) Successful in 34s
CI / test-nodejs-website (push) Successful in 39s

klausur-service: 183 shims deleted, 26 test files + 8 source files updated
backend-lehrer: 59 shims deleted, main.py + 8 source files updated

All imports now use the new package paths directly.
Zero shims remaining in the entire codebase.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Benjamin Admin
2026-04-26 00:11:33 +02:00
parent d093a4d388
commit 5f2ed44654
288 changed files with 214 additions and 1182 deletions

View File

@@ -173,7 +173,7 @@ class TestMarkdownParser:
def test_parse_simple_markdown(self):
"""Test parsing simple markdown content."""
from github_crawler import MarkdownParser
from crawler.github import MarkdownParser
content = """# Test Title
@@ -191,21 +191,21 @@ More content here.
def test_extract_title_from_heading(self):
"""Test extracting title from h1 heading."""
from github_crawler import MarkdownParser
from crawler.github import MarkdownParser
title = MarkdownParser._extract_title("# My Document\n\nContent", "fallback.md")
assert title == "My Document"
def test_extract_title_fallback(self):
"""Test fallback to filename when no heading."""
from github_crawler import MarkdownParser
from crawler.github import MarkdownParser
title = MarkdownParser._extract_title("No heading here", "my-document.md")
assert title == "My Document"
def test_detect_german_language(self):
"""Test German language detection."""
from github_crawler import MarkdownParser
from crawler.github import MarkdownParser
german_text = "Dies ist eine Datenschutzerklaerung fuer die Verarbeitung personenbezogener Daten."
lang = MarkdownParser._detect_language(german_text)
@@ -213,7 +213,7 @@ More content here.
def test_detect_english_language(self):
"""Test English language detection."""
from github_crawler import MarkdownParser
from crawler.github import MarkdownParser
english_text = "This is a privacy policy for processing personal data in our application."
lang = MarkdownParser._detect_language(english_text)
@@ -221,7 +221,7 @@ More content here.
def test_find_placeholders(self):
"""Test finding placeholder patterns."""
from github_crawler import MarkdownParser
from crawler.github import MarkdownParser
content = "Company: [COMPANY_NAME], Contact: {email}, Address: __ADDRESS__"
placeholders = MarkdownParser._find_placeholders(content)
@@ -236,7 +236,7 @@ class TestHTMLParser:
def test_parse_simple_html(self):
"""Test parsing simple HTML content."""
from github_crawler import HTMLParser
from crawler.github import HTMLParser
content = """<!DOCTYPE html>
<html>
@@ -255,7 +255,7 @@ class TestHTMLParser:
def test_html_to_text_removes_scripts(self):
"""Test that scripts are removed from HTML."""
from github_crawler import HTMLParser
from crawler.github import HTMLParser
html = "<p>Text</p><script>alert('bad');</script><p>More</p>"
text = HTMLParser._html_to_text(html)
@@ -270,7 +270,7 @@ class TestJSONParser:
def test_parse_simple_json(self):
"""Test parsing simple JSON content."""
from github_crawler import JSONParser
from crawler.github import JSONParser
content = json.dumps({
"title": "Privacy Policy",
@@ -286,7 +286,7 @@ class TestJSONParser:
def test_parse_nested_json(self):
"""Test parsing nested JSON structures."""
from github_crawler import JSONParser
from crawler.github import JSONParser
content = json.dumps({
"sections": {
@@ -305,7 +305,7 @@ class TestExtractedDocument:
def test_extracted_document_hash(self):
"""Test that source hash is auto-generated."""
from github_crawler import ExtractedDocument
from crawler.github import ExtractedDocument
doc = ExtractedDocument(
text="Some content",
@@ -396,7 +396,7 @@ class TestLegalTemplatesIngestion:
def test_infer_template_type_privacy(self):
"""Test inferring privacy policy type."""
from legal_templates_ingestion import LegalTemplatesIngestion
from github_crawler import ExtractedDocument
from crawler.github import ExtractedDocument
from template_sources import SourceConfig, LicenseType
with patch('legal_templates_ingestion.QdrantClient'):
@@ -449,7 +449,7 @@ class TestTemplatesAdminAPI:
def test_templates_status_structure(self):
"""Test the structure of templates status response."""
from admin_api import _templates_ingestion_status
from admin.api import _templates_ingestion_status
# Reset status
_templates_ingestion_status["running"] = False
@@ -462,7 +462,7 @@ class TestTemplatesAdminAPI:
def test_templates_status_running(self):
"""Test status when ingestion is running."""
from admin_api import _templates_ingestion_status
from admin.api import _templates_ingestion_status
_templates_ingestion_status["running"] = True
_templates_ingestion_status["current_source"] = "github-site-policy"
@@ -473,7 +473,7 @@ class TestTemplatesAdminAPI:
def test_templates_results_tracking(self):
"""Test that ingestion results are tracked correctly."""
from admin_api import _templates_ingestion_status
from admin.api import _templates_ingestion_status
_templates_ingestion_status["results"] = {
"github-site-policy": {
@@ -578,7 +578,7 @@ class TestTemplatesIntegration:
def test_full_chunk_creation_pipeline(self, mock_all_services):
"""Test the full chunk creation pipeline."""
from legal_templates_ingestion import LegalTemplatesIngestion
from github_crawler import ExtractedDocument
from crawler.github import ExtractedDocument
from template_sources import SourceConfig, LicenseType
ingestion = LegalTemplatesIngestion()