Files
breakpilot-lehrer/klausur-service/backend/mail/ai_category.py
Benjamin Admin 34da9f4cda [split-required] Split 700-870 LOC files across all services
backend-lehrer (11 files):
- llm_gateway/routes/schools.py (867 → 5), recording_api.py (848 → 6)
- messenger_api.py (840 → 5), print_generator.py (824 → 5)
- unit_analytics_api.py (751 → 5), classroom/routes/context.py (726 → 4)
- llm_gateway/routes/edu_search_seeds.py (710 → 4)

klausur-service (12 files):
- ocr_labeling_api.py (845 → 4), metrics_db.py (833 → 4)
- legal_corpus_api.py (790 → 4), page_crop.py (758 → 3)
- mail/ai_service.py (747 → 4), github_crawler.py (767 → 3)
- trocr_service.py (730 → 4), full_compliance_pipeline.py (723 → 4)
- dsfa_rag_api.py (715 → 4), ocr_pipeline_auto.py (705 → 4)

website (6 pages):
- audit-checklist (867 → 8), content (806 → 6)
- screen-flow (790 → 4), scraper (789 → 5)
- zeugnisse (776 → 5), modules (745 → 4)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-25 08:01:18 +02:00

270 lines
7.9 KiB
Python

"""
AI Email - Category Classification and Response Suggestions
Rule-based and LLM-based email category classification,
plus response suggestion generation.
Extracted from ai_service.py to keep files under 500 LOC.
"""
import os
import logging
from typing import Optional, List, Tuple
import httpx
from .models import (
EmailCategory,
SenderType,
ResponseSuggestion,
)
logger = logging.getLogger(__name__)
# LLM Gateway configuration
LLM_GATEWAY_URL = os.getenv("LLM_GATEWAY_URL", "http://localhost:8090")
async def classify_category(
    http_client: httpx.AsyncClient,
    subject: str,
    body_preview: str,
    sender_type: SenderType,
) -> Tuple[EmailCategory, float]:
    """
    Classify an email into a category.

    Runs the cheap rule-based classifier first and only falls back to the
    LLM gateway when the rules are not confident enough.

    Args:
        http_client: Shared async HTTP client used for the LLM gateway call.
        subject: Email subject line.
        body_preview: Leading portion of the email body.
        sender_type: Pre-detected sender type (feeds the rule-based pass).

    Returns:
        Tuple of (category, confidence), confidence in [0.0, 1.0].
    """
    category, confidence = _classify_category_rules(subject, body_preview, sender_type)
    # Bug fix: use >= instead of >. The rules give the authority-sender
    # default (best_score = 2) a confidence of exactly 0.7, so the strict
    # comparison always rejected it and fell through to the LLM, defeating
    # that short-circuit entirely.
    if confidence >= 0.7:
        return category, confidence
    return await _classify_category_llm(http_client, subject, body_preview)
def _classify_category_rules(
    subject: str,
    body_preview: str,
    sender_type: SenderType,
) -> Tuple[EmailCategory, float]:
    """Classify an email by counting keyword hits per category.

    Each category is scored by how many of its keywords occur as
    case-insensitive substrings of ``subject + body_preview``; the
    highest-scoring category wins. Emails from school authorities default
    to DIENSTLICH when nothing else matched.

    Returns:
        Tuple of (category, confidence); confidence grows with the hit
        count and is capped at 0.9.
    """
    haystack = f"{subject} {body_preview}".lower()
    # NOTE(review): matching is plain substring containment, which suits
    # German compounds ("elternbriefe" etc.) but means very short keywords
    # such as "it" can fire inside unrelated words — confirm intended.
    keyword_map = {
        EmailCategory.DIENSTLICH: [
            "dienstlich", "dienstanweisung", "erlass", "verordnung",
            "bescheid", "verfuegung", "ministerium", "behoerde"
        ],
        EmailCategory.PERSONAL: [
            "personalrat", "stellenausschreibung", "versetzung",
            "beurteilung", "dienstzeugnis", "krankmeldung", "elternzeit"
        ],
        EmailCategory.FINANZEN: [
            "budget", "haushalt", "etat", "abrechnung", "rechnung",
            "erstattung", "zuschuss", "foerdermittel"
        ],
        EmailCategory.ELTERN: [
            "elternbrief", "elternabend", "schulkonferenz",
            "elternvertreter", "elternbeirat"
        ],
        EmailCategory.SCHUELER: [
            "schueler", "schuelerin", "zeugnis", "klasse", "unterricht",
            "pruefung", "klassenfahrt", "schulpflicht"
        ],
        EmailCategory.FORTBILDUNG: [
            "fortbildung", "seminar", "workshop", "schulung",
            "weiterbildung", "nlq", "didaktik"
        ],
        EmailCategory.VERANSTALTUNG: [
            "einladung", "veranstaltung", "termin", "konferenz",
            "sitzung", "tagung", "feier"
        ],
        EmailCategory.SICHERHEIT: [
            "sicherheit", "notfall", "brandschutz", "evakuierung",
            "hygiene", "corona", "infektionsschutz"
        ],
        EmailCategory.TECHNIK: [
            "it", "software", "computer", "netzwerk", "login",
            "passwort", "digitalisierung", "iserv"
        ],
        EmailCategory.NEWSLETTER: [
            "newsletter", "rundschreiben", "info-mail", "mitteilung"
        ],
        EmailCategory.WERBUNG: [
            "angebot", "rabatt", "aktion", "werbung", "abonnement"
        ],
    }

    winner = EmailCategory.SONSTIGES
    top_hits = 0.0
    for candidate, keywords in keyword_map.items():
        hits = sum(kw in haystack for kw in keywords)
        if hits > top_hits:
            top_hits = hits
            winner = candidate

    # Mail from a school authority with no keyword match is treated as
    # official business with a moderate score.
    authority_senders = (
        SenderType.KULTUSMINISTERIUM,
        SenderType.LANDESSCHULBEHOERDE,
        SenderType.RLSB,
    )
    if sender_type in authority_senders and winner == EmailCategory.SONSTIGES:
        winner = EmailCategory.DIENSTLICH
        top_hits = 2

    return winner, min(0.9, 0.4 + (top_hits * 0.15))
async def _classify_category_llm(
    client: httpx.AsyncClient,
    subject: str,
    body_preview: str,
) -> Tuple[EmailCategory, float]:
    """Ask the LLM gateway to pick a category.

    Sends a German prompt listing every EmailCategory value and expects a
    "kategorie|konfidenz" reply. Any failure — non-200 response,
    unparseable reply, or an unknown category name — falls back to
    (SONSTIGES, 0.5).
    """
    try:
        categories = ", ".join(c.value for c in EmailCategory)
        prompt = f"""Klassifiziere diese E-Mail in EINE Kategorie:
Betreff: {subject}
Inhalt: {body_preview[:500]}
Kategorien: {categories}
Antworte NUR mit dem Kategorienamen und einer Konfidenz (0.0-1.0):
Format: kategorie|konfidenz
"""
        resp = await client.post(
            f"{LLM_GATEWAY_URL}/api/v1/inference",
            json={"prompt": prompt, "playbook": "mail_analysis", "max_tokens": 50},
        )
        if resp.status_code == 200:
            raw = resp.json().get("response", "sonstiges|0.5")
            pieces = raw.strip().split("|")
            if len(pieces) >= 2:
                name = pieces[0].strip().lower()
                # A malformed number raises ValueError and is caught below.
                score = float(pieces[1].strip())
                try:
                    return EmailCategory(name), min(max(score, 0.0), 1.0)
                except ValueError:
                    # Unknown category name — fall through to the default.
                    pass
    except Exception as e:
        logger.warning(f"LLM category classification failed: {e}")
    return EmailCategory.SONSTIGES, 0.5
async def suggest_response(
    http_client: httpx.AsyncClient,
    subject: str,
    body_text: str,
    sender_type: SenderType,
    category: EmailCategory,
) -> List[ResponseSuggestion]:
    """Build response suggestions for an email.

    Collects matching canned templates (authority acknowledgment, parent
    reply) and appends one LLM-drafted suggestion when the gateway call
    succeeds. LLM failures are logged and otherwise ignored.
    """
    results: List[ResponseSuggestion] = []

    # Formal acknowledgment for mail from school authorities.
    authority_senders = (
        SenderType.KULTUSMINISTERIUM,
        SenderType.LANDESSCHULBEHOERDE,
        SenderType.RLSB,
    )
    if sender_type in authority_senders:
        results.append(ResponseSuggestion(
            template_type="acknowledgment",
            subject=f"Re: {subject}",
            body="""Sehr geehrte Damen und Herren,
vielen Dank fuer Ihre Nachricht.
Ich bestaetige den Eingang und werde die Angelegenheit fristgerecht bearbeiten.
Mit freundlichen Gruessen""",
            confidence=0.8,
        ))

    # Fill-in-the-blank template for parent mail.
    if category == EmailCategory.ELTERN:
        results.append(ResponseSuggestion(
            template_type="parent_response",
            subject=f"Re: {subject}",
            body="""Liebe Eltern,
vielen Dank fuer Ihre Nachricht.
[Ihre Antwort hier]
Mit freundlichen Gruessen""",
            confidence=0.7,
        ))

    # Best-effort AI draft; only the first 500 chars of the body are sent.
    try:
        draft = await _generate_response_llm(http_client, subject, body_text[:500], sender_type)
        if draft:
            results.append(draft)
    except Exception as e:
        logger.warning(f"LLM response generation failed: {e}")

    return results
async def _generate_response_llm(
    client: httpx.AsyncClient,
    subject: str,
    body_preview: str,
    sender_type: SenderType,
) -> Optional[ResponseSuggestion]:
    """Draft a reply via the LLM gateway.

    Returns a ResponseSuggestion with the generated body, or None when the
    gateway call fails, returns a non-200 status, or yields an empty text.
    """
    try:
        # Human-readable sender description for the German prompt.
        sender_labels = {
            SenderType.KULTUSMINISTERIUM: "dem Kultusministerium",
            SenderType.LANDESSCHULBEHOERDE: "der Landesschulbehoerde",
            SenderType.RLSB: "dem RLSB",
            SenderType.ELTERNVERTRETER: "einem Elternvertreter",
        }
        sender_desc = sender_labels.get(sender_type, "einem Absender")

        prompt = f"""Du bist eine Schulleiterin in Niedersachsen. Formuliere eine professionelle, kurze Antwort auf diese E-Mail von {sender_desc}:
Betreff: {subject}
Inhalt: {body_preview}
Die Antwort sollte:
- Hoeflich und formell sein
- Den Eingang bestaetigen
- Eine konkrete naechste Aktion nennen oder um Klaerung bitten
Antworte NUR mit dem Antworttext (ohne Betreffzeile, ohne "Betreff:").
"""
        resp = await client.post(
            f"{LLM_GATEWAY_URL}/api/v1/inference",
            json={"prompt": prompt, "playbook": "mail_analysis", "max_tokens": 300},
        )
        if resp.status_code == 200:
            text = resp.json().get("response", "").strip()
            if text:
                return ResponseSuggestion(
                    template_type="ai_generated",
                    subject=f"Re: {subject}",
                    body=text,
                    confidence=0.6,
                )
    except Exception as e:
        logger.warning(f"LLM response generation failed: {e}")
    return None