This repository was archived on 2026-02-15. You can view files and clone it, but you cannot open issues, create pull requests, or push commits.
Files
breakpilot-pwa/backend/alerts_agent/api/routes.py
Benjamin Admin 21a844cb8a fix: Restore all files lost during destructive rebase
A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.

This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).

Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-09 09:51:32 +01:00

511 lines
16 KiB
Python

"""
API Routes für Alerts Agent.
Endpoints:
- POST /alerts/ingest - Manuell Alerts importieren
- POST /alerts/run - Scoring Pipeline starten
- GET /alerts/inbox - Inbox Items abrufen
- POST /alerts/feedback - Relevanz-Feedback geben
- GET /alerts/profile - User Relevance Profile
- PUT /alerts/profile - Profile aktualisieren
"""
import os
from datetime import datetime
from typing import Optional
from fastapi import APIRouter, Depends, HTTPException, Query
from pydantic import BaseModel, Field
from ..models.alert_item import AlertItem, AlertStatus
from ..models.relevance_profile import RelevanceProfile, PriorityItem
from ..processing.relevance_scorer import RelevanceDecision, RelevanceScorer
router = APIRouter(prefix="/alerts", tags=["alerts"])

# LLM scorer configuration, read from environment variables.
LLM_GATEWAY_URL = os.getenv("LLM_GATEWAY_URL", "http://localhost:8000/llm")
# First key of the comma-separated LLM_API_KEYS list; empty string when unset.
LLM_API_KEY = os.getenv("LLM_API_KEYS", "").split(",")[0] if os.getenv("LLM_API_KEYS") else ""
# Feature flag: when true (and a key is present) scoring goes through the LLM gateway.
ALERTS_USE_LLM = os.getenv("ALERTS_USE_LLM", "false").lower() == "true"

# ============================================================================
# In-memory storage (to be replaced by a database later)
# ============================================================================
_alerts_store: dict[str, AlertItem] = {}
_profile_store: dict[str, RelevanceProfile] = {}

# ============================================================================
# Request/Response Models
# ============================================================================
class AlertIngestRequest(BaseModel):
    """Request body for manually importing a single alert."""
    title: str = Field(..., min_length=1, max_length=500)
    url: str = Field(..., min_length=1)
    snippet: Optional[str] = Field(default=None, max_length=2000)
    topic_label: str = Field(default="Manual Import")
    published_at: Optional[datetime] = None
class AlertIngestResponse(BaseModel):
    """Response for an alert import."""
    id: str
    status: str
    message: str
class AlertRunRequest(BaseModel):
    """Request body for starting the scoring pipeline."""
    limit: int = Field(default=50, ge=1, le=200)  # max alerts scored per run
    skip_scored: bool = Field(default=True)  # when False, re-score SCORED alerts too
class AlertRunResponse(BaseModel):
    """Summary of one scoring-pipeline run."""
    processed: int
    keep: int
    drop: int
    review: int
    errors: int
    duration_ms: int
class InboxItem(BaseModel):
    """A single item in the alerts inbox."""
    id: str
    title: str
    url: str
    snippet: Optional[str]
    topic_label: str
    published_at: Optional[datetime]
    relevance_score: Optional[float]
    relevance_decision: Optional[str]  # "KEEP" | "DROP" | "REVIEW", None if unscored
    relevance_summary: Optional[str]
    status: str
class InboxResponse(BaseModel):
    """Paginated inbox query result."""
    items: list[InboxItem]
    total: int
    page: int
    page_size: int
class FeedbackRequest(BaseModel):
    """Request body for relevance feedback on an alert."""
    alert_id: str
    is_relevant: bool
    reason: Optional[str] = None
    tags: list[str] = Field(default_factory=list)
class FeedbackResponse(BaseModel):
    """Response after storing feedback."""
    success: bool
    message: str
    profile_updated: bool
class ProfilePriorityRequest(BaseModel):
    """One priority entry used in a profile update."""
    label: str
    weight: float = Field(default=0.5, ge=0.0, le=1.0)
    keywords: list[str] = Field(default_factory=list)
    description: Optional[str] = None
class ProfileUpdateRequest(BaseModel):
    """Request body for a partial profile update; fields left as None are kept unchanged."""
    priorities: Optional[list[ProfilePriorityRequest]] = None
    exclusions: Optional[list[str]] = None
    policies: Optional[dict] = None
class ProfileResponse(BaseModel):
    """Serialized relevance profile including scoring statistics."""
    id: str
    priorities: list[dict]
    exclusions: list[str]
    policies: dict
    total_scored: int
    total_kept: int
    total_dropped: int
    accuracy_estimate: Optional[float]
# ============================================================================
# Endpoints
# ============================================================================
@router.post("/ingest", response_model=AlertIngestResponse)
async def ingest_alert(request: AlertIngestRequest):
    """
    Manually import a single alert.

    Useful for tests or for adding articles by hand.

    Args:
        request: Title, URL and optional metadata of the alert.

    Returns:
        AlertIngestResponse containing the id of the newly stored alert.
    """
    alert = AlertItem(
        title=request.title,
        url=request.url,
        snippet=request.snippet or "",
        topic_label=request.topic_label,
        published_at=request.published_at,
    )
    _alerts_store[alert.id] = alert
    # Bug fix: only append an ellipsis when the title was actually truncated;
    # the old code appended "..." unconditionally, even for short titles.
    shown_title = alert.title if len(alert.title) <= 50 else alert.title[:50] + "..."
    return AlertIngestResponse(
        id=alert.id,
        status="created",
        message=f"Alert '{shown_title}' importiert",
    )
@router.post("/run", response_model=AlertRunResponse)
async def run_scoring_pipeline(request: AlertRunRequest):
    """
    Start the scoring pipeline for new alerts.

    Scores all unscored alerts and classifies each as KEEP, DROP or REVIEW.

    If ALERTS_USE_LLM=true (and an API key is configured) the LLM gateway
    is used for scoring; otherwise a fast keyword-based fallback runs.

    Returns:
        AlertRunResponse with per-decision counts and the elapsed time in ms.
    """
    import time
    start = time.time()
    # Collect alerts that still need scoring; optionally include already
    # SCORED items when skip_scored is False. Capped at request.limit.
    alerts_to_score = [
        a for a in _alerts_store.values()
        if a.status == AlertStatus.NEW or (not request.skip_scored and a.status == AlertStatus.SCORED)
    ][:request.limit]
    if not alerts_to_score:
        return AlertRunResponse(
            processed=0, keep=0, drop=0, review=0, errors=0,
            duration_ms=int((time.time() - start) * 1000)
        )
    keep = drop = review = errors = 0
    # Load (or lazily create) the default profile used for scoring.
    profile = _profile_store.get("default")
    if not profile:
        profile = RelevanceProfile.create_default_education_profile()
        profile.id = "default"
        _profile_store["default"] = profile
    if ALERTS_USE_LLM and LLM_API_KEY:
        # LLM-based scoring via the gateway.
        scorer = RelevanceScorer(
            gateway_url=LLM_GATEWAY_URL,
            api_key=LLM_API_KEY,
            model="breakpilot-teacher-8b",
        )
        try:
            results = await scorer.score_batch(alerts_to_score, profile=profile)
            for result in results:
                if result.error:
                    errors += 1
                elif result.decision == RelevanceDecision.KEEP:
                    keep += 1
                elif result.decision == RelevanceDecision.DROP:
                    drop += 1
                else:
                    review += 1
        finally:
            # Always release the scorer's resources, even on failure.
            await scorer.close()
        # NOTE(review): unlike the keyword path below, this branch never sets
        # alert.status = AlertStatus.SCORED — presumably score_batch mutates
        # the items itself; confirm in RelevanceScorer, otherwise these alerts
        # are re-scored on every run.
    else:
        # Fallback: keyword-based scoring (fast, no LLM call).
        for alert in alerts_to_score:
            title_lower = alert.title.lower()
            snippet_lower = (alert.snippet or "").lower()
            combined = title_lower + " " + snippet_lower
            # Exclusions from the profile take precedence over priorities.
            if any(excl.lower() in combined for excl in profile.exclusions):
                alert.relevance_score = 0.15
                alert.relevance_decision = RelevanceDecision.DROP.value
                drop += 1
            # Match either a priority label or any of its keywords.
            # hasattr guard: tolerate priority entries without a keywords attribute.
            elif any(
                p.label.lower() in combined or
                any(kw.lower() in combined for kw in (p.keywords if hasattr(p, 'keywords') else []))
                for p in profile.priorities
            ):
                alert.relevance_score = 0.85
                alert.relevance_decision = RelevanceDecision.KEEP.value
                keep += 1
            else:
                # Neither excluded nor prioritized: needs human review.
                alert.relevance_score = 0.55
                alert.relevance_decision = RelevanceDecision.REVIEW.value
                review += 1
            alert.status = AlertStatus.SCORED
    duration_ms = int((time.time() - start) * 1000)
    return AlertRunResponse(
        processed=len(alerts_to_score),
        keep=keep,
        drop=drop,
        review=review,
        errors=errors,
        duration_ms=duration_ms,
    )
@router.get("/inbox", response_model=InboxResponse)
async def get_inbox(
    decision: Optional[str] = Query(default=None, description="Filter: KEEP, DROP, REVIEW"),
    page: int = Query(default=1, ge=1),
    page_size: int = Query(default=20, ge=1, le=100),
):
    """
    Fetch inbox items filtered by relevance decision.

    Without an explicit filter only KEEP and REVIEW items are returned.
    Results are ordered by relevance score (descending) and paginated.
    """
    # Build the set of accepted decisions; the default view hides DROP
    # and unscored items.
    wanted = {decision.upper()} if decision else {"KEEP", "REVIEW"}
    matching = [a for a in _alerts_store.values() if a.relevance_decision in wanted]
    # Highest-scoring alerts first; unscored entries sort as 0.
    matching.sort(key=lambda entry: entry.relevance_score or 0, reverse=True)
    total = len(matching)
    offset = (page - 1) * page_size
    page_items = [
        InboxItem(
            id=entry.id,
            title=entry.title,
            url=entry.url,
            snippet=entry.snippet,
            topic_label=entry.topic_label,
            published_at=entry.published_at,
            relevance_score=entry.relevance_score,
            relevance_decision=entry.relevance_decision,
            relevance_summary=entry.relevance_summary,
            status=entry.status.value,
        )
        for entry in matching[offset:offset + page_size]
    ]
    return InboxResponse(
        items=page_items,
        total=total,
        page=page,
        page_size=page_size,
    )
@router.post("/feedback", response_model=FeedbackResponse)
async def submit_feedback(request: FeedbackRequest):
    """
    Record relevance feedback for a single alert.

    The feedback is folded into the default relevance profile so future
    scoring runs improve.

    Raises:
        HTTPException: 404 when the alert id is unknown.
    """
    target = _alerts_store.get(request.alert_id)
    if not target:
        raise HTTPException(status_code=404, detail="Alert nicht gefunden")
    # A human has looked at this alert now.
    target.status = AlertStatus.REVIEWED
    # Lazily create the default profile on first use (demo setup).
    active_profile = _profile_store.get("default")
    if not active_profile:
        active_profile = RelevanceProfile.create_default_education_profile()
        active_profile.id = "default"
        _profile_store["default"] = active_profile
    active_profile.update_from_feedback(
        alert_title=target.title,
        alert_url=target.url,
        is_relevant=request.is_relevant,
        reason=request.reason or "",
    )
    return FeedbackResponse(
        success=True,
        message="Feedback gespeichert",
        profile_updated=True,
    )
@router.get("/profile", response_model=ProfileResponse)
async def get_profile(user_id: Optional[str] = Query(default=None)):
    """
    Return the relevance profile for a user.

    Falls back to (and lazily creates) the default profile when no
    user_id is supplied or no stored profile exists yet.
    """
    key = user_id or "default"
    prof = _profile_store.get(key)
    if not prof:
        # First access: seed the store with the default education profile.
        prof = RelevanceProfile.create_default_education_profile()
        prof.id = key
        _profile_store[key] = prof
    # Priorities may be PriorityItem objects or plain dicts; serialize both.
    serialized_priorities = [
        entry.to_dict() if isinstance(entry, PriorityItem) else entry
        for entry in prof.priorities
    ]
    return ProfileResponse(
        id=prof.id,
        priorities=serialized_priorities,
        exclusions=prof.exclusions,
        policies=prof.policies,
        total_scored=prof.total_scored,
        total_kept=prof.total_kept,
        total_dropped=prof.total_dropped,
        accuracy_estimate=prof.accuracy_estimate,
    )
@router.put("/profile", response_model=ProfileResponse)
async def update_profile(
    request: ProfileUpdateRequest,
    user_id: Optional[str] = Query(default=None),
):
    """
    Update a relevance profile.

    Supports partial updates of priorities, exclusions and policies:
    fields left as None in the request remain untouched. An empty
    profile is created when none exists for the given user yet.
    """
    key = user_id or "default"
    prof = _profile_store.get(key)
    if not prof:
        # No stored profile: start from an empty one (PUT semantics).
        prof = RelevanceProfile()
        prof.id = key
    # Apply the partial update — None means "leave as is".
    if request.priorities is not None:
        prof.priorities = [
            PriorityItem(
                label=item.label,
                weight=item.weight,
                keywords=item.keywords,
                description=item.description,
            )
            for item in request.priorities
        ]
    if request.exclusions is not None:
        prof.exclusions = request.exclusions
    if request.policies is not None:
        prof.policies = request.policies
    # NOTE(review): datetime.utcnow() returns a naive datetime and is
    # deprecated since Python 3.12; consider datetime.now(timezone.utc)
    # once callers are confirmed to tolerate aware timestamps.
    prof.updated_at = datetime.utcnow()
    _profile_store[key] = prof
    return ProfileResponse(
        id=prof.id,
        priorities=[entry.to_dict() if isinstance(entry, PriorityItem) else entry
                    for entry in prof.priorities],
        exclusions=prof.exclusions,
        policies=prof.policies,
        total_scored=prof.total_scored,
        total_kept=prof.total_kept,
        total_dropped=prof.total_dropped,
        accuracy_estimate=prof.accuracy_estimate,
    )
@router.get("/stats")
async def get_stats():
    """
    Aggregate statistics over alerts and scoring.

    Returns the shape the frontend expects:
    - total_alerts, new_alerts, kept_alerts, review_alerts, dropped_alerts
    - total_topics, active_topics, total_rules
    plus avg_score and by_status / by_decision breakdowns kept for
    backward compatibility.
    """
    alerts = list(_alerts_store.values())
    total = len(alerts)
    # Counts by lifecycle status and by scoring decision.
    new_alerts = sum(1 for a in alerts if a.status == AlertStatus.NEW)
    kept_alerts = sum(1 for a in alerts if a.relevance_decision == "KEEP")
    review_alerts = sum(1 for a in alerts if a.relevance_decision == "REVIEW")
    dropped_alerts = sum(1 for a in alerts if a.relevance_decision == "DROP")
    # Topic/rule counts default to 0 for the in-memory store; the DB-backed
    # repositories fill them in when a database is available.
    total_topics = 0
    active_topics = 0
    total_rules = 0
    try:
        # Imported lazily so the endpoint works without the DB package.
        # (Fix: dropped the unused `from contextlib import contextmanager`.)
        from alerts_agent.db import get_db
        from alerts_agent.db.repository import TopicRepository, RuleRepository
        # get_db is a generator-style dependency: the first next() yields
        # the session, resuming it afterwards runs its cleanup code.
        db_gen = get_db()
        db = next(db_gen, None)
        if db:
            try:
                topic_repo = TopicRepository(db)
                rule_repo = RuleRepository(db)
                all_topics = topic_repo.get_all()
                total_topics = len(all_topics)
                active_topics = len([t for t in all_topics if t.is_active])
                all_rules = rule_repo.get_all()
                total_rules = len(all_rules)
            finally:
                # Resume the generator so its teardown (session close) runs.
                # The default argument suppresses StopIteration, so the old
                # try/except StopIteration wrapper was dead code — removed.
                next(db_gen, None)
    except Exception:
        # DB unavailable — keep the in-memory defaults.
        pass
    # Mean score over alerts that have been scored at all.
    scored_alerts = [a for a in alerts if a.relevance_score is not None]
    avg_score = sum(a.relevance_score for a in scored_alerts) / len(scored_alerts) if scored_alerts else 0.0
    return {
        # Frontend-compatible format
        "total_alerts": total,
        "new_alerts": new_alerts,
        "kept_alerts": kept_alerts,
        "review_alerts": review_alerts,
        "dropped_alerts": dropped_alerts,
        "total_topics": total_topics,
        "active_topics": active_topics,
        "total_rules": total_rules,
        "avg_score": avg_score,
        # Additional details (backward compatibility)
        "by_status": {
            "new": new_alerts,
            "scored": sum(1 for a in alerts if a.status == AlertStatus.SCORED),
            "reviewed": sum(1 for a in alerts if a.status == AlertStatus.REVIEWED),
        },
        "by_decision": {
            "KEEP": kept_alerts,
            "REVIEW": review_alerts,
            "DROP": dropped_alerts,
        },
    }