klausur-service (11 files): - cv_gutter_repair, ocr_pipeline_regression, upload_api - ocr_pipeline_sessions, smart_spell, nru_worksheet_generator - ocr_pipeline_overlays, mail/aggregator, zeugnis_api - cv_syllable_detect, self_rag backend-lehrer (17 files): - classroom_engine/suggestions, generators/quiz_generator - worksheets_api, llm_gateway/comparison, state_engine_api - classroom/models (→ 4 submodules), services/file_processor - alerts_agent/api/wizard+digests+routes, content_generators/pdf - classroom/routes/sessions, llm_gateway/inference - classroom_engine/analytics, auth/keycloak_auth - alerts_agent/processing/rule_engine, ai_processor/print_versions agent-core (5 files): - brain/memory_store, brain/knowledge_graph, brain/context_manager - orchestrator/supervisor, sessions/session_manager admin-lehrer (5 components): - GridOverlay, StepGridReview, DevOpsPipelineSidebar - DataFlowDiagram, sbom/wizard/page website (2 files): - DependencyMap, lehrer/abitur-archiv Other: nibis_ingestion, grid_detection_service, export-doclayout-onnx Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
403 lines
12 KiB
Python
403 lines
12 KiB
Python
"""
|
|
API Routes fuer Alerts Agent.
|
|
|
|
Endpoints:
|
|
- POST /alerts/ingest - Manuell Alerts importieren
|
|
- POST /alerts/run - Scoring Pipeline starten
|
|
- GET /alerts/inbox - Inbox Items abrufen
|
|
- POST /alerts/feedback - Relevanz-Feedback geben
|
|
- GET /alerts/profile - User Relevance Profile
|
|
- PUT /alerts/profile - Profile aktualisieren
|
|
"""
|
|
|
|
import os
|
|
from datetime import datetime
|
|
from typing import Optional
|
|
from fastapi import APIRouter, HTTPException, Query
|
|
|
|
from ..models.alert_item import AlertItem, AlertStatus
|
|
from ..models.relevance_profile import RelevanceProfile, PriorityItem
|
|
from ..processing.relevance_scorer import RelevanceDecision, RelevanceScorer
|
|
from .schemas import (
|
|
AlertIngestRequest, AlertIngestResponse,
|
|
AlertRunRequest, AlertRunResponse,
|
|
InboxItem, InboxResponse,
|
|
FeedbackRequest, FeedbackResponse,
|
|
ProfilePriorityRequest, ProfileUpdateRequest, ProfileResponse,
|
|
)
|
|
|
|
|
|
# Router for all alert endpoints, mounted under the /alerts prefix.
router = APIRouter(prefix="/alerts", tags=["alerts"])

# LLM scorer configuration from environment variables.
LLM_GATEWAY_URL = os.getenv("LLM_GATEWAY_URL", "http://localhost:8000/llm")
# LLM_API_KEYS is a comma-separated list; only the first key is used here.
LLM_API_KEY = os.getenv("LLM_API_KEYS", "").split(",")[0] if os.getenv("LLM_API_KEYS") else ""
# Feature flag: when "true", /alerts/run scores via the LLM gateway
# (also requires a non-empty LLM_API_KEY); otherwise keyword scoring is used.
ALERTS_USE_LLM = os.getenv("ALERTS_USE_LLM", "false").lower() == "true"


# ============================================================================
# In-memory storage (to be replaced by a database later)
# ============================================================================

# Alerts keyed by alert id.
_alerts_store: dict[str, AlertItem] = {}
# Relevance profiles keyed by profile id ("default" when no user_id is given).
_profile_store: dict[str, RelevanceProfile] = {}


# ============================================================================
# Endpoints
# ============================================================================
|
|
|
|
@router.post("/ingest", response_model=AlertIngestResponse)
|
|
async def ingest_alert(request: AlertIngestRequest):
|
|
"""
|
|
Manuell einen Alert importieren.
|
|
|
|
Nuetzlich fuer Tests oder manuelles Hinzufuegen von Artikeln.
|
|
"""
|
|
alert = AlertItem(
|
|
title=request.title,
|
|
url=request.url,
|
|
snippet=request.snippet or "",
|
|
topic_label=request.topic_label,
|
|
published_at=request.published_at,
|
|
)
|
|
|
|
_alerts_store[alert.id] = alert
|
|
|
|
return AlertIngestResponse(
|
|
id=alert.id,
|
|
status="created",
|
|
message=f"Alert '{alert.title[:50]}...' importiert"
|
|
)
|
|
|
|
|
|
@router.post("/run", response_model=AlertRunResponse)
|
|
async def run_scoring_pipeline(request: AlertRunRequest):
|
|
"""
|
|
Scoring-Pipeline fuer neue Alerts starten.
|
|
|
|
Bewertet alle unbewerteten Alerts und klassifiziert sie
|
|
in KEEP, DROP oder REVIEW.
|
|
|
|
Wenn ALERTS_USE_LLM=true, wird das LLM Gateway fuer Scoring verwendet.
|
|
Sonst wird ein schnelles Keyword-basiertes Scoring durchgefuehrt.
|
|
"""
|
|
import time
|
|
start = time.time()
|
|
|
|
# Alle unbewerteten Alerts holen
|
|
alerts_to_score = [
|
|
a for a in _alerts_store.values()
|
|
if a.status == AlertStatus.NEW or (not request.skip_scored and a.status == AlertStatus.SCORED)
|
|
][:request.limit]
|
|
|
|
if not alerts_to_score:
|
|
return AlertRunResponse(
|
|
processed=0, keep=0, drop=0, review=0, errors=0,
|
|
duration_ms=int((time.time() - start) * 1000)
|
|
)
|
|
|
|
keep = drop = review = errors = 0
|
|
|
|
# Profil fuer Scoring laden
|
|
profile = _profile_store.get("default")
|
|
if not profile:
|
|
profile = RelevanceProfile.create_default_education_profile()
|
|
profile.id = "default"
|
|
_profile_store["default"] = profile
|
|
|
|
if ALERTS_USE_LLM and LLM_API_KEY:
|
|
# LLM-basiertes Scoring ueber Gateway
|
|
scorer = RelevanceScorer(
|
|
gateway_url=LLM_GATEWAY_URL,
|
|
api_key=LLM_API_KEY,
|
|
model="breakpilot-teacher-8b",
|
|
)
|
|
try:
|
|
results = await scorer.score_batch(alerts_to_score, profile=profile)
|
|
for result in results:
|
|
if result.error:
|
|
errors += 1
|
|
elif result.decision == RelevanceDecision.KEEP:
|
|
keep += 1
|
|
elif result.decision == RelevanceDecision.DROP:
|
|
drop += 1
|
|
else:
|
|
review += 1
|
|
finally:
|
|
await scorer.close()
|
|
else:
|
|
# Fallback: Keyword-basiertes Scoring (schnell, ohne LLM)
|
|
for alert in alerts_to_score:
|
|
title_lower = alert.title.lower()
|
|
snippet_lower = (alert.snippet or "").lower()
|
|
combined = title_lower + " " + snippet_lower
|
|
|
|
# Ausschluesse aus Profil pruefen
|
|
if any(excl.lower() in combined for excl in profile.exclusions):
|
|
alert.relevance_score = 0.15
|
|
alert.relevance_decision = RelevanceDecision.DROP.value
|
|
drop += 1
|
|
# Prioritaeten aus Profil pruefen
|
|
elif any(
|
|
p.label.lower() in combined or
|
|
any(kw.lower() in combined for kw in (p.keywords if hasattr(p, 'keywords') else []))
|
|
for p in profile.priorities
|
|
):
|
|
alert.relevance_score = 0.85
|
|
alert.relevance_decision = RelevanceDecision.KEEP.value
|
|
keep += 1
|
|
else:
|
|
alert.relevance_score = 0.55
|
|
alert.relevance_decision = RelevanceDecision.REVIEW.value
|
|
review += 1
|
|
|
|
alert.status = AlertStatus.SCORED
|
|
|
|
duration_ms = int((time.time() - start) * 1000)
|
|
|
|
return AlertRunResponse(
|
|
processed=len(alerts_to_score),
|
|
keep=keep,
|
|
drop=drop,
|
|
review=review,
|
|
errors=errors,
|
|
duration_ms=duration_ms,
|
|
)
|
|
|
|
|
|
@router.get("/inbox", response_model=InboxResponse)
|
|
async def get_inbox(
|
|
decision: Optional[str] = Query(default=None, description="Filter: KEEP, DROP, REVIEW"),
|
|
page: int = Query(default=1, ge=1),
|
|
page_size: int = Query(default=20, ge=1, le=100),
|
|
):
|
|
"""
|
|
Inbox Items abrufen.
|
|
|
|
Filtert nach Relevanz-Entscheidung. Standard zeigt KEEP und REVIEW.
|
|
"""
|
|
# Filter Alerts
|
|
alerts = list(_alerts_store.values())
|
|
|
|
if decision:
|
|
alerts = [a for a in alerts if a.relevance_decision == decision.upper()]
|
|
else:
|
|
# Standard: KEEP und REVIEW zeigen
|
|
alerts = [a for a in alerts if a.relevance_decision in ["KEEP", "REVIEW"]]
|
|
|
|
# Sortieren nach Score (absteigend)
|
|
alerts.sort(key=lambda a: a.relevance_score or 0, reverse=True)
|
|
|
|
# Pagination
|
|
total = len(alerts)
|
|
start_idx = (page - 1) * page_size
|
|
end_idx = start_idx + page_size
|
|
page_alerts = alerts[start_idx:end_idx]
|
|
|
|
items = [
|
|
InboxItem(
|
|
id=a.id,
|
|
title=a.title,
|
|
url=a.url,
|
|
snippet=a.snippet,
|
|
topic_label=a.topic_label,
|
|
published_at=a.published_at,
|
|
relevance_score=a.relevance_score,
|
|
relevance_decision=a.relevance_decision,
|
|
relevance_summary=a.relevance_summary,
|
|
status=a.status.value,
|
|
)
|
|
for a in page_alerts
|
|
]
|
|
|
|
return InboxResponse(
|
|
items=items,
|
|
total=total,
|
|
page=page,
|
|
page_size=page_size,
|
|
)
|
|
|
|
|
|
@router.post("/feedback", response_model=FeedbackResponse)
|
|
async def submit_feedback(request: FeedbackRequest):
|
|
"""
|
|
Feedback zu einem Alert geben.
|
|
|
|
Das Feedback wird verwendet um das Relevanzprofil zu verbessern.
|
|
"""
|
|
alert = _alerts_store.get(request.alert_id)
|
|
if not alert:
|
|
raise HTTPException(status_code=404, detail="Alert nicht gefunden")
|
|
|
|
# Alert Status aktualisieren
|
|
alert.status = AlertStatus.REVIEWED
|
|
|
|
# Profile aktualisieren (Default-Profile fuer Demo)
|
|
profile = _profile_store.get("default")
|
|
if not profile:
|
|
profile = RelevanceProfile.create_default_education_profile()
|
|
profile.id = "default"
|
|
_profile_store["default"] = profile
|
|
|
|
profile.update_from_feedback(
|
|
alert_title=alert.title,
|
|
alert_url=alert.url,
|
|
is_relevant=request.is_relevant,
|
|
reason=request.reason or "",
|
|
)
|
|
|
|
return FeedbackResponse(
|
|
success=True,
|
|
message="Feedback gespeichert",
|
|
profile_updated=True,
|
|
)
|
|
|
|
|
|
@router.get("/profile", response_model=ProfileResponse)
|
|
async def get_profile(user_id: Optional[str] = Query(default=None)):
|
|
"""
|
|
Relevanz-Profil abrufen.
|
|
|
|
Ohne user_id wird das Default-Profil zurueckgegeben.
|
|
"""
|
|
profile_id = user_id or "default"
|
|
profile = _profile_store.get(profile_id)
|
|
|
|
if not profile:
|
|
# Default-Profile erstellen
|
|
profile = RelevanceProfile.create_default_education_profile()
|
|
profile.id = profile_id
|
|
_profile_store[profile_id] = profile
|
|
|
|
return ProfileResponse(
|
|
id=profile.id,
|
|
priorities=[p.to_dict() if isinstance(p, PriorityItem) else p
|
|
for p in profile.priorities],
|
|
exclusions=profile.exclusions,
|
|
policies=profile.policies,
|
|
total_scored=profile.total_scored,
|
|
total_kept=profile.total_kept,
|
|
total_dropped=profile.total_dropped,
|
|
accuracy_estimate=profile.accuracy_estimate,
|
|
)
|
|
|
|
|
|
@router.put("/profile", response_model=ProfileResponse)
|
|
async def update_profile(
|
|
request: ProfileUpdateRequest,
|
|
user_id: Optional[str] = Query(default=None),
|
|
):
|
|
"""
|
|
Relevanz-Profil aktualisieren.
|
|
|
|
Erlaubt Anpassung von Prioritaeten, Ausschluessen und Policies.
|
|
"""
|
|
profile_id = user_id or "default"
|
|
profile = _profile_store.get(profile_id)
|
|
|
|
if not profile:
|
|
profile = RelevanceProfile()
|
|
profile.id = profile_id
|
|
|
|
# Updates anwenden
|
|
if request.priorities is not None:
|
|
profile.priorities = [
|
|
PriorityItem(
|
|
label=p.label,
|
|
weight=p.weight,
|
|
keywords=p.keywords,
|
|
description=p.description,
|
|
)
|
|
for p in request.priorities
|
|
]
|
|
|
|
if request.exclusions is not None:
|
|
profile.exclusions = request.exclusions
|
|
|
|
if request.policies is not None:
|
|
profile.policies = request.policies
|
|
|
|
profile.updated_at = datetime.utcnow()
|
|
_profile_store[profile_id] = profile
|
|
|
|
return ProfileResponse(
|
|
id=profile.id,
|
|
priorities=[p.to_dict() if isinstance(p, PriorityItem) else p
|
|
for p in profile.priorities],
|
|
exclusions=profile.exclusions,
|
|
policies=profile.policies,
|
|
total_scored=profile.total_scored,
|
|
total_kept=profile.total_kept,
|
|
total_dropped=profile.total_dropped,
|
|
accuracy_estimate=profile.accuracy_estimate,
|
|
)
|
|
|
|
|
|
@router.get("/stats")
|
|
async def get_stats():
|
|
"""
|
|
Statistiken ueber Alerts und Scoring.
|
|
"""
|
|
alerts = list(_alerts_store.values())
|
|
total = len(alerts)
|
|
|
|
new_alerts = sum(1 for a in alerts if a.status == AlertStatus.NEW)
|
|
kept_alerts = sum(1 for a in alerts if a.relevance_decision == "KEEP")
|
|
review_alerts = sum(1 for a in alerts if a.relevance_decision == "REVIEW")
|
|
dropped_alerts = sum(1 for a in alerts if a.relevance_decision == "DROP")
|
|
|
|
total_topics = 0
|
|
active_topics = 0
|
|
total_rules = 0
|
|
|
|
try:
|
|
from alerts_agent.db import get_db
|
|
from alerts_agent.db.repository import TopicRepository, RuleRepository
|
|
|
|
db_gen = get_db()
|
|
db = next(db_gen, None)
|
|
if db:
|
|
try:
|
|
topic_repo = TopicRepository(db)
|
|
rule_repo = RuleRepository(db)
|
|
|
|
all_topics = topic_repo.get_all()
|
|
total_topics = len(all_topics)
|
|
active_topics = len([t for t in all_topics if t.is_active])
|
|
|
|
all_rules = rule_repo.get_all()
|
|
total_rules = len(all_rules)
|
|
finally:
|
|
try:
|
|
next(db_gen, None)
|
|
except StopIteration:
|
|
pass
|
|
except Exception:
|
|
pass
|
|
|
|
scored_alerts = [a for a in alerts if a.relevance_score is not None]
|
|
avg_score = sum(a.relevance_score for a in scored_alerts) / len(scored_alerts) if scored_alerts else 0.0
|
|
|
|
return {
|
|
"total_alerts": total,
|
|
"new_alerts": new_alerts,
|
|
"kept_alerts": kept_alerts,
|
|
"review_alerts": review_alerts,
|
|
"dropped_alerts": dropped_alerts,
|
|
"total_topics": total_topics,
|
|
"active_topics": active_topics,
|
|
"total_rules": total_rules,
|
|
"avg_score": avg_score,
|
|
"by_status": {
|
|
"new": new_alerts,
|
|
"scored": sum(1 for a in alerts if a.status == AlertStatus.SCORED),
|
|
"reviewed": sum(1 for a in alerts if a.status == AlertStatus.REVIEWED),
|
|
},
|
|
"by_decision": {
|
|
"KEEP": kept_alerts,
|
|
"REVIEW": review_alerts,
|
|
"DROP": dropped_alerts,
|
|
},
|
|
}
|