klausur-service (11 files): - cv_gutter_repair, ocr_pipeline_regression, upload_api - ocr_pipeline_sessions, smart_spell, nru_worksheet_generator - ocr_pipeline_overlays, mail/aggregator, zeugnis_api - cv_syllable_detect, self_rag backend-lehrer (17 files): - classroom_engine/suggestions, generators/quiz_generator - worksheets_api, llm_gateway/comparison, state_engine_api - classroom/models (→ 4 submodules), services/file_processor - alerts_agent/api/wizard+digests+routes, content_generators/pdf - classroom/routes/sessions, llm_gateway/inference - classroom_engine/analytics, auth/keycloak_auth - alerts_agent/processing/rule_engine, ai_processor/print_versions agent-core (5 files): - brain/memory_store, brain/knowledge_graph, brain/context_manager - orchestrator/supervisor, sessions/session_manager admin-lehrer (5 components): - GridOverlay, StepGridReview, DevOpsPipelineSidebar - DataFlowDiagram, sbom/wizard/page website (2 files): - DependencyMap, lehrer/abitur-archiv Other: nibis_ingestion, grid_detection_service, export-doclayout-onnx Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
479 lines
14 KiB
Python
479 lines
14 KiB
Python
"""
|
|
Quiz Generator - Erstellt verschiedene Quiz-Typen aus Quelltexten.
|
|
|
|
Generiert:
|
|
- True/False Fragen
|
|
- Zuordnungsaufgaben (Matching)
|
|
- Sortieraufgaben
|
|
- Offene Fragen mit Musterlösungen
|
|
"""
|
|
|
|
import logging
|
|
import json
|
|
from typing import List, Dict, Any, Optional
|
|
|
|
from .quiz_models import (
|
|
QuizType,
|
|
TrueFalseQuestion,
|
|
MatchingPair,
|
|
SortingItem,
|
|
OpenQuestion,
|
|
Quiz,
|
|
)
|
|
from .quiz_helpers import (
|
|
extract_factual_sentences,
|
|
negate_sentence,
|
|
extract_definitions,
|
|
extract_sequence,
|
|
extract_keywords,
|
|
)
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class QuizGenerator:
    """Generates several quiz types (true/false, matching, sorting,
    open-ended) from a source text.

    Uses an optional LLM client for intelligent generation and falls back
    to rule-based extraction when none is configured.
    """

    def __init__(self, llm_client=None):
        """Initialize the generator.

        Args:
            llm_client: Optional LLM client used for intelligent question
                generation; without it, heuristic extraction is used.
        """
        self.llm_client = llm_client
        logger.info("QuizGenerator initialized")
|
|
|
|
def generate(
|
|
self,
|
|
source_text: str,
|
|
quiz_type: QuizType,
|
|
num_questions: int = 5,
|
|
title: Optional[str] = None,
|
|
topic: Optional[str] = None,
|
|
difficulty: str = "medium"
|
|
) -> Quiz:
|
|
"""
|
|
Generiert ein Quiz aus einem Quelltext.
|
|
|
|
Args:
|
|
source_text: Der Ausgangstext
|
|
quiz_type: Art des Quiz
|
|
num_questions: Anzahl der Fragen/Aufgaben
|
|
title: Optionaler Titel
|
|
topic: Optionales Thema
|
|
difficulty: Schwierigkeitsgrad
|
|
|
|
Returns:
|
|
Quiz-Objekt
|
|
"""
|
|
logger.info(f"Generating {quiz_type} quiz with {num_questions} questions")
|
|
|
|
if not source_text or len(source_text.strip()) < 50:
|
|
logger.warning("Source text too short")
|
|
return self._empty_quiz(quiz_type, title or "Quiz")
|
|
|
|
generators = {
|
|
QuizType.TRUE_FALSE: self._generate_true_false,
|
|
QuizType.MATCHING: self._generate_matching,
|
|
QuizType.SORTING: self._generate_sorting,
|
|
QuizType.OPEN_ENDED: self._generate_open_ended,
|
|
}
|
|
|
|
generator = generators.get(quiz_type)
|
|
if not generator:
|
|
raise ValueError(f"Unbekannter Quiz-Typ: {quiz_type}")
|
|
|
|
questions = generator(source_text, num_questions, difficulty)
|
|
|
|
return Quiz(
|
|
quiz_type=quiz_type,
|
|
title=title or f"{quiz_type.value.replace('_', ' ').title()} Quiz",
|
|
questions=questions,
|
|
topic=topic,
|
|
difficulty=difficulty
|
|
)
|
|
|
|
def _generate_true_false(
|
|
self,
|
|
source_text: str,
|
|
num_questions: int,
|
|
difficulty: str
|
|
) -> List[TrueFalseQuestion]:
|
|
"""Generiert Wahr/Falsch-Fragen."""
|
|
if self.llm_client:
|
|
return self._generate_true_false_llm(source_text, num_questions, difficulty)
|
|
|
|
# Automatische Generierung
|
|
sentences = extract_factual_sentences(source_text)
|
|
questions = []
|
|
|
|
for i, sentence in enumerate(sentences[:num_questions]):
|
|
# Abwechselnd wahre und falsche Aussagen
|
|
if i % 2 == 0:
|
|
questions.append(TrueFalseQuestion(
|
|
statement=sentence,
|
|
is_true=True,
|
|
explanation="Diese Aussage entspricht dem Text.",
|
|
source_reference=sentence[:50]
|
|
))
|
|
else:
|
|
false_statement = negate_sentence(sentence)
|
|
questions.append(TrueFalseQuestion(
|
|
statement=false_statement,
|
|
is_true=False,
|
|
explanation=f"Richtig wäre: {sentence}",
|
|
source_reference=sentence[:50]
|
|
))
|
|
|
|
return questions
|
|
|
|
def _generate_true_false_llm(
|
|
self,
|
|
source_text: str,
|
|
num_questions: int,
|
|
difficulty: str
|
|
) -> List[TrueFalseQuestion]:
|
|
"""Generiert Wahr/Falsch-Fragen mit LLM."""
|
|
prompt = f"""
|
|
Erstelle {num_questions} Wahr/Falsch-Aussagen auf Deutsch basierend auf folgendem Text.
|
|
Schwierigkeit: {difficulty}
|
|
Erstelle etwa gleich viele wahre und falsche Aussagen.
|
|
|
|
Text:
|
|
{source_text}
|
|
|
|
Antworte im JSON-Format:
|
|
{{
|
|
"questions": [
|
|
{{
|
|
"statement": "Die Aussage...",
|
|
"is_true": true,
|
|
"explanation": "Erklärung warum wahr/falsch"
|
|
}}
|
|
]
|
|
}}
|
|
"""
|
|
try:
|
|
response = self.llm_client.generate(prompt)
|
|
data = json.loads(response)
|
|
return [
|
|
TrueFalseQuestion(
|
|
statement=q["statement"],
|
|
is_true=q["is_true"],
|
|
explanation=q["explanation"]
|
|
)
|
|
for q in data.get("questions", [])
|
|
]
|
|
except Exception as e:
|
|
logger.error(f"LLM error: {e}")
|
|
return self._generate_true_false(source_text, num_questions, difficulty)
|
|
|
|
def _generate_matching(
|
|
self,
|
|
source_text: str,
|
|
num_pairs: int,
|
|
difficulty: str
|
|
) -> List[MatchingPair]:
|
|
"""Generiert Zuordnungsaufgaben."""
|
|
if self.llm_client:
|
|
return self._generate_matching_llm(source_text, num_pairs, difficulty)
|
|
|
|
pairs = []
|
|
definitions = extract_definitions(source_text)
|
|
|
|
for term, definition in definitions[:num_pairs]:
|
|
pairs.append(MatchingPair(
|
|
left=term,
|
|
right=definition,
|
|
hint=f"Beginnt mit '{definition[0]}'"
|
|
))
|
|
|
|
return pairs
|
|
|
|
def _generate_matching_llm(
|
|
self,
|
|
source_text: str,
|
|
num_pairs: int,
|
|
difficulty: str
|
|
) -> List[MatchingPair]:
|
|
"""Generiert Zuordnungen mit LLM."""
|
|
prompt = f"""
|
|
Erstelle {num_pairs} Zuordnungspaare auf Deutsch basierend auf folgendem Text.
|
|
Jedes Paar besteht aus einem Begriff und seiner Definition/Erklärung.
|
|
Schwierigkeit: {difficulty}
|
|
|
|
Text:
|
|
{source_text}
|
|
|
|
Antworte im JSON-Format:
|
|
{{
|
|
"pairs": [
|
|
{{
|
|
"term": "Begriff",
|
|
"definition": "Definition des Begriffs",
|
|
"hint": "Optionaler Hinweis"
|
|
}}
|
|
]
|
|
}}
|
|
"""
|
|
try:
|
|
response = self.llm_client.generate(prompt)
|
|
data = json.loads(response)
|
|
return [
|
|
MatchingPair(
|
|
left=p["term"],
|
|
right=p["definition"],
|
|
hint=p.get("hint")
|
|
)
|
|
for p in data.get("pairs", [])
|
|
]
|
|
except Exception as e:
|
|
logger.error(f"LLM error: {e}")
|
|
return self._generate_matching(source_text, num_pairs, difficulty)
|
|
|
|
def _generate_sorting(
|
|
self,
|
|
source_text: str,
|
|
num_items: int,
|
|
difficulty: str
|
|
) -> List[SortingItem]:
|
|
"""Generiert Sortieraufgaben."""
|
|
if self.llm_client:
|
|
return self._generate_sorting_llm(source_text, num_items, difficulty)
|
|
|
|
items = []
|
|
steps = extract_sequence(source_text)
|
|
|
|
for i, step in enumerate(steps[:num_items]):
|
|
items.append(SortingItem(
|
|
text=step,
|
|
correct_position=i + 1
|
|
))
|
|
|
|
return items
|
|
|
|
def _generate_sorting_llm(
|
|
self,
|
|
source_text: str,
|
|
num_items: int,
|
|
difficulty: str
|
|
) -> List[SortingItem]:
|
|
"""Generiert Sortierung mit LLM."""
|
|
prompt = f"""
|
|
Erstelle eine Sortieraufgabe auf Deutsch basierend auf folgendem Text.
|
|
Finde {num_items} Elemente, die in eine logische Reihenfolge gebracht werden müssen.
|
|
(z.B. chronologisch, nach Wichtigkeit, nach Größe, etc.)
|
|
Schwierigkeit: {difficulty}
|
|
|
|
Text:
|
|
{source_text}
|
|
|
|
Antworte im JSON-Format:
|
|
{{
|
|
"category": "chronologisch/nach Größe/etc.",
|
|
"items": [
|
|
{{"text": "Erstes Element", "position": 1}},
|
|
{{"text": "Zweites Element", "position": 2}}
|
|
]
|
|
}}
|
|
"""
|
|
try:
|
|
response = self.llm_client.generate(prompt)
|
|
data = json.loads(response)
|
|
category = data.get("category")
|
|
return [
|
|
SortingItem(
|
|
text=item["text"],
|
|
correct_position=item["position"],
|
|
category=category
|
|
)
|
|
for item in data.get("items", [])
|
|
]
|
|
except Exception as e:
|
|
logger.error(f"LLM error: {e}")
|
|
return self._generate_sorting(source_text, num_items, difficulty)
|
|
|
|
def _generate_open_ended(
|
|
self,
|
|
source_text: str,
|
|
num_questions: int,
|
|
difficulty: str
|
|
) -> List[OpenQuestion]:
|
|
"""Generiert offene Fragen."""
|
|
if self.llm_client:
|
|
return self._generate_open_ended_llm(source_text, num_questions, difficulty)
|
|
|
|
questions = []
|
|
sentences = extract_factual_sentences(source_text)
|
|
|
|
question_starters = [
|
|
"Was bedeutet",
|
|
"Erkläre",
|
|
"Warum",
|
|
"Wie funktioniert",
|
|
"Nenne die Hauptmerkmale von"
|
|
]
|
|
|
|
for i, sentence in enumerate(sentences[:num_questions]):
|
|
keywords = extract_keywords(sentence)
|
|
if keywords:
|
|
keyword = keywords[0]
|
|
starter = question_starters[i % len(question_starters)]
|
|
question = f"{starter} '{keyword}'?"
|
|
|
|
questions.append(OpenQuestion(
|
|
question=question,
|
|
model_answer=sentence,
|
|
keywords=keywords,
|
|
points=1
|
|
))
|
|
|
|
return questions
|
|
|
|
def _generate_open_ended_llm(
|
|
self,
|
|
source_text: str,
|
|
num_questions: int,
|
|
difficulty: str
|
|
) -> List[OpenQuestion]:
|
|
"""Generiert offene Fragen mit LLM."""
|
|
prompt = f"""
|
|
Erstelle {num_questions} offene Fragen auf Deutsch basierend auf folgendem Text.
|
|
Jede Frage sollte eine ausführliche Antwort erfordern.
|
|
Schwierigkeit: {difficulty}
|
|
|
|
Text:
|
|
{source_text}
|
|
|
|
Antworte im JSON-Format:
|
|
{{
|
|
"questions": [
|
|
{{
|
|
"question": "Die Frage...",
|
|
"model_answer": "Eine vollständige Musterantwort",
|
|
"keywords": ["Schlüsselwort1", "Schlüsselwort2"],
|
|
"points": 2
|
|
}}
|
|
]
|
|
}}
|
|
"""
|
|
try:
|
|
response = self.llm_client.generate(prompt)
|
|
data = json.loads(response)
|
|
return [
|
|
OpenQuestion(
|
|
question=q["question"],
|
|
model_answer=q["model_answer"],
|
|
keywords=q.get("keywords", []),
|
|
points=q.get("points", 1)
|
|
)
|
|
for q in data.get("questions", [])
|
|
]
|
|
except Exception as e:
|
|
logger.error(f"LLM error: {e}")
|
|
return self._generate_open_ended(source_text, num_questions, difficulty)
|
|
|
|
def _empty_quiz(self, quiz_type: QuizType, title: str) -> Quiz:
|
|
"""Erstellt leeres Quiz bei Fehler."""
|
|
return Quiz(
|
|
quiz_type=quiz_type,
|
|
title=title,
|
|
questions=[],
|
|
difficulty="medium"
|
|
)
|
|
|
|
def to_dict(self, quiz: Quiz) -> Dict[str, Any]:
|
|
"""Konvertiert Quiz zu Dictionary-Format."""
|
|
questions_data = []
|
|
|
|
for q in quiz.questions:
|
|
if isinstance(q, TrueFalseQuestion):
|
|
questions_data.append({
|
|
"type": "true_false",
|
|
"statement": q.statement,
|
|
"is_true": q.is_true,
|
|
"explanation": q.explanation
|
|
})
|
|
elif isinstance(q, MatchingPair):
|
|
questions_data.append({
|
|
"type": "matching",
|
|
"left": q.left,
|
|
"right": q.right,
|
|
"hint": q.hint
|
|
})
|
|
elif isinstance(q, SortingItem):
|
|
questions_data.append({
|
|
"type": "sorting",
|
|
"text": q.text,
|
|
"correct_position": q.correct_position,
|
|
"category": q.category
|
|
})
|
|
elif isinstance(q, OpenQuestion):
|
|
questions_data.append({
|
|
"type": "open_ended",
|
|
"question": q.question,
|
|
"model_answer": q.model_answer,
|
|
"keywords": q.keywords,
|
|
"points": q.points
|
|
})
|
|
|
|
return {
|
|
"quiz_type": quiz.quiz_type.value,
|
|
"title": quiz.title,
|
|
"topic": quiz.topic,
|
|
"difficulty": quiz.difficulty,
|
|
"questions": questions_data
|
|
}
|
|
|
|
def to_h5p_format(self, quiz: Quiz) -> Dict[str, Any]:
|
|
"""Konvertiert Quiz ins H5P-Format."""
|
|
if quiz.quiz_type == QuizType.TRUE_FALSE:
|
|
return self._true_false_to_h5p(quiz)
|
|
elif quiz.quiz_type == QuizType.MATCHING:
|
|
return self._matching_to_h5p(quiz)
|
|
return {}
|
|
|
|
def _true_false_to_h5p(self, quiz: Quiz) -> Dict[str, Any]:
|
|
"""Konvertiert True/False zu H5P."""
|
|
statements = []
|
|
for q in quiz.questions:
|
|
statements.append({
|
|
"text": q.statement,
|
|
"correct": q.is_true,
|
|
"feedback": q.explanation
|
|
})
|
|
|
|
return {
|
|
"library": "H5P.TrueFalse",
|
|
"params": {
|
|
"statements": statements,
|
|
"behaviour": {
|
|
"enableRetry": True,
|
|
"enableSolutionsButton": True
|
|
}
|
|
}
|
|
}
|
|
|
|
def _matching_to_h5p(self, quiz: Quiz) -> Dict[str, Any]:
|
|
"""Konvertiert Matching zu H5P."""
|
|
pairs = []
|
|
for q in quiz.questions:
|
|
pairs.append({
|
|
"question": q.left,
|
|
"answer": q.right
|
|
})
|
|
|
|
return {
|
|
"library": "H5P.DragText",
|
|
"params": {
|
|
"pairs": pairs,
|
|
"behaviour": {
|
|
"enableRetry": True,
|
|
"enableSolutionsButton": True
|
|
}
|
|
}
|
|
}
|