"""
|
|
Multiple Choice Generator - Erstellt MC-Fragen aus Quelltexten.
|
|
|
|
Verwendet LLM (Claude/Ollama) zur Generierung von:
|
|
- Multiple-Choice-Fragen mit 4 Antwortmöglichkeiten
|
|
- Unterschiedliche Schwierigkeitsgrade
|
|
- Erklärungen für falsche Antworten
|
|
"""
|
|
|
|
import logging
|
|
import json
|
|
from typing import List, Dict, Any, Optional
|
|
from dataclasses import dataclass
|
|
from enum import Enum
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class Difficulty(str, Enum):
    """Difficulty level of a generated question.

    The str mixin lets members compare equal to, and serialize as, their
    plain string values.
    """

    EASY = "easy"
    MEDIUM = "medium"
    HARD = "hard"
@dataclass
class MCOption:
    """A single answer option of a multiple-choice question."""

    text: str  # answer text shown to the learner
    is_correct: bool  # True if this is the correct option
    explanation: Optional[str] = None  # why this option is right/wrong
@dataclass
class MCQuestion:
    """A multiple-choice question with its answer options."""

    question: str  # the question text
    options: List[MCOption]  # answer options (typically exactly one correct)
    difficulty: Difficulty  # difficulty level of this question
    topic: Optional[str] = None  # optional topic label
    hint: Optional[str] = None  # optional hint shown to the learner
class MultipleChoiceGenerator:
    """Generates multiple-choice questions from source texts.

    Uses an LLM client for intelligent question generation; without a client
    (or when the LLM call fails) it falls back to a deterministic mock
    generator suitable for tests and demos.
    """

    def __init__(self, llm_client=None):
        """Initialize the generator.

        Args:
            llm_client: Optional LLM client used for generation. If omitted,
                a mock generator is used instead.
        """
        self.llm_client = llm_client
        logger.info("MultipleChoiceGenerator initialized")

    def generate(
        self,
        source_text: str,
        num_questions: int = 5,
        difficulty: Difficulty = Difficulty.MEDIUM,
        subject: Optional[str] = None,
        grade_level: Optional[str] = None
    ) -> List[MCQuestion]:
        """Generate multiple-choice questions from a source text.

        Args:
            source_text: Text to generate questions from.
            num_questions: Number of questions to generate.
            difficulty: Difficulty level of the questions.
            subject: Optional subject (e.g. "Biologie", "Geschichte").
            grade_level: Optional grade level (e.g. "7").

        Returns:
            List of MCQuestion objects; empty if the text is too short.
        """
        logger.info(
            "Generating %d MC questions (difficulty: %s)",
            num_questions, difficulty,
        )

        # Fewer than ~50 characters cannot yield meaningful questions.
        if not source_text or len(source_text.strip()) < 50:
            logger.warning("Source text too short for meaningful questions")
            return []

        if self.llm_client:
            return self._generate_with_llm(
                source_text, num_questions, difficulty, subject, grade_level
            )
        return self._generate_mock(source_text, num_questions, difficulty)

    def _generate_with_llm(
        self,
        source_text: str,
        num_questions: int,
        difficulty: Difficulty,
        subject: Optional[str],
        grade_level: Optional[str]
    ) -> List[MCQuestion]:
        """Generate questions via the configured LLM client.

        Any LLM or JSON-parsing failure degrades to the mock generator
        instead of raising (best-effort behaviour).
        """
        difficulty_desc = {
            Difficulty.EASY: "einfach (Faktenwissen)",
            Difficulty.MEDIUM: "mittel (Verständnis)",
            Difficulty.HARD: "schwer (Anwendung und Analyse)"
        }

        # German prompt; the JSON schema described here is exactly what
        # _parse_llm_response expects back from the model.
        prompt = f"""
Erstelle {num_questions} Multiple-Choice-Fragen auf Deutsch basierend auf folgendem Text.
Schwierigkeitsgrad: {difficulty_desc[difficulty]}
{f'Fach: {subject}' if subject else ''}
{f'Klassenstufe: {grade_level}' if grade_level else ''}

Text:
{source_text}

Erstelle für jede Frage:
- Eine klare Frage
- 4 Antwortmöglichkeiten (genau eine richtig)
- Eine kurze Erklärung, warum die richtige Antwort richtig ist
- Einen optionalen Hinweis

Antworte im folgenden JSON-Format:
{{
  "questions": [
    {{
      "question": "Die Frage...",
      "options": [
        {{"text": "Antwort A", "is_correct": false, "explanation": "Warum falsch"}},
        {{"text": "Antwort B", "is_correct": true, "explanation": "Warum richtig"}},
        {{"text": "Antwort C", "is_correct": false, "explanation": "Warum falsch"}},
        {{"text": "Antwort D", "is_correct": false, "explanation": "Warum falsch"}}
      ],
      "topic": "Thema der Frage",
      "hint": "Optionaler Hinweis"
    }}
  ]
}}
"""

        try:
            response = self.llm_client.generate(prompt)
            data = json.loads(response)
            return self._parse_llm_response(data, difficulty)
        except Exception:
            # Broad catch is deliberate: the LLM client may raise anything
            # (network, API, malformed JSON); fall back rather than crash.
            logger.exception("Error generating with LLM")
            return self._generate_mock(source_text, num_questions, difficulty)

    def _parse_llm_response(
        self,
        data: Dict[str, Any],
        difficulty: Difficulty
    ) -> List[MCQuestion]:
        """Convert the parsed LLM JSON payload into MCQuestion objects."""
        questions = []

        for q_data in data.get("questions", []):
            options = [
                MCOption(
                    text=opt["text"],
                    is_correct=opt.get("is_correct", False),
                    explanation=opt.get("explanation")
                )
                for opt in q_data.get("options", [])
            ]

            questions.append(MCQuestion(
                question=q_data.get("question", ""),
                options=options,
                difficulty=difficulty,
                topic=q_data.get("topic"),
                hint=q_data.get("hint")
            ))

        return questions

    def _generate_mock(
        self,
        source_text: str,
        num_questions: int,
        difficulty: Difficulty
    ) -> List[MCQuestion]:
        """Generate placeholder questions for tests/demos (no LLM needed)."""
        logger.info("Using mock generator (no LLM client)")

        # Use the first few longer words of the text as stand-in keywords.
        words = source_text.split()
        keywords = [w for w in words if len(w) > 5][:10]

        questions = []
        # The mock caps the output at 5 questions regardless of the request.
        for i in range(min(num_questions, 5)):
            keyword = keywords[i] if i < len(keywords) else f"Begriff {i+1}"

            question = MCQuestion(
                question=f"Was beschreibt '{keyword}' im Kontext des Textes am besten?",
                options=[
                    MCOption(text=f"Definition A von {keyword}", is_correct=True,
                             explanation="Dies ist die korrekte Definition."),
                    MCOption(text="Falsche Definition B", is_correct=False,
                             explanation="Diese Definition passt nicht."),
                    MCOption(text="Falsche Definition C", is_correct=False,
                             explanation="Diese Definition ist unvollständig."),
                    MCOption(text="Falsche Definition D", is_correct=False,
                             explanation="Diese Definition ist irreführend."),
                ],
                difficulty=difficulty,
                topic="Allgemein",
                hint=f"Denke an die Bedeutung von '{keyword}'."
            )
            questions.append(question)

        return questions

    def to_h5p_format(self, questions: List[MCQuestion]) -> Dict[str, Any]:
        """Convert questions into an H5P Multiple Choice structure.

        Args:
            questions: MCQuestion objects to convert.

        Returns:
            H5P-compatible dict (library "H5P.MultiChoice").
        """
        h5p_questions = []

        for q in questions:
            answers = [
                {
                    "text": opt.text,
                    "correct": opt.is_correct,
                    # NOTE(review): "tpiMessage" looks like a typo of
                    # "tipMessage" — confirm against the H5P consumer before
                    # renaming, since downstream code may depend on this key.
                    "tpiMessage": opt.explanation or ""
                }
                for opt in q.options
            ]

            h5p_questions.append({
                "question": q.question,
                "answers": answers,
                "tip": q.hint or ""
            })

        return {
            "library": "H5P.MultiChoice",
            "params": {
                "questions": h5p_questions,
                "behaviour": {
                    "enableRetry": True,
                    "enableSolutionsButton": True,
                    "singleAnswer": True
                }
            }
        }

    def to_dict(self, questions: List[MCQuestion]) -> List[Dict[str, Any]]:
        """Serialize questions to plain, JSON-ready dictionaries."""
        return [
            {
                "question": q.question,
                "options": [
                    {
                        "text": opt.text,
                        "is_correct": opt.is_correct,
                        "explanation": opt.explanation
                    }
                    for opt in q.options
                ],
                "difficulty": q.difficulty.value,
                "topic": q.topic,
                "hint": q.hint
            }
            for q in questions
        ]