fix: Restore all files lost during destructive rebase
A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.
This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).
Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
14
backend/generators/__init__.py
Normal file
14
backend/generators/__init__.py
Normal file
@@ -0,0 +1,14 @@
|
||||
# Worksheet Generators Module
# AI-powered generators for educational content

from .mc_generator import MultipleChoiceGenerator
from .cloze_generator import ClozeGenerator
from .mindmap_generator import MindmapGenerator
from .quiz_generator import QuizGenerator

# Public API of the package: the four content generators.
__all__ = [
    "MultipleChoiceGenerator",
    "ClozeGenerator",
    "MindmapGenerator",
    "QuizGenerator"
]
|
||||
380
backend/generators/cloze_generator.py
Normal file
380
backend/generators/cloze_generator.py
Normal file
@@ -0,0 +1,380 @@
|
||||
"""
|
||||
Cloze Generator - Erstellt Lückentexte aus Quelltexten.
|
||||
|
||||
Generiert:
|
||||
- Lückentexte mit ausgeblendeten Schlüsselwörtern
|
||||
- Verschiedene Schwierigkeitsgrade
|
||||
- Hinweise und Erklärungen
|
||||
"""
|
||||
|
||||
import logging
|
||||
import json
|
||||
import re
|
||||
from typing import List, Dict, Any, Optional
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ClozeType(str, Enum):
    """Kinds of cloze (gap-fill) exercises supported by the generator."""
    FILL_IN = "fill_in"      # free-text entry
    DRAG_DROP = "drag_drop"  # drag & drop of the answers
    DROPDOWN = "dropdown"    # selection from a dropdown list
|
||||
|
||||
|
||||
@dataclass
class ClozeGap:
    """A single gap within a cloze text."""
    position: int            # index of the gap within the text (0-based)
    answer: str              # the correct answer
    alternatives: List[str]  # alternative spellings that are also accepted
    hint: Optional[str]      # optional hint shown to the learner
    distractors: List[str]   # wrong options (used for dropdown / drag & drop)
|
||||
|
||||
|
||||
@dataclass
class ClozeText:
    """A complete cloze text together with its gap metadata."""
    text_with_gaps: str    # text containing [_N_] placeholders
    original_text: str     # the unmodified source text
    gaps: List[ClozeGap]   # one entry per placeholder, in order
    cloze_type: ClozeType  # presentation type of the exercise
    topic: Optional[str]   # optional topic label
    difficulty: str        # "easy", "medium" or "hard"
|
||||
|
||||
|
||||
class ClozeGenerator:
    """
    Generates cloze (gap-fill) texts from source texts.

    Supported modes:
    - automatic detection of important terms (heuristic, no LLM needed)
    - LLM-based intelligent selection of gaps
    - building a cloze from a pre-computed LLM response
    """

    def __init__(self, llm_client=None):
        """
        Initialize the generator.

        Args:
            llm_client: Optional LLM client for intelligent generation.
                        When None, the heuristic fallback is used.
        """
        self.llm_client = llm_client
        logger.info("ClozeGenerator initialized")

        # Parts of speech that usually make good gaps (nouns, verbs,
        # adjectives). NOTE(review): not referenced by the heuristics
        # below — presumably reserved for a future POS-tagging mode.
        self._important_pos = {"NOUN", "VERB", "ADJ"}  # nouns, verbs, adjectives

    def generate(
        self,
        source_text: str,
        num_gaps: int = 5,
        difficulty: str = "medium",
        cloze_type: ClozeType = ClozeType.FILL_IN,
        topic: Optional[str] = None
    ) -> ClozeText:
        """
        Generate a cloze text from a source text.

        Args:
            source_text: The input text.
            num_gaps: Number of gaps to create.
            difficulty: Difficulty level ("easy", "medium", "hard").
            cloze_type: Presentation type of the exercise.
            topic: Optional topic label.

        Returns:
            ClozeText object; a gap-less one if the input is too short.
        """
        logger.info(f"Generating cloze text with {num_gaps} gaps (difficulty: {difficulty})")

        # Guard: texts under 50 characters cannot yield meaningful gaps.
        if not source_text or len(source_text.strip()) < 50:
            logger.warning("Source text too short")
            return self._empty_cloze(source_text, cloze_type)

        if self.llm_client:
            return self._generate_with_llm(
                source_text, num_gaps, difficulty, cloze_type, topic
            )
        else:
            return self._generate_automatic(
                source_text, num_gaps, difficulty, cloze_type, topic
            )

    def _generate_with_llm(
        self,
        source_text: str,
        num_gaps: int,
        difficulty: str,
        cloze_type: ClozeType,
        topic: Optional[str]
    ) -> ClozeText:
        """Generate the cloze via the LLM; falls back to heuristics on any error."""
        # The prompt is deliberately German — the product generates German material.
        prompt = f"""
Erstelle einen Lückentext auf Deutsch basierend auf folgendem Text.
Ersetze {num_gaps} wichtige Begriffe durch Lücken.
Schwierigkeitsgrad: {difficulty}
{f'Thema: {topic}' if topic else ''}

Originaltext:
{source_text}

Wähle {num_gaps} wichtige Begriffe (Substantive, Verben, Fachbegriffe) aus.
Für jeden Begriff gib an:
- Das Wort, das ausgeblendet wird
- Alternative Schreibweisen (falls vorhanden)
- Einen Hinweis
- 3 ähnliche aber falsche Wörter (Distraktoren)

Antworte im JSON-Format:
{{
  "gaps": [
    {{
      "word": "Photosynthese",
      "alternatives": ["Fotosynthese"],
      "hint": "Prozess bei dem Pflanzen Licht nutzen",
      "distractors": ["Zellatmung", "Osmose", "Diffusion"]
    }}
  ]
}}
"""

        try:
            response = self.llm_client.generate(prompt)
            # NOTE(review): assumes the LLM returns bare JSON (no markdown
            # fences) — json.loads() fails otherwise and we fall back below.
            data = json.loads(response)
            return self._create_cloze_from_llm(
                source_text, data, difficulty, cloze_type, topic
            )
        except Exception as e:
            logger.error(f"Error generating with LLM: {e}")
            # Any LLM or parsing failure degrades gracefully to heuristics.
            return self._generate_automatic(
                source_text, num_gaps, difficulty, cloze_type, topic
            )

    def _generate_automatic(
        self,
        source_text: str,
        num_gaps: int,
        difficulty: str,
        cloze_type: ClozeType,
        topic: Optional[str]
    ) -> ClozeText:
        """Generate the cloze text heuristically, without an LLM."""
        # Score candidate words (see _find_important_words).
        words = self._find_important_words(source_text)

        # Pick candidates according to the requested difficulty.
        selected = self._select_words_by_difficulty(words, num_gaps, difficulty)

        # Punch the gaps into a working copy of the text.
        gaps = []
        text_with_gaps = source_text

        for i, (word, pos) in enumerate(selected):
            # Locate the word's first whole-word occurrence in the current text.
            match = re.search(r'\b' + re.escape(word) + r'\b', text_with_gaps)
            if match:
                # Replace the occurrence with a numbered placeholder.
                placeholder = f"[_{i+1}_]"
                text_with_gaps = text_with_gaps[:match.start()] + placeholder + text_with_gaps[match.end():]

                gap = ClozeGap(
                    position=i,
                    answer=word,
                    # Accept trivial case variants as alternatives.
                    alternatives=[word.lower(), word.upper()],
                    hint=self._generate_hint(word, source_text),
                    distractors=self._generate_distractors(word, words)
                )
                gaps.append(gap)

        return ClozeText(
            text_with_gaps=text_with_gaps,
            original_text=source_text,
            gaps=gaps,
            cloze_type=cloze_type,
            topic=topic,
            difficulty=difficulty
        )

    def _find_important_words(self, text: str) -> List[tuple]:
        """Return (word, score) tuples, highest score first."""
        # Simple heuristic: words of 4+ letters (incl. German umlauts/ß).
        words = re.findall(r'\b[A-Za-zäöüÄÖÜß]{4,}\b', text)

        # Count case-insensitive frequency.
        word_count = {}
        for word in words:
            word_lower = word.lower()
            word_count[word_lower] = word_count.get(word_lower, 0) + 1

        # Score = length + 2 * frequency, +3 bonus for a leading capital
        # (German nouns are capitalized).
        unique_words = list(set(words))
        scored = []
        for word in unique_words:
            score = len(word) + word_count[word.lower()] * 2
            if word[0].isupper():
                score += 3
            scored.append((word, score))

        scored.sort(key=lambda x: x[1], reverse=True)
        # NOTE(review): the comprehension is an identity copy of `scored`.
        return [(w, s) for w, s in scored]

    def _select_words_by_difficulty(
        self,
        words: List[tuple],
        num_gaps: int,
        difficulty: str
    ) -> List[tuple]:
        """Pick gap candidates from the scored list based on difficulty."""
        if difficulty == "easy":
            # Easy: the most frequent/important words.
            return words[:num_gaps]
        elif difficulty == "hard":
            # Hard: skip the top words and take the next band.
            # NOTE(review): when num_gaps < len(words) < 2*num_gaps this
            # returns fewer than num_gaps items — confirm that is intended.
            return words[num_gaps:num_gaps*2] if len(words) > num_gaps else words[:num_gaps]
        else:
            # Medium: currently identical to "easy".
            return words[:num_gaps]

    def _generate_hint(self, word: str, text: str) -> str:
        """Build a simple hint for a word (first letter, optionally length)."""
        # Look for a sentence containing the word to decide hint verbosity.
        sentences = text.split('.')
        for sentence in sentences:
            if word in sentence:
                words_in_sentence = sentence.split()
                # Longer sentences get the more detailed hint with the length.
                if len(words_in_sentence) > 5:
                    return f"Beginnt mit '{word[0]}' ({len(word)} Buchstaben)"
        return f"Beginnt mit '{word[0]}'"

    def _generate_distractors(self, word: str, all_words: List[tuple]) -> List[str]:
        """Pick up to three wrong options of similar length to the answer."""
        distractors = []
        word_len = len(word)

        # Prefer other candidate words whose length differs by at most 2.
        for w, _ in all_words:
            if w.lower() != word.lower():
                if abs(len(w) - word_len) <= 2:
                    distractors.append(w)
            if len(distractors) >= 3:
                break

        # Pad with generic placeholders when the text is too small.
        while len(distractors) < 3:
            distractors.append(f"[Option {len(distractors)+1}]")

        return distractors[:3]

    def _create_cloze_from_llm(
        self,
        source_text: str,
        data: Dict[str, Any],
        difficulty: str,
        cloze_type: ClozeType,
        topic: Optional[str]
    ) -> ClozeText:
        """Build a ClozeText from a parsed LLM JSON response."""
        text_with_gaps = source_text
        gaps = []

        for i, gap_data in enumerate(data.get("gaps", [])):
            word = gap_data.get("word", "")
            if word:
                # Replace only the first whole-word occurrence with [_N_].
                pattern = r'\b' + re.escape(word) + r'\b'
                placeholder = f"[_{i+1}_]"
                text_with_gaps = re.sub(pattern, placeholder, text_with_gaps, count=1)

                gap = ClozeGap(
                    position=i,
                    answer=word,
                    alternatives=gap_data.get("alternatives", []),
                    hint=gap_data.get("hint"),
                    distractors=gap_data.get("distractors", [])
                )
                gaps.append(gap)

        return ClozeText(
            text_with_gaps=text_with_gaps,
            original_text=source_text,
            gaps=gaps,
            cloze_type=cloze_type,
            topic=topic,
            difficulty=difficulty
        )

    def _empty_cloze(self, text: str, cloze_type: ClozeType) -> ClozeText:
        """Return a gap-less ClozeText (used when the input is unusable)."""
        return ClozeText(
            text_with_gaps=text,
            original_text=text,
            gaps=[],
            cloze_type=cloze_type,
            topic=None,
            difficulty="medium"
        )

    def to_h5p_format(self, cloze: ClozeText) -> Dict[str, Any]:
        """
        Convert a cloze text into an H5P "Fill in the Blanks" structure.

        Args:
            cloze: ClozeText object.

        Returns:
            Dict shaped for the H5P.Blanks library.
        """
        # H5P Fill in the Blanks format.
        h5p_text = cloze.text_with_gaps

        # Replace each [_N_] placeholder with H5P's *answer/alt1/alt2* form.
        for i, gap in enumerate(cloze.gaps):
            placeholder = f"[_{i+1}_]"
            answers = [gap.answer] + gap.alternatives
            h5p_answer = "/".join(answers)

            if cloze.cloze_type == ClozeType.DROPDOWN:
                # Dropdown variants also list the distractors as options.
                all_options = answers + gap.distractors
                h5p_answer = "/".join(all_options)

            h5p_text = h5p_text.replace(placeholder, f"*{h5p_answer}*")

        return {
            "library": "H5P.Blanks",
            "params": {
                "text": h5p_text,
                "behaviour": {
                    "enableRetry": True,
                    "enableSolutionsButton": True,
                    "caseSensitive": False,
                    "showSolutionsRequiresInput": True
                }
            }
        }

    def to_dict(self, cloze: ClozeText) -> Dict[str, Any]:
        """Convert a ClozeText to a plain, JSON-serializable dict."""
        return {
            "text_with_gaps": cloze.text_with_gaps,
            "original_text": cloze.original_text,
            "gaps": [
                {
                    "position": gap.position,
                    "answer": gap.answer,
                    "alternatives": gap.alternatives,
                    "hint": gap.hint,
                    "distractors": gap.distractors
                }
                for gap in cloze.gaps
            ],
            "cloze_type": cloze.cloze_type.value,
            "topic": cloze.topic,
            "difficulty": cloze.difficulty
        }
|
||||
277
backend/generators/mc_generator.py
Normal file
277
backend/generators/mc_generator.py
Normal file
@@ -0,0 +1,277 @@
|
||||
"""
|
||||
Multiple Choice Generator - Erstellt MC-Fragen aus Quelltexten.
|
||||
|
||||
Verwendet LLM (Claude/Ollama) zur Generierung von:
|
||||
- Multiple-Choice-Fragen mit 4 Antwortmöglichkeiten
|
||||
- Unterschiedliche Schwierigkeitsgrade
|
||||
- Erklärungen für falsche Antworten
|
||||
"""
|
||||
|
||||
import logging
|
||||
import json
|
||||
from typing import List, Dict, Any, Optional
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Difficulty(str, Enum):
    """Difficulty levels for generated questions."""
    EASY = "easy"
    MEDIUM = "medium"
    HARD = "hard"
|
||||
|
||||
|
||||
@dataclass
class MCOption:
    """One answer option of a multiple-choice question."""
    text: str                          # option text shown to the learner
    is_correct: bool                   # whether this is the correct option
    explanation: Optional[str] = None  # why this option is right/wrong
|
||||
|
||||
|
||||
@dataclass
class MCQuestion:
    """A single multiple-choice question."""
    question: str                 # the question text
    options: List[MCOption]       # answer options (one marked correct)
    difficulty: Difficulty        # difficulty the question was generated at
    topic: Optional[str] = None   # optional topic label
    hint: Optional[str] = None    # optional hint for the learner
|
||||
|
||||
|
||||
class MultipleChoiceGenerator:
    """
    Generates multiple-choice questions from source texts.

    Uses an LLM for intelligent question generation when a client is
    provided; otherwise falls back to simple mock questions.
    """

    def __init__(self, llm_client=None):
        """
        Initialize the generator.

        Args:
            llm_client: Optional LLM client used for generation.
                        When omitted, mock questions are produced.
        """
        self.llm_client = llm_client
        logger.info("MultipleChoiceGenerator initialized")

    def generate(
        self,
        source_text: str,
        num_questions: int = 5,
        difficulty: Difficulty = Difficulty.MEDIUM,
        subject: Optional[str] = None,
        grade_level: Optional[str] = None
    ) -> List[MCQuestion]:
        """
        Generate multiple-choice questions from a source text.

        Args:
            source_text: Text to generate questions from.
            num_questions: Number of questions to generate.
            difficulty: Difficulty level.
            subject: School subject (e.g. "Biologie", "Geschichte").
            grade_level: Grade level (e.g. "7").

        Returns:
            List of MCQuestion objects (empty if the text is too short).
        """
        logger.info(f"Generating {num_questions} MC questions (difficulty: {difficulty})")

        # Guard: very short texts cannot yield meaningful questions.
        if not source_text or len(source_text.strip()) < 50:
            logger.warning("Source text too short for meaningful questions")
            return []

        if self.llm_client:
            return self._generate_with_llm(
                source_text, num_questions, difficulty, subject, grade_level
            )
        else:
            return self._generate_mock(
                source_text, num_questions, difficulty
            )

    def _generate_with_llm(
        self,
        source_text: str,
        num_questions: int,
        difficulty: Difficulty,
        subject: Optional[str],
        grade_level: Optional[str]
    ) -> List[MCQuestion]:
        """Generate questions with the LLM; falls back to mocks on any error."""
        # Human-readable (German) difficulty descriptions for the prompt.
        difficulty_desc = {
            Difficulty.EASY: "einfach (Faktenwissen)",
            Difficulty.MEDIUM: "mittel (Verständnis)",
            Difficulty.HARD: "schwer (Anwendung und Analyse)"
        }

        # The prompt is deliberately German — the product generates German material.
        prompt = f"""
Erstelle {num_questions} Multiple-Choice-Fragen auf Deutsch basierend auf folgendem Text.
Schwierigkeitsgrad: {difficulty_desc[difficulty]}
{f'Fach: {subject}' if subject else ''}
{f'Klassenstufe: {grade_level}' if grade_level else ''}

Text:
{source_text}

Erstelle für jede Frage:
- Eine klare Frage
- 4 Antwortmöglichkeiten (genau eine richtig)
- Eine kurze Erklärung, warum die richtigen Antwort richtig ist
- Einen optionalen Hinweis

Antworte im folgenden JSON-Format:
{{
  "questions": [
    {{
      "question": "Die Frage...",
      "options": [
        {{"text": "Antwort A", "is_correct": false, "explanation": "Warum falsch"}},
        {{"text": "Antwort B", "is_correct": true, "explanation": "Warum richtig"}},
        {{"text": "Antwort C", "is_correct": false, "explanation": "Warum falsch"}},
        {{"text": "Antwort D", "is_correct": false, "explanation": "Warum falsch"}}
      ],
      "topic": "Thema der Frage",
      "hint": "Optionaler Hinweis"
    }}
  ]
}}
"""

        try:
            response = self.llm_client.generate(prompt)
            # NOTE(review): assumes bare JSON output (no markdown fences) — confirm.
            data = json.loads(response)
            return self._parse_llm_response(data, difficulty)
        except Exception as e:
            logger.error(f"Error generating with LLM: {e}")
            return self._generate_mock(source_text, num_questions, difficulty)

    def _parse_llm_response(
        self,
        data: Dict[str, Any],
        difficulty: Difficulty
    ) -> List[MCQuestion]:
        """Parse the LLM JSON payload into MCQuestion objects."""
        questions = []

        for q_data in data.get("questions", []):
            # opt["text"] is accessed without .get(): a malformed option raises
            # KeyError, which the caller's except block converts to mock output.
            options = [
                MCOption(
                    text=opt["text"],
                    is_correct=opt.get("is_correct", False),
                    explanation=opt.get("explanation")
                )
                for opt in q_data.get("options", [])
            ]

            question = MCQuestion(
                question=q_data.get("question", ""),
                options=options,
                difficulty=difficulty,
                topic=q_data.get("topic"),
                hint=q_data.get("hint")
            )
            questions.append(question)

        return questions

    def _generate_mock(
        self,
        source_text: str,
        num_questions: int,
        difficulty: Difficulty
    ) -> List[MCQuestion]:
        """Generate placeholder questions for tests/demo (no LLM involved)."""
        logger.info("Using mock generator (no LLM client)")

        # Extract up to 10 "keywords": whitespace-separated tokens longer
        # than 5 characters, in order of appearance.
        words = source_text.split()
        keywords = [w for w in words if len(w) > 5][:10]

        questions = []
        for i in range(min(num_questions, 5)):  # mock output is capped at 5
            keyword = keywords[i] if i < len(keywords) else f"Begriff {i+1}"

            question = MCQuestion(
                question=f"Was beschreibt '{keyword}' im Kontext des Textes am besten?",
                options=[
                    MCOption(text=f"Definition A von {keyword}", is_correct=True,
                             explanation="Dies ist die korrekte Definition."),
                    MCOption(text=f"Falsche Definition B", is_correct=False,
                             explanation="Diese Definition passt nicht."),
                    MCOption(text=f"Falsche Definition C", is_correct=False,
                             explanation="Diese Definition ist unvollständig."),
                    MCOption(text=f"Falsche Definition D", is_correct=False,
                             explanation="Diese Definition ist irreführend."),
                ],
                difficulty=difficulty,
                topic="Allgemein",
                hint=f"Denke an die Bedeutung von '{keyword}'."
            )
            questions.append(question)

        return questions

    def to_h5p_format(self, questions: List[MCQuestion]) -> Dict[str, Any]:
        """
        Convert questions into an H5P Multiple Choice structure.

        Args:
            questions: List of MCQuestion objects.

        Returns:
            Dict shaped for the H5P.MultiChoice library.
        """
        h5p_questions = []

        for q in questions:
            answers = []
            for opt in q.options:
                answers.append({
                    "text": opt.text,
                    "correct": opt.is_correct,
                    # NOTE(review): "tpiMessage" looks like a typo of a tip/
                    # feedback field — verify against the H5P.MultiChoice schema.
                    "tpiMessage": opt.explanation or ""
                })

            h5p_questions.append({
                "question": q.question,
                "answers": answers,
                "tip": q.hint or ""
            })

        return {
            "library": "H5P.MultiChoice",
            "params": {
                "questions": h5p_questions,
                "behaviour": {
                    "enableRetry": True,
                    "enableSolutionsButton": True,
                    "singleAnswer": True
                }
            }
        }

    def to_dict(self, questions: List[MCQuestion]) -> List[Dict[str, Any]]:
        """Convert questions to a plain, JSON-serializable list of dicts."""
        return [
            {
                "question": q.question,
                "options": [
                    {
                        "text": opt.text,
                        "is_correct": opt.is_correct,
                        "explanation": opt.explanation
                    }
                    for opt in q.options
                ],
                "difficulty": q.difficulty.value,
                "topic": q.topic,
                "hint": q.hint
            }
            for q in questions
        ]
|
||||
380
backend/generators/mindmap_generator.py
Normal file
380
backend/generators/mindmap_generator.py
Normal file
@@ -0,0 +1,380 @@
|
||||
"""
|
||||
Mindmap Generator - Erstellt Mindmaps aus Quelltexten.
|
||||
|
||||
Generiert:
|
||||
- Hierarchische Struktur aus Text
|
||||
- Hauptthema mit Unterthemen
|
||||
- Verbindungen und Beziehungen
|
||||
"""
|
||||
|
||||
import logging
|
||||
import json
|
||||
import re
|
||||
from typing import List, Dict, Any, Optional
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class MindmapNode:
    """A single node of a mindmap."""
    id: str                       # unique node id ("node_<n>")
    label: str                    # display text
    level: int = 0                # depth in the hierarchy (0 = root)
    children: List['MindmapNode'] = field(default_factory=list)
    color: Optional[str] = None   # hex color, assigned per level
    icon: Optional[str] = None    # optional icon name
    notes: Optional[str] = None   # optional free-form notes
|
||||
|
||||
|
||||
@dataclass
class Mindmap:
    """A complete mindmap."""
    root: MindmapNode          # center node holding the whole hierarchy
    title: str                 # title (also the root label)
    topic: Optional[str] = None  # optional topic label
    total_nodes: int = 0       # number of nodes including the root
|
||||
|
||||
|
||||
class MindmapGenerator:
|
||||
"""
|
||||
Generiert Mindmaps aus Quelltexten.
|
||||
|
||||
Extrahiert:
|
||||
- Hauptthema als Zentrum
|
||||
- Unterthemen als Äste
|
||||
- Details als Blätter
|
||||
"""
|
||||
|
||||
def __init__(self, llm_client=None):
|
||||
"""
|
||||
Initialisiert den Generator.
|
||||
|
||||
Args:
|
||||
llm_client: Optional - LLM-Client für intelligente Generierung
|
||||
"""
|
||||
self.llm_client = llm_client
|
||||
logger.info("MindmapGenerator initialized")
|
||||
|
||||
# Farben für verschiedene Ebenen
|
||||
self.level_colors = [
|
||||
"#6C1B1B", # Weinrot (Zentrum)
|
||||
"#3b82f6", # Blau
|
||||
"#22c55e", # Grün
|
||||
"#f59e0b", # Orange
|
||||
"#8b5cf6", # Violett
|
||||
]
|
||||
|
||||
def generate(
|
||||
self,
|
||||
source_text: str,
|
||||
title: Optional[str] = None,
|
||||
max_depth: int = 3,
|
||||
topic: Optional[str] = None
|
||||
) -> Mindmap:
|
||||
"""
|
||||
Generiert eine Mindmap aus einem Quelltext.
|
||||
|
||||
Args:
|
||||
source_text: Der Ausgangstext
|
||||
title: Optionaler Titel (sonst automatisch ermittelt)
|
||||
max_depth: Maximale Tiefe der Hierarchie
|
||||
topic: Optionales Thema
|
||||
|
||||
Returns:
|
||||
Mindmap-Objekt
|
||||
"""
|
||||
logger.info(f"Generating mindmap (max_depth: {max_depth})")
|
||||
|
||||
if not source_text or len(source_text.strip()) < 50:
|
||||
logger.warning("Source text too short")
|
||||
return self._empty_mindmap(title or "Mindmap")
|
||||
|
||||
if self.llm_client:
|
||||
return self._generate_with_llm(source_text, title, max_depth, topic)
|
||||
else:
|
||||
return self._generate_automatic(source_text, title, max_depth, topic)
|
||||
|
||||
def _generate_with_llm(
|
||||
self,
|
||||
source_text: str,
|
||||
title: Optional[str],
|
||||
max_depth: int,
|
||||
topic: Optional[str]
|
||||
) -> Mindmap:
|
||||
"""Generiert Mindmap mit LLM."""
|
||||
prompt = f"""
|
||||
Erstelle eine Mindmap-Struktur auf Deutsch basierend auf folgendem Text.
|
||||
{f'Titel: {title}' if title else 'Ermittle einen passenden Titel.'}
|
||||
Maximale Tiefe: {max_depth} Ebenen
|
||||
{f'Thema: {topic}' if topic else ''}
|
||||
|
||||
Text:
|
||||
{source_text}
|
||||
|
||||
Erstelle eine hierarchische Struktur mit:
|
||||
- Einem zentralen Hauptthema
|
||||
- 3-5 Hauptästen (Unterthemen)
|
||||
- Jeweils 2-4 Details pro Ast
|
||||
|
||||
Antworte im JSON-Format:
|
||||
{{
|
||||
"title": "Hauptthema",
|
||||
"branches": [
|
||||
{{
|
||||
"label": "Unterthema 1",
|
||||
"children": [
|
||||
{{"label": "Detail 1.1"}},
|
||||
{{"label": "Detail 1.2"}}
|
||||
]
|
||||
}},
|
||||
{{
|
||||
"label": "Unterthema 2",
|
||||
"children": [
|
||||
{{"label": "Detail 2.1"}}
|
||||
]
|
||||
}}
|
||||
]
|
||||
}}
|
||||
"""
|
||||
|
||||
try:
|
||||
response = self.llm_client.generate(prompt)
|
||||
data = json.loads(response)
|
||||
return self._create_mindmap_from_llm(data, topic)
|
||||
except Exception as e:
|
||||
logger.error(f"Error generating with LLM: {e}")
|
||||
return self._generate_automatic(source_text, title, max_depth, topic)
|
||||
|
||||
def _generate_automatic(
|
||||
self,
|
||||
source_text: str,
|
||||
title: Optional[str],
|
||||
max_depth: int,
|
||||
topic: Optional[str]
|
||||
) -> Mindmap:
|
||||
"""Generiert Mindmap automatisch ohne LLM."""
|
||||
# Extrahiere Struktur aus Text
|
||||
sections = self._extract_sections(source_text)
|
||||
|
||||
# Bestimme Titel
|
||||
if not title:
|
||||
# Erste Zeile oder erstes Substantiv
|
||||
first_line = source_text.split('\n')[0].strip()
|
||||
title = first_line[:50] if first_line else "Mindmap"
|
||||
|
||||
# Erstelle Root-Knoten
|
||||
node_counter = [0]
|
||||
root = self._create_node(title, 0, node_counter)
|
||||
|
||||
# Füge Hauptäste hinzu
|
||||
for section_title, section_content in sections[:5]: # Max 5 Hauptäste
|
||||
branch = self._create_node(section_title, 1, node_counter)
|
||||
branch.color = self.level_colors[1 % len(self.level_colors)]
|
||||
|
||||
# Füge Details hinzu
|
||||
details = self._extract_details(section_content)
|
||||
for detail in details[:4]: # Max 4 Details pro Ast
|
||||
if max_depth >= 2:
|
||||
leaf = self._create_node(detail, 2, node_counter)
|
||||
leaf.color = self.level_colors[2 % len(self.level_colors)]
|
||||
branch.children.append(leaf)
|
||||
|
||||
root.children.append(branch)
|
||||
|
||||
return Mindmap(
|
||||
root=root,
|
||||
title=title,
|
||||
topic=topic,
|
||||
total_nodes=node_counter[0]
|
||||
)
|
||||
|
||||
def _extract_sections(self, text: str) -> List[tuple]:
|
||||
"""Extrahiert Abschnitte aus dem Text."""
|
||||
sections = []
|
||||
|
||||
# Versuche Überschriften zu finden
|
||||
lines = text.split('\n')
|
||||
current_section = None
|
||||
current_content = []
|
||||
|
||||
for line in lines:
|
||||
line = line.strip()
|
||||
if not line:
|
||||
continue
|
||||
|
||||
# Erkenne potenzielle Überschriften
|
||||
if (line.endswith(':') or
|
||||
line.isupper() or
|
||||
len(line) < 50 and line[0].isupper() and '.' not in line):
|
||||
# Speichere vorherige Section
|
||||
if current_section:
|
||||
sections.append((current_section, '\n'.join(current_content)))
|
||||
current_section = line.rstrip(':')
|
||||
current_content = []
|
||||
else:
|
||||
current_content.append(line)
|
||||
|
||||
# Letzte Section
|
||||
if current_section:
|
||||
sections.append((current_section, '\n'.join(current_content)))
|
||||
|
||||
# Falls keine Sections gefunden, erstelle aus Sätzen
|
||||
if not sections:
|
||||
sentences = re.split(r'[.!?]+', text)
|
||||
for i, sentence in enumerate(sentences[:5]):
|
||||
sentence = sentence.strip()
|
||||
if len(sentence) > 10:
|
||||
# Kürze auf max 30 Zeichen für Label
|
||||
label = sentence[:30] + '...' if len(sentence) > 30 else sentence
|
||||
sections.append((label, sentence))
|
||||
|
||||
return sections
|
||||
|
||||
def _extract_details(self, content: str) -> List[str]:
|
||||
"""Extrahiert Details aus Abschnittsinhalt."""
|
||||
details = []
|
||||
|
||||
# Aufzählungen
|
||||
bullet_pattern = r'[-•*]\s*(.+)'
|
||||
bullets = re.findall(bullet_pattern, content)
|
||||
details.extend(bullets)
|
||||
|
||||
# Nummerierte Listen
|
||||
num_pattern = r'\d+[.)]\s*(.+)'
|
||||
numbered = re.findall(num_pattern, content)
|
||||
details.extend(numbered)
|
||||
|
||||
# Falls keine Listen, nehme Sätze
|
||||
if not details:
|
||||
sentences = re.split(r'[.!?]+', content)
|
||||
for sentence in sentences:
|
||||
sentence = sentence.strip()
|
||||
if len(sentence) > 5:
|
||||
label = sentence[:40] + '...' if len(sentence) > 40 else sentence
|
||||
details.append(label)
|
||||
|
||||
return details
|
||||
|
||||
def _create_node(
|
||||
self,
|
||||
label: str,
|
||||
level: int,
|
||||
counter: List[int]
|
||||
) -> MindmapNode:
|
||||
"""Erstellt einen neuen Knoten."""
|
||||
counter[0] += 1
|
||||
return MindmapNode(
|
||||
id=f"node_{counter[0]}",
|
||||
label=label,
|
||||
level=level,
|
||||
children=[],
|
||||
color=self.level_colors[level % len(self.level_colors)]
|
||||
)
|
||||
|
||||
def _create_mindmap_from_llm(
|
||||
self,
|
||||
data: Dict[str, Any],
|
||||
topic: Optional[str]
|
||||
) -> Mindmap:
|
||||
"""Erstellt Mindmap aus LLM-Antwort."""
|
||||
node_counter = [0]
|
||||
title = data.get("title", "Mindmap")
|
||||
|
||||
root = self._create_node(title, 0, node_counter)
|
||||
|
||||
for branch_data in data.get("branches", []):
|
||||
branch = self._create_node(branch_data.get("label", ""), 1, node_counter)
|
||||
branch.color = self.level_colors[1 % len(self.level_colors)]
|
||||
|
||||
for child_data in branch_data.get("children", []):
|
||||
child = self._create_node(child_data.get("label", ""), 2, node_counter)
|
||||
child.color = self.level_colors[2 % len(self.level_colors)]
|
||||
branch.children.append(child)
|
||||
|
||||
root.children.append(branch)
|
||||
|
||||
return Mindmap(
|
||||
root=root,
|
||||
title=title,
|
||||
topic=topic,
|
||||
total_nodes=node_counter[0]
|
||||
)
|
||||
|
||||
def _empty_mindmap(self, title: str) -> Mindmap:
|
||||
"""Erstellt leere Mindmap bei Fehler."""
|
||||
root = MindmapNode(
|
||||
id="root",
|
||||
label=title,
|
||||
level=0,
|
||||
color=self.level_colors[0]
|
||||
)
|
||||
return Mindmap(root=root, title=title, total_nodes=1)
|
||||
|
||||
def to_dict(self, mindmap: Mindmap) -> Dict[str, Any]:
|
||||
"""Konvertiert Mindmap zu Dictionary-Format."""
|
||||
def node_to_dict(node: MindmapNode) -> Dict[str, Any]:
|
||||
return {
|
||||
"id": node.id,
|
||||
"label": node.label,
|
||||
"level": node.level,
|
||||
"color": node.color,
|
||||
"icon": node.icon,
|
||||
"notes": node.notes,
|
||||
"children": [node_to_dict(child) for child in node.children]
|
||||
}
|
||||
|
||||
return {
|
||||
"title": mindmap.title,
|
||||
"topic": mindmap.topic,
|
||||
"total_nodes": mindmap.total_nodes,
|
||||
"root": node_to_dict(mindmap.root)
|
||||
}
|
||||
|
||||
def to_mermaid(self, mindmap: Mindmap) -> str:
    """
    Render the mindmap in Mermaid `mindmap` syntax for visualization.

    Args:
        mindmap: Mindmap object.

    Returns:
        Mermaid diagram source as a single newline-joined string.
    """
    out = ["mindmap"]
    out.append(f"  root(({mindmap.title}))")

    def walk(node: MindmapNode, depth: int):
        pad = " " * (depth + 1)
        for sub in node.children:
            if sub.children:
                # Inner nodes: plain label, shape chosen by Mermaid.
                out.append(f"{pad}{sub.label}")
            else:
                # NOTE(review): ")label(" is Mermaid's *cloud* shape for
                # leaves — confirm this is intended rather than "(label)".
                out.append(f"{pad}){sub.label}(")
            walk(sub, depth + 1)

    walk(mindmap.root, 1)
    return "\n".join(out)
|
||||
|
||||
def to_json_tree(self, mindmap: Mindmap) -> Dict[str, Any]:
    """
    Convert the mindmap to a nested JSON-tree structure for JS libraries.

    Args:
        mindmap: Mindmap object.

    Returns:
        Nested dict (keys: "name", "id", "color", optional "children")
        suitable for d3.js, vis.js and similar renderers.
    """

    def branchify(node: MindmapNode) -> Dict[str, Any]:
        entry: Dict[str, Any] = {
            "name": node.label,
            "id": node.id,
            "color": node.color
        }
        # Leaf nodes deliberately omit the "children" key entirely.
        if node.children:
            entry["children"] = [branchify(sub) for sub in node.children]
        return entry

    return branchify(mindmap.root)
|
||||
594
backend/generators/quiz_generator.py
Normal file
594
backend/generators/quiz_generator.py
Normal file
@@ -0,0 +1,594 @@
|
||||
"""
|
||||
Quiz Generator - Erstellt verschiedene Quiz-Typen aus Quelltexten.
|
||||
|
||||
Generiert:
|
||||
- True/False Fragen
|
||||
- Zuordnungsaufgaben (Matching)
|
||||
- Sortieraufgaben
|
||||
- Offene Fragen mit Musterlösungen
|
||||
"""
|
||||
|
||||
import logging
|
||||
import json
|
||||
import re
|
||||
from typing import List, Dict, Any, Optional, Tuple
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class QuizType(str, Enum):
    """Supported quiz task types (str mixin: members compare equal to their value)."""

    TRUE_FALSE = "true_false"
    MATCHING = "matching"
    SORTING = "sorting"
    OPEN_ENDED = "open_ended"
|
||||
|
||||
|
||||
@dataclass
class TrueFalseQuestion:
    """A single true/false statement with feedback."""

    statement: str  # the assertion shown to the learner
    is_true: bool  # ground truth of the statement
    explanation: str  # feedback shown after answering
    source_reference: Optional[str] = None  # snippet of the source sentence, if any
|
||||
|
||||
|
||||
@dataclass
class MatchingPair:
    """One left/right pair for a matching exercise."""

    left: str  # term side
    right: str  # definition/answer side
    hint: Optional[str] = None  # optional hint for the learner
|
||||
|
||||
|
||||
@dataclass
class SortingItem:
    """One element of a sorting exercise."""

    text: str  # the item text shown to the learner
    correct_position: int  # 1-based target position in the correct order
    category: Optional[str] = None  # ordering criterion (e.g. "chronologisch")
|
||||
|
||||
|
||||
@dataclass
class OpenQuestion:
    """An open-ended question with a model answer."""

    question: str  # the question text
    model_answer: str  # full sample solution
    keywords: List[str]  # keywords expected in a good answer
    points: int = 1  # score awarded for a correct answer
|
||||
|
||||
|
||||
@dataclass
class Quiz:
    """A complete quiz of a single task type."""

    quiz_type: QuizType  # determines the element type of `questions`
    title: str
    questions: List[Any]  # TrueFalseQuestion | MatchingPair | SortingItem | OpenQuestion
    topic: Optional[str] = None
    difficulty: str = "medium"
|
||||
|
||||
|
||||
class QuizGenerator:
    """
    Generates different quiz types (true/false, matching, sorting,
    open-ended) from source texts.
    """

    def __init__(self, llm_client=None):
        """
        Initialize the generator.

        Args:
            llm_client: Optional LLM client for intelligent generation;
                when absent, rule-based fallbacks are used instead.
        """
        self.llm_client = llm_client
        logger.info("QuizGenerator initialized")
|
||||
|
||||
def generate(
    self,
    source_text: str,
    quiz_type: QuizType,
    num_questions: int = 5,
    title: Optional[str] = None,
    topic: Optional[str] = None,
    difficulty: str = "medium"
) -> Quiz:
    """
    Generate a quiz from a source text.

    Args:
        source_text: The input text the quiz is based on.
        quiz_type: Which kind of quiz to build.
        num_questions: Number of questions/items to produce.
        title: Optional quiz title.
        topic: Optional topic label.
        difficulty: Difficulty level.

    Returns:
        A populated Quiz object (empty if the text is unusable).

    Raises:
        ValueError: If `quiz_type` is not a known QuizType.
    """
    logger.info(f"Generating {quiz_type} quiz with {num_questions} questions")

    # Too little text cannot yield meaningful questions — return an empty quiz.
    if not source_text or len(source_text.strip()) < 50:
        logger.warning("Source text too short")
        return self._empty_quiz(quiz_type, title or "Quiz")

    dispatch = {
        QuizType.TRUE_FALSE: self._generate_true_false,
        QuizType.MATCHING: self._generate_matching,
        QuizType.SORTING: self._generate_sorting,
        QuizType.OPEN_ENDED: self._generate_open_ended,
    }
    builder = dispatch.get(quiz_type)
    if not builder:
        raise ValueError(f"Unbekannter Quiz-Typ: {quiz_type}")

    return Quiz(
        quiz_type=quiz_type,
        title=title or f"{quiz_type.value.replace('_', ' ').title()} Quiz",
        questions=builder(source_text, num_questions, difficulty),
        topic=topic,
        difficulty=difficulty
    )
|
||||
|
||||
def _generate_true_false(
    self,
    source_text: str,
    num_questions: int,
    difficulty: str
) -> List[TrueFalseQuestion]:
    """Build true/false questions; LLM-backed when a client is available."""
    if self.llm_client:
        return self._generate_true_false_llm(source_text, num_questions, difficulty)

    # Rule-based fallback: alternate verbatim (true) and negated (false) facts.
    facts = self._extract_factual_sentences(source_text)
    out: List[TrueFalseQuestion] = []

    for idx, fact in enumerate(facts[:num_questions]):
        if idx % 2 == 0:
            # Even slots keep the statement as-is — it is true by construction.
            out.append(TrueFalseQuestion(
                statement=fact,
                is_true=True,
                explanation="Diese Aussage entspricht dem Text.",
                source_reference=fact[:50]
            ))
        else:
            # Odd slots carry a negated variant of the fact.
            out.append(TrueFalseQuestion(
                statement=self._negate_sentence(fact),
                is_true=False,
                explanation=f"Richtig wäre: {fact}",
                source_reference=fact[:50]
            ))

    return out
|
||||
|
||||
def _generate_true_false_llm(
    self,
    source_text: str,
    num_questions: int,
    difficulty: str
) -> List[TrueFalseQuestion]:
    """Build true/false questions via the LLM; fall back to rules on any error."""
    prompt = f"""
Erstelle {num_questions} Wahr/Falsch-Aussagen auf Deutsch basierend auf folgendem Text.
Schwierigkeit: {difficulty}
Erstelle etwa gleich viele wahre und falsche Aussagen.

Text:
{source_text}

Antworte im JSON-Format:
{{
    "questions": [
        {{
            "statement": "Die Aussage...",
            "is_true": true,
            "explanation": "Erklärung warum wahr/falsch"
        }}
    ]
}}
"""
    # The whole parse+build stays inside the try so that malformed JSON or
    # missing keys both trigger the rule-based fallback.
    try:
        raw = self.llm_client.generate(prompt)
        payload = json.loads(raw)
        return [
            TrueFalseQuestion(
                statement=item["statement"],
                is_true=item["is_true"],
                explanation=item["explanation"]
            )
            for item in payload.get("questions", [])
        ]
    except Exception as e:
        logger.error(f"LLM error: {e}")
        return self._generate_true_false(source_text, num_questions, difficulty)
|
||||
|
||||
def _generate_matching(
    self,
    source_text: str,
    num_pairs: int,
    difficulty: str
) -> List[MatchingPair]:
    """Build matching pairs; LLM-backed when a client is available."""
    if self.llm_client:
        return self._generate_matching_llm(source_text, num_pairs, difficulty)

    # Rule-based fallback: term -> definition pairs mined from the text.
    # _extract_definitions guarantees non-empty definitions, so [0] is safe.
    return [
        MatchingPair(
            left=term,
            right=definition,
            hint=f"Beginnt mit '{definition[0]}'"
        )
        for term, definition in self._extract_definitions(source_text)[:num_pairs]
    ]
|
||||
|
||||
def _generate_matching_llm(
    self,
    source_text: str,
    num_pairs: int,
    difficulty: str
) -> List[MatchingPair]:
    """Build matching pairs via the LLM; fall back to rules on any error."""
    prompt = f"""
Erstelle {num_pairs} Zuordnungspaare auf Deutsch basierend auf folgendem Text.
Jedes Paar besteht aus einem Begriff und seiner Definition/Erklärung.
Schwierigkeit: {difficulty}

Text:
{source_text}

Antworte im JSON-Format:
{{
    "pairs": [
        {{
            "term": "Begriff",
            "definition": "Definition des Begriffs",
            "hint": "Optionaler Hinweis"
        }}
    ]
}}
"""
    # Keep parse+build inside the try: any malformed response falls back.
    try:
        raw = self.llm_client.generate(prompt)
        payload = json.loads(raw)
        return [
            MatchingPair(
                left=entry["term"],
                right=entry["definition"],
                hint=entry.get("hint")
            )
            for entry in payload.get("pairs", [])
        ]
    except Exception as e:
        logger.error(f"LLM error: {e}")
        return self._generate_matching(source_text, num_pairs, difficulty)
|
||||
|
||||
def _generate_sorting(
    self,
    source_text: str,
    num_items: int,
    difficulty: str
) -> List[SortingItem]:
    """Build sorting items; LLM-backed when a client is available."""
    if self.llm_client:
        return self._generate_sorting_llm(source_text, num_items, difficulty)

    # Rule-based fallback: keep the order in which steps occur in the text.
    ordered_steps = self._extract_sequence(source_text)[:num_items]
    return [
        SortingItem(text=step, correct_position=pos)
        for pos, step in enumerate(ordered_steps, start=1)
    ]
|
||||
|
||||
def _generate_sorting_llm(
    self,
    source_text: str,
    num_items: int,
    difficulty: str
) -> List[SortingItem]:
    """Build a sorting exercise via the LLM; fall back to rules on any error."""
    prompt = f"""
Erstelle eine Sortieraufgabe auf Deutsch basierend auf folgendem Text.
Finde {num_items} Elemente, die in eine logische Reihenfolge gebracht werden müssen.
(z.B. chronologisch, nach Wichtigkeit, nach Größe, etc.)
Schwierigkeit: {difficulty}

Text:
{source_text}

Antworte im JSON-Format:
{{
    "category": "chronologisch/nach Größe/etc.",
    "items": [
        {{"text": "Erstes Element", "position": 1}},
        {{"text": "Zweites Element", "position": 2}}
    ]
}}
"""
    # Keep parse+build inside the try: any malformed response falls back.
    try:
        raw = self.llm_client.generate(prompt)
        payload = json.loads(raw)
        ordering_criterion = payload.get("category")
        return [
            SortingItem(
                text=entry["text"],
                correct_position=entry["position"],
                category=ordering_criterion
            )
            for entry in payload.get("items", [])
        ]
    except Exception as e:
        logger.error(f"LLM error: {e}")
        return self._generate_sorting(source_text, num_items, difficulty)
|
||||
|
||||
def _generate_open_ended(
    self,
    source_text: str,
    num_questions: int,
    difficulty: str
) -> List[OpenQuestion]:
    """Build open-ended questions; LLM-backed when a client is available."""
    if self.llm_client:
        return self._generate_open_ended_llm(source_text, num_questions, difficulty)

    # Rule-based fallback: turn each fact sentence into a question about
    # one of its keywords, cycling through question starters.
    starters = [
        "Was bedeutet",
        "Erkläre",
        "Warum",
        "Wie funktioniert",
        "Nenne die Hauptmerkmale von"
    ]

    out: List[OpenQuestion] = []
    facts = self._extract_factual_sentences(source_text)

    for idx, fact in enumerate(facts[:num_questions]):
        keys = self._extract_keywords(fact)
        if not keys:
            # No keyword found — this sentence cannot yield a question.
            continue
        opener = starters[idx % len(starters)]
        out.append(OpenQuestion(
            question=f"{opener} '{keys[0]}'?",
            model_answer=fact,  # the source sentence doubles as model answer
            keywords=keys,
            points=1
        ))

    return out
|
||||
|
||||
def _generate_open_ended_llm(
    self,
    source_text: str,
    num_questions: int,
    difficulty: str
) -> List[OpenQuestion]:
    """Build open-ended questions via the LLM; fall back to rules on any error."""
    prompt = f"""
Erstelle {num_questions} offene Fragen auf Deutsch basierend auf folgendem Text.
Jede Frage sollte eine ausführliche Antwort erfordern.
Schwierigkeit: {difficulty}

Text:
{source_text}

Antworte im JSON-Format:
{{
    "questions": [
        {{
            "question": "Die Frage...",
            "model_answer": "Eine vollständige Musterantwort",
            "keywords": ["Schlüsselwort1", "Schlüsselwort2"],
            "points": 2
        }}
    ]
}}
"""
    # Keep parse+build inside the try: any malformed response falls back.
    try:
        raw = self.llm_client.generate(prompt)
        payload = json.loads(raw)
        return [
            OpenQuestion(
                question=entry["question"],
                model_answer=entry["model_answer"],
                keywords=entry.get("keywords", []),
                points=entry.get("points", 1)
            )
            for entry in payload.get("questions", [])
        ]
    except Exception as e:
        logger.error(f"LLM error: {e}")
        return self._generate_open_ended(source_text, num_questions, difficulty)
|
||||
|
||||
# Hilfsmethoden
|
||||
|
||||
def _extract_factual_sentences(self, text: str) -> List[str]:
    """Extract fact-like sentences: long enough and not questions.

    Note: after splitting on [.!?]+ a '?' can no longer appear in a
    fragment; the check is kept for parity with the original intent.
    """
    fragments = (part.strip() for part in re.split(r'[.!?]+', text))
    return [frag for frag in fragments if len(frag) > 20 and '?' not in frag]
|
||||
|
||||
def _negate_sentence(self, sentence: str) -> str:
    """
    Produce a false variant of *sentence* via simple negation.

    Heuristic: insert "nicht" after the first verb-looking word (one
    ending in -t/-en/-st, not in first position).

    Fix: the original returned the sentence UNCHANGED when it had two or
    fewer words or when no candidate word matched — so the caller would
    present a *true* statement labeled as false. A generic negation
    prefix is now used as a fallback, guaranteeing the result is always
    actually negated.

    Args:
        sentence: A factual statement in German.

    Returns:
        A negated version of the statement.
    """
    words = sentence.split()
    if len(words) > 2:
        for i, word in enumerate(words):
            # Crude verb detection by common German verb endings.
            if word.endswith(('t', 'en', 'st')) and i > 0:
                words.insert(i + 1, 'nicht')
                return ' '.join(words)
    # Fallback: no insertion point found — negate the whole statement.
    return f"Es stimmt nicht, dass {sentence}"
|
||||
|
||||
def _extract_definitions(self, text: str) -> List[Tuple[str, str]]:
    """Extract (term, definition) pairs using simple German copula patterns.

    Matches constructs like "X ist Y.", "X bezeichnet Y.", "X bedeutet Y."
    and "X: Y."; definitions of 10 characters or fewer are discarded.
    """
    copula_patterns = (
        r'(\w+)\s+ist\s+(.+?)[.]',
        r'(\w+)\s+bezeichnet\s+(.+?)[.]',
        r'(\w+)\s+bedeutet\s+(.+?)[.]',
        r'(\w+):\s+(.+?)[.]',
    )

    found: List[Tuple[str, str]] = []
    for pattern in copula_patterns:
        for term, body in re.findall(pattern, text):
            if len(body) > 10:
                found.append((term, body.strip()))

    return found
|
||||
|
||||
def _extract_sequence(self, text: str) -> List[str]:
    """Collect ordered steps: numbered items first, then signal-word phrases."""
    # Numbered steps like "1. foo" or "2) bar".
    steps: List[str] = list(re.findall(r'\d+[.)]\s*([^.]+)', text))

    # Phrases introduced by German sequencing cues.
    for cue in ('zuerst', 'dann', 'danach', 'anschließend', 'schließlich'):
        steps.extend(re.findall(rf'{cue}\s+([^.]+)', text, re.IGNORECASE))

    return steps
|
||||
|
||||
def _extract_keywords(self, text: str) -> List[str]:
    """
    Extract up to five keyword candidates (capitalized German words,
    i.e. mostly nouns) from *text*.

    Fix: the original `list(set(words))[:5]` returned a NONDETERMINISTIC
    selection and order (set iteration order varies per process because
    of string hash randomization), so repeated runs produced different
    quizzes from identical input. Deduplicate while preserving
    first-occurrence order instead, which is deterministic.

    Args:
        text: Text to scan.

    Returns:
        Up to five unique keywords, in order of first occurrence.
    """
    words = re.findall(r'\b[A-ZÄÖÜ][a-zäöüß]+\b', text)
    # dict.fromkeys dedupes while keeping insertion order (Python 3.7+).
    return list(dict.fromkeys(words))[:5]
|
||||
|
||||
def _empty_quiz(self, quiz_type: QuizType, title: str) -> Quiz:
    """Return a quiz with no questions (fallback for unusable input)."""
    return Quiz(
        quiz_type=quiz_type,
        title=title,
        questions=[],
        difficulty="medium"
    )
|
||||
|
||||
def to_dict(self, quiz: Quiz) -> Dict[str, Any]:
    """Serialize a Quiz to a plain dictionary.

    Each question is tagged with a "type" key matching its dataclass;
    items of unrecognized types are silently skipped (as before).
    """

    def serialize(item: Any) -> Optional[Dict[str, Any]]:
        if isinstance(item, TrueFalseQuestion):
            return {
                "type": "true_false",
                "statement": item.statement,
                "is_true": item.is_true,
                "explanation": item.explanation
            }
        if isinstance(item, MatchingPair):
            return {
                "type": "matching",
                "left": item.left,
                "right": item.right,
                "hint": item.hint
            }
        if isinstance(item, SortingItem):
            return {
                "type": "sorting",
                "text": item.text,
                "correct_position": item.correct_position,
                "category": item.category
            }
        if isinstance(item, OpenQuestion):
            return {
                "type": "open_ended",
                "question": item.question,
                "model_answer": item.model_answer,
                "keywords": item.keywords,
                "points": item.points
            }
        return None

    serialized = [entry for entry in map(serialize, quiz.questions) if entry is not None]

    return {
        "quiz_type": quiz.quiz_type.value,
        "title": quiz.title,
        "topic": quiz.topic,
        "difficulty": quiz.difficulty,
        "questions": serialized
    }
|
||||
|
||||
def to_h5p_format(self, quiz: Quiz) -> Dict[str, Any]:
    """Convert a quiz into H5P content parameters.

    Only true/false and matching quizzes currently have an H5P mapping;
    all other types yield an empty dict.
    """
    if quiz.quiz_type == QuizType.TRUE_FALSE:
        return self._true_false_to_h5p(quiz)
    if quiz.quiz_type == QuizType.MATCHING:
        return self._matching_to_h5p(quiz)
    # No mapping implemented for the remaining quiz types yet.
    return {}
|
||||
|
||||
def _true_false_to_h5p(self, quiz: Quiz) -> Dict[str, Any]:
    """Map true/false questions onto H5P.TrueFalse library parameters."""
    stmts = [
        {
            "text": tf.statement,
            "correct": tf.is_true,
            "feedback": tf.explanation
        }
        for tf in quiz.questions
    ]

    return {
        "library": "H5P.TrueFalse",
        "params": {
            "statements": stmts,
            "behaviour": {
                "enableRetry": True,
                "enableSolutionsButton": True
            }
        }
    }
|
||||
|
||||
def _matching_to_h5p(self, quiz: Quiz) -> Dict[str, Any]:
    """Map matching pairs onto H5P.DragText library parameters."""
    mapped_pairs = [
        {"question": pair.left, "answer": pair.right}
        for pair in quiz.questions
    ]

    return {
        "library": "H5P.DragText",
        "params": {
            "pairs": mapped_pairs,
            "behaviour": {
                "enableRetry": True,
                "enableSolutionsButton": True
            }
        }
    }
|
||||
Reference in New Issue
Block a user