fix: Restore all files lost during destructive rebase
A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.
This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).
Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
355
geo-service/services/learning_generator.py
Normal file
355
geo-service/services/learning_generator.py
Normal file
@@ -0,0 +1,355 @@
|
||||
"""
|
||||
Learning Generator Service
|
||||
Generates educational content for geographic areas using LLM
|
||||
"""
|
||||
import os
|
||||
import json
|
||||
import uuid
|
||||
from typing import Optional
|
||||
import structlog
|
||||
import httpx
|
||||
|
||||
from config import settings
|
||||
from models.learning_node import LearningNode, LearningTheme, NodeType
|
||||
|
||||
logger = structlog.get_logger(__name__)


# In-memory storage for learning nodes (use database in production)
# Maps AOI id -> list of LearningNode objects; process-local shared state
# read and mutated by LearningGeneratorService methods.
_learning_nodes = {}
class LearningGeneratorService:
    """Generate themed educational learning nodes with the Ollama LLM.

    Content is tailored to the geographic features of an AOI and to
    didactic parameters (theme, difficulty, grade level).
    """

    def __init__(self):
        # Ollama connection parameters come from application settings.
        cfg = settings
        self.ollama_url = cfg.ollama_base_url
        self.model = cfg.ollama_model
        self.timeout = cfg.ollama_timeout
async def generate_nodes(
|
||||
self,
|
||||
aoi_id: str,
|
||||
theme: LearningTheme,
|
||||
difficulty: str,
|
||||
node_count: int,
|
||||
grade_level: Optional[str] = None,
|
||||
language: str = "de",
|
||||
) -> list[LearningNode]:
|
||||
"""
|
||||
Generate learning nodes for an AOI.
|
||||
|
||||
Uses the Ollama LLM to create educational content appropriate
|
||||
for the theme, difficulty, and grade level.
|
||||
"""
|
||||
# Get AOI information
|
||||
aoi_info = await self._get_aoi_info(aoi_id)
|
||||
if aoi_info is None:
|
||||
raise FileNotFoundError(f"AOI {aoi_id} not found")
|
||||
|
||||
# Build prompt for LLM
|
||||
prompt = self._build_generation_prompt(
|
||||
aoi_info=aoi_info,
|
||||
theme=theme,
|
||||
difficulty=difficulty,
|
||||
node_count=node_count,
|
||||
grade_level=grade_level,
|
||||
language=language,
|
||||
)
|
||||
|
||||
# Call Ollama
|
||||
try:
|
||||
response = await self._call_ollama(prompt)
|
||||
nodes = self._parse_llm_response(response, aoi_id, theme)
|
||||
except ConnectionError:
|
||||
logger.warning("Ollama not available, using mock data")
|
||||
nodes = self._generate_mock_nodes(aoi_id, theme, difficulty, node_count)
|
||||
|
||||
# Store nodes
|
||||
if aoi_id not in _learning_nodes:
|
||||
_learning_nodes[aoi_id] = []
|
||||
_learning_nodes[aoi_id].extend(nodes)
|
||||
|
||||
return nodes
|
||||
|
||||
async def _get_aoi_info(self, aoi_id: str) -> Optional[dict]:
|
||||
"""Get information about an AOI from its manifest."""
|
||||
manifest_path = os.path.join(settings.bundle_dir, aoi_id, "manifest.json")
|
||||
|
||||
if os.path.exists(manifest_path):
|
||||
with open(manifest_path) as f:
|
||||
return json.load(f)
|
||||
|
||||
# Check in-memory storage
|
||||
from services.aoi_packager import _aoi_storage
|
||||
return _aoi_storage.get(aoi_id)
|
||||
|
||||
    def _build_generation_prompt(
        self,
        aoi_info: dict,
        theme: LearningTheme,
        difficulty: str,
        node_count: int,
        grade_level: Optional[str],
        language: str,
    ) -> str:
        """Build a prompt for the LLM to generate learning nodes.

        The prompt is written in German (the didactic framing is German
        regardless of ``language``; only the requested answer language
        switches) and instructs the model to reply with a bare JSON array
        that `_parse_llm_response` can extract.
        """
        # Human-readable German description per theme, interpolated into
        # the THEMA line below; unknown themes fall back to "".
        theme_descriptions = {
            LearningTheme.TOPOGRAPHIE: "Landschaftsformen, Höhen und Geländemerkmale",
            LearningTheme.LANDNUTZUNG: "Siedlungen, Landwirtschaft und Flächennutzung",
            LearningTheme.ORIENTIERUNG: "Kartenlesen, Kompass und Navigation",
            LearningTheme.GEOLOGIE: "Gesteinsarten und geologische Formationen",
            LearningTheme.HYDROLOGIE: "Gewässer, Einzugsgebiete und Wasserkreislauf",
            LearningTheme.VEGETATION: "Pflanzengemeinschaften und Klimazonen",
        }

        # Same for the difficulty key; unknown keys fall back to "".
        difficulty_descriptions = {
            "leicht": "Grundlegende Beobachtungen und einfache Fakten",
            "mittel": "Verknüpfung von Zusammenhängen und Vergleiche",
            "schwer": "Analyse, Transfer und kritisches Denken",
        }

        # assumes manifest provides bounds {west,south,east,north}, center
        # {latitude,longitude} and area_km2 — TODO confirm against packager.
        bounds = aoi_info.get("bounds", {})
        center = aoi_info.get("center", {})

        prompt = f"""Du bist ein Erdkunde-Didaktiker und erstellst Lernstationen für eine interaktive 3D-Lernwelt.

GEBIET:
- Zentrum: {center.get('latitude', 0):.4f}°N, {center.get('longitude', 0):.4f}°E
- Fläche: ca. {aoi_info.get('area_km2', 0):.2f} km²
- Grenzen: West {bounds.get('west', 0):.4f}°, Süd {bounds.get('south', 0):.4f}°, Ost {bounds.get('east', 0):.4f}°, Nord {bounds.get('north', 0):.4f}°

THEMA: {theme.value} - {theme_descriptions.get(theme, '')}

SCHWIERIGKEITSGRAD: {difficulty} - {difficulty_descriptions.get(difficulty, '')}

ZIELGRUPPE: {grade_level if grade_level else 'Allgemein (Klasse 5-10)'}

AUFGABE:
Erstelle {node_count} Lernstationen im JSON-Format. Jede Station soll:
1. Eine geografische Position innerhalb des Gebiets haben
2. Eine Lernfrage oder Aufgabe enthalten
3. Hinweise zur Lösung bieten
4. Die richtige Antwort mit Erklärung enthalten

FORMAT (JSON-Array):
[
  {{
    "title": "Titel der Station",
    "position": {{"latitude": 0.0, "longitude": 0.0}},
    "question": "Die Lernfrage",
    "hints": ["Hinweis 1", "Hinweis 2"],
    "answer": "Die Antwort",
    "explanation": "Didaktische Erklärung",
    "node_type": "question|observation|exploration",
    "points": 10
  }}
]

WICHTIG:
- Positionen müssen innerhalb der Gebietsgrenzen liegen
- Fragen sollen zum Thema {theme.value} passen
- Sprache: {"Deutsch" if language == "de" else "English"}
- Altersgerechte Formulierungen verwenden

Antworte NUR mit dem JSON-Array, ohne weitere Erklärungen."""

        return prompt
async def _call_ollama(self, prompt: str) -> str:
|
||||
"""Call Ollama API to generate content."""
|
||||
try:
|
||||
async with httpx.AsyncClient(timeout=self.timeout) as client:
|
||||
response = await client.post(
|
||||
f"{self.ollama_url}/api/generate",
|
||||
json={
|
||||
"model": self.model,
|
||||
"prompt": prompt,
|
||||
"stream": False,
|
||||
"options": {
|
||||
"temperature": 0.7,
|
||||
"top_p": 0.9,
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
if response.status_code != 200:
|
||||
raise ConnectionError(f"Ollama returned {response.status_code}")
|
||||
|
||||
result = response.json()
|
||||
return result.get("response", "")
|
||||
|
||||
except httpx.ConnectError:
|
||||
raise ConnectionError("Cannot connect to Ollama")
|
||||
except Exception as e:
|
||||
logger.error("Ollama API error", error=str(e))
|
||||
raise ConnectionError(f"Ollama error: {str(e)}")
|
||||
|
||||
def _parse_llm_response(
|
||||
self, response: str, aoi_id: str, theme: LearningTheme
|
||||
) -> list[LearningNode]:
|
||||
"""Parse LLM response into LearningNode objects."""
|
||||
try:
|
||||
# Find JSON array in response
|
||||
start = response.find("[")
|
||||
end = response.rfind("]") + 1
|
||||
|
||||
if start == -1 or end == 0:
|
||||
raise ValueError("No JSON array found in response")
|
||||
|
||||
json_str = response[start:end]
|
||||
data = json.loads(json_str)
|
||||
|
||||
nodes = []
|
||||
for item in data:
|
||||
node = LearningNode(
|
||||
id=str(uuid.uuid4()),
|
||||
aoi_id=aoi_id,
|
||||
title=item.get("title", "Unbenannte Station"),
|
||||
theme=theme,
|
||||
position={
|
||||
"latitude": item.get("position", {}).get("latitude", 0),
|
||||
"longitude": item.get("position", {}).get("longitude", 0),
|
||||
},
|
||||
question=item.get("question", ""),
|
||||
hints=item.get("hints", []),
|
||||
answer=item.get("answer", ""),
|
||||
explanation=item.get("explanation", ""),
|
||||
node_type=NodeType(item.get("node_type", "question")),
|
||||
points=item.get("points", 10),
|
||||
approved=False,
|
||||
)
|
||||
nodes.append(node)
|
||||
|
||||
return nodes
|
||||
|
||||
except (json.JSONDecodeError, ValueError) as e:
|
||||
logger.error("Failed to parse LLM response", error=str(e))
|
||||
return []
|
||||
|
||||
def _generate_mock_nodes(
|
||||
self,
|
||||
aoi_id: str,
|
||||
theme: LearningTheme,
|
||||
difficulty: str,
|
||||
node_count: int,
|
||||
) -> list[LearningNode]:
|
||||
"""Generate mock learning nodes for development."""
|
||||
mock_questions = {
|
||||
LearningTheme.TOPOGRAPHIE: [
|
||||
("Höhenbestimmung", "Schätze die Höhe dieses Punktes.", "Ca. 500m über NN"),
|
||||
("Hangneigung", "Beschreibe die Steilheit des Hanges.", "Mäßig steil, ca. 15-20°"),
|
||||
("Talform", "Welche Form hat dieses Tal?", "V-förmiges Erosionstal"),
|
||||
],
|
||||
LearningTheme.LANDNUTZUNG: [
|
||||
("Gebäudetypen", "Welche Gebäude siehst du hier?", "Wohnhäuser und landwirtschaftliche Gebäude"),
|
||||
("Flächennutzung", "Wie wird das Land genutzt?", "Landwirtschaft und Siedlung"),
|
||||
("Infrastruktur", "Welche Verkehrswege erkennst du?", "Straße und Feldweg"),
|
||||
],
|
||||
LearningTheme.ORIENTIERUNG: [
|
||||
("Himmelsrichtung", "In welche Richtung fließt der Bach?", "Nach Nordwesten"),
|
||||
("Entfernung", "Wie weit ist es bis zum Waldrand?", "Etwa 200 Meter"),
|
||||
("Wegbeschreibung", "Beschreibe den Weg zum Aussichtspunkt.", "Nordöstlich, bergauf"),
|
||||
],
|
||||
}
|
||||
|
||||
questions = mock_questions.get(theme, mock_questions[LearningTheme.TOPOGRAPHIE])
|
||||
nodes = []
|
||||
|
||||
for i in range(min(node_count, len(questions))):
|
||||
title, question, answer = questions[i]
|
||||
nodes.append(LearningNode(
|
||||
id=str(uuid.uuid4()),
|
||||
aoi_id=aoi_id,
|
||||
title=title,
|
||||
theme=theme,
|
||||
position={"latitude": 47.7 + i * 0.001, "longitude": 9.19 + i * 0.001},
|
||||
question=question,
|
||||
hints=[f"Hinweis {j + 1}" for j in range(2)],
|
||||
answer=answer,
|
||||
explanation=f"Diese Aufgabe trainiert die Beobachtung von {theme.value}.",
|
||||
node_type=NodeType.QUESTION,
|
||||
points=10,
|
||||
approved=False,
|
||||
))
|
||||
|
||||
return nodes
|
||||
|
||||
async def get_nodes_for_aoi(
|
||||
self, aoi_id: str, theme: Optional[LearningTheme] = None
|
||||
) -> Optional[list[LearningNode]]:
|
||||
"""Get all learning nodes for an AOI."""
|
||||
nodes = _learning_nodes.get(aoi_id)
|
||||
|
||||
if nodes is None:
|
||||
return None
|
||||
|
||||
if theme is not None:
|
||||
nodes = [n for n in nodes if n.theme == theme]
|
||||
|
||||
return nodes
|
||||
|
||||
async def update_node(
|
||||
self, aoi_id: str, node_id: str, node_update: LearningNode
|
||||
) -> bool:
|
||||
"""Update a learning node."""
|
||||
nodes = _learning_nodes.get(aoi_id)
|
||||
if nodes is None:
|
||||
return False
|
||||
|
||||
for i, node in enumerate(nodes):
|
||||
if node.id == node_id:
|
||||
_learning_nodes[aoi_id][i] = node_update
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
async def delete_node(self, aoi_id: str, node_id: str) -> bool:
|
||||
"""Delete a learning node."""
|
||||
nodes = _learning_nodes.get(aoi_id)
|
||||
if nodes is None:
|
||||
return False
|
||||
|
||||
for i, node in enumerate(nodes):
|
||||
if node.id == node_id:
|
||||
del _learning_nodes[aoi_id][i]
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
async def approve_node(self, aoi_id: str, node_id: str) -> bool:
|
||||
"""Approve a learning node for student use."""
|
||||
nodes = _learning_nodes.get(aoi_id)
|
||||
if nodes is None:
|
||||
return False
|
||||
|
||||
for node in nodes:
|
||||
if node.id == node_id:
|
||||
node.approved = True
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
async def get_statistics(self) -> dict:
|
||||
"""Get statistics about learning node usage."""
|
||||
total = 0
|
||||
by_theme = {}
|
||||
by_difficulty = {}
|
||||
|
||||
for aoi_nodes in _learning_nodes.values():
|
||||
for node in aoi_nodes:
|
||||
total += 1
|
||||
theme = node.theme.value
|
||||
by_theme[theme] = by_theme.get(theme, 0) + 1
|
||||
|
||||
return {
|
||||
"total_nodes": total,
|
||||
"by_theme": by_theme,
|
||||
"by_difficulty": by_difficulty,
|
||||
"avg_per_aoi": total / len(_learning_nodes) if _learning_nodes else 0,
|
||||
"popular_theme": max(by_theme, key=by_theme.get) if by_theme else "topographie",
|
||||
}
|
||||
Reference in New Issue
Block a user