Files
breakpilot-lehrer/klausur-service/backend/worksheet_editor_ai.py
Benjamin Admin b2a0126f14 [split-required] Split remaining Python monoliths (Phase 1 continued)
klausur-service (7 monoliths):
- grid_editor_helpers.py (1,737 → 5 files: columns, filters, headers, zones)
- cv_cell_grid.py (1,675 → 7 files: build, legacy, streaming, merge, vocab)
- worksheet_editor_api.py (1,305 → 4 files: models, AI, reconstruct, routes)
- legal_corpus_ingestion.py (1,280 → 3 files: registry, chunking, ingestion)
- cv_review.py (1,248 → 4 files: pipeline, spell, LLM, barrel)
- cv_preprocessing.py (1,166 → 3 files: deskew, dewarp, barrel)
- rbac.py, admin_api.py, routes/eh.py remain (next batch)

backend-lehrer (1 monolith):
- classroom_engine/repository.py (1,705 → 7 files by domain)

All re-export barrels preserve backward compatibility.
Zero import errors verified.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-24 22:47:59 +02:00

486 lines
17 KiB
Python

"""
Worksheet Editor AI — AI image generation and AI worksheet modification.
"""
import io
import json
import base64
import logging
import re
import time
import random
from typing import List, Dict
import httpx
from worksheet_editor_models import (
AIImageRequest,
AIImageResponse,
AIImageStyle,
AIModifyRequest,
AIModifyResponse,
OLLAMA_URL,
STYLE_PROMPTS,
)
logger = logging.getLogger(__name__)
# =============================================
# AI IMAGE GENERATION
# =============================================
async def generate_ai_image_logic(request: AIImageRequest) -> AIImageResponse:
    """
    Generate an AI image using Ollama with a text-to-image model.

    Falls back to a locally rendered placeholder whenever Ollama is
    unreachable, times out, or has no suitable image model installed.

    Args:
        request: Prompt, style and dimensions for the image.

    Returns:
        AIImageResponse with a base64 PNG data URI (currently always the
        placeholder, since Ollama's image API is not wired up yet).

    Raises:
        HTTPException: 503 when Ollama responds but reports unhealthy,
            500 on unexpected errors.
    """
    from fastapi import HTTPException
    try:
        # Build enhanced prompt with style modifier (empty for unknown styles).
        style_modifier = STYLE_PROMPTS.get(request.style, "")
        enhanced_prompt = f"{request.prompt}, {style_modifier}"
        logger.info(f"Generating AI image: {enhanced_prompt[:100]}...")

        # Quick health check before committing to a long-running request.
        async with httpx.AsyncClient(timeout=10.0) as check_client:
            try:
                health_response = await check_client.get(f"{OLLAMA_URL}/api/tags")
                if health_response.status_code != 200:
                    raise HTTPException(status_code=503, detail="Ollama service not available")
            # FIX: also catch timeouts — previously only ConnectError was
            # handled, so a hung Ollama raised httpx.ReadTimeout, escaped to
            # the generic handler below and surfaced as a 500 instead of the
            # intended placeholder fallback.
            except (httpx.ConnectError, httpx.TimeoutException):
                logger.warning("Ollama not reachable, returning placeholder")
                return _generate_placeholder_image(request, enhanced_prompt)

        try:
            async with httpx.AsyncClient(timeout=300.0) as client:
                tags_response = await client.get(f"{OLLAMA_URL}/api/tags")
                available_models = [m.get("name", "") for m in tags_response.json().get("models", [])]
                # Heuristic scan for anything resembling a Stable Diffusion model.
                sd_model = None
                for model in available_models:
                    if "stable" in model.lower() or "sd" in model.lower() or "diffusion" in model.lower():
                        sd_model = model
                        break
                if not sd_model:
                    logger.warning("No Stable Diffusion model found in Ollama")
                    return _generate_placeholder_image(request, enhanced_prompt)
                # Image generation via Ollama is not implemented yet, so a
                # placeholder is returned even when a model is present.
                logger.info(f"SD model found: {sd_model}, but image generation API not implemented")
                return _generate_placeholder_image(request, enhanced_prompt)
        except Exception as e:
            # Best-effort: any failure during model lookup degrades to placeholder.
            logger.error(f"Image generation failed: {e}")
            return _generate_placeholder_image(request, enhanced_prompt)
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"AI image generation error: {e}")
        raise HTTPException(status_code=500, detail=str(e))
def _generate_placeholder_image(request: AIImageRequest, prompt: str) -> AIImageResponse:
    """
    Build a deterministic placeholder image when AI generation is unavailable.

    Draws a styled frame, a simple picture glyph, up to four wrapped lines of
    the (truncated) prompt text and a "KI-Bild (Platzhalter)" badge, then
    returns the result as a base64 PNG data URI inside an AIImageResponse.
    """
    from PIL import Image, ImageDraw, ImageFont

    width, height = request.width, request.height

    # Foreground/background colour pairs per style; indigo tones as default.
    palette = {
        AIImageStyle.REALISTIC: ("#2563eb", "#dbeafe"),
        AIImageStyle.CARTOON: ("#f97316", "#ffedd5"),
        AIImageStyle.SKETCH: ("#6b7280", "#f3f4f6"),
        AIImageStyle.CLIPART: ("#8b5cf6", "#ede9fe"),
        AIImageStyle.EDUCATIONAL: ("#059669", "#d1fae5"),
    }
    fg_color, bg_color = palette.get(request.style, ("#6366f1", "#e0e7ff"))

    img = Image.new('RGB', (width, height), bg_color)
    draw = ImageDraw.Draw(img)

    # Outer frame.
    draw.rectangle([5, 5, width - 6, height - 6], outline=fg_color, width=3)

    # Picture glyph: circle with a cross inside, slightly above centre.
    cx, cy = width // 2, height // 2 - 30
    draw.ellipse([cx - 40, cy - 40, cx + 40, cy + 40], outline=fg_color, width=3)
    draw.line([cx - 20, cy - 10, cx + 20, cy - 10], fill=fg_color, width=3)
    draw.line([cx, cy - 10, cx, cy + 20], fill=fg_color, width=3)

    try:
        font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 14)
    except Exception:
        font = ImageFont.load_default()

    # Greedy word-wrap of the truncated prompt at 40 characters per line
    # (an over-long single word simply becomes its own over-wide line).
    wrapped, line = [], ""
    for word in prompt[:200].split():
        if len(line) + len(word) + 1 <= 40:
            line = f"{line} {word}" if line else word
        else:
            if line:
                wrapped.append(line)
            line = word
    if line:
        wrapped.append(line)

    # Render at most four horizontally centred lines beneath the glyph.
    text_y = cy + 60
    for text_line in wrapped[:4]:
        bbox = draw.textbbox((0, 0), text_line, font=font)
        draw.text((cx - (bbox[2] - bbox[0]) // 2, text_y), text_line, fill=fg_color, font=font)
        text_y += 20

    # Small badge marking the image as a placeholder.
    badge_text = "KI-Bild (Platzhalter)"
    try:
        badge_font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 10)
    except Exception:
        badge_font = font
    draw.rectangle([10, height - 30, 150, height - 10], fill=fg_color)
    draw.text((15, height - 27), badge_text, fill="white", font=badge_font)

    # Encode the image as a PNG data URI.
    buffer = io.BytesIO()
    img.save(buffer, format='PNG')
    buffer.seek(0)
    image_base64 = f"data:image/png;base64,{base64.b64encode(buffer.getvalue()).decode('utf-8')}"

    return AIImageResponse(
        image_base64=image_base64,
        prompt_used=prompt,
        error="AI image generation not available. Using placeholder."
    )
# =============================================
# AI WORKSHEET MODIFICATION
# =============================================
async def modify_worksheet_with_ai_logic(request: AIModifyRequest) -> AIModifyResponse:
    """
    Modify a worksheet using AI based on natural language prompt.

    Parses the Fabric.js canvas JSON from the request, sends it together
    with the user's instruction to Ollama, and applies the returned
    add/modify/delete/info action to the canvas. Falls back to
    _handle_simple_modification when Ollama is unreachable, times out,
    or returns a non-200 status.

    Returns:
        AIModifyResponse with the updated canvas JSON (when changed), a
        user-facing German message, and an error string on failure. Never
        raises to the caller; all failures are folded into the response.
    """
    try:
        logger.info(f"AI modify request: {request.prompt[:100]}...")
        # Canvas state arrives as a JSON string; reject malformed input early.
        try:
            canvas_data = json.loads(request.canvas_json)
        except json.JSONDecodeError:
            return AIModifyResponse(
                message="Fehler beim Parsen des Canvas",
                error="Invalid canvas JSON"
            )
        # German system prompt: describes the Fabric.js object model and the
        # strict JSON reply format expected from the model. Runtime string —
        # intentionally left untouched.
        system_prompt = """Du bist ein Assistent fuer die Bearbeitung von Arbeitsblaettern.
Du erhaeltst den aktuellen Zustand eines Canvas im JSON-Format und eine Anweisung des Nutzers.
Deine Aufgabe ist es, die gewuenschten Aenderungen am Canvas vorzunehmen.
Der Canvas verwendet Fabric.js. Hier sind die wichtigsten Objekttypen:
- i-text: Interaktiver Text mit fontFamily, fontSize, fill, left, top
- rect: Rechteck mit left, top, width, height, fill, stroke, strokeWidth
- circle: Kreis mit left, top, radius, fill, stroke, strokeWidth
- line: Linie mit x1, y1, x2, y2, stroke, strokeWidth
Das Canvas ist 794x1123 Pixel (A4 bei 96 DPI).
Antworte NUR mit einem JSON-Objekt in diesem Format:
{
"action": "modify" oder "add" oder "delete" oder "info",
"objects": [...], // Neue/modifizierte Objekte (bei modify/add)
"message": "Kurze Beschreibung der Aenderung"
}
Wenn du Objekte hinzufuegst, generiere eindeutige IDs im Format "obj_<timestamp>_<random>".
"""
        # Canvas JSON is truncated to 5000 chars to bound the prompt size.
        user_prompt = f"""Aktueller Canvas-Zustand:
```json
{json.dumps(canvas_data, indent=2)[:5000]}
```
Nutzer-Anweisung: {request.prompt}
Fuehre die Aenderung durch und antworte mit dem JSON-Objekt."""
        try:
            async with httpx.AsyncClient(timeout=120.0) as client:
                response = await client.post(
                    f"{OLLAMA_URL}/api/generate",
                    json={
                        "model": request.model,
                        "prompt": user_prompt,
                        "system": system_prompt,
                        "stream": False,
                        "options": {
                            "temperature": 0.3,  # low temperature: predictable edits
                            "num_predict": 4096
                        }
                    }
                )
                if response.status_code != 200:
                    logger.warning(f"Ollama error: {response.status_code}, trying local fallback")
                    return _handle_simple_modification(request.prompt, canvas_data)
                ai_response = response.json().get("response", "")
        except httpx.ConnectError:
            logger.warning("Ollama not reachable")
            return _handle_simple_modification(request.prompt, canvas_data)
        except httpx.TimeoutException:
            logger.warning("Ollama timeout, trying local fallback")
            return _handle_simple_modification(request.prompt, canvas_data)
        # Extract the first '{' .. last '}' span — models often wrap the JSON
        # in prose or code fences despite the system prompt.
        try:
            json_start = ai_response.find('{')
            json_end = ai_response.rfind('}') + 1
            if json_start == -1 or json_end <= json_start:
                logger.warning(f"No JSON found in AI response: {ai_response[:200]}")
                return AIModifyResponse(
                    message="KI konnte die Anfrage nicht verarbeiten",
                    error="No JSON in response"
                )
            ai_json = json.loads(ai_response[json_start:json_end])
            action = ai_json.get("action", "info")
            message = ai_json.get("message", "Aenderungen angewendet")
            new_objects = ai_json.get("objects", [])
            # "info": no canvas change, just relay the model's message.
            if action == "info":
                return AIModifyResponse(message=message)
            # "add": append new objects to the existing canvas.
            if action == "add" and new_objects:
                existing_objects = canvas_data.get("objects", [])
                existing_objects.extend(new_objects)
                canvas_data["objects"] = existing_objects
                return AIModifyResponse(
                    modified_canvas_json=json.dumps(canvas_data),
                    message=message
                )
            # "modify": replace objects whose ids appear in the AI output,
            # keep everything else.
            if action == "modify" and new_objects:
                existing_objects = canvas_data.get("objects", [])
                new_ids = {obj.get("id") for obj in new_objects if obj.get("id")}
                kept_objects = [obj for obj in existing_objects if obj.get("id") not in new_ids]
                kept_objects.extend(new_objects)
                canvas_data["objects"] = kept_objects
                return AIModifyResponse(
                    modified_canvas_json=json.dumps(canvas_data),
                    message=message
                )
            # "delete": drop objects listed in delete_ids (if any were given).
            if action == "delete":
                delete_ids = ai_json.get("delete_ids", [])
                if delete_ids:
                    existing_objects = canvas_data.get("objects", [])
                    canvas_data["objects"] = [obj for obj in existing_objects if obj.get("id") not in delete_ids]
                    return AIModifyResponse(
                        modified_canvas_json=json.dumps(canvas_data),
                        message=message
                    )
            # Recognised action but nothing to apply (e.g. empty objects list).
            return AIModifyResponse(message=message)
        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse AI JSON: {e}")
            return AIModifyResponse(
                message="Fehler beim Verarbeiten der KI-Antwort",
                error=str(e)
            )
    except Exception as e:
        # Catch-all boundary: the API layer expects a response, not an exception.
        logger.error(f"AI modify error: {e}")
        return AIModifyResponse(
            message="Ein unerwarteter Fehler ist aufgetreten",
            error=str(e)
        )
def _handle_simple_modification(prompt: str, canvas_data: dict) -> AIModifyResponse:
    """
    Handle simple modifications locally when Ollama is not available.

    Recognises a handful of German/English keyword commands — headings,
    writing lines, font scaling, centering, numbering, boxes and grids —
    and applies them directly to the Fabric.js canvas dict. Anything else
    is rejected with a message asking for the AI service.

    Args:
        prompt: The user's natural-language instruction.
        canvas_data: Parsed Fabric.js canvas state; mutated in place.

    Returns:
        AIModifyResponse with updated canvas JSON and a German summary, or
        an error response when the command is not recognised.
    """
    prompt_lower = prompt.lower()
    objects = canvas_data.get("objects", [])

    def generate_id():
        # Same "obj_<timestamp>_<random>" format the AI path is told to use.
        return f"obj_{int(time.time()*1000)}_{random.randint(1000, 9999)}"

    # --- Add heading -----------------------------------------------------
    if "ueberschrift" in prompt_lower or "titel" in prompt_lower or "heading" in prompt_lower:
        # Quoted text in the prompt becomes the heading; generic default otherwise.
        text_match = re.search(r'"([^"]+)"', prompt)
        text = text_match.group(1) if text_match else "Ueberschrift"
        new_text = {
            "type": "i-text", "id": generate_id(), "text": text,
            "left": 397, "top": 50, "originX": "center",  # 397 = A4 horizontal centre
            "fontFamily": "Arial", "fontSize": 28, "fontWeight": "bold", "fill": "#000000"
        }
        objects.append(new_text)
        canvas_data["objects"] = objects
        return AIModifyResponse(
            modified_canvas_json=json.dumps(canvas_data),
            message=f"Ueberschrift '{text}' hinzugefuegt"
        )

    # --- Add lines for writing -------------------------------------------
    if "linie" in prompt_lower or "line" in prompt_lower or "schreib" in prompt_lower:
        num_match = re.search(r'(\d+)', prompt)
        num_lines = int(num_match.group(1)) if num_match else 5
        num_lines = min(num_lines, 20)  # cap so lines fit on the page
        start_y = 150
        line_spacing = 40
        for i in range(num_lines):
            new_line = {
                "type": "line", "id": generate_id(),
                "x1": 60, "y1": start_y + i * line_spacing,
                "x2": 734, "y2": start_y + i * line_spacing,
                "stroke": "#cccccc", "strokeWidth": 1
            }
            objects.append(new_line)
        canvas_data["objects"] = objects
        return AIModifyResponse(
            modified_canvas_json=json.dumps(canvas_data),
            message=f"{num_lines} Schreiblinien hinzugefuegt"
        )

    # --- Make text bigger --------------------------------------------------
    if "groesser" in prompt_lower or "bigger" in prompt_lower or "larger" in prompt_lower:
        modified = 0
        for obj in objects:
            if obj.get("type") in ["i-text", "text", "textbox"]:
                current_size = obj.get("fontSize", 16)
                obj["fontSize"] = int(current_size * 1.25)
                modified += 1
        canvas_data["objects"] = objects
        # Falls through to other commands when no text objects exist.
        if modified > 0:
            return AIModifyResponse(
                modified_canvas_json=json.dumps(canvas_data),
                message=f"{modified} Texte vergroessert"
            )

    # --- Center elements ---------------------------------------------------
    if "zentrier" in prompt_lower or "center" in prompt_lower or "mitte" in prompt_lower:
        center_x = 397
        for obj in objects:
            # Grid lines keep their absolute geometry.
            if not obj.get("isGrid"):
                obj["left"] = center_x
                obj["originX"] = "center"
        canvas_data["objects"] = objects
        return AIModifyResponse(
            modified_canvas_json=json.dumps(canvas_data),
            message="Elemente zentriert"
        )

    # --- Add numbering ------------------------------------------------------
    if "nummer" in prompt_lower or "nummerier" in prompt_lower or "1-10" in prompt_lower:
        # FIX: the original pattern used the character class [-bis]+, which
        # matched any run of '-', 'b', 'i', 's'; a proper alternation matches
        # "1-10" / "1 bis 10" without accidental hits like "1 s 10".
        range_match = re.search(r'(\d+)\s*(?:-|bis)\s*(\d+)', prompt)
        if range_match:
            start, end = int(range_match.group(1)), int(range_match.group(2))
        else:
            start, end = 1, 10
        y = 100
        # Cap at 20 entries regardless of the requested range.
        for i in range(start, min(end + 1, start + 20)):
            new_text = {
                "type": "i-text", "id": generate_id(), "text": f"{i}.",
                "left": 40, "top": y, "fontFamily": "Arial", "fontSize": 14, "fill": "#000000"
            }
            objects.append(new_text)
            y += 35
        canvas_data["objects"] = objects
        return AIModifyResponse(
            modified_canvas_json=json.dumps(canvas_data),
            message=f"Nummerierung {start}-{end} hinzugefuegt"
        )

    # --- Add rectangle/box ---------------------------------------------------
    if "rechteck" in prompt_lower or "box" in prompt_lower or "kasten" in prompt_lower:
        new_rect = {
            "type": "rect", "id": generate_id(),
            "left": 100, "top": 200, "width": 200, "height": 100,
            "fill": "transparent", "stroke": "#000000", "strokeWidth": 2
        }
        objects.append(new_rect)
        canvas_data["objects"] = objects
        return AIModifyResponse(
            modified_canvas_json=json.dumps(canvas_data),
            message="Rechteck hinzugefuegt"
        )

    # --- Add grid/raster ------------------------------------------------------
    if "raster" in prompt_lower or "grid" in prompt_lower or "tabelle" in prompt_lower:
        # FIX: original used the character class [x/\u00d7\*mal by] (single-char
        # match incl. a literal space), so "3 mal 4" never matched here and only
        # worked via the findall fallback; proper alternation handles
        # "3x4", "3/4", "3×4", "3*4", "3 mal 4" and "3 by 4" directly.
        dim_match = re.search(r'(\d+)\s*(?:[x/\u00d7\*]|mal|by)\s*(\d+)', prompt_lower)
        if dim_match:
            cols = int(dim_match.group(1))
            rows = int(dim_match.group(2))
        else:
            # Fallback: first two bare numbers anywhere in the prompt.
            nums = re.findall(r'(\d+)', prompt)
            if len(nums) >= 2:
                cols, rows = int(nums[0]), int(nums[1])
            else:
                cols, rows = 3, 4
        # Clamp to sensible page limits.
        cols = min(max(1, cols), 10)
        rows = min(max(1, rows), 15)
        canvas_width = 794   # A4 @ 96 DPI
        canvas_height = 1123
        margin = 60
        available_width = canvas_width - 2 * margin
        available_height = canvas_height - 2 * margin - 80  # leave room for a title
        cell_width = available_width / cols
        cell_height = min(available_height / rows, 80)
        start_x = margin
        start_y = 120
        grid_objects = []
        # Horizontal rules (rows + 1 of them).
        for r in range(rows + 1):
            y = start_y + r * cell_height
            grid_objects.append({
                "type": "line", "id": generate_id(),
                "x1": start_x, "y1": y,
                "x2": start_x + cols * cell_width, "y2": y,
                "stroke": "#666666", "strokeWidth": 1, "isGrid": True
            })
        # Vertical rules (cols + 1 of them).
        for c in range(cols + 1):
            x = start_x + c * cell_width
            grid_objects.append({
                "type": "line", "id": generate_id(),
                "x1": x, "y1": start_y,
                "x2": x, "y2": start_y + rows * cell_height,
                "stroke": "#666666", "strokeWidth": 1, "isGrid": True
            })
        objects.extend(grid_objects)
        canvas_data["objects"] = objects
        return AIModifyResponse(
            modified_canvas_json=json.dumps(canvas_data),
            message=f"{cols}x{rows} Raster hinzugefuegt ({cols} Spalten, {rows} Zeilen)"
        )

    # --- Default: command not recognised, AI service required ------------------
    return AIModifyResponse(
        message="Diese Aenderung erfordert den KI-Service. Bitte stellen Sie sicher, dass Ollama laeuft.",
        error="Complex modification requires Ollama"
    )