'use client'

/**
 * Magic Help Admin Page - Admin v2 Migration
 *
 * Comprehensive admin interface for TrOCR Handwriting Recognition and Exam Correction.
 * Features:
 * - Model status monitoring
 * - OCR testing with image upload
 * - Training data management
 * - Fine-tuning controls
 * - Architecture documentation
 * - Configuration settings
 *
 * Phase 1 Enhancements:
 * - Clipboard Paste (Ctrl+V) support
 * - Global Drag & Drop anywhere on window
 * - Skeleton loading states
 * - Live OCR preview with debounce
 * - Keyboard shortcuts
 *
 * Phase 2-4 Enhancements:
 * - Batch processing with SSE progress
 * - Confidence heatmap visualization
 * - Training metrics dashboard
 * - Model export functionality
 */
import { useState, useEffect, useCallback, useRef } from 'react'
import Link from 'next/link'
import { SkeletonOCRResult, SkeletonText, SkeletonDots } from '@/components/common/SkeletonText'
import { ConfidenceHeatmap, ConfidenceStats } from '@/components/ai/ConfidenceHeatmap'
import { TrainingMetrics } from '@/components/ai/TrainingMetrics'
import { BatchUploader } from '@/components/ai/BatchUploader'
import { AIModuleSidebarResponsive } from '@/components/ai/AIModuleSidebar'
import { PagePurpose } from '@/components/common/PagePurpose'

// Identifier of each admin tab; the Alt+1..Alt+6 keyboard shortcuts map onto this order.
type TabId = 'overview' | 'test' | 'batch' | 'training' | 'architecture' | 'settings'

/** Response shape of GET `${API_BASE}/api/klausur/trocr/status`. */
interface TrOCRStatus {
  status: 'available' | 'not_installed' | 'error'
  model_name?: string            // human-readable model name (e.g. "trocr-base")
  model_id?: string
  device?: string                // inference device reported by the backend
  is_loaded?: boolean
  has_lora_adapter?: boolean     // true when a fine-tuned LoRA adapter is active
  training_examples_count?: number
  error?: string                 // populated when status === 'error'
  install_command?: string       // shown to the user when status === 'not_installed'
}

/** Result of a single OCR extraction request. */
interface OCRResult {
  text: string
  confidence: number             // overall confidence in [0, 1]
  processing_time_ms: number
  model: string
  has_lora_adapter: boolean
  char_confidences?: number[]    // optional per-character confidences (heatmap input)
  word_boxes?: Array<{ text: string; confidence: number; bbox: number[] }>
}

/** One stored handwriting training example (image + ground-truth transcription). */
interface TrainingExample {
  image_path: string
  ground_truth: string
  teacher_id: string
  created_at: string
}

/** User-tunable settings, persisted in localStorage under 'magic-help-settings'. */
interface MagicSettings {
  autoDetectLines: boolean       // forwarded as ?detect_lines= to the extract endpoint
  confidenceThreshold: number
  maxImageSize: number
  loraRank: number               // LoRA rank (r) used for fine-tuning requests
  loraAlpha: number              // LoRA alpha scaling factor
  learningRate: number
  epochs: number
  batchSize: number
  enableCache: boolean
  cacheMaxAge: number
  livePreview: boolean           // when true, OCR runs automatically (debounced) after upload
  soundFeedback: boolean         // when true, play a short tone on confident results
}

// Defaults used until the user saves settings; saved values are merged over these.
const DEFAULT_SETTINGS: MagicSettings = {
  autoDetectLines: true,
  confidenceThreshold: 0.7,
  maxImageSize: 4096,
  loraRank: 8,
  loraAlpha: 32,
  learningRate: 0.00005,
  epochs: 3,
  batchSize: 4,
  enableCache: true,
  cacheMaxAge: 3600,
  livePreview: true,
  soundFeedback: false,
}

export default function MagicHelpPage() {
  // NOTE(review): the generic parameters of these useState calls appear to have
  // been stripped from this copy of the file (e.g. status was presumably
  // useState<TrOCRStatus | null>(null), examples useState<TrainingExample[]>([]),
  // activeTab useState<TabId>('overview')) — confirm against version control.
  const [activeTab, setActiveTab] = useState('overview')
  const [status, setStatus] = useState(null)
  const [loading, setLoading] = useState(true)
  const [ocrResult, setOcrResult] = useState(null)
  const [ocrLoading, setOcrLoading] = useState(false)
  const [examples, setExamples] = useState([])
  const [trainingImage, setTrainingImage] = useState(null)
  const [trainingText, setTrainingText] = useState('')
  const [fineTuning, setFineTuning] = useState(false)
  const [settings, setSettings] = useState(DEFAULT_SETTINGS)
  const [settingsSaved, setSettingsSaved] = useState(false)
  // Phase 1: New state for enhanced features
  const [globalDragActive, setGlobalDragActive] = useState(false)
  const [uploadedImage, setUploadedImage] = useState(null)
  const [imagePreview, setImagePreview] = useState(null)    // object URL for the preview <img>
  const [showShortcutHint, setShowShortcutHint] = useState(false)
  const debounceTimer = useRef(null)   // timer id for the live-preview debounce
  const dragCounter = useRef(0)        // balances dragenter/dragleave to detect leaving the window
  // Use same-origin nginx proxy to avoid CORS issues
  const API_BASE = '/klausur-api'

  // Fetch the TrOCR backend status; on network failure a synthetic error status
  // is stored instead. Always clears the initial loading flag.
  const fetchStatus = useCallback(async () => {
    try {
      const res = await fetch(`${API_BASE}/api/klausur/trocr/status`)
      const data = await res.json()
      setStatus(data)
    } catch {
      setStatus({ status: 'error', error: 'Failed to fetch status' })
    } finally {
      setLoading(false)
    }
  }, [API_BASE])

  // Load the stored training examples (best-effort; failures are only logged).
  const fetchExamples = useCallback(async () => {
    try {
      const res = await fetch(`${API_BASE}/api/klausur/trocr/training/examples`)
      const data = await res.json()
      setExamples(data.examples || [])
    } catch (error) {
console.error('Failed to fetch examples:', error)
    }
  }, [API_BASE])

  // Phase 1: Live OCR with debounce.
  // Uploads the file to the extract endpoint and stores the parsed result; on a
  // backend error payload or a network failure, a synthetic zero-confidence
  // OCRResult carrying the error text is stored so the UI always has something to render.
  const triggerOCR = useCallback(async (file: File) => {
    setOcrLoading(true)
    setOcrResult(null)
    const formData = new FormData()
    formData.append('file', file)
    try {
      const res = await fetch(`${API_BASE}/api/klausur/trocr/extract?detect_lines=${settings.autoDetectLines}`, {
        method: 'POST',
        body: formData,
      })
      const data = await res.json()
      if (data.text !== undefined) {
        setOcrResult(data)
        // Play sound feedback if enabled
        if (settings.soundFeedback && data.confidence > 0.7) {
          playSuccessSound()
        }
      } else {
        setOcrResult({ text: `Error: ${data.detail || 'Unknown error'}`, confidence: 0, processing_time_ms: 0, model: '', has_lora_adapter: false })
      }
    } catch (error) {
      setOcrResult({ text: `Error: ${error}`, confidence: 0, processing_time_ms: 0, model: '', has_lora_adapter: false })
    } finally {
      setOcrLoading(false)
    }
  }, [API_BASE, settings.autoDetectLines, settings.soundFeedback])

  // Play subtle success sound: an 800 Hz sine tone fading out over ~0.2 s via
  // the Web Audio API (with webkit prefix fallback). Any failure is swallowed so
  // missing audio support never breaks the page.
  const playSuccessSound = () => {
    try {
      const audioContext = new (window.AudioContext || (window as unknown as { webkitAudioContext: typeof AudioContext }).webkitAudioContext)()
      const oscillator = audioContext.createOscillator()
      const gainNode = audioContext.createGain()
      oscillator.connect(gainNode)
      gainNode.connect(audioContext.destination)
      oscillator.frequency.value = 800
      oscillator.type = 'sine'
      gainNode.gain.setValueAtTime(0.1, audioContext.currentTime)
      gainNode.gain.exponentialRampToValueAtTime(0.01, audioContext.currentTime + 0.2)
      oscillator.start(audioContext.currentTime)
      oscillator.stop(audioContext.currentTime + 0.2)
    } catch {
      // Audio not supported, ignore
    }
  }

  // Handle file upload with live preview: ignore non-image files, remember the
  // file, build an object-URL preview (revoked by a cleanup effect elsewhere),
  // switch to the test tab, and — if live preview is on — trigger OCR after a 500 ms debounce.
  const handleFileUpload = useCallback((file: File) => {
    if (!file.type.startsWith('image/')) return
    setUploadedImage(file)
    // Create preview URL
    const previewUrl = URL.createObjectURL(file)
    setImagePreview(previewUrl)
    // Auto-switch to test tab if not there
    setActiveTab('test')
    // Live preview: trigger OCR with debounce
    if (settings.livePreview) {
      if (debounceTimer.current) {
        clearTimeout(debounceTimer.current)
      }
      debounceTimer.current = setTimeout(() => {
        triggerOCR(file)
      }, 500)
    }
  }, [settings.livePreview, triggerOCR])

  // Manual OCR trigger (used when live preview is disabled).
  const handleManualOCR = () => {
    if (uploadedImage) {
      triggerOCR(uploadedImage)
    }
  }

  // Phase 1: Global Drag & Drop handler. Listens on the whole document so a
  // file can be dropped anywhere in the window; dragCounter balances nested
  // dragenter/dragleave events so the overlay only hides when the pointer
  // actually leaves the window.
  useEffect(() => {
    const handleDragEnter = (e: DragEvent) => {
      e.preventDefault()
      e.stopPropagation()
      dragCounter.current++
      if (e.dataTransfer?.types.includes('Files')) {
        setGlobalDragActive(true)
      }
    }
    const handleDragLeave = (e: DragEvent) => {
      e.preventDefault()
      e.stopPropagation()
      dragCounter.current--
      if (dragCounter.current === 0) {
        setGlobalDragActive(false)
      }
    }
    const handleDragOver = (e: DragEvent) => {
      e.preventDefault()
      e.stopPropagation()
    }
    const handleDrop = (e: DragEvent) => {
      e.preventDefault()
      e.stopPropagation()
      dragCounter.current = 0
      setGlobalDragActive(false)
      const file = e.dataTransfer?.files[0]
      if (file?.type.startsWith('image/')) {
        handleFileUpload(file)
      }
    }
    document.addEventListener('dragenter', handleDragEnter)
    document.addEventListener('dragleave', handleDragLeave)
    document.addEventListener('dragover', handleDragOver)
    document.addEventListener('drop', handleDrop)
    return () => {
      document.removeEventListener('dragenter', handleDragEnter)
      document.removeEventListener('dragleave', handleDragLeave)
      document.removeEventListener('dragover', handleDragOver)
      document.removeEventListener('drop', handleDrop)
    }
  }, [handleFileUpload])

  // Phase 1: Clipboard paste handler (Ctrl+V). The first image item found in
  // the clipboard is routed through the normal upload path.
  useEffect(() => {
    const handlePaste = async (e: ClipboardEvent) => {
      const items = e.clipboardData?.items
      if (!items) return
      for (const item of items) {
        if (item.type.startsWith('image/')) {
          e.preventDefault()
          const file = item.getAsFile()
          if (file) {
            handleFileUpload(file)
          }
          break
        }
      }
    }
    document.addEventListener('paste', handlePaste)
    return () =>
document.removeEventListener('paste', handlePaste)
  }, [handleFileUpload])

  // Phase 1: Keyboard shortcuts (document-wide).
  useEffect(() => {
    const handleKeyDown = (e: KeyboardEvent) => {
      // Ctrl+Enter: Start OCR
      if (e.ctrlKey && e.key === 'Enter' && uploadedImage) {
        e.preventDefault()
        handleManualOCR()
      }
      // Tab: Switch tabs (with numbers 1-6)
      if (e.key >= '1' && e.key <= '6' && e.altKey) {
        e.preventDefault()
        const tabIndex = parseInt(e.key) - 1
        const tabIds: TabId[] = ['overview', 'test', 'batch', 'training', 'architecture', 'settings']
        if (tabIds[tabIndex]) {
          setActiveTab(tabIds[tabIndex])
        }
      }
      // Escape: Clear uploaded image
      if (e.key === 'Escape' && uploadedImage) {
        setUploadedImage(null)
        setImagePreview(null)
        setOcrResult(null)
      }
      // ? : Show shortcuts
      if (e.key === '?') {
        setShowShortcutHint(prev => !prev)
      }
    }
    document.addEventListener('keydown', handleKeyDown)
    return () => document.removeEventListener('keydown', handleKeyDown)
  }, [uploadedImage])

  // Initial load: fetch backend status and training examples, then restore any
  // settings the user previously saved.
  useEffect(() => {
    fetchStatus()
    fetchExamples()
    // Load settings from localStorage
    const saved = localStorage.getItem('magic-help-settings')
    if (saved) {
      try {
        setSettings({ ...DEFAULT_SETTINGS, ...JSON.parse(saved) })
      } catch {
        // ignore parse errors
      }
    }
  }, [fetchStatus, fetchExamples])

  // Cleanup preview URL (revoke the object URL when the preview changes/unmounts)
  useEffect(() => {
    return () => {
      if (imagePreview) {
        URL.revokeObjectURL(imagePreview)
      }
    }
  }, [imagePreview])

  // POST one (image, ground-truth) pair to the training endpoint. Alerts the
  // user on both success (with the new total) and failure.
  const handleAddTrainingExample = async () => {
    if (!trainingImage || !trainingText.trim()) {
      alert('Please provide both an image and the correct text')
      return
    }
    const formData = new FormData()
    formData.append('file', trainingImage)
    try {
      const res = await fetch(`${API_BASE}/api/klausur/trocr/training/add?ground_truth=${encodeURIComponent(trainingText)}`, {
        method: 'POST',
        body: formData,
      })
      const data = await res.json()
      if (data.example_id) {
        alert(`Training example added! 
Total: ${data.total_examples}`)
        setTrainingImage(null)
        setTrainingText('')
        fetchStatus()
        fetchExamples()
      } else {
        alert(`Error: ${data.detail || 'Unknown error'}`)
      }
    } catch (error) {
      alert(`Error: ${error}`)
    }
  }

  // Kick off LoRA fine-tuning with the current settings after user confirmation;
  // refreshes the status (and thus the adapter flag) on success.
  const handleFineTune = async () => {
    if (!confirm('Start fine-tuning? This may take several minutes.')) return
    setFineTuning(true)
    try {
      const res = await fetch(`${API_BASE}/api/klausur/trocr/training/fine-tune`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          epochs: settings.epochs,
          learning_rate: settings.learningRate,
          lora_rank: settings.loraRank,
          lora_alpha: settings.loraAlpha,
        }),
      })
      const data = await res.json()
      if (data.status === 'success') {
        alert(`Fine-tuning successful!\nExamples used: ${data.examples_used}\nEpochs: ${data.epochs}`)
        fetchStatus()
      } else {
        alert(`Fine-tuning failed: ${data.message}`)
      }
    } catch (error) {
      alert(`Error: ${error}`)
    } finally {
      setFineTuning(false)
    }
  }

  // Persist settings to localStorage and show a transient "saved" indicator for 2 s.
  const saveSettings = () => {
    localStorage.setItem('magic-help-settings', JSON.stringify(settings))
    setSettingsSaved(true)
    setTimeout(() => setSettingsSaved(false), 2000)
  }

  // NOTE(review): the JSX elements originally returned below have been stripped
  // from this copy of the file — only their text children ("Verfuegbar", etc.)
  // remain, so these return statements are no longer valid code. Restore the
  // badge markup (presumably styled <span> elements) from version control.
  const getStatusBadge = () => {
    if (!status) return null
    switch (status.status) {
      case 'available': return Verfuegbar
      case 'not_installed': return Nicht installiert
      case 'error': return Fehler
    }
  }

  // Get confidence color for visualization (Tailwind class per confidence band).
  const getConfidenceColor = (confidence: number) => {
    if (confidence >= 0.9) return 'bg-green-500'
    if (confidence >= 0.7) return 'bg-yellow-500'
    return 'bg-red-500'
  }

  // State for new features (Phase 2-4: heatmap + training dashboard toggles)
  const [showHeatmap, setShowHeatmap] = useState(false)
  const [showTrainingDashboard, setShowTrainingDashboard] = useState(false)

  // Tab metadata driving the tab bar; order matches the Alt+1..Alt+6 shortcuts.
  const tabs = [
    { id: 'overview' as TabId, label: 'Uebersicht', icon: 'πŸ“Š', shortcut: 'Alt+1' },
    { id: 'test' as TabId, label: 'OCR Test', icon: 'πŸ”', shortcut: 'Alt+2' },
    { id: 'batch' as TabId, label: 'Batch OCR', icon: 'πŸ“', shortcut: 'Alt+3' },
    { id: 'training' as TabId, label: 'Training', icon: '🎯', shortcut: 'Alt+4' },
    { id: 'architecture' as TabId, label: 'Architektur', icon: 'πŸ—οΈ', shortcut: 'Alt+5' },
    { id: 'settings' as TabId, label: 'Einstellungen', icon: 'βš™οΈ', shortcut: 'Alt+6' },
  ]

  return (
{/* Global Drag Overlay */} {globalDragActive && (
πŸ“„
Bild hier ablegen
PNG, JPG - Handgeschriebener Text
)} {/* Keyboard Shortcuts Modal */} {showShortcutHint && (
setShowShortcutHint(false)}>
e.stopPropagation()}>

Tastenkuerzel

Bild einfuegen Ctrl+V
OCR starten Ctrl+Enter
Tab wechseln Alt+1-6
Bild entfernen Escape
Shortcuts anzeigen ?
)} {/* Header */}

✨ Magic Help - Handschrifterkennung

KI-gestuetzte Klausurkorrektur mit TrOCR und Privacy-by-Design

{getStatusBadge()}
{/* Page Purpose with Related Pages */} {/* AI Module Sidebar - Desktop: Fixed, Mobile: FAB + Drawer */} {/* Quick paste hint */}
πŸ’‘ Tipp: Druecke Ctrl+V um ein Bild aus der Zwischenablage einzufuegen, oder ziehe es einfach irgendwo ins Fenster.
{/* Tabs */}
{tabs.map((tab) => ( ))}
{/* Tab Content */} {activeTab === 'overview' && (
{/* Status Card */}

Systemstatus

{loading ? (
{[1, 2, 3, 4].map((i) => (
))}
) : status?.status === 'available' ? (
{status.model_name || 'trocr-base'}
Modell
{status.device || 'CPU'}
Geraet
{status.training_examples_count || 0}
Trainingsbeispiele
{status.has_lora_adapter ? 'Aktiv' : 'Keiner'}
LoRA Adapter
) : status?.status === 'not_installed' ? (

TrOCR ist nicht installiert. Fuehre aus:

{status.install_command}
) : (
{status?.error || 'Unbekannter Fehler'}
)}
{/* Quick Overview Cards */}
🎯

Handschrifterkennung

TrOCR erkennt automatisch handgeschriebenen Text in Klausuren. Das Modell wurde speziell fuer deutsche Handschriften optimiert.

πŸ”’

Privacy by Design

Alle Daten werden lokal verarbeitet. Schuelernamen werden durch QR-Codes pseudonymisiert - DSGVO-konform.

πŸ“ˆ

Kontinuierliches Lernen

Mit LoRA Fine-Tuning passt sich das Modell an individuelle Handschriften an - ohne das Basismodell zu veraendern.

{/* Workflow Overview */}

Magic Onboarding Workflow

πŸ“„
1. Upload
25 Klausuren hochladen
β†’
πŸ”
2. Analyse
Lokale OCR in 5-10 Sek
β†’
βœ…
3. Bestaetigung
Klasse, Schueler, Fach
β†’
πŸ€–
4. KI-Korrektur
Cloud mit Pseudonymisierung
β†’
πŸ“Š
5. Integration
Notenbuch, Zeugnisse
)} {activeTab === 'test' && (
{/* OCR Test */}

OCR Test

Teste die Handschrifterkennung mit einem eigenen Bild. Das Ergebnis zeigt den erkannten Text, Konfidenz und Verarbeitungszeit. {settings.livePreview && ( (Live-Vorschau aktiv) )}

{/* Upload Area */}
document.getElementById('ocr-file-input')?.click()} onDragOver={(e) => { e.preventDefault(); e.currentTarget.classList.add('border-purple-500', 'bg-purple-50') }} onDragLeave={(e) => { e.currentTarget.classList.remove('border-purple-500', 'bg-purple-50') }} onDrop={(e) => { e.preventDefault() e.stopPropagation() e.currentTarget.classList.remove('border-purple-500', 'bg-purple-50') const file = e.dataTransfer.files[0] if (file?.type.startsWith('image/')) handleFileUpload(file) }} > {imagePreview ? (
Hochgeladenes Bild
) : ( <>
πŸ“„
Bild hierher ziehen oder klicken zum Hochladen
PNG, JPG - Handgeschriebener Text
oder Ctrl+V zum Einfuegen
)}
{ const file = e.target.files?.[0] if (file) handleFileUpload(file) }} /> {/* Manual trigger button if live preview is off */} {uploadedImage && !settings.livePreview && ( )}
{/* Results Area */}
{ocrLoading ? ( ) : ocrResult ? (

Erkannter Text:

= 0.9 ? 'bg-green-100 text-green-700' : ocrResult.confidence >= 0.7 ? 'bg-yellow-100 text-yellow-700' : 'bg-red-100 text-red-700' }`}> {(ocrResult.confidence * 100).toFixed(0)}% Konfidenz
                      {ocrResult.text || '(Kein Text erkannt)'}
                    
{/* Confidence bar visualization */}
Konfidenz
{(ocrResult.confidence * 100).toFixed(1)}%
Verarbeitungszeit
{ocrResult.processing_time_ms}ms
Modell
{ocrResult.model || 'TrOCR'}
LoRA Adapter
{ocrResult.has_lora_adapter ? 'Ja' : 'Nein'}
{/* Quick training action */} {ocrResult.confidence < 0.9 && (

Die Erkennung koennte verbessert werden! Moechtest du dieses Beispiel zum Training hinzufuegen?

)}
) : (
πŸ”
Lade ein Bild hoch um die Erkennung zu testen
)}
{/* Confidence Heatmap (when image and result available) */} {imagePreview && ocrResult && ocrResult.confidence > 0 && (

Konfidenz-Visualisierung

{showHeatmap && ( ({ text: w.text, confidence: w.confidence, bbox: w.bbox as [number, number, number, number] })) || []} charConfidences={ocrResult.char_confidences || []} showLegend={true} toggleable={true} /> )}
)} {/* Confidence Interpretation */}

Konfidenz-Interpretation

90-100%
Sehr hohe Sicherheit - Text kann direkt uebernommen werden
70-90%
Gute Sicherheit - manuelle Ueberpruefung empfohlen
< 70%
Niedrige Sicherheit - manuelle Eingabe erforderlich
)} {activeTab === 'batch' && (
{/* Batch OCR Processing */}

Batch-Verarbeitung

Verarbeite mehrere Bilder gleichzeitig mit Echtzeit-Fortschrittsanzeige. Die Ergebnisse werden per Server-Sent Events gestreamt.

{ console.log('Batch complete:', results) }} />
{/* Batch Processing Info */}
πŸš€

Parallele Verarbeitung

Mehrere Bilder werden parallel verarbeitet fuer maximale Geschwindigkeit.

πŸ’Ύ

Smart Caching

Identische Bilder werden automatisch aus dem Cache geladen (unter 50ms).

πŸ“Š

Live-Fortschritt

Echtzeit-Updates via Server-Sent Events zeigen den Verarbeitungsfortschritt.

)} {activeTab === 'training' && (
{/* Training Overview */}

Training mit LoRA

LoRA (Low-Rank Adaptation) ermoeglicht effizientes Fine-Tuning ohne das Basismodell zu veraendern. Das Training erfolgt lokal auf Ihrem System.

{status?.training_examples_count || 0}
Trainingsbeispiele
10
Minimum benoetigt
{settings.loraRank}
LoRA Rank
{status?.has_lora_adapter ? 'βœ“' : 'βœ—'}
Adapter aktiv
{/* Progress Bar */}
Fortschritt zum Fine-Tuning {Math.min(100, ((status?.training_examples_count || 0) / 10) * 100).toFixed(0)}%
{/* Add Training Example */}

Trainingsbeispiel hinzufuegen

Lade ein Bild mit handgeschriebenem Text hoch und gib die korrekte Transkription ein.

setTrainingImage(e.target.files?.[0] || null)} /> {trainingImage && (
Bild ausgewaehlt: {trainingImage.name}
)}