'use client'
import Link from 'next/link'
import { SkeletonDots } from '@/components/common/SkeletonText'
import { TrainingMetrics } from '@/components/ai/TrainingMetrics'
import type { TrOCRStatus, TrainingExample, MagicSettings } from '../types'
import { API_BASE } from '../types'
/**
 * Props for the TabTraining tab. All state and callbacks are owned by the
 * parent container; this tab and its cards are purely presentational.
 */
interface TabTrainingProps {
/** Current TrOCR backend status; may be null (e.g. not yet fetched) — confirm with container. */
status: TrOCRStatus | null
/** Training examples collected so far. */
examples: TrainingExample[]
/** Image file selected for the next training example, or null if none. */
trainingImage: File | null
/** Ground-truth transcription text entered for the selected image. */
trainingText: string
/** Presumably true while a fine-tuning run is in progress — verify in container. */
fineTuning: boolean
/** Training hyper-parameters (epochs, learningRate, loraRank, batchSize are read below). */
settings: MagicSettings
/** Whether the training-dashboard demo is currently shown. */
showTrainingDashboard: boolean
/** Sets or clears the selected training image. */
onSetTrainingImage: (file: File | null) => void
/** Updates the ground-truth transcription text. */
onSetTrainingText: (text: string) => void
/** Submits the current image + text pair as a new training example. */
onAddExample: () => void
/** Starts a fine-tuning run. */
onFineTune: () => void
/** Toggles the training-dashboard demo. */
onToggleDashboard: () => void
}
/**
 * Training tab: lets the user collect handwriting examples and launch LoRA
 * fine-tuning of the TrOCR model. Renders the overview, add-example,
 * fine-tuning, examples-list, and dashboard cards defined below.
 */
export function TabTraining({
status,
examples,
trainingImage,
trainingText,
fineTuning,
settings,
showTrainingDashboard,
onSetTrainingImage,
onSetTrainingText,
onAddExample,
onFineTune,
onToggleDashboard,
}: TabTrainingProps) {
// Examples collected so far; 0 while status is still null.
const exampleCount = status?.training_examples_count || 0
// Progress toward the 10-example fine-tuning threshold (see FineTuningCard,
// which requires exampleCount >= 10), capped at 100%.
const progressPct = Math.min(100, (exampleCount / 10) * 100)
// NOTE(review): the JSX element tags of this return value appear to have
// been stripped from this extract — only comments and embedded expressions
// remain, so the code below is not valid TSX as-is. Restore the markup from
// version control rather than reconstructing it from here.
return (
{/* Training Overview */}
{/* Add Training Example */}
{/* Fine-Tuning */}
{/* Training Examples List */}
{examples.length > 0 && (
)}
{/* Training Dashboard Demo */}
)
}
/* ------------------------------------------------------------------ */
/**
 * Overview card: explains LoRA fine-tuning (German UI copy) and shows
 * headline stats — example count, configured LoRA rank, whether an adapter
 * is active — plus the progress percentage toward fine-tuning readiness.
 *
 * NOTE(review): JSX element tags appear stripped from this extract; only
 * the text/expression children of the original markup remain below.
 */
function TrainingOverviewCard({
status,
settings,
exampleCount,
progressPct,
}: {
status: TrOCRStatus | null
settings: MagicSettings
exampleCount: number
progressPct: number
}) {
return (
Training mit LoRA
LoRA (Low-Rank Adaptation) ermoeglicht effizientes Fine-Tuning ohne das Basismodell zu veraendern.
Das Training erfolgt lokal auf Ihrem System.
{exampleCount}
Trainingsbeispiele
{settings.loraRank}
LoRA Rank
{status?.has_lora_adapter ? '\u2713' : '\u2717'}
Adapter aktiv
Fortschritt zum Fine-Tuning
{progressPct.toFixed(0)}%
)
}
/**
 * Card for adding one training example: upload an image of handwritten
 * text and type its correct transcription, then submit via onAddExample.
 *
 * NOTE(review): JSX element tags appear stripped from this extract. The
 * orphaned handler below was presumably a file-input onChange callback —
 * confirm against version control.
 */
function AddExampleCard({
trainingImage,
trainingText,
onSetTrainingImage,
onSetTrainingText,
onAddExample,
}: {
trainingImage: File | null
trainingText: string
onSetTrainingImage: (file: File | null) => void
onSetTrainingText: (text: string) => void
onAddExample: () => void
}) {
return (
Trainingsbeispiel hinzufuegen
Lade ein Bild mit handgeschriebenem Text hoch und gib die korrekte Transkription ein.
onSetTrainingImage(e.target.files?.[0] || null)}
/>
{trainingImage && (
Bild ausgewaehlt: {trainingImage.name}
)}
)
}
/**
 * Card to start fine-tuning: shows the configured hyper-parameters
 * (epochs, learning rate, LoRA rank, batch size), warns how many of the
 * 10 required examples are still missing, and links to ground-truth
 * collection via OCR labeling.
 *
 * NOTE(review): JSX element tags appear stripped from this extract;
 * `fineTuning`, `hasLoraAdapter`, and `onFineTune` are presumably consumed
 * by the missing markup (e.g. a disabled state on the start button) —
 * confirm against version control.
 */
function FineTuningCard({
settings,
fineTuning,
exampleCount,
hasLoraAdapter,
onFineTune,
}: {
settings: MagicSettings
fineTuning: boolean
exampleCount: number
hasLoraAdapter: boolean
onFineTune: () => void
}) {
return (
Fine-Tuning starten
Trainiere das Modell mit den gesammelten Beispielen. Der Prozess dauert
je nach Anzahl der Beispiele einige Minuten.
Epochen:
{settings.epochs}
Learning Rate:
{settings.learningRate}
LoRA Rank:
{settings.loraRank}
Batch Size:
{settings.batchSize}
{exampleCount < 10 && (
Noch {10 - exampleCount} Beispiele benoetigt
)}
🏷️
Ground Truth in OCR-Labeling sammeln
)
}
/**
 * Card listing all collected training examples: index, ground-truth text,
 * and creation date (formatted for the de-DE locale).
 *
 * NOTE(review): JSX element tags appear stripped from this extract; only
 * the expression children of the original markup remain below.
 */
function ExamplesListCard({ examples }: { examples: TrainingExample[] }) {
return (
Trainingsbeispiele ({examples.length})
{examples.map((ex, i) => (
{i + 1}.
{ex.ground_truth}
{new Date(ex.created_at).toLocaleDateString('de-DE')}
))}
)
}
/**
 * Card for the live training dashboard. When showDashboard is true it
 * presumably renders the imported TrainingMetrics component (stripped from
 * this extract — confirm); otherwise it shows explanatory placeholder text
 * for the demo mode.
 *
 * NOTE(review): JSX element tags appear stripped from this extract;
 * `onToggle` is presumably wired to a demo-start button in the missing
 * markup — confirm against version control.
 */
function TrainingDashboardCard({
showDashboard,
onToggle,
}: {
showDashboard: boolean
onToggle: () => void
}) {
return (
Training Dashboard
Live-Metriken waehrend des Trainings
{showDashboard ? (
) : (
📈
Das Training Dashboard zeigt Echtzeit-Metriken waehrend des Fine-Tunings
Klicke "Demo starten" um eine simulierte Training-Session zu sehen
)}
)
}