Files
breakpilot-lehrer/website/app/admin/llm-compare/wizard/page.tsx
Benjamin Boenisch 5a31f52310 Initial commit: breakpilot-lehrer - Lehrer KI Platform
Services: Admin-Lehrer, Backend-Lehrer, Studio v2, Website,
Klausur-Service, School-Service, Voice-Service, Geo-Service,
BreakPilot Drive, Agent-Core

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-11 23:47:26 +01:00

343 lines
11 KiB
TypeScript

'use client'
import { useState } from 'react'
import AdminLayout from '@/components/admin/AdminLayout'
import {
WizardStepper,
WizardNavigation,
EducationCard,
ArchitectureContext,
TestRunner,
TestSummary,
type WizardStep,
type TestCategoryResult,
type FullTestResults,
type EducationContent,
type ArchitectureContextType,
} from '@/components/wizard'
// ==============================================
// Constants
// ==============================================
// Base URL of the FastAPI backend; falls back to the local dev server.
const BACKEND_URL = process.env.NEXT_PUBLIC_BACKEND_URL || 'http://localhost:8000'
// Ordered wizard steps. `category` ties a step to a backend test category
// (see runCategoryTest); steps without one ('welcome', 'summary') are
// purely informational. `status` is the initial value — the page copies
// this array into state and mutates statuses as tests run.
const STEPS: WizardStep[] = [
{ id: 'welcome', name: 'Willkommen', icon: '👋', status: 'pending' },
{ id: 'gateway', name: 'LLM Gateway', icon: '🌐', status: 'pending', category: 'gateway' },
{ id: 'providers', name: 'Provider', icon: '🤖', status: 'pending', category: 'providers' },
{ id: 'summary', name: 'Zusammenfassung', icon: '📊', status: 'pending' },
]
// Static educational copy shown by <EducationCard> for each wizard step,
// keyed by step id (must match the `id` values in STEPS). Each `content`
// entry is one display line; empty strings render as blank lines.
// NOTE: the text is user-facing German UI copy — do not alter.
const EDUCATION_CONTENT: Record<string, EducationContent> = {
'welcome': {
title: 'Willkommen zum LLM Compare Wizard',
content: [
'Large Language Models (LLMs) sind das Herzstueck moderner KI.',
'',
'BreakPilot unterstuetzt mehrere Provider:',
'• OpenAI: GPT-4o, GPT-4, GPT-3.5-turbo',
'• Anthropic: Claude 3.5 Sonnet, Claude 3 Opus',
'• Lokale Modelle: Ollama (Llama 3, Mistral)',
'',
'Das LLM Gateway abstrahiert die Provider:',
'• Einheitliche API fuer alle Modelle',
'• Automatisches Fallback bei Ausfaellen',
'• Token-Counting und Kosten-Tracking',
'• Playbooks fuer vordefinierte Prompts',
],
},
'gateway': {
title: 'LLM Gateway - Zentrale Schnittstelle',
content: [
'Das Gateway routet Anfragen an die passenden Provider.',
'',
'Features:',
'• /llm/v1/chat - Unified Chat API',
'• /llm/playbooks - Vordefinierte Prompts',
'• /llm/health - Provider-Status',
'',
'Vorteile:',
'• Provider-Wechsel ohne Code-Aenderung',
'• Caching fuer haeufige Anfragen',
'• Rate-Limiting pro Benutzer',
'• Audit-Log aller Anfragen',
'',
'Aktivierung: LLM_GATEWAY_ENABLED=true',
],
},
'providers': {
title: 'LLM Provider - Modell-Auswahl',
content: [
'Verschiedene Provider fuer verschiedene Anforderungen.',
'',
'OpenAI:',
'• Beste allgemeine Leistung',
'• Hoechste Geschwindigkeit',
'• ~$0.01-0.03 pro 1K Tokens',
'',
'Anthropic (Claude):',
'• Beste lange Kontexte (200K)',
'• Sehr sicher und aligned',
'• ~$0.01-0.015 pro 1K Tokens',
'',
'Ollama (Lokal):',
'• Kostenlos nach Hardware',
'• Volle Datenkontrolle',
'• Langsamer ohne GPU',
],
},
'summary': {
title: 'Test-Zusammenfassung',
content: [
'Hier sehen Sie eine Uebersicht aller durchgefuehrten Tests:',
'• Gateway-Verfuegbarkeit',
'• Provider-Konnektivitaet',
'• Lokale LLM-Optionen',
],
},
}
// Architecture diagrams data rendered by <ArchitectureContext> on test
// steps, keyed by test category (matches `category` in STEPS). Only test
// steps ('gateway', 'providers') have entries; the page guards lookups
// accordingly.
const ARCHITECTURE_CONTEXTS: Record<string, ArchitectureContextType> = {
'gateway': {
layer: 'api',
services: ['backend'],
dependencies: ['OpenAI API', 'Anthropic API', 'Ollama'],
dataFlow: ['Browser', 'FastAPI', 'LLM Gateway', 'Provider API'],
},
'providers': {
layer: 'service',
services: ['backend'],
dependencies: ['API Keys', 'Rate Limits', 'Token Counter'],
dataFlow: ['LLM Gateway', 'Provider Selection', 'API Call', 'Response'],
},
}
// ==============================================
// Main Component
// ==============================================
/**
 * Admin wizard page that walks through the LLM test suite (gateway and
 * provider checks) step by step, showing educational content per step.
 *
 * State model:
 *  - `steps` is a live copy of STEPS whose per-step `status` is updated
 *    as tests run ('pending' → 'completed' | 'failed').
 *  - `categoryResults` holds per-category results keyed by category id.
 *  - `fullResults` holds the aggregate run-all result (summary step).
 */
export default function LLMCompareWizardPage() {
  const [currentStep, setCurrentStep] = useState(0)
  const [steps, setSteps] = useState<WizardStep[]>(STEPS)
  const [categoryResults, setCategoryResults] = useState<Record<string, TestCategoryResult>>({})
  const [fullResults, setFullResults] = useState<FullTestResults | null>(null)
  const [isLoading, setIsLoading] = useState(false)
  const [error, setError] = useState<string | null>(null)

  const currentStepData = steps[currentStep]
  // Steps carrying a `category` are runnable test steps; welcome/summary are not.
  const isTestStep = currentStepData?.category !== undefined
  const isWelcome = currentStepData?.id === 'welcome'
  const isSummary = currentStepData?.id === 'summary'

  /**
   * POST to an llm-tests backend endpoint and return the parsed JSON body.
   * Factored out of runCategoryTest/runAllTests, which previously duplicated
   * this fetch + HTTP status check verbatim. Throws on non-2xx responses.
   */
  async function postTest<T>(path: string): Promise<T> {
    const response = await fetch(`${BACKEND_URL}/api/admin/llm-tests/${path}`, {
      method: 'POST',
    })
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}: ${response.statusText}`)
    }
    return (await response.json()) as T
  }

  /** Run the test suite for one category and update that step's status. */
  const runCategoryTest = async (category: string) => {
    setIsLoading(true)
    setError(null)
    try {
      const result = await postTest<TestCategoryResult>(category)
      setCategoryResults((prev) => ({ ...prev, [category]: result }))
      // Mark the matching step completed/failed based on the test outcome.
      setSteps((prev) =>
        prev.map((step) =>
          step.category === category
            ? { ...step, status: result.failed === 0 ? 'completed' : 'failed' }
            : step
        )
      )
    } catch (err) {
      setError(err instanceof Error ? err.message : 'Unbekannter Fehler')
    } finally {
      setIsLoading(false)
    }
  }

  /** Run every category at once (summary step) and sync all step statuses. */
  const runAllTests = async () => {
    setIsLoading(true)
    setError(null)
    try {
      const results = await postTest<FullTestResults>('run-all')
      setFullResults(results)
      setSteps((prev) =>
        prev.map((step) => {
          if (step.category) {
            const catResult = results.categories.find((c) => c.category === step.category)
            if (catResult) {
              return { ...step, status: catResult.failed === 0 ? 'completed' : 'failed' }
            }
          }
          return step
        })
      )
      // Rebuild the per-category map so single-step views reflect this run.
      const newCategoryResults: Record<string, TestCategoryResult> = {}
      results.categories.forEach((cat) => {
        newCategoryResults[cat.category] = cat
      })
      setCategoryResults(newCategoryResults)
    } catch (err) {
      setError(err instanceof Error ? err.message : 'Unbekannter Fehler')
    } finally {
      setIsLoading(false)
    }
  }

  /** Advance one step; a still-pending current step is marked completed when left. */
  const goToNext = () => {
    if (currentStep < steps.length - 1) {
      setSteps((prev) =>
        prev.map((step, idx) =>
          idx === currentStep && step.status === 'pending'
            ? { ...step, status: 'completed' }
            : step
        )
      )
      setCurrentStep((prev) => prev + 1)
    }
  }

  /** Go back one step (no status changes). */
  const goToPrev = () => {
    if (currentStep > 0) {
      setCurrentStep((prev) => prev - 1)
    }
  }

  /**
   * Stepper click handler: always allow navigating backwards; allow
   * forward jumps only when the preceding step is no longer pending.
   */
  const handleStepClick = (index: number) => {
    if (index <= currentStep || steps[index - 1]?.status !== 'pending') {
      setCurrentStep(index)
    }
  }

  return (
    <AdminLayout
      title="LLM Compare Wizard"
      description="Interaktives Lernen und Testen der LLM Provider"
    >
      {/* Header */}
      <div className="bg-white rounded-lg shadow p-4 mb-6 flex items-center justify-between">
        <div className="flex items-center">
          <span className="text-3xl mr-3">🤖</span>
          <div>
            <h2 className="text-lg font-bold text-gray-800">LLM Compare Wizard</h2>
            <p className="text-sm text-gray-600">OpenAI, Anthropic & Ollama</p>
          </div>
        </div>
        <a href="/admin/llm-compare" className="text-blue-600 hover:text-blue-800 text-sm">
          &larr; Zurueck zu LLM Compare
        </a>
      </div>
      {/* Stepper */}
      <div className="bg-white rounded-lg shadow p-6 mb-6">
        <WizardStepper steps={steps} currentStep={currentStep} onStepClick={handleStepClick} />
      </div>
      {/* Content */}
      <div className="bg-white rounded-lg shadow p-6">
        <div className="flex items-center mb-6">
          <span className="text-3xl mr-3">{currentStepData?.icon}</span>
          <div>
            <h2 className="text-xl font-bold text-gray-800">
              Schritt {currentStep + 1}: {currentStepData?.name}
            </h2>
            <p className="text-gray-500 text-sm">
              {currentStep + 1} von {steps.length}
            </p>
          </div>
        </div>
        {/* NOTE(review): lookup may yield undefined for an unknown step id —
            presumably EducationCard tolerates that; confirm. */}
        <EducationCard content={EDUCATION_CONTENT[currentStepData?.id || '']} />
        {isTestStep && currentStepData?.category && ARCHITECTURE_CONTEXTS[currentStepData.category] && (
          <ArchitectureContext
            context={ARCHITECTURE_CONTEXTS[currentStepData.category]}
            currentStep={currentStepData.name}
          />
        )}
        {error && (
          <div className="bg-red-50 border border-red-200 text-red-700 rounded-lg p-4 mb-6">
            <strong>Fehler:</strong> {error}
          </div>
        )}
        {isWelcome && (
          <div className="text-center py-8">
            <button
              onClick={goToNext}
              className="bg-blue-600 text-white px-8 py-3 rounded-lg font-medium hover:bg-blue-700 transition-colors"
            >
              Wizard starten
            </button>
          </div>
        )}
        {isTestStep && currentStepData?.category && (
          <TestRunner
            category={currentStepData.category}
            categoryResult={categoryResults[currentStepData.category]}
            isLoading={isLoading}
            onRunTests={() => runCategoryTest(currentStepData.category!)}
          />
        )}
        {isSummary && (
          <div>
            {!fullResults ? (
              <div className="text-center py-8">
                <p className="text-gray-600 mb-4">
                  Fuehren Sie alle Tests aus um eine Zusammenfassung zu sehen.
                </p>
                <button
                  onClick={runAllTests}
                  disabled={isLoading}
                  className={`px-6 py-3 rounded-lg font-medium transition-colors ${
                    isLoading
                      ? 'bg-gray-400 cursor-not-allowed'
                      : 'bg-blue-600 text-white hover:bg-blue-700'
                  }`}
                >
                  {isLoading ? 'Alle Tests laufen...' : 'Alle Tests ausfuehren'}
                </button>
              </div>
            ) : (
              <TestSummary results={fullResults} />
            )}
          </div>
        )}
        <WizardNavigation
          currentStep={currentStep}
          totalSteps={steps.length}
          onPrev={goToPrev}
          onNext={goToNext}
          showNext={!isSummary}
          isLoading={isLoading}
        />
      </div>
      <div className="text-center text-gray-500 text-sm mt-6">
        Diese Tests pruefen die LLM-Integration.
        Bei Fragen wenden Sie sich an das KI-Team.
      </div>
    </AdminLayout>
  )
}