fix: Restore all files lost during destructive rebase

A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. An earlier partial restore attempt
(660295e2) recovered only some of them.

This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).

Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Benjamin Admin committed on 2026-02-09 09:51:32 +01:00
commit bfdaf63ba9 (parent f7487ee240)
2009 changed files with 749,983 additions and 1,731 deletions

VoiceCapture.tsx

@@ -0,0 +1,244 @@
'use client'
import { useEffect, useRef, useState, useCallback } from 'react'
import { VoiceAPI, VoiceMessage, VoiceTask } from '@/lib/voice/voice-api'
import { VoiceIndicator } from './VoiceIndicator'
interface VoiceCaptureProps {
onTranscript?: (text: string, isFinal: boolean) => void
onIntent?: (intent: string, parameters: Record<string, unknown>) => void
onResponse?: (text: string) => void
onTaskCreated?: (task: VoiceTask) => void
onError?: (error: Error) => void
className?: string
}
/**
* Voice capture component with microphone button
* Handles WebSocket connection and audio streaming
*/
export function VoiceCapture({
onTranscript,
onIntent,
onResponse,
onTaskCreated,
onError,
className = '',
}: VoiceCaptureProps) {
const voiceApiRef = useRef<VoiceAPI | null>(null)
const [isInitialized, setIsInitialized] = useState(false)
const [isConnected, setIsConnected] = useState(false)
const [isListening, setIsListening] = useState(false)
const [status, setStatus] = useState<string>('idle')
const [audioLevel, setAudioLevel] = useState(0)
const [transcript, setTranscript] = useState<string>('')
const [error, setError] = useState<string | null>(null)
// Initialize voice API
useEffect(() => {
const init = async () => {
try {
const api = new VoiceAPI()
await api.initialize()
voiceApiRef.current = api
// Set up event handlers
api.setOnMessage(handleMessage)
api.setOnError(handleError)
api.setOnStatusChange(handleStatusChange)
setIsInitialized(true)
} catch (e) {
console.error('Failed to initialize voice API:', e)
setError('Sprachdienst konnte nicht initialisiert werden')
}
}
init()
return () => {
voiceApiRef.current?.disconnect()
}
}, [])
const handleMessage = useCallback(
(message: VoiceMessage) => {
switch (message.type) {
case 'transcript':
setTranscript(message.text)
onTranscript?.(message.text, message.final)
break
case 'intent':
onIntent?.(message.intent, message.parameters)
break
case 'response':
onResponse?.(message.text)
break
case 'task_created':
onTaskCreated?.({
id: message.task_id,
session_id: '',
type: message.task_type,
state: message.state,
created_at: new Date().toISOString(),
updated_at: new Date().toISOString(),
result_available: false,
})
break
case 'error':
setError(message.message)
onError?.(new Error(message.message))
break
}
},
[onTranscript, onIntent, onResponse, onTaskCreated, onError]
)
const handleError = useCallback(
(error: Error) => {
setError(error.message)
setIsListening(false)
onError?.(error)
},
[onError]
)
const handleStatusChange = useCallback((newStatus: string) => {
setStatus(newStatus)
if (newStatus === 'connected') {
setIsConnected(true)
} else if (newStatus === 'disconnected') {
setIsConnected(false)
setIsListening(false)
} else if (newStatus === 'listening') {
setIsListening(true)
} else if (newStatus === 'processing') {
setIsListening(false)
}
}, [])
const toggleListening = async () => {
if (!voiceApiRef.current) return
try {
setError(null)
if (isListening) {
// Stop listening
voiceApiRef.current.stopCapture()
setIsListening(false)
} else {
// Start listening
if (!isConnected) {
await voiceApiRef.current.connect()
}
await voiceApiRef.current.startCapture()
setIsListening(true)
}
} catch (e) {
console.error('Failed to toggle listening:', e)
setError('Mikrofon konnte nicht aktiviert werden')
}
}
const interrupt = () => {
voiceApiRef.current?.interrupt()
}
if (!isInitialized) {
return (
<div className={`flex items-center gap-2 ${className}`}>
<div className="animate-spin w-6 h-6 border-2 border-gray-300 border-t-blue-500 rounded-full" />
<span className="text-sm text-gray-500">Initialisiere...</span>
</div>
)
}
return (
<div className={`flex flex-col gap-4 ${className}`}>
{/* Error display */}
{error && (
<div className="bg-red-50 border border-red-200 text-red-700 px-4 py-2 rounded-lg text-sm">
{error}
</div>
)}
{/* Main controls */}
<div className="flex items-center gap-4">
{/* Microphone button */}
<button
onClick={toggleListening}
disabled={status === 'processing'}
className={`
relative w-16 h-16 rounded-full flex items-center justify-center
transition-all duration-200 focus:outline-none focus:ring-4
${
isListening
? 'bg-red-500 hover:bg-red-600 focus:ring-red-200'
: 'bg-blue-500 hover:bg-blue-600 focus:ring-blue-200'
}
${status === 'processing' ? 'opacity-50 cursor-not-allowed' : ''}
`}
>
{/* Microphone icon */}
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 24 24"
fill="white"
className="w-8 h-8"
>
{isListening ? (
// Stop icon
<path d="M6 6h12v12H6z" />
) : (
// Microphone icon
<path d="M12 14c1.66 0 3-1.34 3-3V5c0-1.66-1.34-3-3-3S9 3.34 9 5v6c0 1.66 1.34 3 3 3zm-1 1.93c-3.94-.49-7-3.85-7-7.93h2c0 2.76 2.24 5 5 5s5-2.24 5-5h2c0 4.08-3.06 7.44-7 7.93V18h4v2H8v-2h4v-2.07z" />
)}
</svg>
{/* Pulsing ring when listening */}
{isListening && (
<span className="absolute inset-0 rounded-full animate-ping bg-red-400 opacity-25" />
)}
</button>
{/* Status indicator */}
<VoiceIndicator
isListening={isListening}
audioLevel={audioLevel}
status={status}
/>
{/* Interrupt button (when responding) */}
{status === 'responding' && (
<button
onClick={interrupt}
className="px-4 py-2 text-sm bg-gray-200 hover:bg-gray-300 rounded-lg"
>
Unterbrechen
</button>
)}
</div>
{/* Transcript display */}
{transcript && (
<div className="bg-gray-50 border border-gray-200 rounded-lg p-4">
<p className="text-sm text-gray-500 mb-1">Erkannt:</p>
<p className="text-gray-800">{transcript}</p>
</div>
)}
{/* Instructions */}
<p className="text-xs text-gray-400">
{isListening
? 'Sprechen Sie jetzt... Klicken Sie erneut zum Beenden.'
: 'Klicken Sie auf das Mikrofon und sprechen Sie Ihren Befehl.'}
</p>
</div>
)
}
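
For orientation, a minimal usage sketch of `VoiceCapture` inside a Next.js client page; the import path and the page itself are assumptions rather than part of the restored tree:

```tsx
'use client'

// Illustrative only: the '@/components/voice' import path and this demo page
// are assumptions; only the VoiceCapture props come from the restored code.
import { VoiceCapture } from '@/components/voice'

export default function VoiceDemoPage() {
  return (
    <VoiceCapture
      className="max-w-md"
      // Interim results arrive with isFinal = false, the final transcript with true
      onTranscript={(text, isFinal) => console.log(isFinal ? 'final:' : 'partial:', text)}
      // Recognized intent plus its extracted parameters
      onIntent={(intent, parameters) => console.log('intent:', intent, parameters)}
      onError={(error) => console.error('voice error:', error)}
    />
  )
}
```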

VoiceCommandBar.tsx

@@ -0,0 +1,337 @@
'use client'
import { useState, useEffect, useRef, useCallback } from 'react'
import { VoiceAPI, VoiceMessage, VoiceTask } from '@/lib/voice/voice-api'
import { VoiceIndicator } from './VoiceIndicator'
interface Message {
id: string
role: 'user' | 'assistant'
content: string
timestamp: Date
intent?: string
task?: VoiceTask
}
interface VoiceCommandBarProps {
onTaskCreated?: (task: VoiceTask) => void
onTaskApproved?: (taskId: string) => void
className?: string
}
/**
* Full voice command bar with conversation history
* Shows transcript, responses, and pending tasks
*/
export function VoiceCommandBar({
onTaskCreated,
onTaskApproved,
className = '',
}: VoiceCommandBarProps) {
const voiceApiRef = useRef<VoiceAPI | null>(null)
const messagesEndRef = useRef<HTMLDivElement>(null)
const [isInitialized, setIsInitialized] = useState(false)
const [isConnected, setIsConnected] = useState(false)
const [isListening, setIsListening] = useState(false)
const [status, setStatus] = useState<string>('idle')
const [messages, setMessages] = useState<Message[]>([])
const [pendingTasks, setPendingTasks] = useState<VoiceTask[]>([])
const [error, setError] = useState<string | null>(null)
// Auto-scroll to bottom
useEffect(() => {
messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' })
}, [messages])
// Initialize voice API
useEffect(() => {
const init = async () => {
try {
const api = new VoiceAPI()
await api.initialize()
voiceApiRef.current = api
api.setOnMessage(handleMessage)
api.setOnError(handleError)
api.setOnStatusChange(handleStatusChange)
setIsInitialized(true)
} catch (e) {
console.error('Failed to initialize:', e)
setError('Sprachdienst konnte nicht initialisiert werden')
}
}
init()
return () => {
voiceApiRef.current?.disconnect()
}
}, [])
const handleMessage = useCallback(
(message: VoiceMessage) => {
switch (message.type) {
case 'transcript':
if (message.final) {
setMessages((prev) => [
...prev,
{
id: `msg-${Date.now()}`,
role: 'user',
content: message.text,
timestamp: new Date(),
},
])
}
break
case 'intent':
// Attach the recognized intent to the most recent user message (immutable update)
setMessages((prev) => {
const lastIdx = prev.map((m) => m.role).lastIndexOf('user')
if (lastIdx === -1) return prev
return prev.map((m, i) => (i === lastIdx ? { ...m, intent: message.intent } : m))
})
break
case 'response':
setMessages((prev) => [
...prev,
{
id: `msg-${Date.now()}`,
role: 'assistant',
content: message.text,
timestamp: new Date(),
},
])
break
case 'task_created': {
const task: VoiceTask = {
id: message.task_id,
session_id: '',
type: message.task_type,
state: message.state,
created_at: new Date().toISOString(),
updated_at: new Date().toISOString(),
result_available: false,
}
setPendingTasks((prev) => [...prev, task])
onTaskCreated?.(task)
// Attach the task to the most recent assistant message (immutable update)
setMessages((prev) => {
const lastIdx = prev.map((m) => m.role).lastIndexOf('assistant')
if (lastIdx === -1) return prev
return prev.map((m, i) => (i === lastIdx ? { ...m, task } : m))
})
break
}
case 'error':
setError(message.message)
break
}
},
[onTaskCreated]
)
const handleError = useCallback((error: Error) => {
setError(error.message)
setIsListening(false)
}, [])
const handleStatusChange = useCallback((newStatus: string) => {
setStatus(newStatus)
setIsConnected(newStatus !== 'idle' && newStatus !== 'disconnected')
setIsListening(newStatus === 'listening')
}, [])
const toggleListening = async () => {
if (!voiceApiRef.current) return
try {
setError(null)
if (isListening) {
voiceApiRef.current.stopCapture()
} else {
if (!isConnected) {
await voiceApiRef.current.connect()
}
await voiceApiRef.current.startCapture()
}
} catch (e) {
console.error('Failed to toggle listening:', e)
setError('Mikrofon konnte nicht aktiviert werden')
}
}
const approveTask = async (taskId: string) => {
try {
await voiceApiRef.current?.approveTask(taskId)
setPendingTasks((prev) => prev.filter((t) => t.id !== taskId))
onTaskApproved?.(taskId)
} catch (e) {
console.error('Failed to approve task:', e)
}
}
const rejectTask = async (taskId: string) => {
try {
await voiceApiRef.current?.rejectTask(taskId)
setPendingTasks((prev) => prev.filter((t) => t.id !== taskId))
} catch (e) {
console.error('Failed to reject task:', e)
}
}
if (!isInitialized) {
return (
<div className={`flex items-center justify-center p-8 ${className}`}>
<div className="animate-spin w-8 h-8 border-2 border-gray-300 border-t-blue-500 rounded-full" />
</div>
)
}
return (
<div
className={`flex flex-col bg-white rounded-xl shadow-lg overflow-hidden ${className}`}
>
{/* Header */}
<div className="flex items-center justify-between p-4 bg-gray-50 border-b">
<div className="flex items-center gap-3">
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 24 24"
fill="currentColor"
className="w-6 h-6 text-blue-500"
>
<path d="M12 14c1.66 0 3-1.34 3-3V5c0-1.66-1.34-3-3-3S9 3.34 9 5v6c0 1.66 1.34 3 3 3z" />
<path d="M17 11c0 2.76-2.24 5-5 5s-5-2.24-5-5H5c0 3.53 2.61 6.43 6 6.92V21h2v-3.08c3.39-.49 6-3.39 6-6.92h-2z" />
</svg>
<span className="font-medium text-gray-800">Breakpilot Voice</span>
</div>
<VoiceIndicator isListening={isListening} status={status} />
</div>
{/* Messages */}
<div className="flex-1 overflow-y-auto p-4 space-y-4 min-h-[200px] max-h-[400px]">
{messages.length === 0 ? (
<div className="text-center text-gray-400 py-8">
<p className="mb-2">Willkommen bei Breakpilot Voice!</p>
<p className="text-sm">
Klicken Sie auf das Mikrofon und sprechen Sie Ihren Befehl.
</p>
</div>
) : (
messages.map((msg) => (
<div
key={msg.id}
className={`flex ${msg.role === 'user' ? 'justify-end' : 'justify-start'}`}
>
<div
className={`max-w-[80%] rounded-lg px-4 py-2 ${
msg.role === 'user'
? 'bg-blue-500 text-white'
: 'bg-gray-100 text-gray-800'
}`}
>
<p>{msg.content}</p>
{msg.intent && (
<p className="text-xs mt-1 opacity-70">
Intent: {msg.intent}
</p>
)}
{msg.task && msg.task.state === 'ready' && (
<div className="flex gap-2 mt-2">
<button
onClick={() => approveTask(msg.task!.id)}
className="px-2 py-1 text-xs bg-green-500 text-white rounded hover:bg-green-600"
>
Bestaetigen
</button>
<button
onClick={() => rejectTask(msg.task!.id)}
className="px-2 py-1 text-xs bg-gray-300 text-gray-700 rounded hover:bg-gray-400"
>
Abbrechen
</button>
</div>
)}
</div>
</div>
))
)}
<div ref={messagesEndRef} />
</div>
{/* Error */}
{error && (
<div className="px-4 py-2 bg-red-50 border-t border-red-200 text-red-700 text-sm">
{error}
</div>
)}
{/* Input area */}
<div className="p-4 bg-gray-50 border-t">
<div className="flex items-center gap-4">
{/* Microphone button */}
<button
onClick={toggleListening}
disabled={status === 'processing'}
className={`
w-12 h-12 rounded-full flex items-center justify-center
transition-all duration-200 focus:outline-none focus:ring-4
${
isListening
? 'bg-red-500 hover:bg-red-600 focus:ring-red-200'
: 'bg-blue-500 hover:bg-blue-600 focus:ring-blue-200'
}
${status === 'processing' ? 'opacity-50 cursor-not-allowed' : ''}
`}
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 24 24"
fill="white"
className="w-6 h-6"
>
{isListening ? (
<path d="M6 6h12v12H6z" />
) : (
<path d="M12 14c1.66 0 3-1.34 3-3V5c0-1.66-1.34-3-3-3S9 3.34 9 5v6c0 1.66 1.34 3 3 3zm-1 1.93c-3.94-.49-7-3.85-7-7.93h2c0 2.76 2.24 5 5 5s5-2.24 5-5h2c0 4.08-3.06 7.44-7 7.93V18h4v2H8v-2h4v-2.07z" />
)}
</svg>
</button>
{/* Text hint */}
<div className="flex-1 text-sm text-gray-500">
{isListening
? 'Ich hoere zu... Sprechen Sie jetzt.'
: status === 'processing'
? 'Verarbeite...'
: 'Tippen Sie auf das Mikrofon um zu sprechen'}
</div>
{/* Pending tasks indicator */}
{pendingTasks.length > 0 && (
<div className="flex items-center gap-2">
<span className="text-xs text-gray-500">
{pendingTasks.length} Aufgabe(n)
</span>
<span className="w-2 h-2 bg-yellow-400 rounded-full animate-pulse" />
</div>
)}
</div>
</div>
</div>
)
}
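
In the same spirit, a sketch of how `VoiceCommandBar` might be embedded in a dashboard panel; the import path, the panel component, and the fixed height are assumptions:

```tsx
'use client'

// Illustrative only: the '@/components/voice' import path and this panel
// are assumptions; the VoiceCommandBar props come from the restored code.
import { VoiceCommandBar } from '@/components/voice'
import type { VoiceTask } from '@/lib/voice/voice-api'

export function AssistantPanel() {
  return (
    <VoiceCommandBar
      className="h-[600px]"
      // A task was derived from a spoken command and may still need approval
      onTaskCreated={(task: VoiceTask) => console.log('task created:', task.id, task.type)}
      // Fired after the user confirms a pending task in the conversation view
      onTaskApproved={(taskId) => console.log('task approved:', taskId)}
    />
  )
}
```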

VoiceIndicator.tsx

@@ -0,0 +1,90 @@
'use client'
import { useEffect, useState } from 'react'
interface VoiceIndicatorProps {
isListening: boolean
audioLevel?: number // 0-100
status?: string
}
/**
* Visual indicator for voice activity
* Shows audio level and status
*/
export function VoiceIndicator({
isListening,
audioLevel = 0,
status = 'idle',
}: VoiceIndicatorProps) {
const [bars, setBars] = useState<number[]>([0, 0, 0, 0, 0])
// Animate bars based on audio level
useEffect(() => {
if (!isListening) {
setBars([0, 0, 0, 0, 0])
return
}
const interval = setInterval(() => {
setBars((prev) =>
prev.map(() => {
const base = audioLevel / 100
const variance = Math.random() * 0.4
return Math.min(1, base + variance)
})
)
}, 100)
return () => clearInterval(interval)
}, [isListening, audioLevel])
const statusColors: Record<string, string> = {
idle: 'bg-gray-400',
connected: 'bg-blue-500',
listening: 'bg-green-500',
processing: 'bg-yellow-500',
responding: 'bg-purple-500',
error: 'bg-red-500',
}
const statusLabels: Record<string, string> = {
idle: 'Bereit',
connected: 'Verbunden',
listening: 'Hoert zu...',
processing: 'Verarbeitet...',
responding: 'Antwortet...',
error: 'Fehler',
}
return (
<div className="flex items-center gap-3">
{/* Status dot */}
<div
className={`w-3 h-3 rounded-full ${statusColors[status] || statusColors.idle} ${
isListening ? 'animate-pulse' : ''
}`}
/>
{/* Audio level bars */}
<div className="flex items-end gap-0.5 h-6">
{bars.map((level, i) => (
<div
key={i}
className={`w-1 rounded-full transition-all duration-100 ${
isListening ? 'bg-green-500' : 'bg-gray-300'
}`}
style={{
height: `${Math.max(4, level * 24)}px`,
}}
/>
))}
</div>
{/* Status text */}
<span className="text-sm text-gray-600">
{statusLabels[status] || status}
</span>
</div>
)
}
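
`VoiceIndicator` expects `audioLevel` as a 0-100 value. The restored `VoiceAPI` implementation is not part of this excerpt, but a level of that shape can be derived from the microphone `MediaStream` with the Web Audio API; the following is a purely illustrative sketch, and none of its names come from the restored code:

```ts
// Illustrative sketch: derive a rough 0-100 loudness level from a MediaStream
// using the Web Audio API. Not part of the restored VoiceAPI implementation.
export function watchAudioLevel(
  stream: MediaStream,
  onLevel: (level: number) => void
): () => void {
  const ctx = new AudioContext()
  const analyser = ctx.createAnalyser()
  analyser.fftSize = 256
  ctx.createMediaStreamSource(stream).connect(analyser)

  const samples = new Uint8Array(analyser.frequencyBinCount)
  const timer = setInterval(() => {
    analyser.getByteTimeDomainData(samples)
    // Root-mean-square of the waveform; samples are centered around 128
    let sum = 0
    for (const s of samples) {
      const v = (s - 128) / 128
      sum += v * v
    }
    const rms = Math.sqrt(sum / samples.length)
    onLevel(Math.min(100, Math.round(rms * 300))) // rough scaling to 0-100
  }, 100)

  // The returned cleanup stops polling and releases the AudioContext
  return () => {
    clearInterval(timer)
    void ctx.close()
  }
}
```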

index.ts

@@ -0,0 +1,6 @@
/**
* Voice Components
*/
export { VoiceCapture } from './VoiceCapture'
export { VoiceIndicator } from './VoiceIndicator'
export { VoiceCommandBar } from './VoiceCommandBar'
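
The `VoiceMessage` union imported from `@/lib/voice/voice-api` is not included in this diff; judging only from how `handleMessage` consumes it in `VoiceCapture` and `VoiceCommandBar`, its shape is approximately the following (a reconstruction, the actual definition may differ):

```ts
// Approximate reconstruction from usage in VoiceCapture and VoiceCommandBar;
// the real definition lives in @/lib/voice/voice-api and may differ.
export type VoiceMessage =
  | { type: 'transcript'; text: string; final: boolean }
  | { type: 'intent'; intent: string; parameters: Record<string, unknown> }
  | { type: 'response'; text: string }
  | { type: 'task_created'; task_id: string; task_type: string; state: string }
  | { type: 'error'; message: string }
```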