Add Phases 3.2-4.3: STT, stories, syllables, gamification
Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-school (push) Successful in 37s
CI / test-go-edu-search (push) Successful in 45s
CI / test-python-agent-core (push) Has been cancelled
CI / test-nodejs-website (push) Has been cancelled
CI / test-python-klausur (push) Has started running

Phase 3.2 — MicrophoneInput.tsx: Browser Web Speech API for
speech-to-text recognition (EN+DE), integrated for pronunciation practice.

Phase 4.1 — Story Generator: LLM-powered mini-stories using vocabulary
words, with highlighted vocab in HTML output. Backend endpoint
POST /learning-units/{id}/generate-story + frontend /learn/[unitId]/story.

Phase 4.2 — SyllableBow.tsx: SVG arc component for syllable visualization
under words, clickable for per-syllable TTS.

Phase 4.3 — Gamification system:
- CoinAnimation.tsx: Floating coin rewards with accumulator
- CrownBadge.tsx: Crown/medal display for milestones
- ProgressRing.tsx: Circular progress indicator
- progress_api.py: Backend tracking coins, crowns, streaks per unit

Also adds "Geschichte" exercise type button to UnitCard.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Benjamin Admin
2026-04-16 07:22:52 +02:00
parent 20a0585eb1
commit 9dddd80d7a
11 changed files with 904 additions and 0 deletions

View File

@@ -0,0 +1,140 @@
'use client'
import React, { useState, useRef, useCallback, useEffect } from 'react'
/** Props for the {@link MicrophoneInput} pronunciation-practice widget. */
interface MicrophoneInputProps {
  /** Word or phrase the learner is expected to say; compared case-insensitively. */
  expectedText: string
  /** Recognition language: 'de' maps to de-DE, 'en' maps to en-GB. */
  lang: 'en' | 'de'
  /** Invoked after the feedback delay with the recognized transcript and whether it matched. */
  onResult: (transcript: string, correct: boolean) => void
  /** Dark-mode flag controlling the color scheme. */
  isDark: boolean
}
/**
 * Pronunciation-practice microphone button backed by the browser
 * Web Speech API (SpeechRecognition / webkitSpeechRecognition).
 *
 * Listens for one utterance, compares every recognizer alternative against
 * `expectedText` (case-insensitive, trimmed), shows correct/wrong feedback,
 * then reports the result via `onResult` after a short delay.
 *
 * Falls back to an inline German error message if the API is unavailable
 * (e.g. Firefox) or microphone access is denied.
 */
export function MicrophoneInput({ expectedText, lang, onResult, isDark }: MicrophoneInputProps) {
  const [isListening, setIsListening] = useState(false)
  const [transcript, setTranscript] = useState('')
  const [feedback, setFeedback] = useState<'correct' | 'wrong' | null>(null)
  // Active SpeechRecognition instance. Typed `any` because the Web Speech API
  // is not in lib.dom for all TS configurations.
  const recognitionRef = useRef<any>(null)
  // Pending feedback-reset timer, tracked so it can be cancelled.
  const timerRef = useRef<ReturnType<typeof setTimeout> | null>(null)

  // FIX: release the microphone and cancel the pending timer on unmount.
  // Previously the recognition session kept running and the setTimeout could
  // invoke onResult / setState after the component was gone.
  useEffect(() => {
    return () => {
      if (timerRef.current !== null) clearTimeout(timerRef.current)
      recognitionRef.current?.abort?.()
    }
  }, [])

  const startListening = useCallback(() => {
    const SpeechRecognition =
      (window as any).SpeechRecognition || (window as any).webkitSpeechRecognition
    if (!SpeechRecognition) {
      // Feature detection: API unsupported in this browser.
      setTranscript('Spracherkennung nicht verfuegbar')
      return
    }
    // FIX: cancel a still-pending feedback timer from a previous attempt so a
    // stale callback cannot clear the new attempt's state or double-fire onResult.
    if (timerRef.current !== null) {
      clearTimeout(timerRef.current)
      timerRef.current = null
    }
    const recognition = new SpeechRecognition()
    recognition.lang = lang === 'de' ? 'de-DE' : 'en-GB'
    recognition.interimResults = false
    recognition.maxAlternatives = 3
    recognition.continuous = false
    recognition.onresult = (event: any) => {
      const results = event.results[0]
      const expected = expectedText.trim().toLowerCase()
      let bestMatch = ''
      let isCorrect = false
      // Accept any recognizer alternative that matches the expected text;
      // otherwise keep the top hypothesis for display in the "wrong" feedback.
      for (let i = 0; i < results.length; i++) {
        const alt = results[i].transcript.trim().toLowerCase()
        if (alt === expected) {
          bestMatch = results[i].transcript
          isCorrect = true
          break
        }
        if (!bestMatch) bestMatch = results[i].transcript
      }
      setTranscript(bestMatch)
      setFeedback(isCorrect ? 'correct' : 'wrong')
      setIsListening(false)
      // Show feedback briefly (longer on a miss so the learner can compare
      // recognized vs. expected), then report and reset for the next attempt.
      timerRef.current = setTimeout(() => {
        timerRef.current = null
        onResult(bestMatch, isCorrect)
        setFeedback(null)
        setTranscript('')
      }, isCorrect ? 1000 : 2500)
    }
    recognition.onerror = (event: any) => {
      console.error('Speech recognition error:', event.error)
      setIsListening(false)
      if (event.error === 'no-speech') {
        setTranscript('Kein Ton erkannt. Nochmal versuchen.')
      } else if (event.error === 'not-allowed') {
        setTranscript('Mikrofon-Zugriff nicht erlaubt.')
      }
    }
    recognition.onend = () => {
      setIsListening(false)
    }
    recognitionRef.current = recognition
    recognition.start()
    setIsListening(true)
    setTranscript('')
    setFeedback(null)
  }, [lang, expectedText, onResult])

  const stopListening = useCallback(() => {
    recognitionRef.current?.stop()
    setIsListening(false)
  }, [])

  return (
    <div className="flex flex-col items-center gap-4">
      {/* Microphone Button */}
      <button
        onClick={isListening ? stopListening : startListening}
        className={`w-20 h-20 rounded-full flex items-center justify-center transition-all ${
          isListening
            ? 'bg-red-500 text-white animate-pulse shadow-lg shadow-red-500/30'
            : feedback === 'correct'
            ? 'bg-green-500 text-white shadow-lg shadow-green-500/30'
            : feedback === 'wrong'
            ? 'bg-red-500/60 text-white'
            : isDark
            ? 'bg-white/10 text-white/70 hover:bg-white/20 hover:text-white'
            : 'bg-slate-100 text-slate-500 hover:bg-slate-200 hover:text-slate-700'
        }`}
      >
        <svg className="w-8 h-8" fill="none" stroke="currentColor" viewBox="0 0 24 24">
          {isListening ? (
            <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M21 12a9 9 0 11-18 0 9 9 0 0118 0zM10 9v6m4-6v6" />
          ) : (
            <>
              <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M19 11a7 7 0 01-7 7m0 0a7 7 0 01-7-7m7 7v4m0 0H8m4 0h4" />
              <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 1a3 3 0 00-3 3v8a3 3 0 006 0V4a3 3 0 00-3-3z" />
            </>
          )}
        </svg>
      </button>
      {/* Status Text */}
      <p className={`text-sm text-center ${isDark ? 'text-white/50' : 'text-slate-500'}`}>
        {isListening
          ? 'Sprich jetzt...'
          : transcript
          ? transcript
          : 'Tippe auf das Mikrofon'}
      </p>
      {/* Feedback */}
      {feedback === 'correct' && (
        <p className={`text-lg font-semibold ${isDark ? 'text-green-300' : 'text-green-600'}`}>
          Richtig ausgesprochen!
        </p>
      )}
      {feedback === 'wrong' && (
        <div className="text-center">
          <p className={`text-sm ${isDark ? 'text-red-300' : 'text-red-600'}`}>
            Erkannt: &quot;{transcript}&quot;
          </p>
          <p className={`text-sm mt-1 ${isDark ? 'text-white/50' : 'text-slate-500'}`}>
            Erwartet: &quot;{expectedText}&quot;
          </p>
        </div>
      )}
    </div>
  )
}