Add Phases 3.2-4.3: STT, stories, syllables, gamification
Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-school (push) Successful in 37s
CI / test-go-edu-search (push) Successful in 45s
CI / test-python-agent-core (push) Has been cancelled
CI / test-nodejs-website (push) Has been cancelled
CI / test-python-klausur (push) Has started running
Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-school (push) Successful in 37s
CI / test-go-edu-search (push) Successful in 45s
CI / test-python-agent-core (push) Has been cancelled
CI / test-nodejs-website (push) Has been cancelled
CI / test-python-klausur (push) Has started running
Phase 3.2 — MicrophoneInput.tsx: Browser Web Speech API for
speech-to-text recognition (EN+DE), integrated for pronunciation practice.
Phase 4.1 — Story Generator: LLM-powered mini-stories using vocabulary
words, with highlighted vocab in HTML output. Backend endpoint
POST /learning-units/{id}/generate-story + frontend /learn/[unitId]/story.
Phase 4.2 — SyllableBow.tsx: SVG arc component for syllable visualization
under words, clickable for per-syllable TTS.
Phase 4.3 — Gamification system:
- CoinAnimation.tsx: Floating coin rewards with accumulator
- CrownBadge.tsx: Crown/medal display for milestones
- ProgressRing.tsx: Circular progress indicator
- progress_api.py: Backend tracking coins, crowns, streaks per unit
Also adds "Geschichte" exercise type button to UnitCard.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
140
studio-v2/components/learn/MicrophoneInput.tsx
Normal file
140
studio-v2/components/learn/MicrophoneInput.tsx
Normal file
@@ -0,0 +1,140 @@
|
||||
'use client'
|
||||
|
||||
import React, { useState, useRef, useCallback } from 'react'
|
||||
|
||||
/** Props for the speech-to-text pronunciation practice input. */
interface MicrophoneInputProps {
  /** The word/phrase the learner is expected to say. */
  expectedText: string
  /** Recognition language: 'de' maps to de-DE, 'en' to en-GB. */
  lang: 'en' | 'de'
  /** Invoked (after a short feedback delay) with the recognized transcript and whether it matched. */
  onResult: (transcript: string, correct: boolean) => void
  /** Dark-mode flag used for styling. */
  isDark: boolean
}
|
||||
|
||||
export function MicrophoneInput({ expectedText, lang, onResult, isDark }: MicrophoneInputProps) {
|
||||
const [isListening, setIsListening] = useState(false)
|
||||
const [transcript, setTranscript] = useState('')
|
||||
const [feedback, setFeedback] = useState<'correct' | 'wrong' | null>(null)
|
||||
const recognitionRef = useRef<any>(null)
|
||||
|
||||
const startListening = useCallback(() => {
|
||||
const SpeechRecognition = (window as any).SpeechRecognition || (window as any).webkitSpeechRecognition
|
||||
if (!SpeechRecognition) {
|
||||
setTranscript('Spracherkennung nicht verfuegbar')
|
||||
return
|
||||
}
|
||||
|
||||
const recognition = new SpeechRecognition()
|
||||
recognition.lang = lang === 'de' ? 'de-DE' : 'en-GB'
|
||||
recognition.interimResults = false
|
||||
recognition.maxAlternatives = 3
|
||||
recognition.continuous = false
|
||||
|
||||
recognition.onresult = (event: any) => {
|
||||
const results = event.results[0]
|
||||
let bestMatch = ''
|
||||
let isCorrect = false
|
||||
|
||||
// Check all alternatives for a match
|
||||
for (let i = 0; i < results.length; i++) {
|
||||
const alt = results[i].transcript.trim().toLowerCase()
|
||||
if (alt === expectedText.trim().toLowerCase()) {
|
||||
bestMatch = results[i].transcript
|
||||
isCorrect = true
|
||||
break
|
||||
}
|
||||
if (!bestMatch) bestMatch = results[i].transcript
|
||||
}
|
||||
|
||||
setTranscript(bestMatch)
|
||||
setFeedback(isCorrect ? 'correct' : 'wrong')
|
||||
setIsListening(false)
|
||||
|
||||
setTimeout(() => {
|
||||
onResult(bestMatch, isCorrect)
|
||||
setFeedback(null)
|
||||
setTranscript('')
|
||||
}, isCorrect ? 1000 : 2500)
|
||||
}
|
||||
|
||||
recognition.onerror = (event: any) => {
|
||||
console.error('Speech recognition error:', event.error)
|
||||
setIsListening(false)
|
||||
if (event.error === 'no-speech') {
|
||||
setTranscript('Kein Ton erkannt. Nochmal versuchen.')
|
||||
} else if (event.error === 'not-allowed') {
|
||||
setTranscript('Mikrofon-Zugriff nicht erlaubt.')
|
||||
}
|
||||
}
|
||||
|
||||
recognition.onend = () => {
|
||||
setIsListening(false)
|
||||
}
|
||||
|
||||
recognitionRef.current = recognition
|
||||
recognition.start()
|
||||
setIsListening(true)
|
||||
setTranscript('')
|
||||
setFeedback(null)
|
||||
}, [lang, expectedText, onResult])
|
||||
|
||||
const stopListening = useCallback(() => {
|
||||
recognitionRef.current?.stop()
|
||||
setIsListening(false)
|
||||
}, [])
|
||||
|
||||
return (
|
||||
<div className="flex flex-col items-center gap-4">
|
||||
{/* Microphone Button */}
|
||||
<button
|
||||
onClick={isListening ? stopListening : startListening}
|
||||
className={`w-20 h-20 rounded-full flex items-center justify-center transition-all ${
|
||||
isListening
|
||||
? 'bg-red-500 text-white animate-pulse shadow-lg shadow-red-500/30'
|
||||
: feedback === 'correct'
|
||||
? 'bg-green-500 text-white shadow-lg shadow-green-500/30'
|
||||
: feedback === 'wrong'
|
||||
? 'bg-red-500/60 text-white'
|
||||
: isDark
|
||||
? 'bg-white/10 text-white/70 hover:bg-white/20 hover:text-white'
|
||||
: 'bg-slate-100 text-slate-500 hover:bg-slate-200 hover:text-slate-700'
|
||||
}`}
|
||||
>
|
||||
<svg className="w-8 h-8" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||
{isListening ? (
|
||||
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M21 12a9 9 0 11-18 0 9 9 0 0118 0zM10 9v6m4-6v6" />
|
||||
) : (
|
||||
<>
|
||||
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M19 11a7 7 0 01-7 7m0 0a7 7 0 01-7-7m7 7v4m0 0H8m4 0h4" />
|
||||
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 1a3 3 0 00-3 3v8a3 3 0 006 0V4a3 3 0 00-3-3z" />
|
||||
</>
|
||||
)}
|
||||
</svg>
|
||||
</button>
|
||||
|
||||
{/* Status Text */}
|
||||
<p className={`text-sm text-center ${isDark ? 'text-white/50' : 'text-slate-500'}`}>
|
||||
{isListening
|
||||
? 'Sprich jetzt...'
|
||||
: transcript
|
||||
? transcript
|
||||
: 'Tippe auf das Mikrofon'}
|
||||
</p>
|
||||
|
||||
{/* Feedback */}
|
||||
{feedback === 'correct' && (
|
||||
<p className={`text-lg font-semibold ${isDark ? 'text-green-300' : 'text-green-600'}`}>
|
||||
Richtig ausgesprochen!
|
||||
</p>
|
||||
)}
|
||||
{feedback === 'wrong' && (
|
||||
<div className="text-center">
|
||||
<p className={`text-sm ${isDark ? 'text-red-300' : 'text-red-600'}`}>
|
||||
Erkannt: "{transcript}"
|
||||
</p>
|
||||
<p className={`text-sm mt-1 ${isDark ? 'text-white/50' : 'text-slate-500'}`}>
|
||||
Erwartet: "{expectedText}"
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
117
studio-v2/components/learn/SyllableBow.tsx
Normal file
117
studio-v2/components/learn/SyllableBow.tsx
Normal file
@@ -0,0 +1,117 @@
|
||||
'use client'
|
||||
|
||||
import React, { useMemo } from 'react'
|
||||
|
||||
/** Props for the syllable-arc word display. */
interface SyllableBowProps {
  /** The whole word the syllables belong to. */
  word: string
  /** The word split into syllables (per the docs, from the backend pyphen endpoint). */
  syllables: string[]
  /** Optional per-syllable click handler (e.g. to trigger per-syllable TTS). */
  onSyllableClick?: (syllable: string, index: number) => void
  /** Dark-mode flag used for styling. */
  isDark: boolean
  /** Visual size preset; controls font size and arc height. Defaults to 'md'. */
  size?: 'sm' | 'md' | 'lg'
}
|
||||
|
||||
/**
|
||||
* SyllableBow — Renders a word with SVG arcs under each syllable.
|
||||
*
|
||||
* Uses pyphen syllable data from the backend.
|
||||
* Each syllable is clickable (triggers TTS for that syllable).
|
||||
*/
|
||||
export function SyllableBow({ word, syllables, onSyllableClick, isDark, size = 'md' }: SyllableBowProps) {
|
||||
const fontSize = size === 'sm' ? 20 : size === 'md' ? 32 : 44
|
||||
const charWidth = fontSize * 0.6
|
||||
const bowHeight = size === 'sm' ? 12 : size === 'md' ? 18 : 24
|
||||
const gap = 4
|
||||
|
||||
const layout = useMemo(() => {
|
||||
let x = 0
|
||||
return syllables.map((syl) => {
|
||||
const width = syl.length * charWidth
|
||||
const entry = { syllable: syl, x, width }
|
||||
x += width + gap
|
||||
return entry
|
||||
})
|
||||
}, [syllables, charWidth])
|
||||
|
||||
const totalWidth = layout.length > 0
|
||||
? layout[layout.length - 1].x + layout[layout.length - 1].width
|
||||
: 0
|
||||
|
||||
const svgHeight = bowHeight + 6
|
||||
|
||||
return (
|
||||
<div className="inline-flex flex-col items-center">
|
||||
{/* Letters */}
|
||||
<div className="flex" style={{ gap: `${gap}px` }}>
|
||||
{layout.map((item, idx) => (
|
||||
<span
|
||||
key={idx}
|
||||
onClick={() => onSyllableClick?.(item.syllable, idx)}
|
||||
className={`font-bold cursor-pointer select-none transition-colors ${
|
||||
onSyllableClick
|
||||
? (isDark ? 'hover:text-blue-300' : 'hover:text-blue-600')
|
||||
: ''
|
||||
} ${isDark ? 'text-white' : 'text-slate-900'}`}
|
||||
style={{ fontSize: `${fontSize}px`, letterSpacing: '0.02em' }}
|
||||
>
|
||||
{item.syllable}
|
||||
</span>
|
||||
))}
|
||||
</div>
|
||||
|
||||
{/* SVG Bows */}
|
||||
<svg
|
||||
width={totalWidth}
|
||||
height={svgHeight}
|
||||
viewBox={`0 0 ${totalWidth} ${svgHeight}`}
|
||||
className="mt-0.5"
|
||||
>
|
||||
{layout.map((item, idx) => {
|
||||
const cx = item.x + item.width / 2
|
||||
const startX = item.x + 2
|
||||
const endX = item.x + item.width - 2
|
||||
const controlY = svgHeight - 2
|
||||
|
||||
return (
|
||||
<path
|
||||
key={idx}
|
||||
d={`M ${startX} 2 Q ${cx} ${controlY} ${endX} 2`}
|
||||
fill="none"
|
||||
stroke={isDark ? 'rgba(96, 165, 250, 0.6)' : 'rgba(37, 99, 235, 0.5)'}
|
||||
strokeWidth={size === 'sm' ? 1.5 : 2}
|
||||
strokeLinecap="round"
|
||||
/>
|
||||
)
|
||||
})}
|
||||
</svg>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Simple client-side syllable splitting fallback.
|
||||
* For accurate results, use the backend pyphen endpoint.
|
||||
*/
|
||||
export function simpleSyllableSplit(word: string): string[] {
|
||||
// Very basic vowel-based heuristic for display purposes
|
||||
const vowels = /[aeiouyäöü]/i
|
||||
const chars = word.split('')
|
||||
const syllables: string[] = []
|
||||
let current = ''
|
||||
|
||||
for (let i = 0; i < chars.length; i++) {
|
||||
current += chars[i]
|
||||
if (
|
||||
vowels.test(chars[i]) &&
|
||||
i < chars.length - 1 &&
|
||||
current.length >= 2
|
||||
) {
|
||||
// Check if next char starts a new consonant cluster
|
||||
if (!vowels.test(chars[i + 1]) && i + 2 < chars.length && vowels.test(chars[i + 2])) {
|
||||
syllables.push(current)
|
||||
current = ''
|
||||
}
|
||||
}
|
||||
}
|
||||
if (current) syllables.push(current)
|
||||
return syllables.length > 0 ? syllables : [word]
|
||||
}
|
||||
@@ -25,6 +25,7 @@ const exerciseTypes = [
|
||||
{ key: 'flashcards', label: 'Karteikarten', icon: 'M19 11H5m14 0a2 2 0 012 2v6a2 2 0 01-2 2H5a2 2 0 01-2-2v-6a2 2 0 012-2m14 0V9a2 2 0 00-2-2M5 11V9a2 2 0 012-2m0 0V5a2 2 0 012-2h6a2 2 0 012 2v2M7 7h10', color: 'from-amber-500 to-orange-500' },
|
||||
{ key: 'quiz', label: 'Quiz', icon: 'M8.228 9c.549-1.165 2.03-2 3.772-2 2.21 0 4 1.343 4 3 0 1.4-1.278 2.575-3.006 2.907-.542.104-.994.54-.994 1.093m0 3h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z', color: 'from-purple-500 to-pink-500' },
|
||||
{ key: 'type', label: 'Eintippen', icon: 'M9.75 17L9 20l-1 1h8l-1-1-.75-3M3 13h18M5 17h14a2 2 0 002-2V5a2 2 0 00-2-2H5a2 2 0 00-2 2v10a2 2 0 002 2z', color: 'from-blue-500 to-cyan-500' },
|
||||
{ key: 'story', label: 'Geschichte', icon: 'M12 6.253v13m0-13C10.832 5.477 9.246 5 7.5 5S4.168 5.477 3 6.253v13C4.168 18.477 5.754 18 7.5 18s3.332.477 4.5 1.253m0-13C13.168 5.477 14.754 5 16.5 5c1.747 0 3.332.477 4.5 1.253v13C19.832 18.477 18.247 18 16.5 18c-1.746 0-3.332.477-4.5 1.253', color: 'from-amber-500 to-yellow-500' },
|
||||
]
|
||||
|
||||
export function UnitCard({ unit, isDark, glassCard, onDelete }: UnitCardProps) {
|
||||
|
||||
Reference in New Issue
Block a user