Files
breakpilot-core/pitch-deck/lib/hooks/usePresenterMode.ts
Benjamin Admin f126b40574 feat(presenter): continuous speech — no gaps between paragraphs/slides
- Concatenate all paragraphs + transition hint into one TTS call per slide
  → natural prosody, zero gaps within a slide
- Pre-fetch next slide's audio during current playback → seamless transitions
- Advance slide during transition phrase ("Let us look at...")
- Pause/resume without destroying audio → instant continue
- Subtitle display synced to playback position via timeupdate

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-20 17:02:13 +01:00

434 lines
13 KiB
TypeScript

'use client'
import { useState, useCallback, useRef, useEffect } from 'react'
import { Language } from '../types'
import { PresenterState, SlideScript } from '../presenter/types'
import { PRESENTER_SCRIPT } from '../presenter/presenter-script'
import { SLIDE_ORDER } from './useSlideNavigation'
/** Inputs supplied by the deck component that hosts presenter mode. */
interface UsePresenterModeConfig {
/** Imperatively shows the slide at the given zero-based index. */
goToSlide: (index: number) => void
/** Index of the slide currently displayed. */
currentSlide: number
/** Total number of slides in the deck (used for end-of-deck detection). */
totalSlides: number
/** Narration language; 'de' selects the German script text, otherwise English. */
language: Language
/** Initial value of the TTS on/off toggle; defaults to true when omitted. */
ttsEnabled?: boolean
}
/** Public surface returned by usePresenterMode. */
interface UsePresenterModeReturn {
/** Current lifecycle state ('idle' | 'presenting' | 'paused' are the values used here). */
state: PresenterState
/** Index of the segment currently shown as subtitle; may point one past the
 * last paragraph while the transition phrase is playing. */
currentParagraph: number
/** Start presenting from the current slide (call from a user gesture so audio unlocks). */
start: () => void
/** Stop entirely and clear subtitles; does not navigate the deck. */
stop: () => void
/** Pause playback without discarding the audio element. */
pause: () => void
/** Continue paused audio, or restart the current slide. */
resume: () => void
/** Advance one slide, continuing narration only if already presenting. */
skipSlide: () => void
/** Start if idle, otherwise stop. */
toggle: () => void
/** Subtitle text currently displayed. */
displayText: string
/** Overall deck progress in percent (0–100). */
progress: number
/** True while a TTS audio clip is actively playing. */
isSpeaking: boolean
/** True when the TTS endpoint answered the mount-time probe request. */
ttsAvailable: boolean
/** User-facing TTS on/off toggle state. */
ttsEnabled: boolean
setTtsEnabled: (enabled: boolean) => void
}
// Client-side audio cache: text hash → blob URL.
// NOTE(review): entries are never evicted and blob URLs are never released
// via URL.revokeObjectURL, so memory grows with the number of distinct
// texts — presumably acceptable for a bounded deck; confirm for long sessions.
const audioCache = new Map<string, string>()
/**
 * Derives a short, stable cache key for a (text, language) pair using a
 * 32-bit multiplicative string hash, rendered in base 36 with a 'tts_' prefix.
 * Deterministic across calls; not cryptographic — collisions merely reuse
 * a cached clip.
 */
function cacheKey(text: string, lang: string): string {
  // h*31 + code is congruent mod 2^32 to the classic ((h<<5)-h+code)|0 form.
  const hashed = `${text}|${lang}`
    .split('')
    .reduce((h, unit) => (h * 31 + unit.charCodeAt(0)) | 0, 0)
  // >>> 0 reinterprets the signed 32-bit hash as unsigned before encoding.
  return 'tts_' + (hashed >>> 0).toString(36)
}
// --- Slide audio plan: concatenates all paragraphs + transition into one text ---
/** One subtitle unit within a slide's concatenated narration text. */
interface AudioSegment {
// Display text for this segment (a paragraph or the transition phrase).
text: string
// 0..1 — fraction of the full audio at which this segment starts, estimated
// from cumulative word counts (assumes a roughly uniform speech rate).
startRatio: number
// True for the trailing transition phrase; triggers the early slide advance.
isTransition: boolean
}
/** Everything needed to narrate one slide with a single TTS request. */
interface SlideAudioPlan {
// All paragraphs plus the transition hint, joined with single spaces.
fullText: string
// Subtitle segments in playback order.
segments: AudioSegment[]
}
/**
 * Builds the single-request narration plan for one slide: every paragraph in
 * the requested language, plus the optional transition hint, concatenated
 * into one text with per-segment start ratios derived from cumulative word
 * counts. Returns null when the slide has no script or no paragraphs.
 */
function buildSlideAudioPlan(slideIdx: number, lang: Language): SlideAudioPlan | null {
  const script = PRESENTER_SCRIPT.find(s => s.slideId === SLIDE_ORDER[slideIdx])
  if (!script || script.paragraphs.length === 0) return null

  const pieces: string[] = []
  const segments: AudioSegment[] = []
  let wordCount = 0

  // Record a segment at the current word offset; ratios are normalized below.
  const append = (text: string, isTransition: boolean) => {
    segments.push({ text, startRatio: wordCount, isTransition })
    wordCount += text.split(/\s+/).length
    pieces.push(text)
  }

  for (const para of script.paragraphs) {
    append(lang === 'de' ? para.text_de : para.text_en, false)
  }

  const hint = (lang === 'de' ? script.transition_hint_de : script.transition_hint_en) || ''
  if (hint) append(hint, true)

  // Convert absolute word offsets into 0..1 fractions of the whole text.
  if (wordCount > 0) {
    segments.forEach(s => { s.startRatio /= wordCount })
  }

  return { fullText: pieces.join(' '), segments }
}
/**
 * Synthesizes speech for the given text via the TTS endpoint and returns a
 * blob URL for playback, memoizing results in the module-level audioCache.
 * Resolves to null on HTTP failure, network error, or abort — callers treat
 * null as "no audio available".
 */
async function fetchAudio(text: string, lang: string, signal?: AbortSignal): Promise<string | null> {
  const key = cacheKey(text, lang)
  const hit = audioCache.get(key)
  if (hit) return hit

  try {
    const response = await fetch('/api/presenter/tts', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ text, language: lang }),
      signal,
    })
    if (!response.ok) return null
    // Cache the object URL so repeated slides replay without re-fetching.
    const url = URL.createObjectURL(await response.blob())
    audioCache.set(key, url)
    return url
  } catch {
    // Aborts and network errors both collapse to the "unavailable" result.
    return null
  }
}
/**
 * Presenter-mode controller: plays one concatenated TTS clip per slide,
 * syncs subtitles to the playback position, pre-fetches the next slide's
 * audio during the current one, advances the deck while the transition
 * phrase is speaking, and falls back to word-count timers when TTS is
 * unavailable or disabled.
 *
 * @param goToSlide  imperative deck navigation (zero-based index)
 * @param currentSlide  index of the slide currently shown
 * @param totalSlides  deck length, used for end-of-deck detection
 * @param language  narration language; 'de' selects German text, otherwise English
 * @param ttsEnabled  initial value for the TTS toggle (default true)
 */
export function usePresenterMode({
goToSlide,
currentSlide,
totalSlides,
language,
ttsEnabled: initialTtsEnabled = true,
}: UsePresenterModeConfig): UsePresenterModeReturn {
const [state, setState] = useState<PresenterState>('idle')
const [currentParagraph, setCurrentParagraph] = useState(0)
const [displayText, setDisplayText] = useState('')
const [isSpeaking, setIsSpeaking] = useState(false)
const [ttsEnabled, setTtsEnabled] = useState(initialTtsEnabled)
const [ttsAvailable, setTtsAvailable] = useState(false)
// Mutable mirrors read inside audio/timer callbacks — avoids stale closures
// without re-creating the callbacks on every render.
const stateRef = useRef<PresenterState>('idle')
const slideIndexRef = useRef(currentSlide)
const audioRef = useRef<HTMLAudioElement | null>(null)
const abortRef = useRef<AbortController | null>(null)
const audioUnlockedRef = useRef(false)
// Set once the deck has been advanced during a slide's transition phrase,
// so the onended/timer completion handlers do not advance a second time.
const slideAdvancedRef = useRef(false)
const timerRefs = useRef<NodeJS.Timeout[]>([])
// Ref for playSlide — avoids stale closure in audio callbacks
const playSlideRef = useRef<(slideIdx: number) => void>(() => {})
// Unlock browser audio (must be called from user gesture).
// Plays a one-sample buffer through a fresh AudioContext so that later
// programmatic Audio.play() calls are not blocked by autoplay policies.
const unlockAudio = useCallback(() => {
if (audioUnlockedRef.current) return
try {
const ctx = new (window.AudioContext || (window as any).webkitAudioContext)()
const buffer = ctx.createBuffer(1, 1, 22050)
const source = ctx.createBufferSource()
source.buffer = buffer
source.connect(ctx.destination)
source.start(0)
audioUnlockedRef.current = true
} catch {}
}, [])
// Check TTS availability on mount.
// NOTE(review): this issues a real synthesis request ('Test') just to probe
// the endpoint; a dedicated health route would be cheaper — confirm cost.
useEffect(() => {
fetch('/api/presenter/tts', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ text: 'Test', language: 'de' }),
signal: AbortSignal.timeout(5000),
})
.then(res => setTtsAvailable(res.ok))
.catch(() => setTtsAvailable(false))
}, [])
// Cancel all audio and timers. Detaches handlers before dropping the audio
// element so no stale callbacks fire; only touches isSpeaking state.
const cancelAll = useCallback(() => {
if (audioRef.current) {
audioRef.current.pause()
audioRef.current.onended = null
audioRef.current.ontimeupdate = null
audioRef.current.onerror = null
audioRef.current = null
}
if (abortRef.current) {
abortRef.current.abort()
abortRef.current = null
}
for (const t of timerRefs.current) clearTimeout(t)
timerRefs.current = []
setIsSpeaking(false)
}, [])
// Keep refs in sync
useEffect(() => { slideIndexRef.current = currentSlide }, [currentSlide])
useEffect(() => { stateRef.current = state }, [state])
// --- Core: play one slide's audio (all paragraphs concatenated) ---
// Re-assigned inside an effect so the closure always sees fresh props;
// all callers (including the function's own continuations) go through
// playSlideRef.current.
useEffect(() => {
playSlideRef.current = async (slideIdx: number) => {
if (stateRef.current !== 'presenting') return
const plan = buildSlideAudioPlan(slideIdx, language)
if (!plan) {
// No script for this slide — skip to next (300 ms grace so the slide is visible)
if (slideIdx < totalSlides - 1) {
goToSlide(slideIdx + 1)
slideIndexRef.current = slideIdx + 1
const t = setTimeout(() => playSlideRef.current(slideIdx + 1), 300)
timerRefs.current.push(t)
} else {
setState('idle')
stateRef.current = 'idle'
setDisplayText('')
}
return
}
// Show first segment immediately
setDisplayText(plan.segments[0]?.text || '')
setCurrentParagraph(0)
slideAdvancedRef.current = false
// Pre-fetch next slide's audio in background (result lands in audioCache)
if (slideIdx < totalSlides - 1) {
const nextPlan = buildSlideAudioPlan(slideIdx + 1, language)
if (nextPlan) fetchAudio(nextPlan.fullText, language).catch(() => {})
}
// --- Non-TTS path: word-count based timing ---
// ~130 ms per word, minimum 3 s per slide; subtitle switches and the early
// slide advance are scheduled at each segment's word-ratio offset.
if (!ttsAvailable || !ttsEnabled) {
const words = plan.fullText.split(/\s+/).length
const totalTime = Math.max(words * 130, 3000)
for (const seg of plan.segments) {
const t = setTimeout(() => {
if (stateRef.current !== 'presenting') return
setDisplayText(seg.text)
if (seg.isTransition && slideIdx < totalSlides - 1 && !slideAdvancedRef.current) {
slideAdvancedRef.current = true
goToSlide(slideIdx + 1)
}
}, seg.startRatio * totalTime)
timerRefs.current.push(t)
}
const t = setTimeout(() => {
if (stateRef.current !== 'presenting') return
const next = slideIdx + 1
if (next < totalSlides) {
if (!slideAdvancedRef.current) goToSlide(next)
slideIndexRef.current = next
playSlideRef.current(next)
} else {
setState('idle')
stateRef.current = 'idle'
setDisplayText('')
}
}, totalTime)
timerRefs.current.push(t)
return
}
// --- TTS path: fetch + play full-slide audio ---
setIsSpeaking(true)
const controller = new AbortController()
abortRef.current = controller
const blobUrl = await fetchAudio(plan.fullText, language, controller.signal)
if (!blobUrl || controller.signal.aborted) {
// NOTE(review): on fetch failure (not abort) the presenter silently
// stalls here — no retry and no fallback to the timer path; confirm
// whether that is intended.
setIsSpeaking(false)
return
}
if (stateRef.current !== 'presenting') {
setIsSpeaking(false)
return
}
const audio = new Audio(blobUrl)
audioRef.current = audio
// Sync subtitle text to playback position.
// Maps the playback ratio (currentTime/duration) onto word-count ratios —
// an approximation that assumes a roughly uniform speech rate.
audio.ontimeupdate = () => {
if (!audio.duration || stateRef.current !== 'presenting') return
const ratio = audio.currentTime / audio.duration
for (let i = plan.segments.length - 1; i >= 0; i--) {
if (ratio >= plan.segments[i].startRatio) {
setDisplayText(plan.segments[i].text)
setCurrentParagraph(Math.min(i, plan.segments.length - 1))
// Advance slide when transition phrase starts playing
if (plan.segments[i].isTransition && !slideAdvancedRef.current && slideIdx < totalSlides - 1) {
slideAdvancedRef.current = true
goToSlide(slideIdx + 1)
}
break
}
}
}
// When audio finishes → immediately play next slide (pre-fetched)
audio.onended = () => {
setIsSpeaking(false)
audioRef.current = null
if (stateRef.current !== 'presenting') return
const next = slideIdx + 1
if (next < totalSlides) {
if (!slideAdvancedRef.current) goToSlide(next)
slideIndexRef.current = next
playSlideRef.current(next)
} else {
setState('idle')
stateRef.current = 'idle'
setDisplayText('')
}
}
audio.onerror = () => {
setIsSpeaking(false)
audioRef.current = null
// Skip to next slide on error
if (stateRef.current !== 'presenting') return
const next = slideIdx + 1
if (next < totalSlides) {
goToSlide(next)
slideIndexRef.current = next
playSlideRef.current(next)
}
}
try {
await audio.play()
} catch {
// Autoplay rejection or interrupted load — leave presenter state as-is
// so the user can retry via resume().
setIsSpeaking(false)
}
}
}, [language, totalSlides, goToSlide, ttsAvailable, ttsEnabled])
// Begin presenting from the currently shown slide (intended for a user
// gesture handler — also unlocks autoplay).
const start = useCallback(() => {
unlockAudio()
cancelAll()
setState('presenting')
stateRef.current = 'presenting'
playSlideRef.current(slideIndexRef.current)
}, [unlockAudio, cancelAll])
// Stop entirely and clear subtitles; does not navigate the deck.
const stop = useCallback(() => {
cancelAll()
setState('idle')
stateRef.current = 'idle'
setDisplayText('')
setCurrentParagraph(0)
}, [cancelAll])
// Pause: keep audio alive, just pause playback.
// Timers for the non-TTS path ARE cleared, so in that mode resume()
// restarts the current slide from its beginning rather than mid-way.
const pause = useCallback(() => {
if (audioRef.current) {
audioRef.current.pause()
}
for (const t of timerRefs.current) clearTimeout(t)
timerRefs.current = []
setState('paused')
stateRef.current = 'paused'
setIsSpeaking(false)
}, [])
// Resume: continue paused audio, or restart current slide
const resume = useCallback(() => {
setState('presenting')
stateRef.current = 'presenting'
if (audioRef.current && audioRef.current.paused && audioRef.current.currentTime > 0) {
setIsSpeaking(true)
audioRef.current.play().catch(() => {
// Playback refused (e.g. autoplay policy) — fall back to a full
// restart of the current slide.
setIsSpeaking(false)
playSlideRef.current(slideIndexRef.current)
})
} else {
playSlideRef.current(slideIndexRef.current)
}
}, [])
// Jump to the next slide; narration continues only if already presenting.
const skipSlide = useCallback(() => {
cancelAll()
const nextIdx = slideIndexRef.current + 1
if (nextIdx < totalSlides) {
goToSlide(nextIdx)
slideIndexRef.current = nextIdx
if (stateRef.current === 'presenting') {
playSlideRef.current(nextIdx)
}
}
}, [cancelAll, totalSlides, goToSlide])
// Toggle: start when idle, stop otherwise (a paused presenter stops).
const toggle = useCallback(() => {
unlockAudio()
if (stateRef.current === 'idle') {
start()
} else {
stop()
}
}, [unlockAudio, start, stop])
// Calculate overall progress as a percentage across scripted slides.
// NOTE(review): currentParagraph can equal paragraphs.length while the
// transition segment plays (see ontimeupdate), making slideProgress hit
// exactly 1 for that slide — looks intentional, but worth confirming.
const progress = (() => {
if (state === 'idle') return 0
const totalScripts = PRESENTER_SCRIPT.length
const currentScriptIdx = PRESENTER_SCRIPT.findIndex(s => s.slideId === SLIDE_ORDER[currentSlide])
if (currentScriptIdx < 0) return (currentSlide / totalSlides) * 100
const script = PRESENTER_SCRIPT[currentScriptIdx]
const slideProgress = script.paragraphs.length > 0
? currentParagraph / script.paragraphs.length
: 0
return ((currentScriptIdx + slideProgress) / totalScripts) * 100
})()
// Cleanup on unmount
useEffect(() => {
return () => { cancelAll() }
}, [cancelAll])
return {
state,
currentParagraph,
start,
stop,
pause,
resume,
skipSlide,
toggle,
displayText,
progress,
isSpeaking,
ttsAvailable,
ttsEnabled,
setTtsEnabled,
}
}