This repository has been archived on 2026-02-15. You can view files and clone it. You cannot open issues or pull requests or push a commit.
Files
breakpilot-pwa/website/lib/llm-mode-context.tsx
Benjamin Admin bfdaf63ba9 fix: Restore all files lost during destructive rebase
A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.

This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).

Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-09 09:51:32 +01:00

212 lines
5.9 KiB
TypeScript

'use client'
/**
 * LLM Mode Context
 *
 * Global context for the LLM mode (Hybrid, Local-Only, Cloud-Only, Auto).
 * Controls which providers are used for AI requests.
 *
 * Modes:
 * - hybrid: local first (Ollama), cloud as fallback (Claude/OpenAI)
 * - local-only: local models only (Ollama) - maximum privacy
 * - cloud-only: cloud providers only (Claude, OpenAI) - best quality
 * - auto: automatic selection based on complexity
 */
import { createContext, useContext, useState, useEffect, useMemo, ReactNode, useCallback } from 'react'
// LLM Mode Types

/** Routing strategy for LLM requests (see LLM_MODE_CONFIGS for presets). */
export type LLMMode = 'hybrid' | 'local-only' | 'cloud-only' | 'auto'

/** Static preset describing one LLMMode. */
export interface LLMModeConfig {
  // The mode this preset belongs to (same as its key in LLM_MODE_CONFIGS).
  mode: LLMMode
  // Short human-readable label for UI display.
  label: string
  // One-line description shown alongside the label.
  description: string
  // Which providers are enabled by default in this mode.
  providers: {
    enableOllama: boolean
    enableClaude: boolean
    enableOpenAI: boolean
  }
  // Emoji used as the mode's icon in the UI.
  icon: string
}
// Mode configurations
//
// `satisfies` checks every entry against Record<LLMMode, LLMModeConfig>
// (a missing mode or misspelled field is a compile error) while keeping
// the literal types of each entry, instead of widening them the way a
// plain `: Record<...>` annotation would.
export const LLM_MODE_CONFIGS = {
  hybrid: {
    mode: 'hybrid',
    label: 'Hybrid',
    description: 'Lokal zuerst, Cloud als Fallback',
    providers: {
      enableOllama: true,
      enableClaude: true,
      enableOpenAI: false,
    },
    icon: '🔄',
  },
  'local-only': {
    mode: 'local-only',
    label: 'Nur Lokal',
    description: 'Maximaler Datenschutz (nur Ollama)',
    providers: {
      enableOllama: true,
      enableClaude: false,
      enableOpenAI: false,
    },
    icon: '🔒',
  },
  'cloud-only': {
    mode: 'cloud-only',
    label: 'Nur Cloud',
    description: 'Beste Qualitaet (Claude/OpenAI)',
    providers: {
      enableOllama: false,
      enableClaude: true,
      enableOpenAI: true,
    },
    icon: '☁️',
  },
  auto: {
    mode: 'auto',
    label: 'Auto',
    description: 'Automatisch nach Komplexitaet',
    providers: {
      enableOllama: true,
      enableClaude: true,
      enableOpenAI: true,
    },
    icon: '⚡',
  },
} satisfies Record<LLMMode, LLMModeConfig>
// Context type

/** Shape of the value exposed by LLMModeContext via useLLMMode(). */
interface LLMModeContextType {
  // Currently selected mode.
  mode: LLMMode
  // Full preset configuration for the selected mode.
  config: LLMModeConfig
  // Switch modes; also clears provider overrides and persists the choice.
  setMode: (mode: LLMMode) => void
  // Convenience getters for provider states
  // (effective values: mode defaults merged with any overrides).
  enableOllama: boolean
  enableClaude: boolean
  enableOpenAI: boolean
  // Override individual providers (for fine-tuning within a mode)
  setProviderOverrides: (overrides: Partial<LLMModeConfig['providers']>) => void
  providerOverrides: Partial<LLMModeConfig['providers']>
  clearOverrides: () => void
}

const LLMModeContext = createContext<LLMModeContextType | undefined>(undefined)

// localStorage keys for persisting the chosen mode and provider overrides.
const STORAGE_KEY = 'breakpilot-llm-mode'
const OVERRIDES_KEY = 'breakpilot-llm-overrides'
// Mode used before hydration completes and when nothing is stored.
const DEFAULT_MODE: LLMMode = 'hybrid'
/**
 * Provides the global LLM mode and per-provider overrides to the React tree.
 *
 * The chosen mode and overrides are persisted in localStorage and re-hydrated
 * on mount. Until mounting completes, the provider exposes the DEFAULT_MODE
 * configuration so server- and client-rendered markup agree (prevents a
 * hydration mismatch).
 */
export function LLMModeProvider({ children }: { children: ReactNode }) {
  const [mode, setModeState] = useState<LLMMode>(DEFAULT_MODE)
  const [providerOverrides, setProviderOverridesState] = useState<Partial<LLMModeConfig['providers']>>({})
  const [mounted, setMounted] = useState(false)

  // Load from localStorage on mount. Access is wrapped in try/catch because
  // localStorage can throw (private browsing, disabled storage, quota).
  useEffect(() => {
    try {
      const storedMode = localStorage.getItem(STORAGE_KEY)
      if (storedMode && storedMode in LLM_MODE_CONFIGS) {
        setModeState(storedMode as LLMMode)
      }
      const storedOverrides = localStorage.getItem(OVERRIDES_KEY)
      if (storedOverrides) {
        // Validate the parsed JSON instead of trusting it blindly: only the
        // known boolean provider flags are accepted, anything else is dropped.
        const parsed: unknown = JSON.parse(storedOverrides)
        if (parsed && typeof parsed === 'object') {
          const sanitized: Partial<LLMModeConfig['providers']> = {}
          for (const key of ['enableOllama', 'enableClaude', 'enableOpenAI'] as const) {
            const v = (parsed as Record<string, unknown>)[key]
            if (typeof v === 'boolean') sanitized[key] = v
          }
          setProviderOverridesState(sanitized)
        }
      }
    } catch (e) {
      console.warn('Failed to load LLM mode from localStorage:', e)
    }
    setMounted(true)
  }, [])

  // Switch modes; overrides are reset so the new mode starts from its defaults.
  const setMode = useCallback((newMode: LLMMode) => {
    setModeState(newMode)
    // Clear overrides when switching modes
    setProviderOverridesState({})
    try {
      localStorage.setItem(STORAGE_KEY, newMode)
      localStorage.removeItem(OVERRIDES_KEY)
    } catch (e) {
      console.warn('Failed to save LLM mode to localStorage:', e)
    }
  }, [])

  // Merge partial overrides into the current set and persist the result.
  const setProviderOverrides = useCallback((overrides: Partial<LLMModeConfig['providers']>) => {
    setProviderOverridesState(prev => {
      const newOverrides = { ...prev, ...overrides }
      try {
        localStorage.setItem(OVERRIDES_KEY, JSON.stringify(newOverrides))
      } catch (e) {
        console.warn('Failed to save provider overrides:', e)
      }
      return newOverrides
    })
  }, [])

  // Drop all overrides, returning to the current mode's defaults.
  const clearOverrides = useCallback(() => {
    setProviderOverridesState({})
    try {
      localStorage.removeItem(OVERRIDES_KEY)
    } catch (e) {
      console.warn('Failed to clear provider overrides:', e)
    }
  }, [])

  // Memoize the context value so consumers only re-render when the mode or
  // overrides actually change (a fresh object every render would re-render
  // every consumer on every provider render).
  const value = useMemo<LLMModeContextType>(() => {
    const config = LLM_MODE_CONFIGS[mode]
    return {
      mode,
      config,
      setMode,
      // Effective provider states: mode defaults + overrides.
      enableOllama: providerOverrides.enableOllama ?? config.providers.enableOllama,
      enableClaude: providerOverrides.enableClaude ?? config.providers.enableClaude,
      enableOpenAI: providerOverrides.enableOpenAI ?? config.providers.enableOpenAI,
      setProviderOverrides,
      providerOverrides,
      clearOverrides,
    }
  }, [mode, providerOverrides, setMode, setProviderOverrides, clearOverrides])

  // Prevent hydration mismatch: before mount, always render the defaults.
  if (!mounted) {
    const defaults = LLM_MODE_CONFIGS[DEFAULT_MODE]
    return (
      <LLMModeContext.Provider
        value={{
          ...value,
          mode: DEFAULT_MODE,
          config: defaults,
          enableOllama: defaults.providers.enableOllama,
          enableClaude: defaults.providers.enableClaude,
          enableOpenAI: defaults.providers.enableOpenAI,
          providerOverrides: {},
        }}
      >
        {children}
      </LLMModeContext.Provider>
    )
  }
  return <LLMModeContext.Provider value={value}>{children}</LLMModeContext.Provider>
}
export function useLLMMode() {
const context = useContext(LLMModeContext)
if (context === undefined) {
throw new Error('useLLMMode must be used within a LLMModeProvider')
}
return context
}
// Utility hook for getting provider settings without full context

/** Returns only the effective provider flags plus the active mode. */
export function useLLMProviders() {
  const ctx = useLLMMode()
  return {
    enableOllama: ctx.enableOllama,
    enableClaude: ctx.enableClaude,
    enableOpenAI: ctx.enableOpenAI,
    mode: ctx.mode,
  }
}