'use client'
/**
 * LLM Mode Context
 *
 * Global context for the LLM mode (Hybrid, Local-Only, Cloud-Only, Auto).
 * Controls which providers are used for AI requests.
 *
 * Modes:
 * - hybrid: local first (Ollama), cloud as fallback (Claude/OpenAI)
 * - local-only: local models only (Ollama) - maximum privacy
 * - cloud-only: cloud providers only (Claude, OpenAI) - best quality
 * - auto: automatic selection based on complexity
 */
import { createContext, useContext, useState, useEffect, ReactNode, useCallback } from 'react'
// LLM Mode Types
/** The four selectable operating modes for LLM requests. */
export type LLMMode = 'hybrid' | 'local-only' | 'cloud-only' | 'auto'
/** Display metadata and default provider flags for one LLM mode. */
export interface LLMModeConfig {
  /** The mode this config describes (same as its key in LLM_MODE_CONFIGS). */
  mode: LLMMode
  /** Short human-readable name for the mode. */
  label: string
  /** One-line description of the mode's trade-off. */
  description: string
  /** Default enable-flags for each supported provider while this mode is active. */
  providers: {
    enableOllama: boolean
    enableClaude: boolean
    enableOpenAI: boolean
  }
  /** Emoji representing the mode. */
  icon: string
}
// Mode configurations
/**
 * Static lookup table mapping each LLMMode to its display metadata and the
 * default provider enable-flags that apply while that mode is active.
 * Labels/descriptions are user-facing German strings and must stay as-is.
 */
export const LLM_MODE_CONFIGS: Record<LLMMode, LLMModeConfig> = {
  // Local first (Ollama), Claude as cloud fallback; OpenAI stays off.
  hybrid: {
    mode: 'hybrid',
    label: 'Hybrid',
    description: 'Lokal zuerst, Cloud als Fallback',
    providers: {
      enableOllama: true,
      enableClaude: true,
      enableOpenAI: false,
    },
    icon: '🔄',
  },
  // Maximum privacy: only the local Ollama provider is enabled.
  'local-only': {
    mode: 'local-only',
    label: 'Nur Lokal',
    description: 'Maximaler Datenschutz (nur Ollama)',
    providers: {
      enableOllama: true,
      enableClaude: false,
      enableOpenAI: false,
    },
    icon: '🔒',
  },
  // Cloud only: Claude and OpenAI enabled, local Ollama disabled.
  'cloud-only': {
    mode: 'cloud-only',
    label: 'Nur Cloud',
    description: 'Beste Qualitaet (Claude/OpenAI)',
    providers: {
      enableOllama: false,
      enableClaude: true,
      enableOpenAI: true,
    },
    icon: '☁️',
  },
  // Automatic selection: every provider is available to choose from.
  auto: {
    mode: 'auto',
    label: 'Auto',
    description: 'Automatisch nach Komplexitaet',
    providers: {
      enableOllama: true,
      enableClaude: true,
      enableOpenAI: true,
    },
    icon: '⚡',
  },
}
// Context type
/** Value exposed through LLMModeContext / useLLMMode(). */
interface LLMModeContextType {
  /** Currently selected mode. */
  mode: LLMMode
  /** Static configuration entry for the current mode. */
  config: LLMModeConfig
  /** Switch to a new mode (also clears any provider overrides). */
  setMode: (mode: LLMMode) => void
  // Convenience getters for provider states
  // (effective value: explicit override if set, else the mode default)
  enableOllama: boolean
  enableClaude: boolean
  enableOpenAI: boolean
  // Override individual providers (for fine-tuning within a mode)
  setProviderOverrides: (overrides: Partial<LLMModeConfig['providers']>) => void
  /** Currently active per-provider overrides (subset of provider flags). */
  providerOverrides: Partial<LLMModeConfig['providers']>
  /** Remove all overrides, reverting to the current mode's defaults. */
  clearOverrides: () => void
}
// React context carrying the current LLM mode state. Deliberately created
// with `undefined` as default so useLLMMode() can detect a missing provider.
const LLMModeContext = createContext<LLMModeContextType | undefined>(undefined)

// localStorage keys for persisting the selected mode and provider overrides.
const STORAGE_KEY = 'breakpilot-llm-mode'
const OVERRIDES_KEY = 'breakpilot-llm-overrides'
// Mode served on first visit and during the pre-mount (SSR-safe) render.
const DEFAULT_MODE: LLMMode = 'hybrid'
// Allowlist of override keys accepted back from localStorage.
const PROVIDER_OVERRIDE_KEYS = ['enableOllama', 'enableClaude', 'enableOpenAI'] as const

/**
 * Narrow an untrusted value (parsed from localStorage JSON) to a valid
 * partial provider-override object: only known keys holding booleans are
 * kept; any other keys or value types are dropped.
 */
function sanitizeOverrides(raw: unknown): Partial<LLMModeConfig['providers']> {
  const clean: Partial<LLMModeConfig['providers']> = {}
  if (raw !== null && typeof raw === 'object') {
    for (const key of PROVIDER_OVERRIDE_KEYS) {
      const value = (raw as Record<string, unknown>)[key]
      if (typeof value === 'boolean') {
        clean[key] = value
      }
    }
  }
  return clean
}

/**
 * Provides the LLM mode context to the React tree.
 *
 * Persists the selected mode and per-provider overrides to localStorage and
 * restores them once on mount. Until that first client-side effect has run
 * (`mounted === false`) the provider serves DEFAULT_MODE values so the
 * server-rendered markup and the first client render agree (no hydration
 * mismatch when a different mode was persisted).
 */
export function LLMModeProvider({ children }: { children: ReactNode }) {
  const [mode, setModeState] = useState<LLMMode>(DEFAULT_MODE)
  const [providerOverrides, setProviderOverridesState] = useState<Partial<LLMModeConfig['providers']>>({})
  const [mounted, setMounted] = useState(false)

  // Restore persisted state on mount only: localStorage does not exist
  // during SSR, and access can still throw (privacy mode), hence try/catch.
  useEffect(() => {
    try {
      const storedMode = localStorage.getItem(STORAGE_KEY)
      if (storedMode && storedMode in LLM_MODE_CONFIGS) {
        setModeState(storedMode as LLMMode)
      }

      const storedOverrides = localStorage.getItem(OVERRIDES_KEY)
      if (storedOverrides) {
        // localStorage content is untrusted input: validate the parsed JSON
        // instead of putting it straight into state.
        setProviderOverridesState(sanitizeOverrides(JSON.parse(storedOverrides)))
      }
    } catch (e) {
      console.warn('Failed to load LLM mode from localStorage:', e)
    }
    setMounted(true)
  }, [])

  // Switch mode; overrides are per-mode fine-tuning, so they are reset.
  const setMode = useCallback((newMode: LLMMode) => {
    setModeState(newMode)
    // Clear overrides when switching modes
    setProviderOverridesState({})
    try {
      localStorage.setItem(STORAGE_KEY, newMode)
      localStorage.removeItem(OVERRIDES_KEY)
    } catch (e) {
      console.warn('Failed to save LLM mode to localStorage:', e)
    }
  }, [])

  // Merge new overrides into the existing ones and persist the result.
  const setProviderOverrides = useCallback((overrides: Partial<LLMModeConfig['providers']>) => {
    setProviderOverridesState(prev => {
      const newOverrides = { ...prev, ...overrides }
      try {
        localStorage.setItem(OVERRIDES_KEY, JSON.stringify(newOverrides))
      } catch (e) {
        console.warn('Failed to save provider overrides:', e)
      }
      return newOverrides
    })
  }, [])

  // Drop all overrides, falling back to the current mode's defaults.
  const clearOverrides = useCallback(() => {
    setProviderOverridesState({})
    try {
      localStorage.removeItem(OVERRIDES_KEY)
    } catch (e) {
      console.warn('Failed to clear provider overrides:', e)
    }
  }, [])

  const config = LLM_MODE_CONFIGS[mode]

  // Effective provider flags: explicit override wins, else the mode default.
  const enableOllama = providerOverrides.enableOllama ?? config.providers.enableOllama
  const enableClaude = providerOverrides.enableClaude ?? config.providers.enableClaude
  const enableOpenAI = providerOverrides.enableOpenAI ?? config.providers.enableOpenAI

  const value: LLMModeContextType = {
    mode,
    config,
    setMode,
    enableOllama,
    enableClaude,
    enableOpenAI,
    setProviderOverrides,
    providerOverrides,
    clearOverrides,
  }

  // Prevent hydration mismatch: before mount, serve DEFAULT_MODE values
  // regardless of what was persisted.
  if (!mounted) {
    return (
      <LLMModeContext.Provider
        value={{
          ...value,
          mode: DEFAULT_MODE,
          config: LLM_MODE_CONFIGS[DEFAULT_MODE],
          enableOllama: LLM_MODE_CONFIGS[DEFAULT_MODE].providers.enableOllama,
          enableClaude: LLM_MODE_CONFIGS[DEFAULT_MODE].providers.enableClaude,
          enableOpenAI: LLM_MODE_CONFIGS[DEFAULT_MODE].providers.enableOpenAI,
          providerOverrides: {},
        }}
      >
        {children}
      </LLMModeContext.Provider>
    )
  }

  return <LLMModeContext.Provider value={value}>{children}</LLMModeContext.Provider>
}
export function useLLMMode() {
|
|
const context = useContext(LLMModeContext)
|
|
if (context === undefined) {
|
|
throw new Error('useLLMMode must be used within a LLMModeProvider')
|
|
}
|
|
return context
|
|
}
|
|
|
|
// Utility hook for getting provider settings without full context
|
|
export function useLLMProviders() {
|
|
const { enableOllama, enableClaude, enableOpenAI, mode } = useLLMMode()
|
|
return { enableOllama, enableClaude, enableOpenAI, mode }
|
|
}
|