breakpilot-pwa/website/lib/llm-mode-context.tsx

'use client'
/**
 * LLM Mode Context
 *
 * Global context for the LLM mode (Hybrid, Local-Only, Cloud-Only, Auto).
 * Controls which providers are used for AI requests.
 *
 * Modes:
 * - hybrid: local first (Ollama), cloud as fallback (Claude/OpenAI)
 * - local-only: local models only (Ollama) - maximum privacy
 * - cloud-only: cloud providers only (Claude, OpenAI) - best quality
 * - auto: automatic selection based on complexity
 */
import { createContext, useContext, useState, useEffect, ReactNode, useCallback } from 'react'

// LLM Mode Types
export type LLMMode = 'hybrid' | 'local-only' | 'cloud-only' | 'auto'

export interface LLMModeConfig {
  mode: LLMMode
  label: string
  description: string
  providers: {
    enableOllama: boolean
    enableClaude: boolean
    enableOpenAI: boolean
  }
  icon: string
}

// Mode configurations
export const LLM_MODE_CONFIGS: Record<LLMMode, LLMModeConfig> = {
  hybrid: {
    mode: 'hybrid',
    label: 'Hybrid',
    description: 'Local first, cloud as fallback',
    providers: {
      enableOllama: true,
      enableClaude: true,
      enableOpenAI: false,
    },
    icon: '🔄',
  },
  'local-only': {
    mode: 'local-only',
    label: 'Local Only',
    description: 'Maximum privacy (Ollama only)',
    providers: {
      enableOllama: true,
      enableClaude: false,
      enableOpenAI: false,
    },
    icon: '🔒',
  },
  'cloud-only': {
    mode: 'cloud-only',
    label: 'Cloud Only',
    description: 'Best quality (Claude/OpenAI)',
    providers: {
      enableOllama: false,
      enableClaude: true,
      enableOpenAI: true,
    },
    icon: '☁️',
  },
  auto: {
    mode: 'auto',
    label: 'Auto',
    description: 'Automatic selection by complexity',
    providers: {
      enableOllama: true,
      enableClaude: true,
      enableOpenAI: true,
    },
    icon: '⚡',
  },
}
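
// Sketch (not part of the original file): one way a caller could turn these
// provider flags into the hybrid routing described in the header comment -
// local model first, cloud as fallback. The `clients` functions here are
// hypothetical; this context only manages flags, not the actual API calls.
export async function routeWithFallback(
  providers: LLMModeConfig['providers'],
  clients: {
    ollama: (prompt: string) => Promise<string>
    claude: (prompt: string) => Promise<string>
    openai: (prompt: string) => Promise<string>
  },
  prompt: string,
): Promise<string> {
  if (providers.enableOllama) {
    try {
      // Prefer the local model when it is enabled.
      return await clients.ollama(prompt)
    } catch {
      // Local model unreachable or failed: fall through to cloud providers.
    }
  }
  if (providers.enableClaude) return clients.claude(prompt)
  if (providers.enableOpenAI) return clients.openai(prompt)
  throw new Error('No LLM provider enabled for the current mode')
}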

// Context type
interface LLMModeContextType {
  mode: LLMMode
  config: LLMModeConfig
  setMode: (mode: LLMMode) => void
  // Convenience getters for provider states
  enableOllama: boolean
  enableClaude: boolean
  enableOpenAI: boolean
  // Override individual providers (for fine-tuning within a mode)
  setProviderOverrides: (overrides: Partial<LLMModeConfig['providers']>) => void
  providerOverrides: Partial<LLMModeConfig['providers']>
  clearOverrides: () => void
}

const LLMModeContext = createContext<LLMModeContextType | undefined>(undefined)

const STORAGE_KEY = 'breakpilot-llm-mode'
const OVERRIDES_KEY = 'breakpilot-llm-overrides'
const DEFAULT_MODE: LLMMode = 'hybrid'

export function LLMModeProvider({ children }: { children: ReactNode }) {
  const [mode, setModeState] = useState<LLMMode>(DEFAULT_MODE)
  const [providerOverrides, setProviderOverridesState] =
    useState<Partial<LLMModeConfig['providers']>>({})
  const [mounted, setMounted] = useState(false)

  // Load from localStorage on mount
  useEffect(() => {
    try {
      const storedMode = localStorage.getItem(STORAGE_KEY)
      if (storedMode && storedMode in LLM_MODE_CONFIGS) {
        setModeState(storedMode as LLMMode)
      }
      const storedOverrides = localStorage.getItem(OVERRIDES_KEY)
      if (storedOverrides) {
        setProviderOverridesState(JSON.parse(storedOverrides))
      }
    } catch (e) {
      console.warn('Failed to load LLM mode from localStorage:', e)
    }
    setMounted(true)
  }, [])

  const setMode = useCallback((newMode: LLMMode) => {
    setModeState(newMode)
    // Clear overrides when switching modes
    setProviderOverridesState({})
    try {
      localStorage.setItem(STORAGE_KEY, newMode)
      localStorage.removeItem(OVERRIDES_KEY)
    } catch (e) {
      console.warn('Failed to save LLM mode to localStorage:', e)
    }
  }, [])

  const setProviderOverrides = useCallback((overrides: Partial<LLMModeConfig['providers']>) => {
    setProviderOverridesState(prev => {
      const newOverrides = { ...prev, ...overrides }
      try {
        localStorage.setItem(OVERRIDES_KEY, JSON.stringify(newOverrides))
      } catch (e) {
        console.warn('Failed to save provider overrides:', e)
      }
      return newOverrides
    })
  }, [])

  const clearOverrides = useCallback(() => {
    setProviderOverridesState({})
    try {
      localStorage.removeItem(OVERRIDES_KEY)
    } catch (e) {
      console.warn('Failed to clear provider overrides:', e)
    }
  }, [])

  const config = LLM_MODE_CONFIGS[mode]

  // Compute effective provider states (mode defaults + overrides)
  const enableOllama = providerOverrides.enableOllama ?? config.providers.enableOllama
  const enableClaude = providerOverrides.enableClaude ?? config.providers.enableClaude
  const enableOpenAI = providerOverrides.enableOpenAI ?? config.providers.enableOpenAI

  const value: LLMModeContextType = {
    mode,
    config,
    setMode,
    enableOllama,
    enableClaude,
    enableOpenAI,
    setProviderOverrides,
    providerOverrides,
    clearOverrides,
  }

  // Prevent a hydration mismatch: until the client has read localStorage,
  // render with the default mode so server and client markup agree.
  if (!mounted) {
    return (
      <LLMModeContext.Provider
        value={{
          ...value,
          mode: DEFAULT_MODE,
          config: LLM_MODE_CONFIGS[DEFAULT_MODE],
          enableOllama: LLM_MODE_CONFIGS[DEFAULT_MODE].providers.enableOllama,
          enableClaude: LLM_MODE_CONFIGS[DEFAULT_MODE].providers.enableClaude,
          enableOpenAI: LLM_MODE_CONFIGS[DEFAULT_MODE].providers.enableOpenAI,
          providerOverrides: {},
        }}
      >
        {children}
      </LLMModeContext.Provider>
    )
  }

  return <LLMModeContext.Provider value={value}>{children}</LLMModeContext.Provider>
}
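
// Usage sketch (assumed app wiring, not shown in this file): the provider is
// meant to wrap the component tree once, e.g. in the Next.js root layout:
//
//   <LLMModeProvider>
//     {children}
//   </LLMModeProvider>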

export function useLLMMode() {
  const context = useContext(LLMModeContext)
  if (context === undefined) {
    throw new Error('useLLMMode must be used within an LLMModeProvider')
  }
  return context
}

// Utility hook for getting provider settings without full context
export function useLLMProviders() {
  const { enableOllama, enableClaude, enableOpenAI, mode } = useLLMMode()
  return { enableOllama, enableClaude, enableOpenAI, mode }
}