/**
 * NOTE(review): This appears to be a Next.js client component (TSX) whose JSX
 * element tags were stripped by an extraction/paste step — only text content,
 * embedded expressions, and map callbacks survive, and the whole top of the
 * file (imports, helpers, component header) is collapsed onto one line.
 * The markup below is NOT valid JSX as-is; restore it from version control
 * rather than reconstructing the lost elements/attributes by hand.
 *
 * What the surviving (intact) code establishes:
 *
 * - getModelInfo(modelName): looks up a ModelDescription in MODEL_DATABASE,
 *   first by exact key, then by matching the base name (the part before ':',
 *   e.g. "llama3.2" from "llama3.2:11b") against database keys via
 *   startsWith; returns null when nothing matches.
 *   NOTE(review): the `key === baseName` clause is redundant — startsWith
 *   already covers exact equality.
 *
 * - formatBytes(bytes): human-readable byte count using 1024-based units
 *   B/KB/MB/GB/TB with up to two decimals; returns '0 B' for 0.
 *   NOTE(review): negative or non-finite input produces NaN — presumably
 *   callers only pass download byte counts ≥ 0; confirm.
 *
 * - OllamaSection (default export): UI section for managing Ollama LLM
 *   models. Props: status (MacMiniStatus | null), actionLoading
 *   (string | null, compared against 'pull' to disable the input while a
 *   download runs), downloadProgress (DownloadProgress | null), modelInput /
 *   setModelInput (controlled text input), onPullModel (download trigger).
 *   Local state: selectedModel (drives the model-info modal) and
 *   showRecommendations (toggles the recommended-models section).
 *   BUG(review): `useState(null)` has no type argument, so under strict TS
 *   `selectedModel` is typed `null` and the later
 *   `getModelInfo(selectedModel)` call (which expects a string) cannot
 *   type-check — should be `useState<string | null>(null)`.
 *
 * - isModelInstalled(modelName): true when status.models contains an entry
 *   whose name equals modelName or starts with its base name (prefix before
 *   ':'); false when status or status.models is absent.
 *
 * - The (damaged) JSX renders, in order: the installed-models list (name,
 *   size, modified date, vision badge from the model's category), a fallback
 *   message distinguishing "no models" from "Ollama unreachable", a
 *   model-info modal for selectedModel (closed by clicking the backdrop,
 *   with stopPropagation on the inner panel), a download form bound to
 *   modelInput with a progress readout (formatBytes of completed/total plus
 *   percent), and a collapsible recommendations section driven by
 *   RECOMMENDED_MODELS.handwriting and RECOMMENDED_MODELS.grading, each
 *   entry showing reason/size, installed/recommended badges, and an install
 *   affordance when not yet installed, followed by a tip box on combining a
 *   vision model with a text model.
 */
'use client' import { useState } from 'react' import type { MacMiniStatus, DownloadProgress, ModelDescription } from '../types' import { MODEL_DATABASE, RECOMMENDED_MODELS } from '../constants' function getModelInfo(modelName: string): ModelDescription | null { if (MODEL_DATABASE[modelName]) return MODEL_DATABASE[modelName] const baseName = modelName.split(':')[0] const matchingKey = Object.keys(MODEL_DATABASE).find(key => key.startsWith(baseName) || key === baseName ) return matchingKey ? MODEL_DATABASE[matchingKey] : null } function formatBytes(bytes: number) { if (bytes === 0) return '0 B' const k = 1024 const sizes = ['B', 'KB', 'MB', 'GB', 'TB'] const i = Math.floor(Math.log(bytes) / Math.log(k)) return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i] } export default function OllamaSection({ status, actionLoading, downloadProgress, modelInput, setModelInput, onPullModel, }: { status: MacMiniStatus | null actionLoading: string | null downloadProgress: DownloadProgress | null modelInput: string setModelInput: (v: string) => void onPullModel: () => void }) { const [selectedModel, setSelectedModel] = useState(null) const [showRecommendations, setShowRecommendations] = useState(false) const isModelInstalled = (modelName: string): boolean => { if (!status?.models) return false return status.models.some(m => m.name === modelName || m.name.startsWith(modelName.split(':')[0]) ) } return (

🤖 Ollama LLM Modelle

{/* Installed Models */} {status?.models && status.models.length > 0 ? (
{status.models.map((model, idx) => { const modelInfo = getModelInfo(model.name) return (
{model.name} {modelInfo && ( )} {modelInfo?.category === 'vision' && ( Vision )}
{model.size} {model.modified}
) })}
) : (

{status?.ollama ? 'Keine Modelle installiert' : 'Ollama nicht erreichbar'}

)} {/* Model Info Modal */} {selectedModel && (
setSelectedModel(null)}>
e.stopPropagation()}> {(() => { const info = getModelInfo(selectedModel) if (!info) return

Keine Informationen verfügbar

return ( <>

{info.name}

{info.category === 'vision' ? '👁️ Vision' : info.category === 'text' ? '📝 Text' : info.category} {info.size}

{info.description}

Geeignet für:

{info.useCases.map((useCase, i) => ( {useCase} ))}
) })()}
)} {/* Download New Model */}

Neues Modell herunterladen

setModelInput(e.target.value)} placeholder="z.B. llama3.2, mistral, qwen2.5:14b" className="flex-1 px-4 py-2 border border-slate-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-primary-500 focus:border-transparent" disabled={actionLoading === 'pull'} />
{/* Download Progress */} {downloadProgress && (
{downloadProgress.model} {formatBytes(downloadProgress.completed)} / {formatBytes(downloadProgress.total)}
{downloadProgress.percent}%
)} {/* Toggle Recommendations */}
{/* Recommendations Section */} {showRecommendations && (

📚 Empfohlene Modelle

{/* Handwriting Recognition */}
✍️ Handschrifterkennung (Vision-Modelle)
{RECOMMENDED_MODELS.handwriting.map((rec, idx) => { const info = MODEL_DATABASE[rec.model] const installed = isModelInstalled(rec.model) return (
{info?.name || rec.model} Vision {info?.recommended && ⭐ Empfohlen} {installed && ✓ Installiert}

{rec.reason}

Größe: {info?.size || 'unbekannt'}

{!installed && ( )}
) })}
{/* Grading / Text Analysis */}
📝 Klausurkorrektur (Text-Modelle)
{RECOMMENDED_MODELS.grading.map((rec, idx) => { const info = MODEL_DATABASE[rec.model] const installed = isModelInstalled(rec.model) return (
{info?.name || rec.model} Text {info?.recommended && ⭐ Empfohlen} {installed && ✓ Installiert}

{rec.reason}

Größe: {info?.size || 'unbekannt'}

{!installed && ( )}
) })}
{/* Info Box */}
💡
Tipp: Modell-Kombinationen

Für beste Ergebnisse bei Klausuren mit Handschrift kombiniere ein Vision-Modell (für OCR/Handschrifterkennung) mit einem Text-Modell (für Bewertung und Feedback). Beispiel: llama3.2-vision:11b + qwen2.5:14b

)}
) }