'use client'

import { useState } from 'react'
import type { LLMRoutingOption } from '@/types/infrastructure-modules'
import type { FailedTest, BacklogItem, BacklogPriority } from '../types'

// NOTE(review): this file reached review with every angle-bracketed span
// stripped (all JSX elements and all generic type parameters were lost).
// The logic, prop types, string literals and text content below survived
// verbatim; the JSX markup and Tailwind classes are a reconstruction and
// should be verified against git history / the rendered UI.

// ==============================================================================
// FailedTestCard
// ==============================================================================

/**
 * Card for a single failed test: badges (priority, error type, service,
 * failure count), test name and file path, status/priority dropdowns,
 * the error message, an optional fix suggestion and a last-failed timestamp.
 *
 * @param test          The failed test to display.
 * @param onStatusChange Called with (testId, newStatus) when the status select changes.
 * @param onPriorityChange Optional; when provided, a priority select is rendered.
 * @param priority      Backlog priority of this entry (default 'medium').
 * @param failureCount  How often the test failed; a badge is shown when > 1.
 */
function FailedTestCard({
  test,
  onStatusChange,
  onPriorityChange,
  priority = 'medium',
  failureCount = 1,
}: {
  test: FailedTest
  onStatusChange: (testId: string, status: string) => void
  onPriorityChange?: (testId: string, priority: string) => void
  priority?: BacklogPriority
  failureCount?: number
}) {
  // Badge colors per error classification. Generic parameters restored
  // (the original `Record` lost its `<string, string>` to the mangling).
  const errorTypeColors: Record<string, string> = {
    assertion: 'bg-amber-100 text-amber-700',
    nil_pointer: 'bg-red-100 text-red-700',
    type_error: 'bg-purple-100 text-purple-700',
    network: 'bg-blue-100 text-blue-700',
    timeout: 'bg-orange-100 text-orange-700',
    logic_error: 'bg-slate-100 text-slate-700',
    unknown: 'bg-slate-100 text-slate-700',
  }

  // Badge colors per backlog status.
  const statusColors: Record<string, string> = {
    open: 'bg-red-100 text-red-700',
    in_progress: 'bg-blue-100 text-blue-700',
    fixed: 'bg-emerald-100 text-emerald-700',
    wont_fix: 'bg-slate-100 text-slate-700',
    flaky: 'bg-purple-100 text-purple-700',
  }

  // Solid badge colors per priority; keys match BacklogPriority.
  const priorityColors: Record<BacklogPriority, string> = {
    critical: 'bg-red-500 text-white',
    high: 'bg-orange-500 text-white',
    medium: 'bg-yellow-500 text-white',
    low: 'bg-slate-400 text-white',
  }

  // Human-readable (German) priority labels.
  const priorityLabels: Record<BacklogPriority, string> = {
    critical: '!!! Kritisch',
    high: '!! Hoch',
    medium: '! Mittel',
    low: 'Niedrig',
  }

  return (
    <div className="rounded-lg border border-slate-200 bg-white p-4 shadow-sm">
      {/* Badge row: priority, error type, service, failure count */}
      <div className="flex flex-wrap items-center gap-2">
        <span className={`rounded px-2 py-0.5 text-xs font-semibold ${priorityColors[priority]}`}>
          {priorityLabels[priority]}
        </span>
        <span className={`rounded px-2 py-0.5 text-xs ${errorTypeColors[test.error_type] ?? errorTypeColors.unknown}`}>
          {test.error_type.replace('_', ' ')}
        </span>
        <span className="rounded bg-slate-100 px-2 py-0.5 text-xs text-slate-600">
          {test.service}
        </span>
        {failureCount > 1 && (
          <span className="rounded bg-red-100 px-2 py-0.5 text-xs font-medium text-red-700">
            {failureCount}x fehlgeschlagen
          </span>
        )}
      </div>

      {/* Test identity */}
      <div className="mt-2 font-mono text-sm font-semibold text-slate-800">{test.name}</div>
      <div className="font-mono text-xs text-slate-500">{test.file_path}</div>

      {/* Status select; priority select only when a handler is supplied */}
      <div className="mt-3 flex items-center gap-2">
        <select
          value={test.status}
          onChange={(e) => onStatusChange(test.id, e.target.value)}
          className={`rounded px-2 py-1 text-xs ${statusColors[test.status] ?? statusColors.open}`}
        >
          <option value="open">Offen</option>
          <option value="in_progress">In Arbeit</option>
          <option value="fixed">Behoben</option>
          <option value="wont_fix">Wird nicht behoben</option>
          <option value="flaky">Flaky</option>
        </select>
        {onPriorityChange && (
          <select
            value={priority}
            onChange={(e) => onPriorityChange(test.id, e.target.value)}
            className="rounded border border-slate-200 px-2 py-1 text-xs"
          >
            <option value="critical">Kritisch</option>
            <option value="high">Hoch</option>
            <option value="medium">Mittel</option>
            <option value="low">Niedrig</option>
          </select>
        )}
      </div>

      {/* Error message (fallback text when empty) */}
      <div className="mt-3">
        <div className="text-xs font-semibold text-slate-600">Fehlermeldung:</div>
        <pre className="mt-1 overflow-x-auto rounded bg-slate-50 p-2 text-xs text-slate-700">
          {test.error_message || 'Keine Details verfuegbar'}
        </pre>
      </div>

      {/* Optional fix suggestion */}
      {test.suggestion && (
        <div className="mt-2">
          <div className="text-xs font-semibold text-emerald-700">Loesungsvorschlag:</div>
          <div className="mt-1 rounded bg-emerald-50 p-2 text-xs text-emerald-800">
            {test.suggestion}
          </div>
        </div>
      )}

      {/* Last-failed timestamp, localized de-DE */}
      <div className="mt-2 text-xs text-slate-400">
        Zuletzt fehlgeschlagen:{' '}
        {test.last_failed ? new Date(test.last_failed).toLocaleString('de-DE') : 'Unbekannt'}
      </div>
    </div>
  )
}

// ==============================================================================
// BacklogTab
// ==============================================================================

/**
 * Backlog view over failed tests.
 *
 * Data source is either the PostgreSQL-backed backlog (`backlogItems`, when
 * `usePostgres` is set) or the legacy `failedTests` list. Provides status /
 * service / priority filters, per-status counters, an LLM analysis panel and
 * the card list itself.
 */
export function BacklogTab({
  failedTests,
  onStatusChange,
  onPriorityChange,
  isLoading,
  backlogItems,
  usePostgres = false,
}: {
  failedTests: FailedTest[]
  onStatusChange: (testId: string, status: string) => void
  onPriorityChange?: (testId: string, priority: string) => void
  isLoading: boolean
  backlogItems?: BacklogItem[]
  usePostgres?: boolean
}) {
  const [filterStatus, setFilterStatus] = useState('open')
  const [filterService, setFilterService] = useState('all')
  const [filterPriority, setFilterPriority] = useState('all')
  const [llmAutoAnalysis, setLlmAutoAnalysis] = useState(true)
  // Generic restored: the state holds an LLMRoutingOption literal.
  const [llmRouting, setLlmRouting] = useState<LLMRoutingOption>('smart_routing')

  // Use the PostgreSQL backlog when available, otherwise the legacy list.
  const items = usePostgres && backlogItems ? backlogItems : failedTests

  // Distinct services for the service filter dropdown.
  const services = [...new Set(items.map(t => 'service' in t ? t.service : (t as BacklogItem).service))]

  // Apply the three filters; legacy items without the field fall back to
  // 'open' / '' / 'medium' respectively.
  const filteredItems = items.filter(item => {
    const status = 'status' in item ? item.status : 'open'
    const service = 'service' in item ? item.service : ''
    const priority = 'priority' in item ? (item as BacklogItem).priority : 'medium'
    if (filterStatus !== 'all' && status !== filterStatus) return false
    if (filterService !== 'all' && service !== filterService) return false
    if (filterPriority !== 'all' && priority !== filterPriority) return false
    return true
  })

  // Counters per status.
  const openCount = items.filter(t => t.status === 'open').length
  const inProgressCount = items.filter(t => t.status === 'in_progress').length
  const fixedCount = items.filter(t => t.status === 'fixed').length
  const flakyCount = items.filter(t => t.status === 'flaky').length

  // Counters per priority (only meaningful with the PostgreSQL backlog).
  const criticalCount = backlogItems?.filter(t => t.priority === 'critical').length || 0
  const highCount = backlogItems?.filter(t => t.priority === 'high').length || 0

  if (isLoading) {
    return (
      <div className="flex items-center justify-center py-12">
        {/* Reconstructed loading spinner — original markup was lost */}
        <div className="h-8 w-8 animate-spin rounded-full border-2 border-slate-300 border-t-blue-500" />
      </div>
    )
  }

  // Map a PostgreSQL BacklogItem onto the FailedTest shape the card expects.
  const convertToFailedTest = (item: BacklogItem): FailedTest => ({
    id: String(item.id),
    name: item.test_name,
    service: item.service,
    file_path: item.test_file || '',
    error_message: item.error_message || '',
    error_type: item.error_type || 'unknown',
    suggestion: item.fix_suggestion || '',
    run_id: '',
    last_failed: item.last_failed_at,
    status: item.status,
  })

  return (
    <div className="space-y-4">
      {/* Stats */}
      <div className="grid grid-cols-2 gap-3 md:grid-cols-5">
        <div className="rounded-lg border border-slate-200 bg-white p-3 text-center">
          <div className="text-2xl font-bold text-red-600">{openCount}</div>
          <div className="text-xs text-slate-500">Offene Fehler</div>
        </div>
        <div className="rounded-lg border border-slate-200 bg-white p-3 text-center">
          <div className="text-2xl font-bold text-blue-600">{inProgressCount}</div>
          <div className="text-xs text-slate-500">In Arbeit</div>
        </div>
        <div className="rounded-lg border border-slate-200 bg-white p-3 text-center">
          <div className="text-2xl font-bold text-emerald-600">{fixedCount}</div>
          <div className="text-xs text-slate-500">Behoben</div>
        </div>
        <div className="rounded-lg border border-slate-200 bg-white p-3 text-center">
          <div className="text-2xl font-bold text-purple-600">{flakyCount}</div>
          <div className="text-xs text-slate-500">Flaky</div>
        </div>
        {usePostgres && criticalCount + highCount > 0 && (
          <div className="rounded-lg border border-orange-200 bg-orange-50 p-3 text-center">
            <div className="text-2xl font-bold text-orange-600">{criticalCount + highCount}</div>
            <div className="text-xs text-slate-500">Kritisch/Hoch</div>
          </div>
        )}
      </div>

      {/* PostgreSQL Badge */}
      {usePostgres && (
        <div className="inline-flex items-center gap-1 rounded bg-emerald-100 px-2 py-1 text-xs text-emerald-700">
          Persistente Speicherung aktiv (PostgreSQL)
        </div>
      )}

      {/* LLM Analysis Toggle */}
      <LLMAnalysisPanel
        llmAutoAnalysis={llmAutoAnalysis}
        setLlmAutoAnalysis={setLlmAutoAnalysis}
        llmRouting={llmRouting}
        setLlmRouting={setLlmRouting}
      />

      {/* Filter */}
      <div className="flex flex-wrap items-center gap-2">
        <select
          value={filterStatus}
          onChange={(e) => setFilterStatus(e.target.value)}
          className="rounded border border-slate-200 px-2 py-1 text-sm"
        >
          <option value="all">Alle Status</option>
          <option value="open">Offen</option>
          <option value="in_progress">In Arbeit</option>
          <option value="fixed">Behoben</option>
          <option value="wont_fix">Wird nicht behoben</option>
          <option value="flaky">Flaky</option>
        </select>
        <select
          value={filterService}
          onChange={(e) => setFilterService(e.target.value)}
          className="rounded border border-slate-200 px-2 py-1 text-sm"
        >
          <option value="all">Alle Services</option>
          {services.map((s) => (
            <option key={s} value={s}>{s}</option>
          ))}
        </select>
        {usePostgres && (
          <select
            value={filterPriority}
            onChange={(e) => setFilterPriority(e.target.value)}
            className="rounded border border-slate-200 px-2 py-1 text-sm"
          >
            <option value="all">Alle Prioritaeten</option>
            <option value="critical">Kritisch</option>
            <option value="high">Hoch</option>
            <option value="medium">Mittel</option>
            <option value="low">Niedrig</option>
          </select>
        )}
      </div>

      <div className="text-xs text-slate-500">
        {filteredItems.length} von {items.length} Tests angezeigt
      </div>

      {/* Test-Liste */}
      {filteredItems.length === 0 ? (
        <div className="rounded-lg border border-slate-200 bg-white p-8 text-center">
          <div className="text-slate-600">
            {filterStatus === 'open' ? 'Keine offenen Fehler!' : 'Keine Tests mit diesem Filter gefunden.'}
          </div>
          {filterStatus === 'open' && (
            <div className="mt-1 text-sm text-emerald-600">
              Alle Tests bestanden. Bereit fuer Go-Live!
            </div>
          )}
        </div>
      ) : (
        <div className="space-y-3">
          {filteredItems.map((item) => {
            const test = usePostgres && 'test_name' in item
              ? convertToFailedTest(item as BacklogItem)
              : item as FailedTest
            const priority = usePostgres && 'priority' in item
              ? (item as BacklogItem).priority
              : 'medium'
            const failureCount = usePostgres && 'failure_count' in item
              ? (item as BacklogItem).failure_count
              : 1
            return (
              <FailedTestCard
                key={test.id}
                test={test}
                onStatusChange={onStatusChange}
                onPriorityChange={onPriorityChange}
                priority={priority}
                failureCount={failureCount}
              />
            )
          })}
        </div>
      )}

      {/* Info */}
      <div className="rounded-lg border border-blue-200 bg-blue-50 p-4 text-sm text-blue-800">
        <div className="font-semibold">Workflow fuer fehlgeschlagene Tests:</div>
        <ol className="mt-2 list-decimal space-y-1 pl-5">
          <li>Markiere den Test als "In Arbeit" wenn du daran arbeitest</li>
          <li>Analysiere die Fehlermeldung und den Loesungsvorschlag</li>
          <li>Behebe den Fehler im Code</li>
          <li>Fuehre den Test erneut aus (Button im Service-Tab)</li>
          <li>Markiere als "Behoben" wenn der Test besteht</li>
          {usePostgres && (
            <li>Setze "Flaky" fuer sporadisch fehlschlagende Tests</li>
          )}
        </ol>
      </div>
    </div>
  )
}

// ==============================================================================
// LLM Analysis Panel (internal)
// ==============================================================================

/**
 * Toggle for automatic LLM-based fix analysis plus the routing-strategy
 * picker (local-only / Claude-preferred / smart routing) shown when enabled.
 */
function LLMAnalysisPanel({
  llmAutoAnalysis,
  setLlmAutoAnalysis,
  llmRouting,
  setLlmRouting,
}: {
  llmAutoAnalysis: boolean
  setLlmAutoAnalysis: (v: boolean) => void
  llmRouting: LLMRoutingOption
  setLlmRouting: (v: LLMRoutingOption) => void
}) {
  return (
    <div className="rounded-lg border border-slate-200 bg-white p-4">
      <div className="flex items-center justify-between">
        <div>
          <div className="font-semibold text-slate-800">Automatische LLM-Analyse</div>
          <div className="text-xs text-slate-500">
            KI-gestuetzte Fix-Vorschlaege fuer Backlog-Eintraege
          </div>
        </div>
        {/* On/off switch for the auto-analysis feature */}
        <button
          type="button"
          role="switch"
          aria-checked={llmAutoAnalysis}
          onClick={() => setLlmAutoAnalysis(!llmAutoAnalysis)}
          className={`relative h-6 w-11 rounded-full transition-colors ${
            llmAutoAnalysis ? 'bg-blue-500' : 'bg-slate-300'
          }`}
        >
          <span
            className={`absolute top-0.5 h-5 w-5 rounded-full bg-white transition-transform ${
              llmAutoAnalysis ? 'translate-x-5' : 'translate-x-0.5'
            }`}
          />
        </button>
      </div>

      {llmAutoAnalysis && (
        <div className="mt-3">
          <div className="text-xs font-semibold text-slate-600">LLM-Routing Strategie:</div>
          {/* NOTE(review): the three RoutingOption invocations were lost with
              the markup; labels/badges below are reconstructed — verify. */}
          <div className="mt-2 flex flex-wrap gap-2">
            <RoutingOption
              value="local_only"
              current={llmRouting}
              onChange={setLlmRouting}
              label="Nur lokal"
              badge="Privat"
              badgeColor="bg-emerald-100 text-emerald-700"
            />
            <RoutingOption
              value="claude_preferred"
              current={llmRouting}
              onChange={setLlmRouting}
              label="Claude bevorzugt"
              badge="Qualitaet"
              badgeColor="bg-purple-100 text-purple-700"
            />
            <RoutingOption
              value="smart_routing"
              current={llmRouting}
              onChange={setLlmRouting}
              label="Smart Routing"
              badge="Empfohlen"
              badgeColor="bg-blue-100 text-blue-700"
            />
          </div>
          <p className="mt-2 text-xs text-slate-500">
            {llmRouting === 'local_only' && 'Alle Analysen werden mit Qwen2.5-32B lokal durchgefuehrt. Keine Daten verlassen den Server.'}
            {llmRouting === 'claude_preferred' && 'Verwendet Claude fuer beste Fix-Qualitaet. Nur Code-Snippets werden uebertragen.'}
            {llmRouting === 'smart_routing' && 'Privacy Classifier entscheidet automatisch: Sensitive Daten → lokal, Code → Claude.'}
          </p>
        </div>
      )}
    </div>
  )
}

/**
 * One selectable routing-strategy pill; highlighted when it is the
 * currently active option.
 */
function RoutingOption({
  value,
  current,
  onChange,
  label,
  badge,
  badgeColor,
}: {
  value: LLMRoutingOption
  current: LLMRoutingOption
  onChange: (v: LLMRoutingOption) => void
  label: string
  badge: string
  badgeColor: string
}) {
  const isActive = current === value
  return (
    <button
      type="button"
      onClick={() => onChange(value)}
      className={`flex items-center gap-1 rounded border px-2 py-1 text-xs ${
        isActive
          ? 'border-blue-500 bg-blue-50 text-blue-700'
          : 'border-slate-200 bg-white text-slate-600'
      }`}
    >
      {label}
      <span className={`rounded px-1 text-[10px] ${badgeColor}`}>{badge}</span>
    </button>
  )
}
) } function RoutingOption({ value, current, onChange, label, badge, badgeColor, }: { value: LLMRoutingOption current: LLMRoutingOption onChange: (v: LLMRoutingOption) => void label: string badge: string badgeColor: string }) { const isActive = current === value return ( ) }