A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.
This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).
Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
396 lines
10 KiB
TypeScript
import { NextRequest, NextResponse } from 'next/server'
|
|
import type { ExtractedError, ErrorCategory, LogExtractionResponse } from '@/types/infrastructure-modules'
|
|
|
|
// Woodpecker CI API configuration.
// WOODPECKER_URL defaults to the in-cluster service name; WOODPECKER_TOKEN
// defaults to an empty string — in that case the Authorization header below
// is sent as "Bearer " with no token, and the API will presumably reject
// the request (confirm against the Woodpecker server's auth settings).
const WOODPECKER_URL = process.env.WOODPECKER_URL || 'http://woodpecker-server:8000'
const WOODPECKER_TOKEN = process.env.WOODPECKER_TOKEN || ''
|
|
|
|
// =============================================================================
|
|
// Error Pattern Matching
|
|
// =============================================================================
|
|
|
|
/**
 * A single rule for recognizing an error line in CI/CD log output.
 */
interface ErrorPattern {
  // Regex tested against each trimmed log line.
  pattern: RegExp
  // Category assigned to matches (also drives the severity sort in POST).
  category: ErrorCategory
  // Optional message formatter; when absent, the whole matching line is
  // used as the error message.
  extractMessage?: (match: RegExpMatchArray, line: string) => string
}
|
|
|
|
/**
 * Patterns for the different error types found in CI/CD logs.
 *
 * NOTE: order matters — parseLogLines stops at the first pattern that
 * matches a given line, so more specific patterns must precede generic
 * ones. Do not reorder entries without re-checking the matching behavior.
 */
const ERROR_PATTERNS: ErrorPattern[] = [
  // Test Failures
  // Generic "FAIL/FAILED/ERROR: <details>" prefix at line start.
  {
    pattern: /^(FAIL|FAILED|ERROR):?\s+(.+)$/i,
    category: 'test_failure',
    extractMessage: (match, line) => match[2] || line,
  },
  // Go test output: "--- FAIL: TestName (0.12s)"
  {
    pattern: /^---\s+FAIL:\s+(.+)\s+\([\d.]+s\)$/,
    category: 'test_failure',
    extractMessage: (match) => `Test failed: ${match[1]}`,
  },
  // pytest summary lines mentioning FAILED tests.
  {
    pattern: /pytest.*FAILED\s+(.+)$/,
    category: 'test_failure',
    extractMessage: (match) => `pytest: ${match[1]}`,
  },
  // Python assertion failures.
  {
    pattern: /AssertionError:\s+(.+)$/,
    category: 'test_failure',
    extractMessage: (match) => `Assertion failed: ${match[1]}`,
  },
  // Go packages that failed to even compile during `go test`.
  {
    pattern: /FAIL\s+[\w\/]+\s+\[build failed\]/,
    category: 'build_error',
  },

  // Build Errors
  // Coded compiler diagnostics, e.g. "error[E0308]: ..." (Rust-style).
  {
    pattern: /^(error|Error)\[[\w-]+\]:\s+(.+)$/,
    category: 'build_error',
    extractMessage: (match) => match[2],
  },
  // Unresolved module/package imports.
  {
    pattern: /cannot find (module|package)\s+["'](.+)["']/i,
    category: 'build_error',
    extractMessage: (match) => `Missing ${match[1]}: ${match[2]}`,
  },
  // Go compiler "undefined: name" diagnostics.
  {
    pattern: /undefined:\s+(.+)$/,
    category: 'build_error',
    extractMessage: (match) => `Undefined: ${match[1]}`,
  },
  // Generic compilation failure marker.
  {
    pattern: /compilation failed/i,
    category: 'build_error',
  },
  // npm error output.
  {
    pattern: /npm ERR!\s+(.+)$/,
    category: 'build_error',
    extractMessage: (match) => `npm error: ${match[1]}`,
  },
  // Go toolchain messages of the form "go: <what>: <detail>".
  {
    pattern: /go:\s+(.+):\s+(.+)$/,
    category: 'build_error',
    extractMessage: (match) => `Go: ${match[1]}: ${match[2]}`,
  },

  // Security Warnings
  // Scanner severity tags (e.g. trivy/grype style output).
  {
    pattern: /\[CRITICAL\]\s+(.+)$/i,
    category: 'security_warning',
    extractMessage: (match) => `Critical: ${match[1]}`,
  },
  {
    pattern: /\[HIGH\]\s+(.+)$/i,
    category: 'security_warning',
    extractMessage: (match) => `High severity: ${match[1]}`,
  },
  // Any line mentioning a CVE identifier; keep the whole line as message.
  {
    pattern: /CVE-\d{4}-\d+/,
    category: 'security_warning',
    extractMessage: (match, line) => line.trim(),
  },
  {
    pattern: /vulnerability found/i,
    category: 'security_warning',
  },
  // Secret scanners (generic + gitleaks), SAST findings (semgrep).
  {
    pattern: /secret.*detected/i,
    category: 'security_warning',
  },
  {
    pattern: /gitleaks.*found/i,
    category: 'security_warning',
  },
  {
    pattern: /semgrep.*finding/i,
    category: 'security_warning',
  },

  // License Violations
  {
    pattern: /license.*violation/i,
    category: 'license_violation',
  },
  {
    pattern: /incompatible license/i,
    category: 'license_violation',
  },
  // Copyleft license identifiers flagged as potentially problematic.
  // NOTE(review): matches these substrings anywhere in a line, so e.g.
  // "LGPL-3" would also trigger via "GPL-3" — confirm this is intended.
  {
    pattern: /AGPL|GPL-3|SSPL/,
    category: 'license_violation',
    extractMessage: (match, line) => `Potentially problematic license found: ${match[0]}`,
  },

  // Dependency Issues
  {
    pattern: /dependency.*not found/i,
    category: 'dependency_issue',
  },
  {
    pattern: /outdated.*dependency/i,
    category: 'dependency_issue',
  },
  {
    pattern: /version conflict/i,
    category: 'dependency_issue',
  },
]
|
|
|
|
/**
|
|
* Patterns to extract file paths from error lines
|
|
*/
|
|
const FILE_PATH_PATTERNS = [
|
|
/([\/\w.-]+\.(go|py|ts|tsx|js|jsx|rs)):(\d+)/,
|
|
/File "([^"]+)", line (\d+)/,
|
|
/at ([\/\w.-]+):(\d+):\d+/,
|
|
]
|
|
|
|
/**
 * Patterns to extract service names from log lines or paths.
 *
 * Tried in order by extractService; group 1 is the service name.
 */
const SERVICE_PATTERNS = [
  // "service/<name>/" or "services/<name>/" path segments.
  /service[s]?\/([a-z-]+)/i,
  // A directory named "<name>-service" anywhere in a path.
  /\/([a-z-]+-service)\//i,
  // "<name>: " prefix at line start.
  // NOTE(review): this is broad — lines like "error: ..." or "go: ..."
  // would yield "error"/"go" as the service name; confirm intended.
  /^([a-z-]+):\s/,
]
|
|
|
|
// =============================================================================
|
|
// Log Parsing Functions
|
|
// =============================================================================
|
|
|
|
/**
 * Shape of one log entry returned by the Woodpecker step-logs endpoint.
 */
interface LogLine {
  // Position (line index) of the entry within the step's log stream.
  pos: number
  // Raw output text of the line.
  out: string
  // Timestamp field from the API — presumably elapsed time or epoch;
  // TODO confirm against the Woodpecker API docs (unused in this file).
  time: number
}
|
|
|
|
function extractFilePath(line: string): { path?: string; lineNumber?: number } {
|
|
for (const pattern of FILE_PATH_PATTERNS) {
|
|
const match = line.match(pattern)
|
|
if (match) {
|
|
return {
|
|
path: match[1],
|
|
lineNumber: parseInt(match[2] || match[3], 10) || undefined,
|
|
}
|
|
}
|
|
}
|
|
return {}
|
|
}
|
|
|
|
function extractService(line: string, filePath?: string): string | undefined {
|
|
// First try to extract from file path
|
|
if (filePath) {
|
|
for (const pattern of SERVICE_PATTERNS) {
|
|
const match = filePath.match(pattern)
|
|
if (match) return match[1]
|
|
}
|
|
}
|
|
|
|
// Then try from the line itself
|
|
for (const pattern of SERVICE_PATTERNS) {
|
|
const match = line.match(pattern)
|
|
if (match) return match[1]
|
|
}
|
|
|
|
return undefined
|
|
}
|
|
|
|
function parseLogLines(logs: LogLine[], stepName: string): ExtractedError[] {
|
|
const errors: ExtractedError[] = []
|
|
const seenMessages = new Set<string>()
|
|
|
|
for (const logLine of logs) {
|
|
const line = logLine.out.trim()
|
|
if (!line) continue
|
|
|
|
for (const errorPattern of ERROR_PATTERNS) {
|
|
const match = line.match(errorPattern.pattern)
|
|
if (match) {
|
|
const message = errorPattern.extractMessage
|
|
? errorPattern.extractMessage(match, line)
|
|
: line
|
|
|
|
// Deduplicate similar errors
|
|
const messageKey = `${errorPattern.category}:${message.substring(0, 100)}`
|
|
if (seenMessages.has(messageKey)) continue
|
|
seenMessages.add(messageKey)
|
|
|
|
const fileInfo = extractFilePath(line)
|
|
const service = extractService(line, fileInfo.path)
|
|
|
|
errors.push({
|
|
step: stepName,
|
|
line: logLine.pos,
|
|
message,
|
|
category: errorPattern.category,
|
|
file_path: fileInfo.path,
|
|
service,
|
|
})
|
|
|
|
break // Only match first pattern per line
|
|
}
|
|
}
|
|
}
|
|
|
|
return errors
|
|
}
|
|
|
|
// =============================================================================
|
|
// API Handler
|
|
// =============================================================================
|
|
|
|
/**
|
|
* POST /api/infrastructure/logs/extract
|
|
*
|
|
* Extrahiert Fehler aus Woodpecker Pipeline Logs.
|
|
*
|
|
* Request Body:
|
|
* - pipeline_number: number (required)
|
|
* - repo_id?: string (default: '1')
|
|
*
|
|
* Response:
|
|
* - errors: ExtractedError[]
|
|
* - pipeline_number: number
|
|
* - extracted_at: string
|
|
* - lines_parsed: number
|
|
*/
|
|
export async function POST(request: NextRequest) {
|
|
try {
|
|
const body = await request.json()
|
|
const { pipeline_number, repo_id = '1' } = body
|
|
|
|
if (!pipeline_number) {
|
|
return NextResponse.json(
|
|
{ error: 'pipeline_number ist erforderlich' },
|
|
{ status: 400 }
|
|
)
|
|
}
|
|
|
|
// 1. Fetch pipeline details to get step IDs
|
|
const pipelineResponse = await fetch(
|
|
`${WOODPECKER_URL}/api/repos/${repo_id}/pipelines/${pipeline_number}`,
|
|
{
|
|
headers: {
|
|
'Authorization': `Bearer ${WOODPECKER_TOKEN}`,
|
|
'Content-Type': 'application/json',
|
|
},
|
|
cache: 'no-store',
|
|
}
|
|
)
|
|
|
|
if (!pipelineResponse.ok) {
|
|
return NextResponse.json(
|
|
{ error: `Pipeline ${pipeline_number} nicht gefunden` },
|
|
{ status: 404 }
|
|
)
|
|
}
|
|
|
|
const pipeline = await pipelineResponse.json()
|
|
|
|
// 2. Extract step IDs from workflows
|
|
const failedSteps: { id: number; name: string }[] = []
|
|
|
|
if (pipeline.workflows) {
|
|
for (const workflow of pipeline.workflows) {
|
|
if (workflow.children) {
|
|
for (const child of workflow.children) {
|
|
if (child.state === 'failure' || child.state === 'error') {
|
|
failedSteps.push({
|
|
id: child.id,
|
|
name: child.name,
|
|
})
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// 3. Fetch logs for each failed step
|
|
const allErrors: ExtractedError[] = []
|
|
let totalLinesParsed = 0
|
|
|
|
for (const step of failedSteps) {
|
|
try {
|
|
const logsResponse = await fetch(
|
|
`${WOODPECKER_URL}/api/repos/${repo_id}/pipelines/${pipeline_number}/logs/${step.id}`,
|
|
{
|
|
headers: {
|
|
'Authorization': `Bearer ${WOODPECKER_TOKEN}`,
|
|
'Content-Type': 'application/json',
|
|
},
|
|
}
|
|
)
|
|
|
|
if (logsResponse.ok) {
|
|
const logs: LogLine[] = await logsResponse.json()
|
|
totalLinesParsed += logs.length
|
|
|
|
const stepErrors = parseLogLines(logs, step.name)
|
|
allErrors.push(...stepErrors)
|
|
}
|
|
} catch (logError) {
|
|
console.error(`Failed to fetch logs for step ${step.name}:`, logError)
|
|
}
|
|
}
|
|
|
|
// 4. Sort errors by severity (security > license > build > test > dependency)
|
|
const categoryPriority: Record<ErrorCategory, number> = {
|
|
'security_warning': 1,
|
|
'license_violation': 2,
|
|
'build_error': 3,
|
|
'test_failure': 4,
|
|
'dependency_issue': 5,
|
|
}
|
|
|
|
allErrors.sort((a, b) => categoryPriority[a.category] - categoryPriority[b.category])
|
|
|
|
const response: LogExtractionResponse = {
|
|
errors: allErrors,
|
|
pipeline_number,
|
|
extracted_at: new Date().toISOString(),
|
|
lines_parsed: totalLinesParsed,
|
|
}
|
|
|
|
return NextResponse.json(response)
|
|
|
|
} catch (error) {
|
|
console.error('Log extraction error:', error)
|
|
return NextResponse.json(
|
|
{ error: 'Fehler bei der Log-Extraktion' },
|
|
{ status: 500 }
|
|
)
|
|
}
|
|
}
|
|
|
|
/**
|
|
* GET /api/infrastructure/logs/extract?pipeline_number=123
|
|
*
|
|
* Convenience method - calls POST internally
|
|
*/
|
|
export async function GET(request: NextRequest) {
|
|
const searchParams = request.nextUrl.searchParams
|
|
const pipeline_number = searchParams.get('pipeline_number')
|
|
const repo_id = searchParams.get('repo_id') || '1'
|
|
|
|
if (!pipeline_number) {
|
|
return NextResponse.json(
|
|
{ error: 'pipeline_number Query-Parameter ist erforderlich' },
|
|
{ status: 400 }
|
|
)
|
|
}
|
|
|
|
// Create a mock request with JSON body
|
|
const mockRequest = new NextRequest(request.url, {
|
|
method: 'POST',
|
|
body: JSON.stringify({ pipeline_number: parseInt(pipeline_number, 10), repo_id }),
|
|
headers: {
|
|
'Content-Type': 'application/json',
|
|
},
|
|
})
|
|
|
|
return POST(mockRequest)
|
|
}
|