From 979fe20ea5bde452e3649930d15678fa4c387101 Mon Sep 17 00:00:00 2001
From: Benjamin Admin
Date: Tue, 12 May 2026 18:02:05 +0200
Subject: [PATCH] fix(use-case-compiler): increase LLM timeout to 45s, reduce batch to 5
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Mac Mini M4 needs more time for qwen3:30b. Reduced batch from 10→5 MCs
and increased timeout from 20→45s to give the LLM a fair chance.

Co-authored-by: Claude Opus 4.6 (1M context)
---
 ai-compliance-sdk/internal/usecase/compiler_llm.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ai-compliance-sdk/internal/usecase/compiler_llm.go b/ai-compliance-sdk/internal/usecase/compiler_llm.go
index b425977..5c78593 100644
--- a/ai-compliance-sdk/internal/usecase/compiler_llm.go
+++ b/ai-compliance-sdk/internal/usecase/compiler_llm.go
@@ -31,7 +31,7 @@ type llmQuestion struct {
 }
 
 // maxLLMMCs limits how many MCs we send to the LLM in one batch.
-const maxLLMMCs = 10
+const maxLLMMCs = 5
 
 // GenerateQuestions generates questions for MCs using a single batched LLM call.
 func (g *LLMQuestionGenerator) GenerateQuestions(mcs []MCInfo, regulations []string) ([]Question, error) {
@@ -45,7 +45,7 @@ func (g *LLMQuestionGenerator) GenerateQuestions(mcs []MCInfo, regulations []str
 		batch = batch[:maxLLMMCs]
 	}
 
-	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
 	defer cancel()
 
 	prompt := buildBatchPrompt(batch, regulations)