fix(ocr-pipeline): increase LLM timeout to 300s and disable qwen3 thinking
- Add /no_think tag to prompt (qwen3 thinking mode causes massive slowdown)
- Increase httpx timeout from 120s to 300s for large vocab tables
- Improve error logging with traceback and exception type

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
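The prompt and timeout changes listed above are not visible in the hunk shown below, which only covers the error-logging change. A minimal sketch of what they might look like, assuming the pipeline talks to a local qwen3 endpoint through httpx.AsyncClient; llm_review_entries appears in the diff, but the prompt constant, endpoint URL, and request shape here are illustrative assumptions, not the actual implementation:

import httpx

# Qwen3 treats a leading /no_think tag as a request to skip its "thinking"
# phase, which was the main source of the slowdown on large vocab tables.
LLM_REVIEW_PROMPT = "/no_think\nReview the following OCR vocab entries..."  # illustrative prompt text

async def llm_review_entries(entries, model):
    # 300s read budget instead of 120s so large vocab tables can finish.
    timeout = httpx.Timeout(300.0, connect=10.0)
    async with httpx.AsyncClient(timeout=timeout) as client:
        resp = await client.post(
            "http://localhost:11434/v1/chat/completions",  # illustrative endpoint
            json={
                "model": model,
                "messages": [
                    {"role": "system", "content": LLM_REVIEW_PROMPT},
                    {"role": "user", "content": "\n".join(str(e) for e in entries)},
                ],
            },
        )
        resp.raise_for_status()
        return resp.json()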
@@ -1420,8 +1420,9 @@ async def run_llm_review(session_id: str, request: Request):
     try:
         result = await llm_review_entries(entries, model=model)
     except Exception as e:
-        logger.error(f"LLM review failed for session {session_id}: {e}")
-        raise HTTPException(status_code=502, detail=f"LLM review failed: {e}")
+        import traceback
+        logger.error(f"LLM review failed for session {session_id}: {type(e).__name__}: {e}\n{traceback.format_exc()}")
+        raise HTTPException(status_code=502, detail=f"LLM review failed ({type(e).__name__}): {e}")
 
     # Store result inside word_result as a sub-key
     word_result["llm_review"] = {