Fix formatting and clippy warnings across workspace

- Run cargo fmt on all crates
- Rewrite regex patterns in patterns.rs that used lookahead, which the regex crate does not support
- Replace unwrap() calls with a compile_regex() helper (see the first sketch below)
- Fix never type fallback in GitHub tracker
- Fix redundant field name in findings page
- Allow enum_variant_names for Dioxus Route enum
- Fix the &mut Vec -> &mut [T] clippy lint (ptr_arg) in sbom.rs (see the second sketch below)
- Mark unused-but-intended APIs with #[allow(dead_code)]
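
Neither patterns.rs hunk appears in the diffs below, so the following is a hedged sketch only: a compile_regex() helper that centralizes compilation of hard-coded patterns might look like this. The name comes from the message above, but the signature and panic message are assumptions, not the repo's actual helper.

use regex::Regex;

// Hypothetical helper: for patterns baked into the binary, failing fast
// with the offending pattern in the message beats a bare unwrap() on
// Regex::new.
fn compile_regex(pattern: &str) -> Regex {
    Regex::new(pattern).unwrap_or_else(|e| panic!("invalid built-in regex {pattern:?}: {e}"))
}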
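
The sbom.rs change is likewise not shown. The pattern behind the ptr_arg lint is that a &mut Vec<T> parameter which never grows or shrinks the vector should take &mut [T] instead. A hypothetical illustration (the function and its contents are not from this repo):

// Was: fn normalize_licenses(licenses: &mut Vec<String>)
// clippy::ptr_arg: only the elements are mutated, so a slice suffices,
// and the function now also accepts arrays and boxed slices.
fn normalize_licenses(licenses: &mut [String]) {
    for license in licenses.iter_mut() {
        *license = license.trim().to_string();
    }
}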

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Sharang Parnerkar
2026-03-02 17:41:03 +01:00
parent 62196e5d74
commit 03ee69834d
37 changed files with 519 additions and 220 deletions

View File

@@ -58,7 +58,10 @@ impl LlmClient {
         user_prompt: &str,
         temperature: Option<f64>,
     ) -> Result<String, AgentError> {
-        let url = format!("{}/v1/chat/completions", self.base_url.trim_end_matches('/'));
+        let url = format!(
+            "{}/v1/chat/completions",
+            self.base_url.trim_end_matches('/')
+        );
 
         let request_body = ChatCompletionRequest {
             model: self.model.clone(),
@@ -87,19 +90,23 @@ impl LlmClient {
             req = req.header("Authorization", format!("Bearer {key}"));
         }
 
-        let resp = req.send().await.map_err(|e| {
-            AgentError::Other(format!("LiteLLM request failed: {e}"))
-        })?;
+        let resp = req
+            .send()
+            .await
+            .map_err(|e| AgentError::Other(format!("LiteLLM request failed: {e}")))?;
 
         if !resp.status().is_success() {
             let status = resp.status();
             let body = resp.text().await.unwrap_or_default();
-            return Err(AgentError::Other(format!("LiteLLM returned {status}: {body}")));
+            return Err(AgentError::Other(format!(
+                "LiteLLM returned {status}: {body}"
+            )));
         }
 
-        let body: ChatCompletionResponse = resp.json().await.map_err(|e| {
-            AgentError::Other(format!("Failed to parse LiteLLM response: {e}"))
-        })?;
+        let body: ChatCompletionResponse = resp
+            .json()
+            .await
+            .map_err(|e| AgentError::Other(format!("Failed to parse LiteLLM response: {e}")))?;
 
         body.choices
             .first()
@@ -107,12 +114,16 @@ impl LlmClient {
             .ok_or_else(|| AgentError::Other("Empty response from LiteLLM".to_string()))
     }
 
+    #[allow(dead_code)]
     pub async fn chat_with_messages(
         &self,
         messages: Vec<(String, String)>,
         temperature: Option<f64>,
     ) -> Result<String, AgentError> {
-        let url = format!("{}/v1/chat/completions", self.base_url.trim_end_matches('/'));
+        let url = format!(
+            "{}/v1/chat/completions",
+            self.base_url.trim_end_matches('/')
+        );
 
         let request_body = ChatCompletionRequest {
             model: self.model.clone(),
@@ -135,19 +146,23 @@ impl LlmClient {
             req = req.header("Authorization", format!("Bearer {key}"));
         }
 
-        let resp = req.send().await.map_err(|e| {
-            AgentError::Other(format!("LiteLLM request failed: {e}"))
-        })?;
+        let resp = req
+            .send()
+            .await
+            .map_err(|e| AgentError::Other(format!("LiteLLM request failed: {e}")))?;
 
         if !resp.status().is_success() {
             let status = resp.status();
             let body = resp.text().await.unwrap_or_default();
-            return Err(AgentError::Other(format!("LiteLLM returned {status}: {body}")));
+            return Err(AgentError::Other(format!(
+                "LiteLLM returned {status}: {body}"
+            )));
         }
 
-        let body: ChatCompletionResponse = resp.json().await.map_err(|e| {
-            AgentError::Other(format!("Failed to parse LiteLLM response: {e}"))
-        })?;
+        let body: ChatCompletionResponse = resp
+            .json()
+            .await
+            .map_err(|e| AgentError::Other(format!("Failed to parse LiteLLM response: {e}")))?;
 
         body.choices
             .first()
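
chat_with_messages is currently unused, hence the new #[allow(dead_code)]; its signature reads as a multi-turn variant of chat. A hypothetical call site, assuming the tuples are (role, content) pairs, which the diff does not confirm:

let reply = llm
    .chat_with_messages(
        vec![
            ("system".to_string(), "You are a security triage assistant.".to_string()),
            ("user".to_string(), "Summarize this finding in one line.".to_string()),
        ],
        Some(0.2),
    )
    .await?;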

View File

@@ -40,14 +40,19 @@ pub async fn generate_issue_description(
         finding.title,
         finding.description,
         finding.file_path.as_deref().unwrap_or("N/A"),
-        finding.line_number.map(|n| n.to_string()).unwrap_or_else(|| "N/A".to_string()),
+        finding
+            .line_number
+            .map(|n| n.to_string())
+            .unwrap_or_else(|| "N/A".to_string()),
         finding.code_snippet.as_deref().unwrap_or("N/A"),
         finding.cwe.as_deref().unwrap_or("N/A"),
         finding.cve.as_deref().unwrap_or("N/A"),
         finding.remediation.as_deref().unwrap_or("N/A"),
     );
 
-    let response = llm.chat(DESCRIPTION_SYSTEM_PROMPT, &user_prompt, Some(0.3)).await?;
+    let response = llm
+        .chat(DESCRIPTION_SYSTEM_PROMPT, &user_prompt, Some(0.3))
+        .await?;
 
     // Extract title from first line, rest is body
     let mut lines = response.lines();
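
The hunk stops right where the response is split; per the comment above, the first line becomes the issue title and the remainder the body. The continuation is not part of this diff, so this two-line sketch of the split is an assumption, not the repo's code:

let title = lines.next().unwrap_or("Security finding").trim().to_string();
let body = lines.collect::<Vec<_>>().join("\n").trim().to_string();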

View File

@@ -7,10 +7,7 @@ use crate::llm::LlmClient;
 
 const FIX_SYSTEM_PROMPT: &str = r#"You are a security engineer. Given a security finding with code context, suggest a concrete code fix. Return ONLY the fixed code snippet that can directly replace the vulnerable code. Include brief inline comments explaining the fix."#;
 
-pub async fn suggest_fix(
-    llm: &Arc<LlmClient>,
-    finding: &Finding,
-) -> Result<String, AgentError> {
+pub async fn suggest_fix(llm: &Arc<LlmClient>, finding: &Finding) -> Result<String, AgentError> {
     let user_prompt = format!(
         "Suggest a fix for this vulnerability:\n\
          Language context from file: {}\n\
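
For reference, a hypothetical call site for the now single-line signature (the llm and finding bindings are assumed, not shown in this diff):

// llm: Arc<LlmClient>, finding: Finding -- both assumed to be in scope.
let fix_snippet = suggest_fix(&llm, &finding).await?;
println!("Proposed fix:\n{fix_snippet}");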

View File

@@ -19,7 +19,10 @@ pub async fn generate_pr_review(
     findings: &[Finding],
 ) -> Result<(String, Vec<ReviewComment>), AgentError> {
     if findings.is_empty() {
-        return Ok(("No security issues found in this PR.".to_string(), Vec::new()));
+        return Ok((
+            "No security issues found in this PR.".to_string(),
+            Vec::new(),
+        ));
     }
 
     let findings_text: Vec<String> = findings
@@ -30,7 +33,10 @@ pub async fn generate_pr_review(
             severity = f.severity,
             title = f.title,
             file = f.file_path.as_deref().unwrap_or("unknown"),
-            line = f.line_number.map(|n| n.to_string()).unwrap_or_else(|| "?".to_string()),
+            line = f
+                .line_number
+                .map(|n| n.to_string())
+                .unwrap_or_else(|| "?".to_string()),
             code = f.code_snippet.as_deref().unwrap_or("N/A"),
             rule = f.rule_id.as_deref().unwrap_or("N/A"),
         )
@@ -43,7 +49,9 @@ pub async fn generate_pr_review(
         findings_text.join("\n"),
     );
 
-    let response = llm.chat(PR_REVIEW_SYSTEM_PROMPT, &user_prompt, Some(0.3)).await?;
+    let response = llm
+        .chat(PR_REVIEW_SYSTEM_PROMPT, &user_prompt, Some(0.3))
+        .await?;
 
     // Parse comments from LLM response
     let comments: Vec<ReviewComment> = serde_json::from_str::<Vec<PrComment>>(&response)
@@ -61,7 +69,12 @@ pub async fn generate_pr_review(
         findings.len(),
         findings
             .iter()
-            .map(|f| format!("- **[{}]** {} in `{}`", f.severity, f.title, f.file_path.as_deref().unwrap_or("unknown")))
+            .map(|f| format!(
+                "- **[{}]** {} in `{}`",
+                f.severity,
+                f.title,
+                f.file_path.as_deref().unwrap_or("unknown")
+            ))
             .collect::<Vec<_>>()
             .join("\n"),
     );

View File

@@ -28,7 +28,10 @@ pub async fn triage_findings(llm: &Arc<LlmClient>, findings: &mut Vec<Finding>)
             finding.code_snippet.as_deref().unwrap_or("N/A"),
         );
 
-        match llm.chat(TRIAGE_SYSTEM_PROMPT, &user_prompt, Some(0.1)).await {
+        match llm
+            .chat(TRIAGE_SYSTEM_PROMPT, &user_prompt, Some(0.1))
+            .await
+        {
             Ok(response) => {
                 if let Ok(result) = serde_json::from_str::<TriageResult>(&response) {
                     finding.confidence = Some(result.confidence);
@@ -46,7 +49,10 @@ pub async fn triage_findings(llm: &Arc<LlmClient>, findings: &mut Vec<Finding>)
                     // If LLM response doesn't parse, keep the finding
                     finding.status = FindingStatus::Triaged;
                     passed += 1;
-                    tracing::warn!("Failed to parse triage response for {}: {response}", finding.fingerprint);
+                    tracing::warn!(
+                        "Failed to parse triage response for {}: {response}",
+                        finding.fingerprint
+                    );
                 }
             }
             Err(e) => {
@@ -66,6 +72,7 @@ pub async fn triage_findings(llm: &Arc<LlmClient>, findings: &mut Vec<Finding>)
 #[derive(serde::Deserialize)]
 struct TriageResult {
     #[serde(default)]
+    #[allow(dead_code)]
     true_positive: bool,
     #[serde(default)]
     confidence: f64,
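
For reference, a minimal sketch of the JSON shape TriageResult accepts (values hypothetical, and assuming any fields below the visible hunk also carry #[serde(default)]); true_positive is parsed but currently unread, hence the new #[allow(dead_code)]:

let raw = r#"{"true_positive": false, "confidence": 0.35}"#;
let parsed: TriageResult = serde_json::from_str(raw).expect("valid triage JSON");
assert!(parsed.confidence < 0.5); // only confidence is read by triage_findings today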