Fix formatting and clippy warnings across workspace
All checks were successful
CI / Format (push) Successful in 3s
CI / Clippy (push) Successful in 2m15s
CI / Security Audit (push) Successful in 1m34s
CI / Tests (push) Successful in 3m4s

- Run cargo fmt on all crates
- Fix regex patterns in patterns.rs that relied on unsupported lookahead
- Replace unwrap() calls with compile_regex() helper
- Fix never type fallback in GitHub tracker
- Fix redundant field name in findings page
- Allow enum_variant_names for Dioxus Route enum
- Fix &mut Vec -> &mut [T] clippy lint in sbom.rs
- Mark intentionally-retained but currently unused APIs with #[allow(dead_code)]

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Sharang Parnerkar
2026-03-02 17:41:03 +01:00
parent 62196e5d74
commit 03ee69834d
37 changed files with 519 additions and 220 deletions

View File

@@ -58,7 +58,10 @@ impl LlmClient {
user_prompt: &str,
temperature: Option<f64>,
) -> Result<String, AgentError> {
let url = format!("{}/v1/chat/completions", self.base_url.trim_end_matches('/'));
let url = format!(
"{}/v1/chat/completions",
self.base_url.trim_end_matches('/')
);
let request_body = ChatCompletionRequest {
model: self.model.clone(),
@@ -87,19 +90,23 @@ impl LlmClient {
req = req.header("Authorization", format!("Bearer {key}"));
}
let resp = req.send().await.map_err(|e| {
AgentError::Other(format!("LiteLLM request failed: {e}"))
})?;
let resp = req
.send()
.await
.map_err(|e| AgentError::Other(format!("LiteLLM request failed: {e}")))?;
if !resp.status().is_success() {
let status = resp.status();
let body = resp.text().await.unwrap_or_default();
return Err(AgentError::Other(format!("LiteLLM returned {status}: {body}")));
return Err(AgentError::Other(format!(
"LiteLLM returned {status}: {body}"
)));
}
let body: ChatCompletionResponse = resp.json().await.map_err(|e| {
AgentError::Other(format!("Failed to parse LiteLLM response: {e}"))
})?;
let body: ChatCompletionResponse = resp
.json()
.await
.map_err(|e| AgentError::Other(format!("Failed to parse LiteLLM response: {e}")))?;
body.choices
.first()
@@ -107,12 +114,16 @@ impl LlmClient {
.ok_or_else(|| AgentError::Other("Empty response from LiteLLM".to_string()))
}
#[allow(dead_code)]
pub async fn chat_with_messages(
&self,
messages: Vec<(String, String)>,
temperature: Option<f64>,
) -> Result<String, AgentError> {
let url = format!("{}/v1/chat/completions", self.base_url.trim_end_matches('/'));
let url = format!(
"{}/v1/chat/completions",
self.base_url.trim_end_matches('/')
);
let request_body = ChatCompletionRequest {
model: self.model.clone(),
@@ -135,19 +146,23 @@ impl LlmClient {
req = req.header("Authorization", format!("Bearer {key}"));
}
let resp = req.send().await.map_err(|e| {
AgentError::Other(format!("LiteLLM request failed: {e}"))
})?;
let resp = req
.send()
.await
.map_err(|e| AgentError::Other(format!("LiteLLM request failed: {e}")))?;
if !resp.status().is_success() {
let status = resp.status();
let body = resp.text().await.unwrap_or_default();
return Err(AgentError::Other(format!("LiteLLM returned {status}: {body}")));
return Err(AgentError::Other(format!(
"LiteLLM returned {status}: {body}"
)));
}
let body: ChatCompletionResponse = resp.json().await.map_err(|e| {
AgentError::Other(format!("Failed to parse LiteLLM response: {e}"))
})?;
let body: ChatCompletionResponse = resp
.json()
.await
.map_err(|e| AgentError::Other(format!("Failed to parse LiteLLM response: {e}")))?;
body.choices
.first()