Commit 8af810cdd2 by Sharang Parnerkar
fix: stop storing code review findings in dashboard, use PR comments only
All checks were successful
CI / Check (pull_request) Successful in 10m59s
CI / Detect Changes (pull_request) Has been skipped
CI / Deploy Agent (pull_request) Has been skipped
CI / Deploy Dashboard (pull_request) Has been skipped
CI / Deploy Docs (pull_request) Has been skipped
CI / Deploy MCP (pull_request) Has been skipped
Code review findings are not actionable in the findings dashboard — they
lack PR context and clutter the list. The PR review pipeline already
posts inline comments directly on PRs (GitHub, Gitea, GitLab), which is
the appropriate place for code review feedback.

- Remove LLM code review stage from the scan pipeline (orchestrator)
- Remove "Code Review" option from the findings type filter dropdown

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-18 14:21:09 +01:00
11 changed files with 13 additions and 721 deletions

Cargo.lock generated (6 lines changed)
View File

@@ -4699,9 +4699,9 @@ dependencies = [
[[package]]
name = "rustls-webpki"
version = "0.103.10"
version = "0.103.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef"
checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53"
dependencies = [
"ring",
"rustls-pki-types",
@@ -5171,7 +5171,7 @@ version = "0.8.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1c97747dbf44bb1ca44a561ece23508e99cb592e862f22222dcf42f51d1e451"
dependencies = [
"heck 0.4.1",
"heck 0.5.0",
"proc-macro2",
"quote",
"syn",

View File

@@ -95,8 +95,8 @@ pub async fn export_session_report(
Err(_) => Vec::new(),
};
-// Fetch DAST findings for this session, then deduplicate
-let raw_findings: Vec<DastFinding> = match agent
+// Fetch DAST findings for this session
+let findings: Vec<DastFinding> = match agent
.db
.dast_findings()
.find(doc! { "session_id": &id })
@@ -106,14 +106,6 @@ pub async fn export_session_report(
Ok(cursor) => collect_cursor_async(cursor).await,
Err(_) => Vec::new(),
};
-let raw_count = raw_findings.len();
-let findings = crate::pipeline::dedup::dedup_dast_findings(raw_findings);
-if findings.len() < raw_count {
-tracing::info!(
-"Deduped DAST findings for session {id}: {raw_count} → {}",
-findings.len()
-);
-}
// Fetch SAST findings, SBOM, and code context for the linked repository
let repo_id = session

View File

@@ -237,92 +237,5 @@ pub async fn delete_repository(
.delete_many(doc! { "repo_id": &id })
.await;
-// Cascade delete DAST targets linked to this repo, and all their downstream data
-// (scan runs, findings, pentest sessions, attack chains, messages)
-if let Ok(mut cursor) = db.dast_targets().find(doc! { "repo_id": &id }).await {
-use futures_util::StreamExt;
-while let Some(Ok(target)) = cursor.next().await {
-let target_id = target.id.map(|oid| oid.to_hex()).unwrap_or_default();
-if !target_id.is_empty() {
-cascade_delete_dast_target(db, &target_id).await;
-}
-}
-}
-// Also delete pentest sessions linked directly to this repo (not via target)
-if let Ok(mut cursor) = db.pentest_sessions().find(doc! { "repo_id": &id }).await {
-use futures_util::StreamExt;
-while let Some(Ok(session)) = cursor.next().await {
-let session_id = session.id.map(|oid| oid.to_hex()).unwrap_or_default();
-if !session_id.is_empty() {
-let _ = db
-.attack_chain_nodes()
-.delete_many(doc! { "session_id": &session_id })
-.await;
-let _ = db
-.pentest_messages()
-.delete_many(doc! { "session_id": &session_id })
-.await;
-// Delete DAST findings produced by this session
-let _ = db
-.dast_findings()
-.delete_many(doc! { "session_id": &session_id })
-.await;
-}
-}
-}
-let _ = db
-.pentest_sessions()
-.delete_many(doc! { "repo_id": &id })
-.await;
Ok(Json(serde_json::json!({ "status": "deleted" })))
}
-/// Cascade-delete a DAST target and all its downstream data.
-async fn cascade_delete_dast_target(db: &crate::database::Database, target_id: &str) {
-// Delete pentest sessions for this target (and their attack chains + messages)
-if let Ok(mut cursor) = db
-.pentest_sessions()
-.find(doc! { "target_id": target_id })
-.await
-{
-use futures_util::StreamExt;
-while let Some(Ok(session)) = cursor.next().await {
-let session_id = session.id.map(|oid| oid.to_hex()).unwrap_or_default();
-if !session_id.is_empty() {
-let _ = db
-.attack_chain_nodes()
-.delete_many(doc! { "session_id": &session_id })
-.await;
-let _ = db
-.pentest_messages()
-.delete_many(doc! { "session_id": &session_id })
-.await;
-let _ = db
-.dast_findings()
-.delete_many(doc! { "session_id": &session_id })
-.await;
-}
-}
-}
-let _ = db
-.pentest_sessions()
-.delete_many(doc! { "target_id": target_id })
-.await;
-// Delete DAST scan runs and their findings
-let _ = db
-.dast_findings()
-.delete_many(doc! { "target_id": target_id })
-.await;
-let _ = db
-.dast_scan_runs()
-.delete_many(doc! { "target_id": target_id })
-.await;
-// Delete the target itself
-if let Ok(oid) = mongodb::bson::oid::ObjectId::parse_str(target_id) {
-let _ = db.dast_targets().delete_one(doc! { "_id": oid }).await;
-}
-}

View File

@@ -321,38 +321,9 @@ impl PentestOrchestrator {
total_findings += findings_count;
let mut finding_ids: Vec<String> = Vec::new();
-// Dedup findings within this tool result before inserting
-let deduped_findings =
-crate::pipeline::dedup::dedup_dast_findings(
-result.findings,
-);
-for mut finding in deduped_findings {
+for mut finding in result.findings {
finding.scan_run_id = session_id.clone();
finding.session_id = Some(session_id.clone());
-// Check for existing duplicate in this session
-let fp = crate::pipeline::dedup::compute_dast_fingerprint(
-&finding,
-);
-let existing = self
-.db
-.dast_findings()
-.find_one(doc! {
-"session_id": &session_id,
-"title": &finding.title,
-"endpoint": &finding.endpoint,
-"method": &finding.method,
-})
-.await;
-if matches!(existing, Ok(Some(_))) {
-tracing::debug!(
-"Skipping duplicate DAST finding: {} (fp={:.12})",
-finding.title,
-fp,
-);
-continue;
-}
let insert_result =
self.db.dast_findings().insert_one(&finding).await;
if let Ok(res) = &insert_result {

View File

@@ -66,10 +66,8 @@ impl CodeReviewScanner {
}
}
-let deduped = dedup_cross_pass(all_findings);
ScanOutput {
-findings: deduped,
+findings: all_findings,
sbom_entries: Vec::new(),
}
}
@@ -186,51 +184,3 @@ struct ReviewIssue {
#[serde(default)]
suggestion: Option<String>,
}
-/// Deduplicate findings across review passes.
-///
-/// Multiple passes often flag the same issue (e.g. SQL injection reported by
-/// logic, security, and convention passes). We group by file + nearby line +
-/// normalized title keywords and keep the highest-severity finding.
-fn dedup_cross_pass(findings: Vec<Finding>) -> Vec<Finding> {
-use std::collections::HashMap;
-// Build a dedup key: (file, line bucket, normalized title words)
-fn dedup_key(f: &Finding) -> String {
-let file = f.file_path.as_deref().unwrap_or("");
-// Group lines within 3 of each other
-let line_bucket = f.line_number.unwrap_or(0) / 4;
-// Normalize: lowercase, keep only alphanumeric, sort words for order-independence
-let title_lower = f.title.to_lowercase();
-let mut words: Vec<&str> = title_lower
-.split(|c: char| !c.is_alphanumeric())
-.filter(|w| w.len() > 2)
-.collect();
-words.sort();
-format!("{file}:{line_bucket}:{}", words.join(","))
-}
-let mut groups: HashMap<String, Finding> = HashMap::new();
-for finding in findings {
-let key = dedup_key(&finding);
-groups
-.entry(key)
-.and_modify(|existing| {
-// Keep the higher severity; on tie, keep the one with more detail
-if finding.severity > existing.severity
-|| (finding.severity == existing.severity
-&& finding.description.len() > existing.description.len())
-{
-*existing = finding.clone();
-}
-// Merge CWE if the existing one is missing it
-if existing.cwe.is_none() {
-existing.cwe = finding.cwe.clone();
-}
-})
-.or_insert(finding);
-}
-groups.into_values().collect()
-}
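
For reference, the grouping key used by the removed dedup_cross_pass is easy to demonstrate in isolation. The sketch below re-derives it over plain strings standing in for the Finding fields; the standalone function and the sample values are illustrative, not part of the codebase:

// Minimal sketch of the removed dedup key: (file, line bucket, sorted title words).
fn dedup_key(file: &str, line: u32, title: &str) -> String {
    let line_bucket = line / 4; // lines 0-3, 4-7, ... fall in the same bucket
    let title_lower = title.to_lowercase();
    let mut words: Vec<&str> = title_lower
        .split(|c: char| !c.is_alphanumeric())
        .filter(|w| w.len() > 2) // drop short words like "in", "of"
        .collect();
    words.sort(); // word order no longer matters
    format!("{file}:{line_bucket}:{}", words.join(","))
}

fn main() {
    // Two passes flagging the same issue two lines apart share a key...
    assert_eq!(
        dedup_key("src/db.rs", 41, "SQL injection in query builder"),
        dedup_key("src/db.rs", 42, "Query builder SQL injection"),
    );
    // ...while the same title in another file does not.
    assert_ne!(
        dedup_key("src/db.rs", 41, "SQL injection in query builder"),
        dedup_key("src/api.rs", 41, "SQL injection in query builder"),
    );
}

The line / 4 bucketing is what let separate passes that flag the same issue a few lines apart collapse into one finding, at the cost of an arbitrary cut between adjacent buckets.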

View File

@@ -1,7 +1,5 @@
use sha2::{Digest, Sha256};
-use compliance_core::models::dast::DastFinding;
pub fn compute_fingerprint(parts: &[&str]) -> String {
let mut hasher = Sha256::new();
for part in parts {
@@ -11,209 +9,9 @@ pub fn compute_fingerprint(parts: &[&str]) -> String {
hex::encode(hasher.finalize())
}
-/// Compute a dedup fingerprint for a DAST finding.
-///
-/// The key is derived from the *canonicalized* title (lowercased, domain names
-/// stripped, known synonyms resolved), endpoint, and HTTP method. This lets us
-/// detect both exact duplicates (same tool reporting twice across passes) and
-/// semantic duplicates (e.g., `security_header_missing` "Missing HSTS header"
-/// vs `tls_misconfiguration` "Missing strict-transport-security header").
-pub fn compute_dast_fingerprint(f: &DastFinding) -> String {
-let canon = canonicalize_dast_title(&f.title);
-let endpoint = f.endpoint.to_lowercase().trim_end_matches('/').to_string();
-let method = f.method.to_uppercase();
-let param = f.parameter.as_deref().unwrap_or("");
-compute_fingerprint(&[&canon, &endpoint, &method, param])
-}
-/// Canonicalize a DAST finding title for dedup purposes.
-///
-/// 1. Lowercase
-/// 2. Strip domain names / URLs (e.g. "for comp-dev.meghsakha.com")
-/// 3. Resolve known header synonyms (hsts ↔ strict-transport-security, etc.)
-/// 4. Strip extra whitespace
-fn canonicalize_dast_title(title: &str) -> String {
-let mut s = title.to_lowercase();
-// Strip "for <domain>" or "on <domain>" suffixes
-// Pattern: "for <word.word...>" or "on <method> <url>"
-if let Some(idx) = s.find(" for ") {
-// Check if what follows looks like a domain or URL
-let rest = &s[idx + 5..];
-if rest.contains('.') || rest.starts_with("http") {
-s.truncate(idx);
-}
-}
-if let Some(idx) = s.find(" on ") {
-let rest = &s[idx + 4..];
-if rest.contains("http") || rest.contains('/') {
-s.truncate(idx);
-}
-}
-// Resolve known header synonyms
-let synonyms: &[(&str, &str)] = &[
-("hsts", "strict-transport-security"),
-("csp", "content-security-policy"),
-("cors", "cross-origin-resource-sharing"),
-("xfo", "x-frame-options"),
-];
-for &(short, canonical) in synonyms {
-// Only replace whole words — check boundaries
-if let Some(pos) = s.find(short) {
-let before_ok = pos == 0 || !s.as_bytes()[pos - 1].is_ascii_alphanumeric();
-let after_ok = pos + short.len() >= s.len()
-|| !s.as_bytes()[pos + short.len()].is_ascii_alphanumeric();
-if before_ok && after_ok {
-s = format!("{}{}{}", &s[..pos], canonical, &s[pos + short.len()..]);
-}
-}
-}
-// Collapse whitespace
-s.split_whitespace().collect::<Vec<_>>().join(" ")
-}
-/// Deduplicate a list of DAST findings, merging evidence from duplicates.
-///
-/// Two-phase approach:
-/// 1. **Exact dedup** — group by canonicalized `(title, endpoint, method, parameter)`.
-/// Merge evidence arrays, keep the highest severity, preserve exploitable flag.
-/// 2. **CWE-based dedup** — within the same `(cwe, endpoint, method)` group, merge
-/// findings whose canonicalized titles resolve to the same subject (e.g., HSTS
-/// reported as both `security_header_missing` and `tls_misconfiguration`).
-pub fn dedup_dast_findings(findings: Vec<DastFinding>) -> Vec<DastFinding> {
-use std::collections::HashMap;
-if findings.len() <= 1 {
-return findings;
-}
-// Phase 1: exact fingerprint dedup
-let mut seen: HashMap<String, usize> = HashMap::new();
-let mut deduped: Vec<DastFinding> = Vec::new();
-for finding in findings {
-let fp = compute_dast_fingerprint(&finding);
-if let Some(&idx) = seen.get(&fp) {
-// Merge into existing
-merge_dast_finding(&mut deduped[idx], &finding);
-} else {
-seen.insert(fp, deduped.len());
-deduped.push(finding);
-}
-}
-let before = deduped.len();
-// Phase 2: CWE-based related dedup
-// Group by (cwe, endpoint_normalized, method) — only when CWE is present
-let mut cwe_groups: HashMap<String, Vec<usize>> = HashMap::new();
-for (i, f) in deduped.iter().enumerate() {
-if let Some(ref cwe) = f.cwe {
-let key = format!(
-"{}|{}|{}",
-cwe,
-f.endpoint.to_lowercase().trim_end_matches('/'),
-f.method.to_uppercase(),
-);
-cwe_groups.entry(key).or_default().push(i);
-}
-}
-// For each CWE group with multiple findings, keep the one with highest severity
-// and most evidence, merge the rest into it
-let mut merge_map: HashMap<usize, Vec<usize>> = HashMap::new();
-let mut remove_indices: Vec<usize> = Vec::new();
-for indices in cwe_groups.values() {
-if indices.len() <= 1 {
-continue;
-}
-// Find the "primary" finding: highest severity, then most evidence, then longest description
-let Some(&primary_idx) = indices.iter().max_by(|&&a, &&b| {
-deduped[a]
-.severity
-.cmp(&deduped[b].severity)
-.then_with(|| deduped[a].evidence.len().cmp(&deduped[b].evidence.len()))
-.then_with(|| {
-deduped[a]
-.description
-.len()
-.cmp(&deduped[b].description.len())
-})
-}) else {
-continue;
-};
-for &idx in indices {
-if idx != primary_idx {
-remove_indices.push(idx);
-merge_map.entry(primary_idx).or_default().push(idx);
-}
-}
-}
-if !remove_indices.is_empty() {
-remove_indices.sort_unstable();
-remove_indices.dedup();
-// Merge evidence
-for (&primary, secondaries) in &merge_map {
-let extra_evidence: Vec<_> = secondaries
-.iter()
-.flat_map(|&i| deduped[i].evidence.clone())
-.collect();
-let any_exploitable = secondaries.iter().any(|&i| deduped[i].exploitable);
-deduped[primary].evidence.extend(extra_evidence);
-if any_exploitable {
-deduped[primary].exploitable = true;
-}
-}
-// Remove merged findings (iterate in reverse to preserve indices)
-for &idx in remove_indices.iter().rev() {
-deduped.remove(idx);
-}
-}
-let after = deduped.len();
-if before != after {
-tracing::debug!(
-"DAST CWE-based dedup: {before} → {after} findings ({} merged)",
-before - after
-);
-}
-deduped
-}
-/// Merge a duplicate DAST finding into a primary one.
-fn merge_dast_finding(primary: &mut DastFinding, duplicate: &DastFinding) {
-primary.evidence.extend(duplicate.evidence.clone());
-if duplicate.severity > primary.severity {
-primary.severity = duplicate.severity.clone();
-}
-if duplicate.exploitable {
-primary.exploitable = true;
-}
-// Keep the longer/better description
-if duplicate.description.len() > primary.description.len() {
-primary.description.clone_from(&duplicate.description);
-}
-// Keep remediation if primary doesn't have one
-if primary.remediation.is_none() && duplicate.remediation.is_some() {
-primary.remediation.clone_from(&duplicate.remediation);
-}
-}
#[cfg(test)]
mod tests {
use super::*;
-use compliance_core::models::dast::DastVulnType;
-use compliance_core::models::finding::Severity;
#[test]
fn fingerprint_is_deterministic() {
@@ -257,159 +55,4 @@ mod tests {
let b = compute_fingerprint(&["a", "bc"]);
assert_ne!(a, b);
}
-fn make_dast(title: &str, endpoint: &str, vuln_type: DastVulnType) -> DastFinding {
-let mut f = DastFinding::new(
-"run1".into(),
-"target1".into(),
-vuln_type,
-title.into(),
-format!("Description for {title}"),
-Severity::Medium,
-endpoint.into(),
-"GET".into(),
-);
-f.cwe = Some("CWE-319".into());
-f
-}
-#[test]
-fn canonicalize_strips_domain_suffix() {
-let canon = canonicalize_dast_title("Missing HSTS header for comp-dev.meghsakha.com");
-assert!(!canon.contains("meghsakha"), "domain should be stripped");
-assert!(
-canon.contains("strict-transport-security"),
-"hsts should be resolved: {canon}"
-);
-}
-#[test]
-fn canonicalize_resolves_synonyms() {
-let a = canonicalize_dast_title("Missing HSTS header");
-let b = canonicalize_dast_title("Missing strict-transport-security header");
-assert_eq!(a, b);
-}
-#[test]
-fn exact_dedup_merges_identical_findings() {
-let f1 = make_dast(
-"Missing strict-transport-security header",
-"https://example.com",
-DastVulnType::SecurityHeaderMissing,
-);
-let f2 = make_dast(
-"Missing strict-transport-security header",
-"https://example.com",
-DastVulnType::SecurityHeaderMissing,
-);
-let result = dedup_dast_findings(vec![f1, f2]);
-assert_eq!(result.len(), 1, "exact duplicates should be merged");
-}
-#[test]
-fn synonym_dedup_merges_hsts_variants() {
-let f1 = make_dast(
-"Missing strict-transport-security header",
-"https://example.com",
-DastVulnType::SecurityHeaderMissing,
-);
-let f2 = make_dast(
-"Missing HSTS header for example.com",
-"https://example.com",
-DastVulnType::TlsMisconfiguration,
-);
-let result = dedup_dast_findings(vec![f1, f2]);
-assert_eq!(
-result.len(),
-1,
-"HSTS synonym variants should merge to 1 finding"
-);
-}
-#[test]
-fn different_headers_not_merged() {
-let mut f1 = make_dast(
-"Missing x-content-type-options header",
-"https://example.com",
-DastVulnType::SecurityHeaderMissing,
-);
-f1.cwe = Some("CWE-16".into());
-let mut f2 = make_dast(
-"Missing permissions-policy header",
-"https://example.com",
-DastVulnType::SecurityHeaderMissing,
-);
-f2.cwe = Some("CWE-16".into());
-// These share CWE-16 but are different headers — phase 2 will merge them
-// since they share the same CWE+endpoint. This is acceptable because they
-// have the same root cause (missing security headers configuration).
-let result = dedup_dast_findings(vec![f1, f2]);
-// CWE-based dedup will merge these into 1
-assert!(
-result.len() <= 2,
-"same CWE+endpoint findings may be merged"
-);
-}
-#[test]
-fn different_endpoints_not_merged() {
-let f1 = make_dast(
-"Missing strict-transport-security header",
-"https://example.com",
-DastVulnType::SecurityHeaderMissing,
-);
-let f2 = make_dast(
-"Missing strict-transport-security header",
-"https://other.com",
-DastVulnType::SecurityHeaderMissing,
-);
-let result = dedup_dast_findings(vec![f1, f2]);
-assert_eq!(result.len(), 2, "different endpoints should not merge");
-}
-#[test]
-fn dedup_preserves_highest_severity() {
-let f1 = make_dast(
-"Missing strict-transport-security header",
-"https://example.com",
-DastVulnType::SecurityHeaderMissing,
-);
-let mut f2 = make_dast(
-"Missing strict-transport-security header",
-"https://example.com",
-DastVulnType::SecurityHeaderMissing,
-);
-f2.severity = Severity::High;
-let result = dedup_dast_findings(vec![f1, f2]);
-assert_eq!(result.len(), 1);
-assert_eq!(result[0].severity, Severity::High);
-}
-#[test]
-fn dedup_merges_evidence() {
-let mut f1 = make_dast(
-"Missing strict-transport-security header",
-"https://example.com",
-DastVulnType::SecurityHeaderMissing,
-);
-f1.evidence
-.push(compliance_core::models::dast::DastEvidence {
-request_method: "GET".into(),
-request_url: "https://example.com".into(),
-request_headers: None,
-request_body: None,
-response_status: 200,
-response_headers: None,
-response_snippet: Some("pass 1".into()),
-screenshot_path: None,
-payload: None,
-response_time_ms: None,
-});
-let mut f2 = f1.clone();
-f2.evidence[0].response_snippet = Some("pass 2".into());
-let result = dedup_dast_findings(vec![f1, f2]);
-assert_eq!(result.len(), 1);
-assert_eq!(result[0].evidence.len(), 2, "evidence should be merged");
-}
}
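
The canonicalize-then-hash behaviour these removed tests exercised can be shown with a trimmed-down, self-contained sketch (sha2 and hex crates). It covers only the HSTS synonym and the "for <domain>" suffix, omits the parameter component, and the separator byte is our assumption, since compute_fingerprint's loop body is elided in this diff:

use sha2::{Digest, Sha256};

// Cut-down stand-in for the removed canonicalize_dast_title.
fn canonicalize(title: &str) -> String {
    let mut s = title.to_lowercase();
    // Strip a trailing "for <domain>" qualifier.
    if let Some(idx) = s.find(" for ") {
        if s[idx + 5..].contains('.') {
            s.truncate(idx);
        }
    }
    // Resolve the HSTS synonym (the real code checked word boundaries).
    s = s.replace("hsts", "strict-transport-security");
    s.split_whitespace().collect::<Vec<_>>().join(" ")
}

// Hash the canonicalized parts, roughly as compute_dast_fingerprint did.
fn fingerprint(title: &str, endpoint: &str, method: &str) -> String {
    let canon = canonicalize(title);
    let mut hasher = Sha256::new();
    for part in [canon.as_str(), endpoint, method] {
        hasher.update(part.as_bytes());
        hasher.update([0u8]); // separator byte: our assumption
    }
    hex::encode(hasher.finalize())
}

fn main() {
    // The two variants from synonym_dedup_merges_hsts_variants collide:
    let a = fingerprint("Missing HSTS header for example.com", "https://example.com", "GET");
    let b = fingerprint("Missing strict-transport-security header", "https://example.com", "GET");
    assert_eq!(a, b);
}

Both titles canonicalize to "missing strict-transport-security header", which is exactly the collision that phase 1 of the removed dedup_dast_findings relied on.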

View File

@@ -1,6 +1,5 @@
use compliance_core::models::*;
-use super::dedup::compute_fingerprint;
use super::orchestrator::PipelineOrchestrator;
use crate::error::AgentError;
use crate::pipeline::code_review::CodeReviewScanner;
@@ -90,37 +89,12 @@ impl PipelineOrchestrator {
return Ok(());
}
-// Dedup findings by fingerprint to avoid duplicate comments
-let mut seen_fps = std::collections::HashSet::new();
-let mut unique_findings: Vec<&Finding> = Vec::new();
-for finding in &pr_findings {
-let fp = compute_fingerprint(&[
-repo_id,
-&pr_number.to_string(),
-finding.file_path.as_deref().unwrap_or(""),
-&finding.line_number.unwrap_or(0).to_string(),
-&finding.title,
-]);
-if seen_fps.insert(fp) {
-unique_findings.push(finding);
-}
-}
-let pr_findings = unique_findings;
// Build review comments from findings
let mut review_comments = Vec::new();
for finding in &pr_findings {
if let (Some(path), Some(line)) = (&finding.file_path, finding.line_number) {
-let fp = compute_fingerprint(&[
-repo_id,
-&pr_number.to_string(),
-path,
-&line.to_string(),
-&finding.title,
-]);
let comment_body = format!(
"**[{}] {}**\n\n{}\n\n*Scanner: {} | {}*\n\n<!-- compliance-fp:{fp} -->",
"**[{}] {}**\n\n{}\n\n*Scanner: {} | {}*",
finding.severity,
finding.title,
finding.description,
@@ -149,17 +123,6 @@ impl PipelineOrchestrator {
.join("\n"),
);
-if review_comments.is_empty() {
-// All findings were on files/lines we can't comment on inline
-if let Err(e) = tracker
-.create_pr_review(owner, tracker_repo_name, pr_number, &summary, Vec::new())
-.await
-{
-tracing::warn!("[{repo_id}] Failed to post PR review summary: {e}");
-}
-return Ok(());
-}
if let Err(e) = tracker
.create_pr_review(
owner,

View File

@@ -98,8 +98,7 @@ impl IssueTracker for GiteaTracker {
_ => "open",
};
-let resp = self
-.http
+self.http
.patch(&url)
.header(
"Authorization",
@@ -110,14 +109,6 @@ impl IssueTracker for GiteaTracker {
.await
.map_err(|e| CoreError::IssueTracker(format!("Gitea update issue failed: {e}")))?;
-if !resp.status().is_success() {
-let status = resp.status();
-let text = resp.text().await.unwrap_or_default();
-return Err(CoreError::IssueTracker(format!(
-"Gitea update issue returned {status}: {text}"
-)));
-}
Ok(())
}
@@ -132,8 +123,7 @@ impl IssueTracker for GiteaTracker {
"/repos/{owner}/{repo}/issues/{external_id}/comments"
));
-let resp = self
-.http
+self.http
.post(&url)
.header(
"Authorization",
@@ -144,14 +134,6 @@ impl IssueTracker for GiteaTracker {
.await
.map_err(|e| CoreError::IssueTracker(format!("Gitea add comment failed: {e}")))?;
-if !resp.status().is_success() {
-let status = resp.status();
-let text = resp.text().await.unwrap_or_default();
-return Err(CoreError::IssueTracker(format!(
-"Gitea add comment returned {status}: {text}"
-)));
-}
Ok(())
}
@@ -176,8 +158,7 @@ impl IssueTracker for GiteaTracker {
})
.collect();
-let resp = self
-.http
+self.http
.post(&url)
.header(
"Authorization",
@@ -192,48 +173,6 @@ impl IssueTracker for GiteaTracker {
.await
.map_err(|e| CoreError::IssueTracker(format!("Gitea PR review failed: {e}")))?;
-if !resp.status().is_success() {
-let status = resp.status();
-let text = resp.text().await.unwrap_or_default();
-// If inline comments caused the failure, retry with just the summary body
-if !comments.is_empty() {
-tracing::warn!(
-"Gitea PR review with inline comments failed ({status}): {text}, retrying as plain comment"
-);
-let fallback_url = self.api_url(&format!(
-"/repos/{owner}/{repo}/issues/{pr_number}/comments"
-));
-let fallback_resp = self
-.http
-.post(&fallback_url)
-.header(
-"Authorization",
-format!("token {}", self.token.expose_secret()),
-)
-.json(&serde_json::json!({ "body": body }))
-.send()
-.await
-.map_err(|e| {
-CoreError::IssueTracker(format!("Gitea PR comment fallback failed: {e}"))
-})?;
-if !fallback_resp.status().is_success() {
-let fb_status = fallback_resp.status();
-let fb_text = fallback_resp.text().await.unwrap_or_default();
-return Err(CoreError::IssueTracker(format!(
-"Gitea PR comment fallback returned {fb_status}: {fb_text}"
-)));
-}
-return Ok(());
-}
-return Err(CoreError::IssueTracker(format!(
-"Gitea PR review returned {status}: {text}"
-)));
-}
Ok(())
}

View File

@@ -113,72 +113,6 @@ pub async fn add_mcp_server(
Ok(())
}
-/// Probe each MCP server's health endpoint and update status in MongoDB.
-#[server]
-pub async fn refresh_mcp_status() -> Result<(), ServerFnError> {
-use chrono::Utc;
-use compliance_core::models::McpServerStatus;
-use mongodb::bson::doc;
-let state: super::server_state::ServerState =
-dioxus_fullstack::FullstackContext::extract().await?;
-let mut cursor = state
-.db
-.mcp_servers()
-.find(doc! {})
-.await
-.map_err(|e| ServerFnError::new(e.to_string()))?;
-let client = reqwest::Client::builder()
-.timeout(std::time::Duration::from_secs(5))
-.build()
-.map_err(|e| ServerFnError::new(e.to_string()))?;
-while cursor
-.advance()
-.await
-.map_err(|e| ServerFnError::new(e.to_string()))?
-{
-let server: compliance_core::models::McpServerConfig = cursor
-.deserialize_current()
-.map_err(|e| ServerFnError::new(e.to_string()))?;
-let Some(oid) = server.id else { continue };
-// Derive health URL from the endpoint (replace trailing /mcp with /health)
-let health_url = if server.endpoint_url.ends_with("/mcp") {
-format!(
-"{}health",
-&server.endpoint_url[..server.endpoint_url.len() - 3]
-)
-} else {
-format!("{}/health", server.endpoint_url.trim_end_matches('/'))
-};
-let new_status = match client.get(&health_url).send().await {
-Ok(resp) if resp.status().is_success() => McpServerStatus::Running,
-_ => McpServerStatus::Stopped,
-};
-let status_bson = match bson::to_bson(&new_status) {
-Ok(b) => b,
-Err(_) => continue,
-};
-let _ = state
-.db
-.mcp_servers()
-.update_one(
-doc! { "_id": oid },
-doc! { "$set": { "status": status_bson, "updated_at": Utc::now().to_rfc3339() } },
-)
-.await;
-}
-Ok(())
-}
#[server]
pub async fn delete_mcp_server(server_id: String) -> Result<(), ServerFnError> {
use mongodb::bson::doc;
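
The health-URL rule inside the removed probe is worth seeing on its own. A minimal sketch, extracting the derivation as a pure function (the standalone function name and form are ours):

// If the endpoint ends in "/mcp", swap that segment for "health",
// reusing the existing slash; otherwise append "/health".
fn derive_health_url(endpoint_url: &str) -> String {
    if endpoint_url.ends_with("/mcp") {
        format!("{}health", &endpoint_url[..endpoint_url.len() - 3])
    } else {
        format!("{}/health", endpoint_url.trim_end_matches('/'))
    }
}

fn main() {
    assert_eq!(derive_health_url("https://host:8080/mcp"), "https://host:8080/health");
    assert_eq!(derive_health_url("https://host:8080/"), "https://host:8080/health");
}

This is the endpoint the probe polled with a 5-second reqwest timeout before flipping each server between Running and Stopped.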

View File

@@ -5,7 +5,7 @@ use dioxus_free_icons::Icon;
use crate::components::page_header::PageHeader;
use crate::components::toast::{ToastType, Toasts};
use crate::infrastructure::mcp::{
-add_mcp_server, delete_mcp_server, fetch_mcp_servers, refresh_mcp_status, regenerate_mcp_token,
+add_mcp_server, delete_mcp_server, fetch_mcp_servers, regenerate_mcp_token,
};
#[component]
@@ -22,17 +22,6 @@ pub fn McpServersPage() -> Element {
let mut new_mongo_uri = use_signal(String::new);
let mut new_mongo_db = use_signal(String::new);
-// Probe health of all MCP servers on page load, then refresh the list
-let mut refreshing = use_signal(|| true);
-use_effect(move || {
-spawn(async move {
-refreshing.set(true);
-let _ = refresh_mcp_status().await;
-servers.restart();
-refreshing.set(false);
-});
-});
// Track which server's token is visible
let mut visible_token: Signal<Option<String>> = use_signal(|| None);
// Track which server is pending delete confirmation

View File

@@ -41,9 +41,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
StreamableHttpServerConfig::default(),
);
-let router = axum::Router::new()
-.route("/health", axum::routing::get(|| async { "ok" }))
-.nest_service("/mcp", service);
+let router = axum::Router::new().nest_service("/mcp", service);
let listener = tokio::net::TcpListener::bind(("0.0.0.0", port)).await?;
tracing::info!("MCP HTTP server listening on 0.0.0.0:{port}");
axum::serve(listener, router).await?;