Some checks failed
CI / Format (push) Failing after 4s
CI / Format (pull_request) Failing after 4s
CI / Clippy (pull_request) Failing after 1m41s
CI / Security Audit (pull_request) Has been skipped
CI / Tests (pull_request) Has been skipped
CI / Clippy (push) Failing after 1m46s
CI / Security Audit (push) Has been skipped
CI / Tests (push) Has been skipped
CI / Detect Changes (push) Has been skipped
CI / Detect Changes (pull_request) Has been skipped
CI / Deploy Agent (push) Has been skipped
CI / Deploy Dashboard (push) Has been skipped
CI / Deploy Docs (push) Has been skipped
CI / Deploy MCP (push) Has been skipped
CI / Deploy Agent (pull_request) Has been skipped
CI / Deploy Dashboard (pull_request) Has been skipped
CI / Deploy Docs (pull_request) Has been skipped
CI / Deploy MCP (pull_request) Has been skipped
Split large files into focused modules across all crates while maintaining API compatibility via re-exports. Add comprehensive unit tests covering core models, pipeline parsers, LLM triage, DAST security tools, graph algorithms, and MCP parameter validation. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
215 lines
6.8 KiB
Rust
215 lines
6.8 KiB
Rust
use mongodb::bson::doc;
|
|
use rmcp::{model::*, ErrorData as McpError};
|
|
use schemars::JsonSchema;
|
|
use serde::Deserialize;
|
|
|
|
use crate::database::Database;
|
|
|
|
/// Hard ceiling on how many documents a single query may return.
const MAX_LIMIT: i64 = 200;

/// Page size applied when the caller does not supply a limit.
const DEFAULT_LIMIT: i64 = 50;

/// Normalize an optional caller-supplied limit into the range
/// `[1, MAX_LIMIT]`, falling back to `DEFAULT_LIMIT` when absent.
fn cap_limit(limit: Option<i64>) -> i64 {
    match limit {
        None => DEFAULT_LIMIT,
        Some(n) if n < 1 => 1,
        Some(n) if n > MAX_LIMIT => MAX_LIMIT,
        Some(n) => n,
    }
}
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn cap_limit_default() {
        // No limit supplied: the default page size applies.
        assert_eq!(cap_limit(None), DEFAULT_LIMIT);
    }

    #[test]
    fn cap_limit_clamps_high() {
        // Requests above the ceiling are capped.
        assert_eq!(cap_limit(Some(300)), MAX_LIMIT);
    }

    #[test]
    fn cap_limit_clamps_low() {
        // Zero (and below) is raised to the minimum of 1.
        assert_eq!(cap_limit(Some(0)), 1);
    }

    #[test]
    fn list_dast_findings_params_deserialize() {
        let raw = r#"{
            "target_id": "t1",
            "scan_run_id": "sr1",
            "severity": "critical",
            "exploitable": true,
            "vuln_type": "sql_injection",
            "limit": 10
        }"#;
        let params: ListDastFindingsParams = serde_json::from_str(raw).unwrap();
        assert_eq!(params.target_id.as_deref(), Some("t1"));
        assert_eq!(params.scan_run_id.as_deref(), Some("sr1"));
        assert_eq!(params.severity.as_deref(), Some("critical"));
        assert_eq!(params.exploitable, Some(true));
        assert_eq!(params.vuln_type.as_deref(), Some("sql_injection"));
        assert_eq!(params.limit, Some(10));
    }

    #[test]
    fn list_dast_findings_params_all_optional() {
        // An empty object must deserialize with every field unset.
        let params: ListDastFindingsParams = serde_json::from_str("{}").unwrap();
        assert!(params.target_id.is_none());
        assert!(params.scan_run_id.is_none());
        assert!(params.severity.is_none());
        assert!(params.exploitable.is_none());
        assert!(params.vuln_type.is_none());
        assert!(params.limit.is_none());
    }

    #[test]
    fn dast_scan_summary_params_deserialize() {
        let with_target: DastScanSummaryParams =
            serde_json::from_str(r#"{ "target_id": "abc" }"#).unwrap();
        assert_eq!(with_target.target_id.as_deref(), Some("abc"));

        let without_target: DastScanSummaryParams = serde_json::from_str("{}").unwrap();
        assert!(without_target.target_id.is_none());
    }
}
|
|
|
|
#[derive(Debug, Deserialize, JsonSchema)]
|
|
pub struct ListDastFindingsParams {
|
|
/// Filter by DAST target ID
|
|
pub target_id: Option<String>,
|
|
/// Filter by scan run ID
|
|
pub scan_run_id: Option<String>,
|
|
/// Filter by severity: info, low, medium, high, critical
|
|
pub severity: Option<String>,
|
|
/// Only show confirmed exploitable findings
|
|
pub exploitable: Option<bool>,
|
|
/// Filter by vulnerability type (e.g. sql_injection, xss, ssrf)
|
|
pub vuln_type: Option<String>,
|
|
/// Maximum number of results (default 50, max 200)
|
|
pub limit: Option<i64>,
|
|
}
|
|
|
|
pub async fn list_dast_findings(
|
|
db: &Database,
|
|
params: ListDastFindingsParams,
|
|
) -> Result<CallToolResult, McpError> {
|
|
let mut filter = doc! {};
|
|
if let Some(ref target_id) = params.target_id {
|
|
filter.insert("target_id", target_id);
|
|
}
|
|
if let Some(ref scan_run_id) = params.scan_run_id {
|
|
filter.insert("scan_run_id", scan_run_id);
|
|
}
|
|
if let Some(ref severity) = params.severity {
|
|
filter.insert("severity", severity);
|
|
}
|
|
if let Some(exploitable) = params.exploitable {
|
|
filter.insert("exploitable", exploitable);
|
|
}
|
|
if let Some(ref vuln_type) = params.vuln_type {
|
|
filter.insert("vuln_type", vuln_type);
|
|
}
|
|
|
|
let limit = cap_limit(params.limit);
|
|
|
|
let mut cursor = db
|
|
.dast_findings()
|
|
.find(filter)
|
|
.sort(doc! { "created_at": -1 })
|
|
.limit(limit)
|
|
.await
|
|
.map_err(|e| McpError::internal_error(format!("DB error: {e}"), None))?;
|
|
|
|
let mut results = Vec::new();
|
|
while cursor
|
|
.advance()
|
|
.await
|
|
.map_err(|e| McpError::internal_error(format!("cursor error: {e}"), None))?
|
|
{
|
|
let finding = cursor
|
|
.deserialize_current()
|
|
.map_err(|e| McpError::internal_error(format!("deserialize error: {e}"), None))?;
|
|
results.push(finding);
|
|
}
|
|
|
|
let json = serde_json::to_string_pretty(&results)
|
|
.map_err(|e| McpError::internal_error(format!("json error: {e}"), None))?;
|
|
|
|
Ok(CallToolResult::success(vec![Content::text(json)]))
|
|
}
|
|
|
|
#[derive(Debug, Deserialize, JsonSchema)]
|
|
pub struct DastScanSummaryParams {
|
|
/// Filter by DAST target ID
|
|
pub target_id: Option<String>,
|
|
}
|
|
|
|
pub async fn dast_scan_summary(
|
|
db: &Database,
|
|
params: DastScanSummaryParams,
|
|
) -> Result<CallToolResult, McpError> {
|
|
let mut filter = doc! {};
|
|
if let Some(ref target_id) = params.target_id {
|
|
filter.insert("target_id", target_id);
|
|
}
|
|
|
|
// Get recent scan runs
|
|
let mut cursor = db
|
|
.dast_scan_runs()
|
|
.find(filter.clone())
|
|
.sort(doc! { "started_at": -1 })
|
|
.limit(10)
|
|
.await
|
|
.map_err(|e| McpError::internal_error(format!("DB error: {e}"), None))?;
|
|
|
|
let mut scan_runs = Vec::new();
|
|
while cursor
|
|
.advance()
|
|
.await
|
|
.map_err(|e| McpError::internal_error(format!("cursor error: {e}"), None))?
|
|
{
|
|
let run = cursor
|
|
.deserialize_current()
|
|
.map_err(|e| McpError::internal_error(format!("deserialize error: {e}"), None))?;
|
|
scan_runs.push(serde_json::json!({
|
|
"id": run.id.map(|id| id.to_hex()),
|
|
"target_id": run.target_id,
|
|
"status": run.status,
|
|
"findings_count": run.findings_count,
|
|
"exploitable_count": run.exploitable_count,
|
|
"endpoints_discovered": run.endpoints_discovered,
|
|
"started_at": run.started_at.to_rfc3339(),
|
|
"completed_at": run.completed_at.map(|t| t.to_rfc3339()),
|
|
}));
|
|
}
|
|
|
|
// Count findings by severity
|
|
let mut findings_filter = doc! {};
|
|
if let Some(ref target_id) = params.target_id {
|
|
findings_filter.insert("target_id", target_id);
|
|
}
|
|
let total_findings = db
|
|
.dast_findings()
|
|
.count_documents(findings_filter.clone())
|
|
.await
|
|
.map_err(|e| McpError::internal_error(format!("DB error: {e}"), None))?;
|
|
|
|
let mut exploitable_filter = findings_filter.clone();
|
|
exploitable_filter.insert("exploitable", true);
|
|
let exploitable_count = db
|
|
.dast_findings()
|
|
.count_documents(exploitable_filter)
|
|
.await
|
|
.map_err(|e| McpError::internal_error(format!("DB error: {e}"), None))?;
|
|
|
|
let summary = serde_json::json!({
|
|
"total_findings": total_findings,
|
|
"exploitable_findings": exploitable_count,
|
|
"recent_scan_runs": scan_runs,
|
|
});
|
|
|
|
let json = serde_json::to_string_pretty(&summary)
|
|
.map_err(|e| McpError::internal_error(format!("json error: {e}"), None))?;
|
|
|
|
Ok(CallToolResult::success(vec![Content::text(json)]))
|
|
}
|