// Co-authored-by: Sharang Parnerkar <parnerkarsharang@gmail.com>
// Reviewed-on: #1
use chrono::Utc;
use compliance_core::error::CoreError;
use compliance_core::models::dast::{
    DastFinding, DastScanPhase, DastScanRun, DastScanStatus, DastTarget,
};
use compliance_core::traits::dast_agent::DastContext;
use tracing::{error, info};

use crate::crawler::WebCrawler;
use crate::recon::ReconAgent;
/// State machine orchestrator for DAST scanning
pub struct DastOrchestrator {
    // Shared HTTP client handed (via clone) to the recon agent, crawler,
    // and each vulnerability agent; reqwest `Client` clones share the
    // underlying connection pool, so cloning is cheap.
    http: reqwest::Client,
    // Delay between requests, in milliseconds; forwarded to the crawler.
    rate_limit_ms: u64,
}
impl DastOrchestrator {
|
|
pub fn new(rate_limit_ms: u64) -> Self {
|
|
Self {
|
|
http: reqwest::Client::new(),
|
|
rate_limit_ms,
|
|
}
|
|
}
|
|
|
|
/// Run a complete DAST scan against a target
|
|
pub async fn run_scan(
|
|
&self,
|
|
target: &DastTarget,
|
|
sast_hints: Vec<String>,
|
|
) -> Result<(DastScanRun, Vec<DastFinding>), CoreError> {
|
|
let target_id = target
|
|
.id
|
|
.map(|oid| oid.to_hex())
|
|
.unwrap_or_else(|| "unknown".to_string());
|
|
|
|
let mut scan_run = DastScanRun::new(target_id);
|
|
let mut all_findings = Vec::new();
|
|
|
|
info!(target = %target.base_url, "Starting DAST scan");
|
|
|
|
// Phase 1: Reconnaissance
|
|
scan_run.current_phase = DastScanPhase::Reconnaissance;
|
|
let recon = ReconAgent::new(self.http.clone());
|
|
let recon_result = match recon.scan(&target.base_url).await {
|
|
Ok(r) => r,
|
|
Err(e) => {
|
|
error!(error = %e, "Reconnaissance failed");
|
|
scan_run.status = DastScanStatus::Failed;
|
|
scan_run.error_message = Some(format!("Reconnaissance failed: {e}"));
|
|
scan_run.completed_at = Some(Utc::now());
|
|
return Ok((scan_run, all_findings));
|
|
}
|
|
};
|
|
scan_run
|
|
.phases_completed
|
|
.push(DastScanPhase::Reconnaissance);
|
|
|
|
info!(
|
|
technologies = ?recon_result.technologies,
|
|
headers = recon_result.interesting_headers.len(),
|
|
"Reconnaissance complete"
|
|
);
|
|
|
|
// Phase 2: Crawling
|
|
scan_run.current_phase = DastScanPhase::Crawling;
|
|
let crawler = WebCrawler::new(
|
|
self.http.clone(),
|
|
target.max_crawl_depth,
|
|
self.rate_limit_ms,
|
|
);
|
|
let endpoints = match crawler
|
|
.crawl(&target.base_url, &target.excluded_paths)
|
|
.await
|
|
{
|
|
Ok(e) => e,
|
|
Err(e) => {
|
|
error!(error = %e, "Crawling failed");
|
|
scan_run.status = DastScanStatus::Failed;
|
|
scan_run.error_message = Some(format!("Crawling failed: {e}"));
|
|
scan_run.completed_at = Some(Utc::now());
|
|
return Ok((scan_run, all_findings));
|
|
}
|
|
};
|
|
scan_run.endpoints_discovered = endpoints.len() as u32;
|
|
scan_run.phases_completed.push(DastScanPhase::Crawling);
|
|
|
|
info!(endpoints = endpoints.len(), "Crawling complete");
|
|
|
|
// Build context for vulnerability agents
|
|
let context = DastContext {
|
|
endpoints,
|
|
technologies: recon_result.technologies,
|
|
sast_hints,
|
|
};
|
|
|
|
// Phase 3: Vulnerability Analysis
|
|
scan_run.current_phase = DastScanPhase::VulnerabilityAnalysis;
|
|
let vuln_findings = self.run_vulnerability_agents(target, &context).await?;
|
|
all_findings.extend(vuln_findings);
|
|
scan_run
|
|
.phases_completed
|
|
.push(DastScanPhase::VulnerabilityAnalysis);
|
|
|
|
// Phase 4: Exploitation (verify findings)
|
|
scan_run.current_phase = DastScanPhase::Exploitation;
|
|
// Exploitation is handled within each agent's evidence collection
|
|
scan_run.phases_completed.push(DastScanPhase::Exploitation);
|
|
|
|
// Phase 5: Reporting
|
|
scan_run.current_phase = DastScanPhase::Reporting;
|
|
scan_run.findings_count = all_findings.len() as u32;
|
|
scan_run.exploitable_count = all_findings.iter().filter(|f| f.exploitable).count() as u32;
|
|
scan_run.phases_completed.push(DastScanPhase::Reporting);
|
|
|
|
scan_run.status = DastScanStatus::Completed;
|
|
scan_run.current_phase = DastScanPhase::Completed;
|
|
scan_run.completed_at = Some(Utc::now());
|
|
|
|
info!(
|
|
findings = scan_run.findings_count,
|
|
exploitable = scan_run.exploitable_count,
|
|
"DAST scan complete"
|
|
);
|
|
|
|
Ok((scan_run, all_findings))
|
|
}
|
|
|
|
/// Run all vulnerability testing agents in parallel
|
|
async fn run_vulnerability_agents(
|
|
&self,
|
|
target: &DastTarget,
|
|
context: &DastContext,
|
|
) -> Result<Vec<DastFinding>, CoreError> {
|
|
use compliance_core::traits::DastAgent;
|
|
|
|
let http = self.http.clone();
|
|
|
|
// Spawn each agent as a separate tokio task
|
|
let t1 = target.clone();
|
|
let c1 = context.clone();
|
|
let h1 = http.clone();
|
|
let sqli_handle = tokio::spawn(async move {
|
|
crate::agents::injection::SqlInjectionAgent::new(h1)
|
|
.run(&t1, &c1)
|
|
.await
|
|
});
|
|
|
|
let t2 = target.clone();
|
|
let c2 = context.clone();
|
|
let h2 = http.clone();
|
|
let xss_handle =
|
|
tokio::spawn(async move { crate::agents::xss::XssAgent::new(h2).run(&t2, &c2).await });
|
|
|
|
let t3 = target.clone();
|
|
let c3 = context.clone();
|
|
let h3 = http.clone();
|
|
let auth_handle = tokio::spawn(async move {
|
|
crate::agents::auth_bypass::AuthBypassAgent::new(h3)
|
|
.run(&t3, &c3)
|
|
.await
|
|
});
|
|
|
|
let t4 = target.clone();
|
|
let c4 = context.clone();
|
|
let h4 = http.clone();
|
|
let ssrf_handle =
|
|
tokio::spawn(
|
|
async move { crate::agents::ssrf::SsrfAgent::new(h4).run(&t4, &c4).await },
|
|
);
|
|
|
|
let t5 = target.clone();
|
|
let c5 = context.clone();
|
|
let h5 = http;
|
|
let api_handle = tokio::spawn(async move {
|
|
crate::agents::api_fuzzer::ApiFuzzerAgent::new(h5)
|
|
.run(&t5, &c5)
|
|
.await
|
|
});
|
|
|
|
let handles: Vec<tokio::task::JoinHandle<Result<Vec<DastFinding>, CoreError>>> = vec![
|
|
sqli_handle,
|
|
xss_handle,
|
|
auth_handle,
|
|
ssrf_handle,
|
|
api_handle,
|
|
];
|
|
|
|
let mut all_findings = Vec::new();
|
|
for handle in handles {
|
|
match handle.await {
|
|
Ok(Ok(findings)) => all_findings.extend(findings),
|
|
Ok(Err(e)) => {
|
|
error!(error = %e, "Agent failed");
|
|
}
|
|
Err(e) => {
|
|
error!(error = %e, "Agent task panicked");
|
|
}
|
|
}
|
|
}
|
|
|
|
Ok(all_findings)
|
|
}
|
|
}
|