feat: pure Dioxus attack chain visualization, PDF report redesign, and orchestrator data fixes
Some checks failed
CI / Format (push) Has been cancelled
CI / Deploy Docs (push) Has been cancelled
CI / Tests (push) Has been cancelled
CI / Detect Changes (push) Has been cancelled
CI / Deploy Agent (push) Has been cancelled
CI / Deploy Dashboard (push) Has been cancelled
CI / Deploy MCP (push) Has been cancelled
CI / Clippy (push) Has been cancelled
CI / Security Audit (push) Has been cancelled
CI / Format (pull_request) Has been cancelled
CI / Clippy (pull_request) Has been cancelled
CI / Security Audit (pull_request) Has been cancelled
CI / Tests (pull_request) Has been cancelled
CI / Detect Changes (pull_request) Has been cancelled
CI / Deploy Agent (pull_request) Has been cancelled
CI / Deploy Dashboard (pull_request) Has been cancelled
CI / Deploy Docs (pull_request) Has been cancelled
CI / Deploy MCP (pull_request) Has been cancelled

- Replace vis-network JS graph with pure RSX attack chain component
  featuring KPI header, phase rail, expandable accordion with tool
  category chips, risk scores, and findings pills
- Redesign pentest report as professional PDF-first document with
  cover page, table of contents, severity bar chart, phased attack
  chain timeline, and print-friendly light theme
- Fix orchestrator to populate findings_produced, risk_score, and
  llm_reasoning on attack chain nodes
- Capture LLM reasoning text alongside tool calls in LlmResponse enum
- Add session-level KPI fallback for older pentest data
- Remove attack-chain-viz.js and prototype files
- Add encrypted ZIP report export endpoint with password protection

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Sharang Parnerkar
2026-03-12 15:21:20 +01:00
parent 1e91277040
commit 9f495e5215
19 changed files with 3693 additions and 1164 deletions

View File

@@ -36,3 +36,4 @@ base64 = "0.22"
urlencoding = "2"
futures-util = "0.3"
jsonwebtoken = "9"
zip = { workspace = true }

View File

@@ -361,6 +361,59 @@ pub async fn session_stream(
Ok(Sse::new(stream::iter(events)))
}
/// POST /api/v1/pentest/sessions/:id/stop — Stop a running pentest session
///
/// Marks the session as `failed` with an explanatory `error_message` and
/// returns the updated document.
///
/// # Errors
/// - 400 for a malformed session id, or when the session is not running
/// - 404 when no session with the given id exists
/// - 409 when the session changed state concurrently before the stop applied
/// - 500 on database errors
#[tracing::instrument(skip_all, fields(session_id = %id))]
pub async fn stop_session(
    Extension(agent): AgentExt,
    Path(id): Path<String>,
) -> Result<Json<ApiResponse<PentestSession>>, (StatusCode, String)> {
    let oid = mongodb::bson::oid::ObjectId::parse_str(&id)
        .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid session ID".to_string()))?;

    // Pre-check so the error message can report the session's actual status.
    let session = agent
        .db
        .pentest_sessions()
        .find_one(doc! { "_id": oid })
        .await
        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Database error: {e}")))?
        .ok_or_else(|| (StatusCode::NOT_FOUND, "Session not found".to_string()))?;

    if session.status != PentestStatus::Running {
        return Err((
            StatusCode::BAD_REQUEST,
            format!("Session is {}, not running", session.status),
        ));
    }

    // Update and fetch in one atomic round trip instead of update_one + a
    // second find_one. The `status: "running"` guard (statuses are stored as
    // lowercase strings, as the "failed" write below shows) closes the
    // check-then-update race: if another writer completed or stopped the
    // session after the pre-check, the filter matches nothing and we report
    // the conflict instead of clobbering the final state.
    let updated = agent
        .db
        .pentest_sessions()
        .find_one_and_update(
            doc! { "_id": oid, "status": "running" },
            doc! { "$set": {
                "status": "failed",
                "completed_at": mongodb::bson::DateTime::now(),
                "error_message": "Stopped by user",
            }},
        )
        .return_document(mongodb::options::ReturnDocument::After)
        .await
        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Database error: {e}")))?
        .ok_or_else(|| {
            (
                StatusCode::CONFLICT,
                "Session changed state before it could be stopped".to_string(),
            )
        })?;

    Ok(Json(ApiResponse {
        data: updated,
        total: None,
        page: None,
    }))
}
/// GET /api/v1/pentest/sessions/:id/attack-chain — Get attack chain nodes for a session
#[tracing::instrument(skip_all, fields(session_id = %id))]
pub async fn get_attack_chain(
@@ -556,50 +609,62 @@ pub async fn get_session_findings(
}
#[derive(Deserialize)]
pub struct ExportParams {
#[serde(default = "default_export_format")]
pub format: String,
pub struct ExportBody {
pub password: String,
/// Requester display name (from auth)
#[serde(default)]
pub requester_name: String,
/// Requester email (from auth)
#[serde(default)]
pub requester_email: String,
}
fn default_export_format() -> String {
"json".to_string()
}
/// GET /api/v1/pentest/sessions/:id/export?format=json|markdown — Export a session report
/// POST /api/v1/pentest/sessions/:id/export — Export an encrypted pentest report archive
#[tracing::instrument(skip_all, fields(session_id = %id))]
pub async fn export_session_report(
Extension(agent): AgentExt,
Path(id): Path<String>,
Query(params): Query<ExportParams>,
Json(body): Json<ExportBody>,
) -> Result<axum::response::Response, (StatusCode, String)> {
let oid = mongodb::bson::oid::ObjectId::parse_str(&id)
.map_err(|_| (StatusCode::BAD_REQUEST, "Invalid session ID".to_string()))?;
if body.password.len() < 8 {
return Err((
StatusCode::BAD_REQUEST,
"Password must be at least 8 characters".to_string(),
));
}
// Fetch session
let session = agent
.db
.pentest_sessions()
.find_one(doc! { "_id": oid })
.await
.map_err(|e| {
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("Database error: {e}"),
)
})?
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Database error: {e}")))?
.ok_or_else(|| (StatusCode::NOT_FOUND, "Session not found".to_string()))?;
// Fetch messages
let messages: Vec<PentestMessage> = match agent
.db
.pentest_messages()
.find(doc! { "session_id": &id })
.sort(doc! { "created_at": 1 })
.await
{
Ok(cursor) => collect_cursor_async(cursor).await,
Err(_) => Vec::new(),
// Resolve target name
let target = if let Ok(tid) = mongodb::bson::oid::ObjectId::parse_str(&session.target_id) {
agent
.db
.dast_targets()
.find_one(doc! { "_id": tid })
.await
.ok()
.flatten()
} else {
None
};
let target_name = target
.as_ref()
.map(|t| t.name.clone())
.unwrap_or_else(|| "Unknown Target".to_string());
let target_url = target
.as_ref()
.map(|t| t.base_url.clone())
.unwrap_or_default();
// Fetch attack chain nodes
let nodes: Vec<AttackChainNode> = match agent
@@ -618,155 +683,35 @@ pub async fn export_session_report(
.db
.dast_findings()
.find(doc! { "session_id": &id })
.sort(doc! { "created_at": -1 })
.sort(doc! { "severity": -1, "created_at": -1 })
.await
{
Ok(cursor) => collect_cursor_async(cursor).await,
Err(_) => Vec::new(),
};
// Compute severity counts
let critical = findings.iter().filter(|f| f.severity.to_string() == "critical").count();
let high = findings.iter().filter(|f| f.severity.to_string() == "high").count();
let medium = findings.iter().filter(|f| f.severity.to_string() == "medium").count();
let low = findings.iter().filter(|f| f.severity.to_string() == "low").count();
let info = findings.iter().filter(|f| f.severity.to_string() == "info").count();
let ctx = crate::pentest::report::ReportContext {
session,
target_name,
target_url,
findings,
attack_chain: nodes,
requester_name: if body.requester_name.is_empty() {
"Unknown".to_string()
} else {
body.requester_name
},
requester_email: body.requester_email,
};
match params.format.as_str() {
"markdown" => {
let mut md = String::new();
md.push_str("# Penetration Test Report\n\n");
let report = crate::pentest::generate_encrypted_report(&ctx, &body.password)
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e))?;
// Executive summary
md.push_str("## Executive Summary\n\n");
md.push_str(&format!("| Field | Value |\n"));
md.push_str("| --- | --- |\n");
md.push_str(&format!("| **Session ID** | {} |\n", id));
md.push_str(&format!("| **Status** | {} |\n", session.status));
md.push_str(&format!("| **Strategy** | {} |\n", session.strategy));
md.push_str(&format!("| **Target ID** | {} |\n", session.target_id));
md.push_str(&format!(
"| **Started** | {} |\n",
session.started_at.to_rfc3339()
));
if let Some(ref completed) = session.completed_at {
md.push_str(&format!(
"| **Completed** | {} |\n",
completed.to_rfc3339()
));
}
md.push_str(&format!(
"| **Tool Invocations** | {} |\n",
session.tool_invocations
));
md.push_str(&format!(
"| **Success Rate** | {:.1}% |\n",
session.success_rate()
));
md.push('\n');
let response = serde_json::json!({
"archive_base64": base64::Engine::encode(&base64::engine::general_purpose::STANDARD, &report.archive),
"sha256": report.sha256,
"filename": format!("pentest-report-{id}.zip"),
});
// Findings by severity
md.push_str("## Findings Summary\n\n");
md.push_str(&format!(
"| Severity | Count |\n| --- | --- |\n| Critical | {} |\n| High | {} |\n| Medium | {} |\n| Low | {} |\n| Info | {} |\n| **Total** | **{}** |\n\n",
critical, high, medium, low, info, findings.len()
));
// Findings table
if !findings.is_empty() {
md.push_str("## Findings Detail\n\n");
md.push_str("| # | Severity | Title | Endpoint | Exploitable |\n");
md.push_str("| --- | --- | --- | --- | --- |\n");
for (i, f) in findings.iter().enumerate() {
md.push_str(&format!(
"| {} | {} | {} | {} {} | {} |\n",
i + 1,
f.severity,
f.title,
f.method,
f.endpoint,
if f.exploitable { "Yes" } else { "No" },
));
}
md.push('\n');
}
// Attack chain timeline
if !nodes.is_empty() {
md.push_str("## Attack Chain Timeline\n\n");
md.push_str("| # | Tool | Status | Findings | Reasoning |\n");
md.push_str("| --- | --- | --- | --- | --- |\n");
for (i, node) in nodes.iter().enumerate() {
let reasoning_short = if node.llm_reasoning.len() > 80 {
format!("{}...", &node.llm_reasoning[..80])
} else {
node.llm_reasoning.clone()
};
md.push_str(&format!(
"| {} | {} | {} | {} | {} |\n",
i + 1,
node.tool_name,
format!("{:?}", node.status).to_lowercase(),
node.findings_produced.len(),
reasoning_short,
));
}
md.push('\n');
}
// Statistics
md.push_str("## Statistics\n\n");
md.push_str(&format!("- **Total Findings:** {}\n", findings.len()));
md.push_str(&format!("- **Exploitable Findings:** {}\n", session.exploitable_count));
md.push_str(&format!("- **Attack Chain Steps:** {}\n", nodes.len()));
md.push_str(&format!("- **Messages Exchanged:** {}\n", messages.len()));
md.push_str(&format!("- **Tool Invocations:** {}\n", session.tool_invocations));
md.push_str(&format!("- **Tool Success Rate:** {:.1}%\n", session.success_rate()));
Ok((
StatusCode::OK,
[
(axum::http::header::CONTENT_TYPE, "text/markdown; charset=utf-8"),
],
md,
)
.into_response())
}
_ => {
// JSON format
let report = serde_json::json!({
"session": {
"id": id,
"target_id": session.target_id,
"repo_id": session.repo_id,
"status": session.status,
"strategy": session.strategy,
"started_at": session.started_at.to_rfc3339(),
"completed_at": session.completed_at.map(|d| d.to_rfc3339()),
"tool_invocations": session.tool_invocations,
"tool_successes": session.tool_successes,
"success_rate": session.success_rate(),
"findings_count": session.findings_count,
"exploitable_count": session.exploitable_count,
},
"findings": findings,
"attack_chain": nodes,
"messages": messages,
"summary": {
"total_findings": findings.len(),
"severity_distribution": {
"critical": critical,
"high": high,
"medium": medium,
"low": low,
"info": info,
},
"attack_chain_steps": nodes.len(),
"messages_exchanged": messages.len(),
},
});
Ok(Json(report).into_response())
}
}
Ok(Json(response).into_response())
}

View File

@@ -112,6 +112,10 @@ pub fn build_router() -> Router {
"/api/v1/pentest/sessions/{id}/chat",
post(handlers::pentest::send_message),
)
.route(
"/api/v1/pentest/sessions/{id}/stop",
post(handlers::pentest::stop_session),
)
.route(
"/api/v1/pentest/sessions/{id}/stream",
get(handlers::pentest::session_stream),
@@ -130,7 +134,7 @@ pub fn build_router() -> Router {
)
.route(
"/api/v1/pentest/sessions/{id}/export",
get(handlers::pentest::export_session_report),
post(handlers::pentest::export_session_report),
)
.route("/api/v1/pentest/stats", get(handlers::pentest::pentest_stats))
// Webhook endpoints (proxied through dashboard)

View File

@@ -117,7 +117,8 @@ pub struct ToolCallRequestFunction {
#[derive(Debug, Clone)]
pub enum LlmResponse {
Content(String),
ToolCalls(Vec<LlmToolCall>),
/// Tool calls with optional reasoning text from the LLM
ToolCalls { calls: Vec<LlmToolCall>, reasoning: String },
}
// ── Embedding types ────────────────────────────────────────────
@@ -210,7 +211,7 @@ impl LlmClient {
self.send_chat_request(&request_body).await.map(|resp| {
match resp {
LlmResponse::Content(c) => c,
LlmResponse::ToolCalls(_) => String::new(), // shouldn't happen without tools
LlmResponse::ToolCalls { .. } => String::new(), // shouldn't happen without tools
}
})
}
@@ -243,7 +244,7 @@ impl LlmClient {
self.send_chat_request(&request_body).await.map(|resp| {
match resp {
LlmResponse::Content(c) => c,
LlmResponse::ToolCalls(_) => String::new(),
LlmResponse::ToolCalls { .. } => String::new(),
}
})
}
@@ -337,7 +338,9 @@ impl LlmClient {
}
})
.collect();
return Ok(LlmResponse::ToolCalls(calls));
// Capture any reasoning text the LLM included alongside tool calls
let reasoning = choice.message.content.clone().unwrap_or_default();
return Ok(LlmResponse::ToolCalls { calls, reasoning });
}
}

View File

@@ -1,3 +1,5 @@
pub mod orchestrator;
pub mod report;
pub use orchestrator::PentestOrchestrator;
pub use report::generate_encrypted_report;

View File

@@ -213,7 +213,7 @@ impl PentestOrchestrator {
}
break;
}
LlmResponse::ToolCalls(tool_calls) => {
LlmResponse::ToolCalls { calls: tool_calls, reasoning } => {
let tc_requests: Vec<ToolCallRequest> = tool_calls
.iter()
.map(|tc| ToolCallRequest {
@@ -229,7 +229,7 @@ impl PentestOrchestrator {
messages.push(ChatMessage {
role: "assistant".to_string(),
content: None,
content: if reasoning.is_empty() { None } else { Some(reasoning.clone()) },
tool_calls: Some(tc_requests),
tool_call_id: None,
});
@@ -245,7 +245,7 @@ impl PentestOrchestrator {
node_id.clone(),
tc.name.clone(),
tc.arguments.clone(),
String::new(),
reasoning.clone(),
);
// Link to previous iteration's nodes
node.parent_node_ids = prev_node_ids.clone();
@@ -267,11 +267,15 @@ impl PentestOrchestrator {
let findings_count = result.findings.len() as u32;
total_findings += findings_count;
let mut finding_ids: Vec<String> = Vec::new();
for mut finding in result.findings {
finding.scan_run_id = session_id.clone();
finding.session_id = Some(session_id.clone());
let _ =
let insert_result =
self.db.dast_findings().insert_one(&finding).await;
if let Ok(res) = &insert_result {
finding_ids.push(res.inserted_id.as_object_id().map(|oid| oid.to_hex()).unwrap_or_default());
}
let _ =
self.event_tx.send(PentestEvent::Finding {
finding_id: finding
@@ -283,12 +287,38 @@ impl PentestOrchestrator {
});
}
// Compute risk score based on findings severity
let risk_score: Option<u8> = if findings_count > 0 {
Some(std::cmp::min(
100,
(findings_count as u8).saturating_mul(15).saturating_add(20),
))
} else {
None
};
let _ = self.event_tx.send(PentestEvent::ToolComplete {
node_id: node_id.clone(),
summary: result.summary.clone(),
findings_count,
});
let finding_ids_bson: Vec<mongodb::bson::Bson> = finding_ids
.iter()
.map(|id| mongodb::bson::Bson::String(id.clone()))
.collect();
let mut update_doc = doc! {
"status": "completed",
"tool_output": mongodb::bson::to_bson(&result.data)
.unwrap_or(mongodb::bson::Bson::Null),
"completed_at": mongodb::bson::DateTime::now(),
"findings_produced": finding_ids_bson,
};
if let Some(rs) = risk_score {
update_doc.insert("risk_score", rs as i32);
}
let _ = self
.db
.attack_chain_nodes()
@@ -297,12 +327,7 @@ impl PentestOrchestrator {
"session_id": &session_id,
"node_id": &node_id,
},
doc! { "$set": {
"status": "completed",
"tool_output": mongodb::bson::to_bson(&result.data)
.unwrap_or(mongodb::bson::Bson::Null),
"completed_at": mongodb::bson::DateTime::now(),
}},
doc! { "$set": update_doc },
)
.await;

File diff suppressed because it is too large Load Diff