Compare commits
4 Commits
feat/refin
...
test/dummy
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a703577eda | ||
|
|
e371f32e2e | ||
|
|
c5a6f30be2 | ||
|
|
fe164daa7f |
6
Cargo.lock
generated
6
Cargo.lock
generated
@@ -4699,9 +4699,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "rustls-webpki"
|
||||
version = "0.103.10"
|
||||
version = "0.103.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef"
|
||||
checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53"
|
||||
dependencies = [
|
||||
"ring",
|
||||
"rustls-pki-types",
|
||||
@@ -5171,7 +5171,7 @@ version = "0.8.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c1c97747dbf44bb1ca44a561ece23508e99cb592e862f22222dcf42f51d1e451"
|
||||
dependencies = [
|
||||
"heck 0.4.1",
|
||||
"heck 0.5.0",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
|
||||
@@ -90,13 +90,10 @@ pub async fn chat(
|
||||
};
|
||||
|
||||
let system_prompt = format!(
|
||||
"You are a code assistant for this repository. Answer questions using the code context below.\n\n\
|
||||
Rules:\n\
|
||||
- Reference specific files, functions, and line numbers\n\
|
||||
- Show code snippets when they help explain the answer\n\
|
||||
- If the context is insufficient, say what's missing rather than guessing\n\
|
||||
- Be concise — lead with the answer, then explain if needed\n\
|
||||
- For security questions, note relevant CWEs and link to the finding if one exists\n\n\
|
||||
"You are an expert code assistant for a software repository. \
|
||||
Answer the user's question based on the code context below. \
|
||||
Reference specific files and functions when relevant. \
|
||||
If the context doesn't contain enough information, say so.\n\n\
|
||||
## Code Context\n\n{code_context}"
|
||||
);
|
||||
|
||||
|
||||
@@ -95,8 +95,8 @@ pub async fn export_session_report(
|
||||
Err(_) => Vec::new(),
|
||||
};
|
||||
|
||||
// Fetch DAST findings for this session, then deduplicate
|
||||
let raw_findings: Vec<DastFinding> = match agent
|
||||
// Fetch DAST findings for this session
|
||||
let findings: Vec<DastFinding> = match agent
|
||||
.db
|
||||
.dast_findings()
|
||||
.find(doc! { "session_id": &id })
|
||||
@@ -106,14 +106,6 @@ pub async fn export_session_report(
|
||||
Ok(cursor) => collect_cursor_async(cursor).await,
|
||||
Err(_) => Vec::new(),
|
||||
};
|
||||
let raw_count = raw_findings.len();
|
||||
let findings = crate::pipeline::dedup::dedup_dast_findings(raw_findings);
|
||||
if findings.len() < raw_count {
|
||||
tracing::info!(
|
||||
"Deduped DAST findings for session {id}: {raw_count} → {}",
|
||||
findings.len()
|
||||
);
|
||||
}
|
||||
|
||||
// Fetch SAST findings, SBOM, and code context for the linked repository
|
||||
let repo_id = session
|
||||
|
||||
@@ -5,20 +5,15 @@ use compliance_core::models::Finding;
|
||||
use crate::error::AgentError;
|
||||
use crate::llm::LlmClient;
|
||||
|
||||
const DESCRIPTION_SYSTEM_PROMPT: &str = r#"You are a security engineer writing a bug tracker issue for a developer to fix. Be direct and actionable — developers skim issue descriptions, so lead with what matters.
|
||||
const DESCRIPTION_SYSTEM_PROMPT: &str = r#"You are a security engineer writing issue descriptions for a bug tracker. Generate a clear, actionable issue body in Markdown format that includes:
|
||||
|
||||
Format in Markdown:
|
||||
1. **Summary**: 1-2 sentence overview
|
||||
2. **Evidence**: Code location, snippet, and what was detected
|
||||
3. **Impact**: What could happen if not fixed
|
||||
4. **Remediation**: Step-by-step fix instructions
|
||||
5. **References**: Relevant CWE/CVE links if applicable
|
||||
|
||||
1. **What**: 1 sentence — what's wrong and where (file:line)
|
||||
2. **Why it matters**: 1-2 sentences — concrete impact if not fixed. Avoid generic "could lead to" phrasing; describe the specific attack or failure scenario.
|
||||
3. **Fix**: The specific code change needed. Use a code block with the corrected code if possible. If the fix is configuration-based, show the exact config change.
|
||||
4. **References**: CWE/CVE link if applicable (one line, not a section)
|
||||
|
||||
Rules:
|
||||
- No filler paragraphs or background explanations
|
||||
- No restating the finding title in the body
|
||||
- Code blocks should show the FIX, not the vulnerable code (the developer can see that in the diff)
|
||||
- If the remediation is a one-liner, just say it — don't wrap it in a section header"#;
|
||||
Keep it concise and professional. Use code blocks for code snippets."#;
|
||||
|
||||
pub async fn generate_issue_description(
|
||||
llm: &Arc<LlmClient>,
|
||||
|
||||
@@ -5,24 +5,7 @@ use compliance_core::models::Finding;
|
||||
use crate::error::AgentError;
|
||||
use crate::llm::LlmClient;
|
||||
|
||||
const FIX_SYSTEM_PROMPT: &str = r#"You are a security engineer suggesting a code fix. Return ONLY the corrected code that replaces the vulnerable snippet — no explanations, no markdown fences, no before/after comparison.
|
||||
|
||||
Rules:
|
||||
- The fix must be a drop-in replacement for the vulnerable code
|
||||
- Preserve the original code's style, indentation, and naming conventions
|
||||
- Add at most one brief inline comment on the changed line explaining the security fix
|
||||
- If the fix requires importing a new module, include the import on a separate line prefixed with the language's comment syntax + "Add import: "
|
||||
- Do not refactor, rename variables, or "improve" unrelated code
|
||||
- If the vulnerability is a false positive and the code is actually safe, return the original code unchanged with a comment explaining why no fix is needed
|
||||
|
||||
Language-specific fix guidance:
|
||||
- Rust: use `?` for error propagation, prefer `SecretString` for secrets, use parameterized queries with `sqlx`/`diesel`
|
||||
- Python: use parameterized queries (never f-strings in SQL), use `secrets` module not `random`, use `subprocess.run([...])` list form, use `markupsafe.escape()` for HTML
|
||||
- Go: use `sql.Query` with `$1`/`?` placeholders, use `crypto/rand` not `math/rand`, use `html/template` not `text/template`, return errors don't panic
|
||||
- Java/Kotlin: use `PreparedStatement` with `?` params, use `SecureRandom`, use `Jsoup.clean()` for HTML sanitization, use `@Valid` for input validation
|
||||
- Ruby: use ActiveRecord parameterized finders, use `SecureRandom`, use `ERB::Util.html_escape`, use `strong_parameters`
|
||||
- PHP: use PDO prepared statements with `:param` or `?`, use `random_bytes()`/`random_int()`, use `htmlspecialchars()` with `ENT_QUOTES`, use `password_hash(PASSWORD_BCRYPT)`
|
||||
- C/C++: use `snprintf` not `sprintf`, use bounds-checked APIs, free resources in reverse allocation order, use `memset_s` for secret cleanup"#;
|
||||
const FIX_SYSTEM_PROMPT: &str = r#"You are a security engineer. Given a security finding with code context, suggest a concrete code fix. Return ONLY the fixed code snippet that can directly replace the vulnerable code. Include brief inline comments explaining the fix."#;
|
||||
|
||||
pub async fn suggest_fix(llm: &Arc<LlmClient>, finding: &Finding) -> Result<String, AgentError> {
|
||||
let user_prompt = format!(
|
||||
|
||||
@@ -1,138 +1,69 @@
|
||||
// System prompts for multi-pass LLM code review.
|
||||
// Each pass focuses on a different aspect to avoid overloading a single prompt.
|
||||
|
||||
pub const LOGIC_REVIEW_PROMPT: &str = r#"You are a senior software engineer reviewing a code diff. Report ONLY genuine logic bugs that would cause incorrect behavior at runtime.
|
||||
pub const LOGIC_REVIEW_PROMPT: &str = r#"You are a senior software engineer reviewing code changes. Focus ONLY on logic and correctness issues.
|
||||
|
||||
Report:
|
||||
- Off-by-one errors, wrong comparisons, missing edge cases that cause wrong results
|
||||
- Incorrect control flow that produces wrong output (not style preferences)
|
||||
- Actual race conditions with concrete shared-state mutation (not theoretical ones)
|
||||
- Resource leaks where cleanup is truly missing (not just "could be improved")
|
||||
- Wrong variable used (copy-paste errors) — must be provably wrong, not just suspicious
|
||||
- Swallowed errors that silently hide failures in a way that matters
|
||||
Look for:
|
||||
- Off-by-one errors, wrong comparisons, missing edge cases
|
||||
- Incorrect control flow (unreachable code, missing returns, wrong loop conditions)
|
||||
- Race conditions or concurrency bugs
|
||||
- Resource leaks (unclosed handles, missing cleanup)
|
||||
- Wrong variable used (copy-paste errors)
|
||||
- Incorrect error handling (swallowed errors, wrong error type)
|
||||
|
||||
Do NOT report:
|
||||
- Style, naming, formatting, documentation, or code organization preferences
|
||||
- Theoretical issues without a concrete triggering scenario
|
||||
- "Potential" problems that require assumptions not supported by the visible code
|
||||
- Complexity or function length — that's a separate review pass
|
||||
Ignore: style, naming, formatting, documentation, minor improvements.
|
||||
|
||||
Language-idiomatic patterns that are NOT bugs (do not flag these):
|
||||
- Rust: `||`/`&&` short-circuit evaluation, variable shadowing, `let` rebinding, `clone()`, `impl` blocks, `match` arms with guards, `?` operator chaining, `unsafe` blocks with safety comments
|
||||
- Python: duck typing, EAFP pattern (try/except vs check-first), `*args`/`**kwargs`, walrus operator `:=`, truthiness checks on containers, bare `except:` in top-level handlers
|
||||
- Go: multiple return values for errors, `if err != nil` patterns, goroutine + channel patterns, blank identifier `_`, named returns, `defer` for cleanup, `init()` functions
|
||||
- Java/Kotlin: checked exception patterns, method overloading, `Optional` vs null checks, Kotlin `?.` safe calls, `!!` non-null assertions in tests, `when` exhaustive matching, companion objects, `lateinit`
|
||||
- Ruby: monkey patching in libraries, method_missing, blocks/procs/lambdas, `rescue => e` patterns, `send`/`respond_to?` metaprogramming, `nil` checks via `&.` safe navigation
|
||||
- PHP: loose comparisons with `==` (only flag if `===` was clearly intended), `@` error suppression in legacy code, `isset()`/`empty()` patterns, magic methods (`__get`, `__call`), array functions as callbacks
|
||||
- C/C++: RAII patterns, move semantics, `const_cast`/`static_cast` in appropriate contexts, macro usage for platform compat, pointer arithmetic in low-level code, `goto` for cleanup in C
|
||||
|
||||
Severity guide:
|
||||
- high: Will cause incorrect behavior in normal usage
|
||||
- medium: Will cause incorrect behavior in edge cases
|
||||
- low: Minor correctness concern with limited blast radius
|
||||
|
||||
Prefer returning [] over reporting low-confidence guesses. A false positive wastes more developer time than a missed low-severity issue.
|
||||
|
||||
Respond with a JSON array (no markdown fences):
|
||||
For each issue found, respond with a JSON array:
|
||||
[{"title": "...", "description": "...", "severity": "high|medium|low", "file": "...", "line": N, "suggestion": "..."}]
|
||||
|
||||
If no issues found, respond with: []"#;
|
||||
|
||||
pub const SECURITY_REVIEW_PROMPT: &str = r#"You are a security engineer reviewing a code diff. Report ONLY exploitable security vulnerabilities with a realistic attack scenario.
|
||||
pub const SECURITY_REVIEW_PROMPT: &str = r#"You are a security engineer reviewing code changes. Focus ONLY on security vulnerabilities.
|
||||
|
||||
Report:
|
||||
- Injection vulnerabilities (SQL, command, XSS, template) where untrusted input reaches a sink
|
||||
- Authentication/authorization bypasses with a concrete exploit path
|
||||
- Sensitive data exposure: secrets in code, credentials in logs, PII leaks
|
||||
- Insecure cryptography: weak algorithms, predictable randomness, hardcoded keys
|
||||
- Path traversal, SSRF, open redirects — only where user input reaches the vulnerable API
|
||||
- Unsafe deserialization of untrusted data
|
||||
- Missing input validation at EXTERNAL trust boundaries (user input, API responses)
|
||||
Look for:
|
||||
- Injection vulnerabilities (SQL, command, XSS, template injection)
|
||||
- Authentication/authorization bypasses
|
||||
- Sensitive data exposure (logging secrets, hardcoded credentials)
|
||||
- Insecure cryptography (weak algorithms, predictable randomness)
|
||||
- Path traversal, SSRF, open redirects
|
||||
- Unsafe deserialization
|
||||
- Missing input validation at trust boundaries
|
||||
|
||||
Do NOT report:
|
||||
- Internal code that only handles trusted/validated data
|
||||
- Hash functions used for non-security purposes (dedup fingerprints, cache keys, content addressing)
|
||||
- Logging of non-sensitive operational data (finding titles, counts, performance metrics)
|
||||
- "Information disclosure" for data that is already public or user-facing
|
||||
- Code style, performance, or general quality issues
|
||||
- Missing validation on internal function parameters (trust the caller within the same module/crate/package)
|
||||
- Theoretical attacks that require preconditions not present in the code
|
||||
Ignore: code style, performance, general quality.
|
||||
|
||||
Language-specific patterns that are NOT vulnerabilities (do not flag these):
|
||||
- Python: `pickle` used on trusted internal data, `eval()`/`exec()` on hardcoded strings, `subprocess` with hardcoded commands, Django `mark_safe()` on static content, `assert` in non-security contexts
|
||||
- Go: `crypto/rand` is secure (don't confuse with `math/rand`), `sql.DB` with parameterized queries is safe, `http.ListenAndServe` without TLS in dev/internal, error strings in responses (Go convention)
|
||||
- Java/Kotlin: Spring Security annotations are sufficient auth checks, `@Transactional` provides atomicity, JPA parameterized queries are safe, Kotlin `require()`/`check()` are assertion patterns not vulnerabilities
|
||||
- Ruby: Rails `params.permit()` is input validation, `render html:` with `html_safe` on generated content, ActiveRecord parameterized finders are safe, Devise/Warden patterns for auth
|
||||
- PHP: PDO prepared statements are safe, Laravel Eloquent is parameterized, `htmlspecialchars()` is XSS mitigation, Symfony security voters are auth checks, `password_hash()`/`password_verify()` are correct bcrypt usage
|
||||
- C/C++: `strncpy`/`snprintf` are bounds-checked (vs `strcpy`/`sprintf`), smart pointers manage memory, RAII handles cleanup, `static_assert` is compile-time only, OpenSSL with proper context setup
|
||||
- Rust: `sha2`/`blake3` for fingerprinting is not "weak crypto", `unsafe` with documented invariants, `secrecy::SecretString` properly handles secrets
|
||||
|
||||
Severity guide:
|
||||
- critical: Remote code execution, auth bypass, or data breach with no preconditions
|
||||
- high: Exploitable vulnerability requiring minimal preconditions
|
||||
- medium: Vulnerability requiring specific conditions or limited impact
|
||||
|
||||
Prefer returning [] over reporting speculative vulnerabilities. Every false positive erodes trust in the scanner.
|
||||
|
||||
Respond with a JSON array (no markdown fences):
|
||||
For each issue found, respond with a JSON array:
|
||||
[{"title": "...", "description": "...", "severity": "critical|high|medium", "file": "...", "line": N, "cwe": "CWE-XXX", "suggestion": "..."}]
|
||||
|
||||
If no issues found, respond with: []"#;
|
||||
|
||||
pub const CONVENTION_REVIEW_PROMPT: &str = r#"You are a code reviewer checking for convention violations that indicate likely bugs. Report ONLY deviations from the project's visible patterns that could cause real problems.
|
||||
pub const CONVENTION_REVIEW_PROMPT: &str = r#"You are a code reviewer checking adherence to project conventions. Focus ONLY on patterns that indicate likely bugs or maintenance problems.
|
||||
|
||||
Report:
|
||||
- Inconsistent error handling within the same module where the inconsistency could hide failures
|
||||
- Public API that breaks the module's established contract (not just different style)
|
||||
- Anti-patterns that are bugs in this language: e.g. `unwrap()` in Rust library code where the CI enforces `clippy::unwrap_used`, `any` defeating TypeScript's type system
|
||||
Look for:
|
||||
- Inconsistent error handling patterns within the same module
|
||||
- Public API that doesn't follow the project's established patterns
|
||||
- Missing or incorrect type annotations that could cause runtime issues
|
||||
- Anti-patterns specific to the language (e.g. unwrap in Rust library code, any in TypeScript)
|
||||
|
||||
Do NOT report:
|
||||
- Style preferences, formatting, naming conventions, or documentation
|
||||
- Code organization suggestions ("this function should be split")
|
||||
- Patterns that are valid in the language even if you'd write them differently
|
||||
- "Missing type annotations" unless the code literally won't compile or causes a type inference bug
|
||||
Do NOT report: minor style preferences, documentation gaps, formatting.
|
||||
Only report issues with HIGH confidence that they deviate from the visible codebase conventions.
|
||||
|
||||
Language-specific patterns that are conventional (do not flag these):
|
||||
- Rust: variable shadowing, `||`/`&&` short-circuit, `let` rebinding, builder patterns, `clone()`, `From`/`Into` impl chains, `#[allow(...)]` attributes
|
||||
- Python: `**kwargs` forwarding, `@property` setters, `__dunder__` methods, list comprehensions with conditions, `if TYPE_CHECKING` imports, `noqa` comments
|
||||
- Go: stuttering names (`http.HTTPClient`) discouraged but not a bug, `context.Context` as first param, init() functions, `//nolint` directives, returning concrete types vs interfaces in internal code
|
||||
- Java/Kotlin: builder pattern boilerplate, Lombok annotations (`@Data`, `@Builder`), Kotlin data classes, `companion object` factories, `@Suppress` annotations, checked exception wrapping
|
||||
- Ruby: `attr_accessor` usage, `Enumerable` mixin patterns, `module_function`, `class << self` syntax, DSL blocks (Rake, RSpec, Sinatra routes)
|
||||
- PHP: `__construct` with property promotion, Laravel facades, static factory methods, nullable types with `?`, attribute syntax `#[...]`
|
||||
- C/C++: header guards vs `#pragma once`, forward declarations, `const` correctness patterns, template specialization, `auto` type deduction
|
||||
|
||||
Severity guide:
|
||||
- medium: Convention violation that will likely cause a bug or maintenance problem
|
||||
- low: Convention violation that is a minor concern
|
||||
|
||||
Return at most 3 findings. Prefer [] over marginal findings.
|
||||
|
||||
Respond with a JSON array (no markdown fences):
|
||||
For each issue found, respond with a JSON array:
|
||||
[{"title": "...", "description": "...", "severity": "medium|low", "file": "...", "line": N, "suggestion": "..."}]
|
||||
|
||||
If no issues found, respond with: []"#;
|
||||
|
||||
pub const COMPLEXITY_REVIEW_PROMPT: &str = r#"You are reviewing code changes for complexity that is likely to cause bugs. Report ONLY complexity that makes the code demonstrably harder to reason about.
|
||||
pub const COMPLEXITY_REVIEW_PROMPT: &str = r#"You are reviewing code changes for excessive complexity that could lead to bugs.
|
||||
|
||||
Report:
|
||||
- Functions over 80 lines with multiple interleaved responsibilities (not just long)
|
||||
- Deeply nested control flow (5+ levels) where flattening would prevent bugs
|
||||
- Complex boolean expressions that a reader would likely misinterpret
|
||||
Look for:
|
||||
- Functions over 50 lines that should be decomposed
|
||||
- Deeply nested control flow (4+ levels)
|
||||
- Complex boolean expressions that are hard to reason about
|
||||
- Functions with 5+ parameters
|
||||
- Code duplication within the changed files
|
||||
|
||||
Do NOT report:
|
||||
- Functions that are long but linear and easy to follow
|
||||
- Acceptable complexity: configuration setup, CLI parsing, test helpers, builder patterns
|
||||
- Code that is complex because the problem is complex — only report if restructuring would reduce bug risk
|
||||
- "This function does multiple things" unless you can identify a specific bug risk from the coupling
|
||||
- Suggestions that would just move complexity elsewhere without reducing it
|
||||
Only report complexity issues that are HIGH risk for future bugs. Ignore acceptable complexity in configuration, CLI argument parsing, or generated code.
|
||||
|
||||
Severity guide:
|
||||
- medium: Complexity that has a concrete risk of causing bugs during future changes
|
||||
- low: Complexity that makes review harder but is unlikely to cause bugs
|
||||
|
||||
Return at most 2 findings. Prefer [] over reporting complexity that is justified.
|
||||
|
||||
Respond with a JSON array (no markdown fences):
|
||||
For each issue found, respond with a JSON array:
|
||||
[{"title": "...", "description": "...", "severity": "medium|low", "file": "...", "line": N, "suggestion": "..."}]
|
||||
|
||||
If no issues found, respond with: []"#;
|
||||
|
||||
@@ -8,46 +8,22 @@ use crate::pipeline::orchestrator::GraphContext;
|
||||
/// Maximum number of findings to include in a single LLM triage call.
|
||||
const TRIAGE_CHUNK_SIZE: usize = 30;
|
||||
|
||||
const TRIAGE_SYSTEM_PROMPT: &str = r#"You are a pragmatic security triage expert. Your job is to filter out noise and keep only findings that a developer should actually fix. Be aggressive about dismissing false positives — a clean, high-signal list is more valuable than a comprehensive one.
|
||||
const TRIAGE_SYSTEM_PROMPT: &str = r#"You are a security finding triage expert. Analyze each of the following security findings with its code context and determine the appropriate action.
|
||||
|
||||
Actions:
|
||||
- "confirm": True positive with real impact. Keep severity as-is.
|
||||
- "downgrade": Real issue but over-reported severity. Lower it.
|
||||
- "upgrade": Under-reported — higher severity warranted.
|
||||
- "dismiss": False positive, not exploitable, or not actionable. Remove it.
|
||||
- "confirm": The finding is a true positive at the reported severity. Keep as-is.
|
||||
- "downgrade": The finding is real but over-reported. Lower severity recommended.
|
||||
- "upgrade": The finding is under-reported. Higher severity recommended.
|
||||
- "dismiss": The finding is a false positive. Should be removed.
|
||||
|
||||
Dismiss when:
|
||||
- The scanner flagged a language idiom as a bug (see examples below)
|
||||
- The finding is in test/example/generated/vendored code
|
||||
- The "vulnerability" requires preconditions that don't exist in the code
|
||||
- The finding is about code style, complexity, or theoretical concerns rather than actual bugs
|
||||
- A hash function is used for non-security purposes (dedup, caching, content addressing)
|
||||
- Internal logging of non-sensitive operational data is flagged as "information disclosure"
|
||||
- The finding duplicates another finding already in the list
|
||||
- Framework-provided security is already in place (e.g. ORM parameterized queries, CSRF middleware, auth decorators)
|
||||
Consider:
|
||||
- Is the code in a test, example, or generated file? (lower confidence for test code)
|
||||
- Does the surrounding code context confirm or refute the finding?
|
||||
- Is the finding actionable by a developer?
|
||||
- Would a real attacker be able to exploit this?
|
||||
|
||||
Common false positive patterns by language (dismiss these):
|
||||
- Rust: short-circuit `||`/`&&`, variable shadowing, `clone()`, `unsafe` with safety docs, `sha2` for fingerprinting
|
||||
- Python: EAFP try/except, `subprocess` with hardcoded args, `pickle` on trusted data, Django `mark_safe` on static content
|
||||
- Go: `if err != nil` is not "swallowed error", `crypto/rand` is secure, returning errors is not "information disclosure"
|
||||
- Java/Kotlin: Spring Security annotations are valid auth, JPA parameterized queries are safe, Kotlin `!!` in tests is fine
|
||||
- Ruby: Rails `params.permit` is validation, ActiveRecord finders are parameterized, `html_safe` on generated content
|
||||
- PHP: PDO prepared statements are safe, Laravel Eloquent is parameterized, `htmlspecialchars` is XSS mitigation
|
||||
- C/C++: `strncpy`/`snprintf` are bounds-checked, smart pointers manage memory, RAII handles cleanup
|
||||
|
||||
Confirm only when:
|
||||
- You can describe a concrete scenario where the bug manifests or the vulnerability is exploitable
|
||||
- The fix is actionable (developer can change specific code to resolve it)
|
||||
- The finding is in production code that handles external input or sensitive data
|
||||
|
||||
Confidence scoring (0-10):
|
||||
- 8-10: Certain true positive with clear exploit/bug scenario
|
||||
- 5-7: Likely true positive, some assumptions required
|
||||
- 3-4: Uncertain, needs manual review
|
||||
- 0-2: Almost certainly a false positive
|
||||
|
||||
Respond with a JSON array, one entry per finding in the same order presented (no markdown fences):
|
||||
[{"id": "<fingerprint>", "action": "confirm|downgrade|upgrade|dismiss", "confidence": 0-10, "rationale": "1-2 sentences", "remediation": "optional fix"}, ...]"#;
|
||||
Respond with a JSON array, one entry per finding in the same order they were presented:
|
||||
[{"id": "<fingerprint>", "action": "confirm|downgrade|upgrade|dismiss", "confidence": 0-10, "rationale": "brief explanation", "remediation": "optional fix suggestion"}, ...]"#;
|
||||
|
||||
pub async fn triage_findings(
|
||||
llm: &Arc<LlmClient>,
|
||||
|
||||
@@ -321,38 +321,9 @@ impl PentestOrchestrator {
|
||||
total_findings += findings_count;
|
||||
|
||||
let mut finding_ids: Vec<String> = Vec::new();
|
||||
// Dedup findings within this tool result before inserting
|
||||
let deduped_findings =
|
||||
crate::pipeline::dedup::dedup_dast_findings(
|
||||
result.findings,
|
||||
);
|
||||
for mut finding in deduped_findings {
|
||||
for mut finding in result.findings {
|
||||
finding.scan_run_id = session_id.clone();
|
||||
finding.session_id = Some(session_id.clone());
|
||||
|
||||
// Check for existing duplicate in this session
|
||||
let fp = crate::pipeline::dedup::compute_dast_fingerprint(
|
||||
&finding,
|
||||
);
|
||||
let existing = self
|
||||
.db
|
||||
.dast_findings()
|
||||
.find_one(doc! {
|
||||
"session_id": &session_id,
|
||||
"title": &finding.title,
|
||||
"endpoint": &finding.endpoint,
|
||||
"method": &finding.method,
|
||||
})
|
||||
.await;
|
||||
if matches!(existing, Ok(Some(_))) {
|
||||
tracing::debug!(
|
||||
"Skipping duplicate DAST finding: {} (fp={:.12})",
|
||||
finding.title,
|
||||
fp,
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
let insert_result =
|
||||
self.db.dast_findings().insert_one(&finding).await;
|
||||
if let Ok(res) = &insert_result {
|
||||
|
||||
@@ -314,21 +314,6 @@ impl PentestOrchestrator {
|
||||
- For SPA apps: a 200 HTTP status does NOT mean the page is accessible — check the actual
|
||||
page content with the browser tool to verify if it shows real data or a login redirect.
|
||||
|
||||
## Finding Quality Rules
|
||||
- **Do not report the same issue twice.** If multiple tools detect the same missing header or
|
||||
vulnerability on the same endpoint, report it ONCE with the most specific tool's output.
|
||||
For example, if the recon tool and the header scanner both find missing HSTS, report it only
|
||||
from the header scanner (more specific).
|
||||
- **Group related findings.** Missing security headers on the same endpoint are ONE finding
|
||||
("Missing security headers") listing all missing headers, not separate findings per header.
|
||||
- **Severity must match real impact:**
|
||||
- critical/high: Exploitable vulnerability (you can demonstrate the exploit)
|
||||
- medium: Real misconfiguration with security implications but not directly exploitable
|
||||
- low: Best-practice recommendation, defense-in-depth, or informational
|
||||
- **Missing headers are medium at most** unless you can demonstrate a concrete exploit enabled
|
||||
by the missing header (e.g., missing CSP + confirmed XSS = high for CSP finding).
|
||||
- Console.log in third-party/vendored JS (node_modules, minified libraries) is informational only.
|
||||
|
||||
## Important
|
||||
- This is an authorized penetration test. All testing is permitted within the target scope.
|
||||
- Respect the rate limit of {rate_limit} requests per second.
|
||||
|
||||
@@ -66,10 +66,8 @@ impl CodeReviewScanner {
|
||||
}
|
||||
}
|
||||
|
||||
let deduped = dedup_cross_pass(all_findings);
|
||||
|
||||
ScanOutput {
|
||||
findings: deduped,
|
||||
findings: all_findings,
|
||||
sbom_entries: Vec::new(),
|
||||
}
|
||||
}
|
||||
@@ -186,51 +184,3 @@ struct ReviewIssue {
|
||||
#[serde(default)]
|
||||
suggestion: Option<String>,
|
||||
}
|
||||
|
||||
/// Deduplicate findings across review passes.
|
||||
///
|
||||
/// Multiple passes often flag the same issue (e.g. SQL injection reported by
|
||||
/// logic, security, and convention passes). We group by file + nearby line +
|
||||
/// normalized title keywords and keep the highest-severity finding.
|
||||
fn dedup_cross_pass(findings: Vec<Finding>) -> Vec<Finding> {
|
||||
use std::collections::HashMap;
|
||||
|
||||
// Build a dedup key: (file, line bucket, normalized title words)
|
||||
fn dedup_key(f: &Finding) -> String {
|
||||
let file = f.file_path.as_deref().unwrap_or("");
|
||||
// Group lines within 3 of each other
|
||||
let line_bucket = f.line_number.unwrap_or(0) / 4;
|
||||
// Normalize: lowercase, keep only alphanumeric, sort words for order-independence
|
||||
let title_lower = f.title.to_lowercase();
|
||||
let mut words: Vec<&str> = title_lower
|
||||
.split(|c: char| !c.is_alphanumeric())
|
||||
.filter(|w| w.len() > 2)
|
||||
.collect();
|
||||
words.sort();
|
||||
format!("{file}:{line_bucket}:{}", words.join(","))
|
||||
}
|
||||
|
||||
let mut groups: HashMap<String, Finding> = HashMap::new();
|
||||
|
||||
for finding in findings {
|
||||
let key = dedup_key(&finding);
|
||||
groups
|
||||
.entry(key)
|
||||
.and_modify(|existing| {
|
||||
// Keep the higher severity; on tie, keep the one with more detail
|
||||
if finding.severity > existing.severity
|
||||
|| (finding.severity == existing.severity
|
||||
&& finding.description.len() > existing.description.len())
|
||||
{
|
||||
*existing = finding.clone();
|
||||
}
|
||||
// Merge CWE if the existing one is missing it
|
||||
if existing.cwe.is_none() {
|
||||
existing.cwe = finding.cwe.clone();
|
||||
}
|
||||
})
|
||||
.or_insert(finding);
|
||||
}
|
||||
|
||||
groups.into_values().collect()
|
||||
}
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
use compliance_core::models::dast::DastFinding;
|
||||
|
||||
pub fn compute_fingerprint(parts: &[&str]) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
for part in parts {
|
||||
@@ -11,209 +9,9 @@ pub fn compute_fingerprint(parts: &[&str]) -> String {
|
||||
hex::encode(hasher.finalize())
|
||||
}
|
||||
|
||||
/// Compute a dedup fingerprint for a DAST finding.
|
||||
///
|
||||
/// The key is derived from the *canonicalized* title (lowercased, domain names
|
||||
/// stripped, known synonyms resolved), endpoint, and HTTP method. This lets us
|
||||
/// detect both exact duplicates (same tool reporting twice across passes) and
|
||||
/// semantic duplicates (e.g., `security_header_missing` "Missing HSTS header"
|
||||
/// vs `tls_misconfiguration` "Missing strict-transport-security header").
|
||||
pub fn compute_dast_fingerprint(f: &DastFinding) -> String {
|
||||
let canon = canonicalize_dast_title(&f.title);
|
||||
let endpoint = f.endpoint.to_lowercase().trim_end_matches('/').to_string();
|
||||
let method = f.method.to_uppercase();
|
||||
let param = f.parameter.as_deref().unwrap_or("");
|
||||
compute_fingerprint(&[&canon, &endpoint, &method, param])
|
||||
}
|
||||
|
||||
/// Canonicalize a DAST finding title for dedup purposes.
///
/// 1. Lowercase
/// 2. Strip domain names / URLs (e.g. "for comp-dev.meghsakha.com")
/// 3. Resolve known header synonyms (hsts ↔ strict-transport-security, etc.)
/// 4. Strip extra whitespace
fn canonicalize_dast_title(title: &str) -> String {
    let mut s = title.to_lowercase();

    // Strip "for <domain>" or "on <domain>" suffixes
    // Pattern: "for <word.word...>" or "on <method> <url>"
    if let Some(idx) = s.find(" for ") {
        // Check if what follows looks like a domain or URL
        let rest = &s[idx + 5..];
        if rest.contains('.') || rest.starts_with("http") {
            s.truncate(idx);
        }
    }
    if let Some(idx) = s.find(" on ") {
        let rest = &s[idx + 4..];
        if rest.contains("http") || rest.contains('/') {
            s.truncate(idx);
        }
    }

    // Resolve known header synonyms (whole-word occurrences only)
    let synonyms: &[(&str, &str)] = &[
        ("hsts", "strict-transport-security"),
        ("csp", "content-security-policy"),
        ("cors", "cross-origin-resource-sharing"),
        ("xfo", "x-frame-options"),
    ];
    for &(short, canonical) in synonyms {
        // Scan EVERY occurrence, not just the first: previously a single
        // non-word-boundary hit (e.g. "cspx") masked a later whole-word "csp"
        // and the synonym was never resolved.
        let mut search_from = 0;
        while let Some(rel) = s[search_from..].find(short) {
            let pos = search_from + rel;
            let end = pos + short.len();
            // Whole-word check: neighbours must not be alphanumeric. Byte
            // indexing is fine here — `find` returns char boundaries and the
            // shorts are pure ASCII, so UTF-8 continuation bytes never pass
            // `is_ascii_alphanumeric`.
            let before_ok = pos == 0 || !s.as_bytes()[pos - 1].is_ascii_alphanumeric();
            let after_ok = end >= s.len() || !s.as_bytes()[end].is_ascii_alphanumeric();
            if before_ok && after_ok {
                s = format!("{}{}{}", &s[..pos], canonical, &s[end..]);
                // Resume after the inserted canonical form so we never
                // rescan it (guarantees termination).
                search_from = pos + canonical.len();
            } else {
                search_from = end;
            }
        }
    }

    // Collapse whitespace
    s.split_whitespace().collect::<Vec<_>>().join(" ")
}
|
||||
|
||||
/// Deduplicate a list of DAST findings, merging evidence from duplicates.
///
/// Two-phase approach:
/// 1. **Exact dedup** — group by canonicalized `(title, endpoint, method, parameter)`.
///    Merge evidence arrays, keep the highest severity, preserve exploitable flag.
/// 2. **CWE-based dedup** — within the same `(cwe, endpoint, method)` group, merge
///    findings whose canonicalized titles resolve to the same subject (e.g., HSTS
///    reported as both `security_header_missing` and `tls_misconfiguration`).
///
/// Returns the surviving findings; output order is not guaranteed to match the
/// input (phase-2 grouping iterates a `HashMap`).
pub fn dedup_dast_findings(findings: Vec<DastFinding>) -> Vec<DastFinding> {
    use std::collections::HashMap;

    // Nothing to merge with zero or one finding.
    if findings.len() <= 1 {
        return findings;
    }

    // Phase 1: exact fingerprint dedup
    // `seen` maps fingerprint -> index into `deduped`, so later duplicates are
    // folded into the first occurrence in a single pass.
    let mut seen: HashMap<String, usize> = HashMap::new();
    let mut deduped: Vec<DastFinding> = Vec::new();

    for finding in findings {
        let fp = compute_dast_fingerprint(&finding);

        if let Some(&idx) = seen.get(&fp) {
            // Merge into existing
            merge_dast_finding(&mut deduped[idx], &finding);
        } else {
            seen.insert(fp, deduped.len());
            deduped.push(finding);
        }
    }

    // Snapshot taken AFTER phase 1, so the debug log below measures only the
    // phase-2 (CWE-based) merges.
    let before = deduped.len();

    // Phase 2: CWE-based related dedup
    // Group by (cwe, endpoint_normalized, method) — only when CWE is present
    let mut cwe_groups: HashMap<String, Vec<usize>> = HashMap::new();
    for (i, f) in deduped.iter().enumerate() {
        if let Some(ref cwe) = f.cwe {
            let key = format!(
                "{}|{}|{}",
                cwe,
                f.endpoint.to_lowercase().trim_end_matches('/'),
                f.method.to_uppercase(),
            );
            cwe_groups.entry(key).or_default().push(i);
        }
    }

    // For each CWE group with multiple findings, keep the one with highest severity
    // and most evidence, merge the rest into it.
    // `merge_map`: primary index -> indices merged into it.
    // Each index belongs to exactly one CWE group (one CWE per finding), so a
    // primary in one group can never appear in `remove_indices` via another.
    let mut merge_map: HashMap<usize, Vec<usize>> = HashMap::new();
    let mut remove_indices: Vec<usize> = Vec::new();

    for indices in cwe_groups.values() {
        if indices.len() <= 1 {
            continue;
        }
        // Find the "primary" finding: highest severity, then most evidence, then longest description
        let Some(&primary_idx) = indices.iter().max_by(|&&a, &&b| {
            deduped[a]
                .severity
                .cmp(&deduped[b].severity)
                .then_with(|| deduped[a].evidence.len().cmp(&deduped[b].evidence.len()))
                .then_with(|| {
                    deduped[a]
                        .description
                        .len()
                        .cmp(&deduped[b].description.len())
                })
        }) else {
            // Unreachable in practice (indices.len() >= 2), kept for safety.
            continue;
        };

        for &idx in indices {
            if idx != primary_idx {
                remove_indices.push(idx);
                merge_map.entry(primary_idx).or_default().push(idx);
            }
        }
    }

    if !remove_indices.is_empty() {
        remove_indices.sort_unstable();
        remove_indices.dedup();

        // Merge evidence (and exploitable flag) from secondaries into their
        // primary BEFORE any removal, while indices are still valid.
        for (&primary, secondaries) in &merge_map {
            let extra_evidence: Vec<_> = secondaries
                .iter()
                .flat_map(|&i| deduped[i].evidence.clone())
                .collect();
            let any_exploitable = secondaries.iter().any(|&i| deduped[i].exploitable);

            deduped[primary].evidence.extend(extra_evidence);
            if any_exploitable {
                deduped[primary].exploitable = true;
            }
        }

        // Remove merged findings (iterate in reverse to preserve indices)
        for &idx in remove_indices.iter().rev() {
            deduped.remove(idx);
        }
    }

    let after = deduped.len();
    if before != after {
        tracing::debug!(
            "DAST CWE-based dedup: {before} → {after} findings ({} merged)",
            before - after
        );
    }

    deduped
}
|
||||
|
||||
/// Merge a duplicate DAST finding into a primary one.
|
||||
fn merge_dast_finding(primary: &mut DastFinding, duplicate: &DastFinding) {
|
||||
primary.evidence.extend(duplicate.evidence.clone());
|
||||
if duplicate.severity > primary.severity {
|
||||
primary.severity = duplicate.severity.clone();
|
||||
}
|
||||
if duplicate.exploitable {
|
||||
primary.exploitable = true;
|
||||
}
|
||||
// Keep the longer/better description
|
||||
if duplicate.description.len() > primary.description.len() {
|
||||
primary.description.clone_from(&duplicate.description);
|
||||
}
|
||||
// Keep remediation if primary doesn't have one
|
||||
if primary.remediation.is_none() && duplicate.remediation.is_some() {
|
||||
primary.remediation.clone_from(&duplicate.remediation);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use compliance_core::models::dast::DastVulnType;
|
||||
use compliance_core::models::finding::Severity;
|
||||
|
||||
#[test]
|
||||
fn fingerprint_is_deterministic() {
|
||||
@@ -257,159 +55,4 @@ mod tests {
|
||||
let b = compute_fingerprint(&["a", "bc"]);
|
||||
assert_ne!(a, b);
|
||||
}
|
||||
|
||||
fn make_dast(title: &str, endpoint: &str, vuln_type: DastVulnType) -> DastFinding {
|
||||
let mut f = DastFinding::new(
|
||||
"run1".into(),
|
||||
"target1".into(),
|
||||
vuln_type,
|
||||
title.into(),
|
||||
format!("Description for {title}"),
|
||||
Severity::Medium,
|
||||
endpoint.into(),
|
||||
"GET".into(),
|
||||
);
|
||||
f.cwe = Some("CWE-319".into());
|
||||
f
|
||||
}
|
||||
|
||||
#[test]
fn canonicalize_strips_domain_suffix() {
    // A "for <domain>" suffix must be removed and "hsts" resolved to the
    // canonical header name so both spellings fingerprint identically.
    let canon = canonicalize_dast_title("Missing HSTS header for comp-dev.meghsakha.com");
    assert!(!canon.contains("meghsakha"), "domain should be stripped");
    assert!(
        canon.contains("strict-transport-security"),
        "hsts should be resolved: {canon}"
    );
}
|
||||
|
||||
#[test]
fn canonicalize_resolves_synonyms() {
    // The short form and the full header name must canonicalize to the exact
    // same string, otherwise they would get different fingerprints.
    let a = canonicalize_dast_title("Missing HSTS header");
    let b = canonicalize_dast_title("Missing strict-transport-security header");
    assert_eq!(a, b);
}
|
||||
|
||||
#[test]
fn exact_dedup_merges_identical_findings() {
    // Two byte-identical findings must collapse in phase 1 (exact
    // fingerprint match).
    let f1 = make_dast(
        "Missing strict-transport-security header",
        "https://example.com",
        DastVulnType::SecurityHeaderMissing,
    );
    let f2 = make_dast(
        "Missing strict-transport-security header",
        "https://example.com",
        DastVulnType::SecurityHeaderMissing,
    );
    let result = dedup_dast_findings(vec![f1, f2]);
    assert_eq!(result.len(), 1, "exact duplicates should be merged");
}
|
||||
|
||||
#[test]
fn synonym_dedup_merges_hsts_variants() {
    // Same issue reported under two titles (synonym + trailing domain) and two
    // vuln types — canonicalization should still merge them into one finding.
    let f1 = make_dast(
        "Missing strict-transport-security header",
        "https://example.com",
        DastVulnType::SecurityHeaderMissing,
    );
    let f2 = make_dast(
        "Missing HSTS header for example.com",
        "https://example.com",
        DastVulnType::TlsMisconfiguration,
    );
    let result = dedup_dast_findings(vec![f1, f2]);
    assert_eq!(
        result.len(),
        1,
        "HSTS synonym variants should merge to 1 finding"
    );
}
|
||||
|
||||
#[test]
fn different_headers_not_merged() {
    let mut f1 = make_dast(
        "Missing x-content-type-options header",
        "https://example.com",
        DastVulnType::SecurityHeaderMissing,
    );
    f1.cwe = Some("CWE-16".into());
    let mut f2 = make_dast(
        "Missing permissions-policy header",
        "https://example.com",
        DastVulnType::SecurityHeaderMissing,
    );
    f2.cwe = Some("CWE-16".into());
    // These share CWE-16 but are different headers — phase 2 will merge them
    // since they share the same CWE+endpoint. This is acceptable because they
    // have the same root cause (missing security headers configuration).
    let result = dedup_dast_findings(vec![f1, f2]);
    // CWE-based dedup will merge these into 1.
    // The assertion is intentionally loose (<= 2): it only guards against
    // duplication, not against the CWE-level merge described above.
    assert!(
        result.len() <= 2,
        "same CWE+endpoint findings may be merged"
    );
}
|
||||
|
||||
#[test]
fn different_endpoints_not_merged() {
    // Identical title but distinct hosts: the endpoint participates in both
    // the fingerprint and the CWE group key, so neither phase may merge these.
    let f1 = make_dast(
        "Missing strict-transport-security header",
        "https://example.com",
        DastVulnType::SecurityHeaderMissing,
    );
    let f2 = make_dast(
        "Missing strict-transport-security header",
        "https://other.com",
        DastVulnType::SecurityHeaderMissing,
    );
    let result = dedup_dast_findings(vec![f1, f2]);
    assert_eq!(result.len(), 2, "different endpoints should not merge");
}
|
||||
|
||||
#[test]
fn dedup_preserves_highest_severity() {
    // When duplicates disagree on severity, the merged finding must keep the
    // higher one (High beats the Medium default from make_dast).
    let f1 = make_dast(
        "Missing strict-transport-security header",
        "https://example.com",
        DastVulnType::SecurityHeaderMissing,
    );
    let mut f2 = make_dast(
        "Missing strict-transport-security header",
        "https://example.com",
        DastVulnType::SecurityHeaderMissing,
    );
    f2.severity = Severity::High;
    let result = dedup_dast_findings(vec![f1, f2]);
    assert_eq!(result.len(), 1);
    assert_eq!(result[0].severity, Severity::High);
}
|
||||
|
||||
#[test]
fn dedup_merges_evidence() {
    // Each duplicate carries one evidence record; after merging, the single
    // surviving finding must hold both records.
    let mut f1 = make_dast(
        "Missing strict-transport-security header",
        "https://example.com",
        DastVulnType::SecurityHeaderMissing,
    );
    f1.evidence
        .push(compliance_core::models::dast::DastEvidence {
            request_method: "GET".into(),
            request_url: "https://example.com".into(),
            request_headers: None,
            request_body: None,
            response_status: 200,
            response_headers: None,
            response_snippet: Some("pass 1".into()),
            screenshot_path: None,
            payload: None,
            response_time_ms: None,
        });
    let mut f2 = f1.clone();
    f2.evidence[0].response_snippet = Some("pass 2".into());

    let result = dedup_dast_findings(vec![f1, f2]);
    assert_eq!(result.len(), 1);
    assert_eq!(result[0].evidence.len(), 2, "evidence should be merged");
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
use compliance_core::models::*;
|
||||
|
||||
use super::dedup::compute_fingerprint;
|
||||
use super::orchestrator::PipelineOrchestrator;
|
||||
use crate::error::AgentError;
|
||||
use crate::pipeline::code_review::CodeReviewScanner;
|
||||
@@ -90,37 +89,12 @@ impl PipelineOrchestrator {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Dedup findings by fingerprint to avoid duplicate comments
|
||||
let mut seen_fps = std::collections::HashSet::new();
|
||||
let mut unique_findings: Vec<&Finding> = Vec::new();
|
||||
for finding in &pr_findings {
|
||||
let fp = compute_fingerprint(&[
|
||||
repo_id,
|
||||
&pr_number.to_string(),
|
||||
finding.file_path.as_deref().unwrap_or(""),
|
||||
&finding.line_number.unwrap_or(0).to_string(),
|
||||
&finding.title,
|
||||
]);
|
||||
if seen_fps.insert(fp) {
|
||||
unique_findings.push(finding);
|
||||
}
|
||||
}
|
||||
|
||||
let pr_findings = unique_findings;
|
||||
|
||||
// Build review comments from findings
|
||||
let mut review_comments = Vec::new();
|
||||
for finding in &pr_findings {
|
||||
if let (Some(path), Some(line)) = (&finding.file_path, finding.line_number) {
|
||||
let fp = compute_fingerprint(&[
|
||||
repo_id,
|
||||
&pr_number.to_string(),
|
||||
path,
|
||||
&line.to_string(),
|
||||
&finding.title,
|
||||
]);
|
||||
let comment_body = format!(
|
||||
"**[{}] {}**\n\n{}\n\n*Scanner: {} | {}*\n\n<!-- compliance-fp:{fp} -->",
|
||||
"**[{}] {}**\n\n{}\n\n*Scanner: {} | {}*",
|
||||
finding.severity,
|
||||
finding.title,
|
||||
finding.description,
|
||||
@@ -149,17 +123,6 @@ impl PipelineOrchestrator {
|
||||
.join("\n"),
|
||||
);
|
||||
|
||||
if review_comments.is_empty() {
|
||||
// All findings were on files/lines we can't comment on inline
|
||||
if let Err(e) = tracker
|
||||
.create_pr_review(owner, tracker_repo_name, pr_number, &summary, Vec::new())
|
||||
.await
|
||||
{
|
||||
tracing::warn!("[{repo_id}] Failed to post PR review summary: {e}");
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if let Err(e) = tracker
|
||||
.create_pr_review(
|
||||
owner,
|
||||
|
||||
@@ -98,8 +98,7 @@ impl IssueTracker for GiteaTracker {
|
||||
_ => "open",
|
||||
};
|
||||
|
||||
let resp = self
|
||||
.http
|
||||
self.http
|
||||
.patch(&url)
|
||||
.header(
|
||||
"Authorization",
|
||||
@@ -110,14 +109,6 @@ impl IssueTracker for GiteaTracker {
|
||||
.await
|
||||
.map_err(|e| CoreError::IssueTracker(format!("Gitea update issue failed: {e}")))?;
|
||||
|
||||
if !resp.status().is_success() {
|
||||
let status = resp.status();
|
||||
let text = resp.text().await.unwrap_or_default();
|
||||
return Err(CoreError::IssueTracker(format!(
|
||||
"Gitea update issue returned {status}: {text}"
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -132,8 +123,7 @@ impl IssueTracker for GiteaTracker {
|
||||
"/repos/{owner}/{repo}/issues/{external_id}/comments"
|
||||
));
|
||||
|
||||
let resp = self
|
||||
.http
|
||||
self.http
|
||||
.post(&url)
|
||||
.header(
|
||||
"Authorization",
|
||||
@@ -144,14 +134,6 @@ impl IssueTracker for GiteaTracker {
|
||||
.await
|
||||
.map_err(|e| CoreError::IssueTracker(format!("Gitea add comment failed: {e}")))?;
|
||||
|
||||
if !resp.status().is_success() {
|
||||
let status = resp.status();
|
||||
let text = resp.text().await.unwrap_or_default();
|
||||
return Err(CoreError::IssueTracker(format!(
|
||||
"Gitea add comment returned {status}: {text}"
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -176,8 +158,7 @@ impl IssueTracker for GiteaTracker {
|
||||
})
|
||||
.collect();
|
||||
|
||||
let resp = self
|
||||
.http
|
||||
self.http
|
||||
.post(&url)
|
||||
.header(
|
||||
"Authorization",
|
||||
@@ -192,48 +173,6 @@ impl IssueTracker for GiteaTracker {
|
||||
.await
|
||||
.map_err(|e| CoreError::IssueTracker(format!("Gitea PR review failed: {e}")))?;
|
||||
|
||||
if !resp.status().is_success() {
|
||||
let status = resp.status();
|
||||
let text = resp.text().await.unwrap_or_default();
|
||||
|
||||
// If inline comments caused the failure, retry with just the summary body
|
||||
if !comments.is_empty() {
|
||||
tracing::warn!(
|
||||
"Gitea PR review with inline comments failed ({status}): {text}, retrying as plain comment"
|
||||
);
|
||||
let fallback_url = self.api_url(&format!(
|
||||
"/repos/{owner}/{repo}/issues/{pr_number}/comments"
|
||||
));
|
||||
let fallback_resp = self
|
||||
.http
|
||||
.post(&fallback_url)
|
||||
.header(
|
||||
"Authorization",
|
||||
format!("token {}", self.token.expose_secret()),
|
||||
)
|
||||
.json(&serde_json::json!({ "body": body }))
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
CoreError::IssueTracker(format!("Gitea PR comment fallback failed: {e}"))
|
||||
})?;
|
||||
|
||||
if !fallback_resp.status().is_success() {
|
||||
let fb_status = fallback_resp.status();
|
||||
let fb_text = fallback_resp.text().await.unwrap_or_default();
|
||||
return Err(CoreError::IssueTracker(format!(
|
||||
"Gitea PR comment fallback returned {fb_status}: {fb_text}"
|
||||
)));
|
||||
}
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
return Err(CoreError::IssueTracker(format!(
|
||||
"Gitea PR review returned {status}: {text}"
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -113,72 +113,6 @@ pub async fn add_mcp_server(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Probe each MCP server's health endpoint and update status in MongoDB.
///
/// Iterates every document in the `mcp_servers` collection, sends a GET to a
/// derived `/health` URL with a 5-second timeout, and writes back
/// `Running`/`Stopped` plus an `updated_at` timestamp. Status writes are
/// best-effort: serialization or update failures skip the server rather than
/// failing the whole refresh.
#[server]
pub async fn refresh_mcp_status() -> Result<(), ServerFnError> {
    use chrono::Utc;
    use compliance_core::models::McpServerStatus;
    use mongodb::bson::doc;

    let state: super::server_state::ServerState =
        dioxus_fullstack::FullstackContext::extract().await?;

    // Stream all configured MCP servers from the database.
    let mut cursor = state
        .db
        .mcp_servers()
        .find(doc! {})
        .await
        .map_err(|e| ServerFnError::new(e.to_string()))?;

    // One shared client; 5 s timeout bounds how long a dead server can stall us.
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(5))
        .build()
        .map_err(|e| ServerFnError::new(e.to_string()))?;

    while cursor
        .advance()
        .await
        .map_err(|e| ServerFnError::new(e.to_string()))?
    {
        let server: compliance_core::models::McpServerConfig = cursor
            .deserialize_current()
            .map_err(|e| ServerFnError::new(e.to_string()))?;

        // A server without an _id can't be updated — skip it.
        let Some(oid) = server.id else { continue };

        // Derive health URL from the endpoint (replace trailing /mcp with /health)
        let health_url = if server.endpoint_url.ends_with("/mcp") {
            // len() - 3 strips "mcp" but keeps the trailing slash, so
            // appending "health" yields ".../health".
            format!(
                "{}health",
                &server.endpoint_url[..server.endpoint_url.len() - 3]
            )
        } else {
            format!("{}/health", server.endpoint_url.trim_end_matches('/'))
        };

        // Any failure (timeout, connection error, non-2xx) counts as Stopped.
        let new_status = match client.get(&health_url).send().await {
            Ok(resp) if resp.status().is_success() => McpServerStatus::Running,
            _ => McpServerStatus::Stopped,
        };

        // Best-effort: if the status can't be serialized, leave this server as-is.
        let status_bson = match bson::to_bson(&new_status) {
            Ok(b) => b,
            Err(_) => continue,
        };

        // Update errors are deliberately ignored — a single failed write
        // shouldn't abort the refresh of the remaining servers.
        let _ = state
            .db
            .mcp_servers()
            .update_one(
                doc! { "_id": oid },
                doc! { "$set": { "status": status_bson, "updated_at": Utc::now().to_rfc3339() } },
            )
            .await;
    }

    Ok(())
}
|
||||
|
||||
#[server]
|
||||
pub async fn delete_mcp_server(server_id: String) -> Result<(), ServerFnError> {
|
||||
use mongodb::bson::doc;
|
||||
|
||||
@@ -5,7 +5,7 @@ use dioxus_free_icons::Icon;
|
||||
use crate::components::page_header::PageHeader;
|
||||
use crate::components::toast::{ToastType, Toasts};
|
||||
use crate::infrastructure::mcp::{
|
||||
add_mcp_server, delete_mcp_server, fetch_mcp_servers, refresh_mcp_status, regenerate_mcp_token,
|
||||
add_mcp_server, delete_mcp_server, fetch_mcp_servers, regenerate_mcp_token,
|
||||
};
|
||||
|
||||
#[component]
|
||||
@@ -22,17 +22,6 @@ pub fn McpServersPage() -> Element {
|
||||
let mut new_mongo_uri = use_signal(String::new);
|
||||
let mut new_mongo_db = use_signal(String::new);
|
||||
|
||||
// Probe health of all MCP servers on page load, then refresh the list
|
||||
let mut refreshing = use_signal(|| true);
|
||||
use_effect(move || {
|
||||
spawn(async move {
|
||||
refreshing.set(true);
|
||||
let _ = refresh_mcp_status().await;
|
||||
servers.restart();
|
||||
refreshing.set(false);
|
||||
});
|
||||
});
|
||||
|
||||
// Track which server's token is visible
|
||||
let mut visible_token: Signal<Option<String>> = use_signal(|| None);
|
||||
// Track which server is pending delete confirmation
|
||||
|
||||
@@ -41,9 +41,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
StreamableHttpServerConfig::default(),
|
||||
);
|
||||
|
||||
let router = axum::Router::new()
|
||||
.route("/health", axum::routing::get(|| async { "ok" }))
|
||||
.nest_service("/mcp", service);
|
||||
let router = axum::Router::new().nest_service("/mcp", service);
|
||||
let listener = tokio::net::TcpListener::bind(("0.0.0.0", port)).await?;
|
||||
tracing::info!("MCP HTTP server listening on 0.0.0.0:{port}");
|
||||
axum::serve(listener, router).await?;
|
||||
|
||||
71
test_endpoint.rs
Normal file
71
test_endpoint.rs
Normal file
@@ -0,0 +1,71 @@
|
||||
use std::process::Command;
|
||||
|
||||
/// Handles user login - totally secure, trust me
///
/// NOTE(review): every weakness below is labeled in-line (SQL injection,
/// hardcoded credentials, command injection, plaintext password logging), so
/// this file looks like a deliberate fixture used to exercise the security
/// scanner — confirm before "fixing" any of it.
///
/// Returns `true` only for the hardcoded admin/admin123 pair; otherwise logs
/// the attempt and returns `false`.
pub fn handle_login(username: &str, password: &str) -> bool {
    // SQL injection vulnerability
    let query = format!(
        "SELECT * FROM users WHERE username = '{}' AND password = '{}'",
        username, password
    );
    println!("Running query: {}", query);

    // Hardcoded credentials
    if username == "admin" && password == "admin123" {
        return true;
    }

    // Command injection vulnerability
    // (result is never used; the shell invocation itself is the point)
    let output = Command::new("sh")
        .arg("-c")
        .arg(format!("echo 'User logged in: {}'", username))
        .output()
        .expect("failed to execute");

    // Storing password in plain text log
    println!("Login attempt: user={}, pass={}", username, password);

    false
}
|
||||
|
||||
/// Process user data with no input validation
///
/// NOTE(review): intentional path-traversal example — `input` is interpolated
/// into the path unchecked; part of the same scanner fixture (confirm before
/// changing). Read failures are swallowed: missing/unreadable files yield "".
pub fn process_data(input: &str) -> String {
    // Path traversal vulnerability
    let file_path = format!("/var/data/{}", input);
    std::fs::read_to_string(&file_path).unwrap_or_default()
}
|
||||
|
||||
/// Super safe token generation
///
/// Returns the same fixed string every call — the "predictable token" pattern
/// this fixture exists to exhibit.
pub fn generate_token() -> String {
    // Predictable "random" token
    String::from("abc123fixedtoken")
}
|
||||
|
||||
// Off-by-one error
// NOTE(review): `0..=count` is inclusive, so count+1 elements are pushed and
// indexing panics whenever count >= items.len(). Intentional per the label
// above — this file appears to be a deliberate scanner fixture (confirm
// before fixing).
pub fn get_items(items: &[String], count: usize) -> Vec<&String> {
    let mut result = Vec::new();
    for i in 0..=count {
        result.push(&items[i]);
    }
    result
}
|
||||
|
||||
// Unused variables, deeply nested logic, too many params
// NOTE(review): intentional code-smell sample from the same fixture file.
// Returns f + g only when a..e are all positive; otherwise falls through to 0.
pub fn do_everything(
    a: i32, b: i32, c: i32, d: i32, e: i32, f: i32, g: i32,
) -> i32 {
    let _unused = a + b;
    let _also_unused = c * d;
    if a > 0 {
        if b > 0 {
            if c > 0 {
                if d > 0 {
                    if e > 0 {
                        return f + g;
                    }
                }
            }
        }
    }
    0
}
|
||||
Reference in New Issue
Block a user