- Implemented InjectionTests.cs to cover injection vulnerabilities including SQL, NoSQL, command, LDAP, and XPath injection.
- Created SsrfTests.cs to test for Server-Side Request Forgery (SSRF) vulnerabilities, including internal URL access, cloud metadata access, and URL allowlist bypass attempts.
- Introduced MaliciousPayloads.cs to store a collection of malicious payloads for exercising various security vulnerabilities.
- Added SecurityAssertions.cs for common security-specific assertion helpers.
- Established SecurityTestBase.cs as a base class for security tests, providing common infrastructure and mocking utilities.
- Configured the test project StellaOps.Security.Tests.csproj with the dependencies needed for testing.

#!/usr/bin/env bash
# =============================================================================
# compute-ttfs-metrics.sh
# Computes Time-to-First-Signal (TTFS) metrics from test runs
#
# Usage: ./compute-ttfs-metrics.sh [options]
#   --results-path PATH    Path to test results directory
#   --output FILE          Output JSON file (default: stdout)
#   --baseline FILE        Baseline TTFS file for comparison
#   --dry-run              Show what would be computed
#   --strict               Exit non-zero if thresholds are violated
#   --verbose              Enable verbose output
#
# Output: JSON with TTFS p50, p95, p99 metrics and regression status
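#
# Example:
#   ./compute-ttfs-metrics.sh --results-path bench/results \
#     --baseline bench/baselines/ttfs-baseline.json --strict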
# =============================================================================

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
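# REPO_ROOT is resolved two directory levels above this script's location.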

# Default paths
RESULTS_PATH="${REPO_ROOT}/bench/results"
OUTPUT_FILE=""
BASELINE_FILE="${REPO_ROOT}/bench/baselines/ttfs-baseline.json"
DRY_RUN=false
STRICT=false
VERBOSE=false

# Parse arguments
while [[ $# -gt 0 ]]; do
  case "$1" in
    --results-path)
      RESULTS_PATH="$2"
      shift 2
      ;;
    --output)
      OUTPUT_FILE="$2"
      shift 2
      ;;
    --baseline)
      BASELINE_FILE="$2"
      shift 2
      ;;
    --dry-run)
      DRY_RUN=true
      shift
      ;;
    --strict)
      STRICT=true
      shift
      ;;
    --verbose)
      VERBOSE=true
      shift
      ;;
    -h|--help)
      # Print the usage header (everything between the two "# ===" border lines).
      sed -n '/^# ====/,/^# ====/p' "$0"
      exit 0
      ;;
    *)
      echo "Unknown option: $1" >&2
      exit 1
      ;;
  esac
done

log() {
  if [[ "${VERBOSE}" == "true" ]]; then
    echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] $*" >&2
  fi
}

error() {
  echo "[ERROR] $*" >&2
}

warn() {
  echo "[WARN] $*" >&2
}

# Calculate percentiles from sorted array
percentile() {
  local -n arr=$1
  local p=$2
  local n=${#arr[@]}

  if [[ $n -eq 0 ]]; then
    echo "0"
    return
  fi

  local idx=$(echo "scale=0; ($n - 1) * $p / 100" | bc)
  echo "${arr[$idx]}"
}
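
# percentile() uses a floor-rank index with no interpolation: for n sorted samples
# the p-th percentile is arr[(n - 1) * p / 100], e.g. index 94 for p95 of 100 samples.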

if [[ "${DRY_RUN}" == "true" ]]; then
  log "[DRY RUN] Would process TTFS metrics..."

  cat <<EOF
{
  "timestamp": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
  "dry_run": true,
  "results_path": "${RESULTS_PATH}",
  "metrics": {
    "ttfs_ms": {
      "p50": 1250,
      "p95": 3500,
      "p99": 5200,
      "min": 450,
      "max": 8500,
      "mean": 1850,
      "sample_count": 100
    },
    "by_scan_type": {
      "image_scan": {
        "p50": 2100,
        "p95": 4500,
        "p99": 6800
      },
      "filesystem_scan": {
        "p50": 850,
        "p95": 1800,
        "p99": 2500
      },
      "sbom_scan": {
        "p50": 320,
        "p95": 650,
        "p99": 950
      }
    }
  },
  "baseline_comparison": {
    "baseline_path": "${BASELINE_FILE}",
    "p50_regression_pct": -2.5,
    "p95_regression_pct": 1.2,
    "regression_detected": false
  }
}
EOF
  exit 0
fi

# Validate results directory
if [[ ! -d "${RESULTS_PATH}" ]]; then
  error "Results directory not found: ${RESULTS_PATH}"
  exit 1
fi

log "Processing TTFS results from ${RESULTS_PATH}"

# Collect all TTFS values from result files
declare -a ttfs_values=()
declare -a image_ttfs=()
declare -a fs_ttfs=()
declare -a sbom_ttfs=()

# Find and process all result files
# "**" matches .json files at any depth (including the top level) once globstar is
# enabled; nullglob makes unmatched patterns expand to nothing.
shopt -s nullglob globstar
for result_file in "${RESULTS_PATH}"/**/*.json; do
  [[ -f "${result_file}" ]] || continue

log "Processing: ${result_file}"
|
|
|
|
# Extract TTFS value if present
|
|
TTFS=$(jq -r '.ttfs_ms // .time_to_first_signal_ms // empty' "${result_file}" 2>/dev/null || true)
|
|
SCAN_TYPE=$(jq -r '.scan_type // "unknown"' "${result_file}" 2>/dev/null || echo "unknown")
|
|
|
|
if [[ -n "${TTFS}" ]] && [[ "${TTFS}" != "null" ]]; then
|
|
ttfs_values+=("${TTFS}")
|
|
|
|
case "${SCAN_TYPE}" in
|
|
image|image_scan|container)
|
|
image_ttfs+=("${TTFS}")
|
|
;;
|
|
filesystem|fs|fs_scan)
|
|
fs_ttfs+=("${TTFS}")
|
|
;;
|
|
sbom|sbom_scan)
|
|
sbom_ttfs+=("${TTFS}")
|
|
;;
|
|
esac
|
|
fi
|
|
done
|
|
|
|
# Sort arrays for percentile calculation
IFS=$'\n' ttfs_sorted=($(sort -n <<<"${ttfs_values[*]}")); unset IFS
IFS=$'\n' image_sorted=($(sort -n <<<"${image_ttfs[*]}")); unset IFS
IFS=$'\n' fs_sorted=($(sort -n <<<"${fs_ttfs[*]}")); unset IFS
IFS=$'\n' sbom_sorted=($(sort -n <<<"${sbom_ttfs[*]}")); unset IFS
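# The IFS=$'\n' prefix above makes each command substitution split on newlines only,
# so every sorted value lands in its own array element.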

# Calculate overall metrics
SAMPLE_COUNT=${#ttfs_values[@]}
if [[ $SAMPLE_COUNT -eq 0 ]]; then
  warn "No TTFS samples found"
  P50=0
  P95=0
  P99=0
  MIN=0
  MAX=0
  MEAN=0
else
  P50=$(percentile ttfs_sorted 50)
  P95=$(percentile ttfs_sorted 95)
  P99=$(percentile ttfs_sorted 99)
  MIN=${ttfs_sorted[0]}
  MAX=${ttfs_sorted[-1]}

  # Calculate mean
  SUM=0
  for v in "${ttfs_values[@]}"; do
    SUM=$((SUM + v))
  done
  MEAN=$((SUM / SAMPLE_COUNT))
fi

# Calculate per-type metrics
IMAGE_P50=$(percentile image_sorted 50)
IMAGE_P95=$(percentile image_sorted 95)
IMAGE_P99=$(percentile image_sorted 99)

FS_P50=$(percentile fs_sorted 50)
FS_P95=$(percentile fs_sorted 95)
FS_P99=$(percentile fs_sorted 99)

SBOM_P50=$(percentile sbom_sorted 50)
SBOM_P95=$(percentile sbom_sorted 95)
SBOM_P99=$(percentile sbom_sorted 99)

# Compare against baseline if available
REGRESSION_DETECTED=false
P50_REGRESSION_PCT=0
P95_REGRESSION_PCT=0

if [[ -f "${BASELINE_FILE}" ]]; then
  log "Comparing against baseline: ${BASELINE_FILE}"

  BASELINE_P50=$(jq -r '.metrics.ttfs_ms.p50 // 0' "${BASELINE_FILE}")
  BASELINE_P95=$(jq -r '.metrics.ttfs_ms.p95 // 0' "${BASELINE_FILE}")
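
  # Regression percentage is the relative change vs. the baseline:
  #   pct = (current - baseline) * 100 / baseline
  # e.g. baseline p50 = 1000 ms and current p50 = 1150 ms -> +15.00%, which trips
  # the 10% threshold checked below.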

  if [[ $BASELINE_P50 -gt 0 ]]; then
    # printf normalizes bc output (e.g. ".50" -> "0.50") so the JSON below stays valid.
    P50_REGRESSION_PCT=$(printf '%.2f' "$(echo "scale=2; (${P50} - ${BASELINE_P50}) * 100 / ${BASELINE_P50}" | bc)")
  fi

  if [[ $BASELINE_P95 -gt 0 ]]; then
    P95_REGRESSION_PCT=$(printf '%.2f' "$(echo "scale=2; (${P95} - ${BASELINE_P95}) * 100 / ${BASELINE_P95}" | bc)")
  fi

  # Check for regression (>10% increase)
  if (( $(echo "${P50_REGRESSION_PCT} > 10" | bc -l) )) || (( $(echo "${P95_REGRESSION_PCT} > 10" | bc -l) )); then
    REGRESSION_DETECTED=true
    warn "TTFS regression detected: p50=${P50_REGRESSION_PCT}%, p95=${P95_REGRESSION_PCT}%"
  fi
fi

# Generate output
OUTPUT=$(cat <<EOF
{
  "timestamp": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
  "dry_run": false,
  "results_path": "${RESULTS_PATH}",
  "metrics": {
    "ttfs_ms": {
      "p50": ${P50},
      "p95": ${P95},
      "p99": ${P99},
      "min": ${MIN},
      "max": ${MAX},
      "mean": ${MEAN},
      "sample_count": ${SAMPLE_COUNT}
    },
    "by_scan_type": {
      "image_scan": {
        "p50": ${IMAGE_P50:-0},
        "p95": ${IMAGE_P95:-0},
        "p99": ${IMAGE_P99:-0}
      },
      "filesystem_scan": {
        "p50": ${FS_P50:-0},
        "p95": ${FS_P95:-0},
        "p99": ${FS_P99:-0}
      },
      "sbom_scan": {
        "p50": ${SBOM_P50:-0},
        "p95": ${SBOM_P95:-0},
        "p99": ${SBOM_P99:-0}
      }
    }
  },
  "baseline_comparison": {
    "baseline_path": "${BASELINE_FILE}",
    "p50_regression_pct": ${P50_REGRESSION_PCT},
    "p95_regression_pct": ${P95_REGRESSION_PCT},
    "regression_detected": ${REGRESSION_DETECTED}
  }
}
EOF
)

# Output results
if [[ -n "${OUTPUT_FILE}" ]]; then
  echo "${OUTPUT}" > "${OUTPUT_FILE}"
  log "Results written to ${OUTPUT_FILE}"
else
  echo "${OUTPUT}"
fi

# Strict mode: fail on regression
if [[ "${STRICT}" == "true" ]] && [[ "${REGRESSION_DETECTED}" == "true" ]]; then
  error "TTFS regression exceeds threshold"
  exit 1
fi

exit 0