From df941367273bd26da6f89cca11c0bd122536ab45 Mon Sep 17 00:00:00 2001 From: StellaOps Bot Date: Mon, 22 Dec 2025 09:49:38 +0200 Subject: [PATCH] feat: Implement distro-native version comparison for RPM, Debian, and Alpine packages - Add RpmVersionComparer for RPM version comparison with epoch, version, and release handling. - Introduce DebianVersion for parsing Debian EVR (Epoch:Version-Release) strings. - Create ApkVersion for parsing Alpine APK version strings with suffix support. - Define IVersionComparator interface for version comparison with proof-line generation. - Implement VersionComparisonResult struct to encapsulate comparison results and proof lines. - Add tests for Debian and RPM version comparers to ensure correct functionality and edge case handling. - Create project files for the version comparison library and its tests. --- docs/api/scanner-drift-api.md | 526 ++++++ docs/claims-index.md | 173 ++ docs/guides/compare-workflow-user-guide.md | 363 ++++ .../SPRINT_2000_0003_0001_alpine_connector.md | 346 ++++ ...INT_2000_0003_0002_distro_version_tests.md | 357 ++++ ...PRINT_3500_0001_0001_deeper_moat_master.md | 2 +- docs/implplan/SPRINT_3500_SUMMARY.md | 2 +- ...3600_0004_0001_nodejs_babel_integration.md | 293 ++++ ...00_0005_0001_policy_ci_gate_integration.md | 325 ++++ ...00_0006_0001_documentation_finalization.md | 224 +++ ..._0001_0001_binary_call_edge_enhancement.md | 218 +++ docs/implplan/SPRINT_3800_SUMMARY.md | 120 ++ ...01_0001_cve_symbol_mapping_slice_format.md | 262 +++ ..._3820_0001_0001_slice_query_replay_apis.md | 241 +++ ...001_0001_vex_integration_policy_binding.md | 234 +++ ...RINT_3840_0001_0001_runtime_trace_merge.md | 241 +++ .../SPRINT_3850_0001_0001_oci_storage_cli.md | 308 ++++ ...001_0001_exception_objects_schema_model.md | 2 +- ...3_0001_exploit_path_inbox_proof_bundles.md | 1298 ++++++++++++++ ...0003_0002_recheck_policy_evidence_hooks.md | 1521 +++++++++++++++++ .../SPRINT_4000_0002_0001_backport_ux.md | 412 +++++ 
...PRINT_4200_0002_0003_delta_compare_view.md | 617 ++++++- ...SPRINT_4200_0002_0006_delta_compare_api.md | 884 ++++++++++ ...T_4300_0001_0001_cli_attestation_verify.md | 624 +++++++ ..._0001_0001_oci_verdict_attestation_push.md | 181 ++ ...NT_4300_0001_0002_findings_evidence_api.md | 511 ++++++ ...4300_0001_0002_one_command_audit_replay.md | 181 ++ ...300_0002_0001_evidence_privacy_controls.md | 376 ++++ ...T_4300_0002_0001_unknowns_budget_policy.md | 167 ++ ...4300_0002_0002_evidence_ttl_enforcement.md | 477 ++++++ ...02_0002_unknowns_attestation_predicates.md | 104 ++ ...SPRINT_4300_0003_0001_predicate_schemas.md | 388 +++++ ...300_0003_0001_sealed_knowledge_snapshot.md | 165 ++ ...RINT_4300_0003_0002_attestation_metrics.md | 341 ++++ docs/implplan/SPRINT_4300_MOAT_SUMMARY.md | 126 ++ docs/implplan/SPRINT_4300_SUMMARY.md | 171 ++ ...INT_4400_0001_0001_signed_delta_verdict.md | 112 ++ ..._0002_reachability_subgraph_attestation.md | 119 ++ docs/implplan/SPRINT_4400_SUMMARY.md | 50 + ...RINT_4500_0001_0001_vex_hub_aggregation.md | 183 ++ ...SPRINT_4500_0001_0002_vex_trust_scoring.md | 180 ++ docs/implplan/SPRINT_4500_SUMMARY.md | 67 + ...RINT_4600_0001_0001_sbom_lineage_ledger.md | 171 ++ .../SPRINT_4600_0001_0002_byos_ingestion.md | 136 ++ docs/implplan/SPRINT_4600_SUMMARY.md | 57 + ...002_0003_version_comparator_integration.md | 256 +++ docs/implplan/SPRINT_6000_SUMMARY.md | 17 +- ...7000_0001_0001_competitive_benchmarking.md | 265 +++ .../SPRINT_7000_0001_0002_sbom_lineage.md | 281 +++ .../SPRINT_7000_0001_0003_explainability.md | 325 ++++ ...7000_0001_0004_three_layer_reachability.md | 367 ++++ ...7000_0002_0001_unified_confidence_model.md | 865 ++++++++++ ...00_0002_0002_vulnerability_first_ux_api.md | 844 +++++++++ ...PRINT_7000_0003_0001_evidence_graph_api.md | 550 ++++++ ...7000_0003_0002_reachability_minimap_api.md | 602 +++++++ ...INT_7000_0003_0003_runtime_timeline_api.md | 624 +++++++ ...INT_7000_0004_0001_progressive_fidelity.md | 654 +++++++ 
...NT_7000_0004_0002_evidence_size_budgets.md | 606 +++++++ ...NT_7000_0005_0001_quality_kpis_tracking.md | 681 ++++++++ docs/implplan/SPRINT_7000_SUMMARY.md | 414 +++++ ..._7100_0001_0001_trust_vector_foundation.md | 356 ++++ ..._7100_0001_0002_verdict_manifest_replay.md | 462 +++++ ...PRINT_7100_0002_0001_policy_gates_merge.md | 422 +++++ ...0_0002_0002_source_defaults_calibration.md | 537 ++++++ .../SPRINT_7100_0003_0001_ui_trust_algebra.md | 365 ++++ ...100_0003_0002_integration_documentation.md | 338 ++++ docs/implplan/SPRINT_7100_SUMMARY.md | 268 +++ .../4300_explainable_triage_gap_analysis.md | 305 ++++ ...NT_3600_0001_0001_trust_algebra_lattice.md | 2 +- ...200_001_000_router_rate_limiting_master.md | 4 +- .../SPRINT_1200_001_IMPLEMENTATION_GUIDE.md | 2 +- .../archived/SPRINT_1200_001_README.md | 6 +- ...401_0002_0001_score_replay_proof_bundle.md | 2 +- ..._0003_0001_ground_truth_corpus_ci_gates.md | 2 +- ..._0002_0001_unknowns_ranking_containment.md | 2 +- .../SPRINT_3700_0001_0001_triage_db_schema.md | 2 +- docs/modules/authority/verdict-manifest.md | 462 +++++ docs/modules/benchmark/architecture.md | 444 +++++ docs/modules/excititor/trust-lattice.md | 460 +++++ .../explainable-triage-implementation-plan.md | 212 +++ docs/modules/platform/moat-gap-analysis.md | 276 +++ docs/modules/scanner/reachability-drift.md | 371 ++++ .../modules/web/smart-diff-ui-architecture.md | 358 ++++ docs/operations/reachability-drift-guide.md | 519 ++++++ ...inable Triage and Proof‑Linked Evidence.md | 751 -------- ...026 - UI Patterns for Triage and Replay.md | 140 ++ docs/product-advisories/archive | 1028 ----------- ...Dec-2025 - Reachability Drift Detection.md | 47 +- ...didate features mapped to moat strength.md | 0 ...ayered binary + call‑stack reachability.md | 0 ...inable Triage and Proof-Linked Evidence.md | 231 +++ ... 
Designing Explainable Triage Workflows.md | 0 ...5 - Getting Distro Backport Logic Right.md | 93 + ...g Container Scanners Against Stella Ops.md | 0 ...art Diff - Reproducibility as a Feature.md | 0 ...uilding a Trust Lattice for VEX Sources.md | 184 ++ docs/reachability/cve-symbol-mapping.md | 296 ++++ docs/reachability/replay-verification.md | 332 ++++ docs/reachability/slice-schema.md | 287 ++++ src/Concelier/AGENTS.md | 29 + src/Scanner/AGENTS.md | 47 + .../Comparers/DebianVersionComparer.cs | 261 +++ .../Comparers/RpmVersionComparer.cs | 259 +++ .../IVersionComparator.cs | 81 + .../Models/ApkVersion.cs | 158 ++ .../Models/DebianVersion.cs | 126 ++ .../Models/RpmVersion.cs | 130 ++ .../StellaOps.VersionComparison.csproj | 13 + .../DebianVersionComparerTests.cs | 192 +++ .../RpmVersionComparerTests.cs | 138 ++ .../StellaOps.VersionComparison.Tests.csproj | 19 + 111 files changed, 30413 insertions(+), 1813 deletions(-) create mode 100644 docs/api/scanner-drift-api.md create mode 100644 docs/claims-index.md create mode 100644 docs/guides/compare-workflow-user-guide.md create mode 100644 docs/implplan/SPRINT_2000_0003_0001_alpine_connector.md create mode 100644 docs/implplan/SPRINT_2000_0003_0002_distro_version_tests.md create mode 100644 docs/implplan/SPRINT_3600_0004_0001_nodejs_babel_integration.md create mode 100644 docs/implplan/SPRINT_3600_0005_0001_policy_ci_gate_integration.md create mode 100644 docs/implplan/SPRINT_3600_0006_0001_documentation_finalization.md create mode 100644 docs/implplan/SPRINT_3800_0001_0001_binary_call_edge_enhancement.md create mode 100644 docs/implplan/SPRINT_3800_SUMMARY.md create mode 100644 docs/implplan/SPRINT_3810_0001_0001_cve_symbol_mapping_slice_format.md create mode 100644 docs/implplan/SPRINT_3820_0001_0001_slice_query_replay_apis.md create mode 100644 docs/implplan/SPRINT_3830_0001_0001_vex_integration_policy_binding.md create mode 100644 docs/implplan/SPRINT_3840_0001_0001_runtime_trace_merge.md create mode 100644 
docs/implplan/SPRINT_3850_0001_0001_oci_storage_cli.md create mode 100644 docs/implplan/SPRINT_3900_0003_0001_exploit_path_inbox_proof_bundles.md create mode 100644 docs/implplan/SPRINT_3900_0003_0002_recheck_policy_evidence_hooks.md create mode 100644 docs/implplan/SPRINT_4000_0002_0001_backport_ux.md create mode 100644 docs/implplan/SPRINT_4200_0002_0006_delta_compare_api.md create mode 100644 docs/implplan/SPRINT_4300_0001_0001_cli_attestation_verify.md create mode 100644 docs/implplan/SPRINT_4300_0001_0001_oci_verdict_attestation_push.md create mode 100644 docs/implplan/SPRINT_4300_0001_0002_findings_evidence_api.md create mode 100644 docs/implplan/SPRINT_4300_0001_0002_one_command_audit_replay.md create mode 100644 docs/implplan/SPRINT_4300_0002_0001_evidence_privacy_controls.md create mode 100644 docs/implplan/SPRINT_4300_0002_0001_unknowns_budget_policy.md create mode 100644 docs/implplan/SPRINT_4300_0002_0002_evidence_ttl_enforcement.md create mode 100644 docs/implplan/SPRINT_4300_0002_0002_unknowns_attestation_predicates.md create mode 100644 docs/implplan/SPRINT_4300_0003_0001_predicate_schemas.md create mode 100644 docs/implplan/SPRINT_4300_0003_0001_sealed_knowledge_snapshot.md create mode 100644 docs/implplan/SPRINT_4300_0003_0002_attestation_metrics.md create mode 100644 docs/implplan/SPRINT_4300_MOAT_SUMMARY.md create mode 100644 docs/implplan/SPRINT_4300_SUMMARY.md create mode 100644 docs/implplan/SPRINT_4400_0001_0001_signed_delta_verdict.md create mode 100644 docs/implplan/SPRINT_4400_0001_0002_reachability_subgraph_attestation.md create mode 100644 docs/implplan/SPRINT_4400_SUMMARY.md create mode 100644 docs/implplan/SPRINT_4500_0001_0001_vex_hub_aggregation.md create mode 100644 docs/implplan/SPRINT_4500_0001_0002_vex_trust_scoring.md create mode 100644 docs/implplan/SPRINT_4500_SUMMARY.md create mode 100644 docs/implplan/SPRINT_4600_0001_0001_sbom_lineage_ledger.md create mode 100644 docs/implplan/SPRINT_4600_0001_0002_byos_ingestion.md create 
mode 100644 docs/implplan/SPRINT_4600_SUMMARY.md create mode 100644 docs/implplan/SPRINT_6000_0002_0003_version_comparator_integration.md create mode 100644 docs/implplan/SPRINT_7000_0001_0001_competitive_benchmarking.md create mode 100644 docs/implplan/SPRINT_7000_0001_0002_sbom_lineage.md create mode 100644 docs/implplan/SPRINT_7000_0001_0003_explainability.md create mode 100644 docs/implplan/SPRINT_7000_0001_0004_three_layer_reachability.md create mode 100644 docs/implplan/SPRINT_7000_0002_0001_unified_confidence_model.md create mode 100644 docs/implplan/SPRINT_7000_0002_0002_vulnerability_first_ux_api.md create mode 100644 docs/implplan/SPRINT_7000_0003_0001_evidence_graph_api.md create mode 100644 docs/implplan/SPRINT_7000_0003_0002_reachability_minimap_api.md create mode 100644 docs/implplan/SPRINT_7000_0003_0003_runtime_timeline_api.md create mode 100644 docs/implplan/SPRINT_7000_0004_0001_progressive_fidelity.md create mode 100644 docs/implplan/SPRINT_7000_0004_0002_evidence_size_budgets.md create mode 100644 docs/implplan/SPRINT_7000_0005_0001_quality_kpis_tracking.md create mode 100644 docs/implplan/SPRINT_7000_SUMMARY.md create mode 100644 docs/implplan/SPRINT_7100_0001_0001_trust_vector_foundation.md create mode 100644 docs/implplan/SPRINT_7100_0001_0002_verdict_manifest_replay.md create mode 100644 docs/implplan/SPRINT_7100_0002_0001_policy_gates_merge.md create mode 100644 docs/implplan/SPRINT_7100_0002_0002_source_defaults_calibration.md create mode 100644 docs/implplan/SPRINT_7100_0003_0001_ui_trust_algebra.md create mode 100644 docs/implplan/SPRINT_7100_0003_0002_integration_documentation.md create mode 100644 docs/implplan/SPRINT_7100_SUMMARY.md create mode 100644 docs/implplan/analysis/4300_explainable_triage_gap_analysis.md create mode 100644 docs/modules/authority/verdict-manifest.md create mode 100644 docs/modules/benchmark/architecture.md create mode 100644 docs/modules/excititor/trust-lattice.md create mode 100644 
docs/modules/platform/explainable-triage-implementation-plan.md create mode 100644 docs/modules/platform/moat-gap-analysis.md create mode 100644 docs/modules/scanner/reachability-drift.md create mode 100644 docs/modules/web/smart-diff-ui-architecture.md create mode 100644 docs/operations/reachability-drift-guide.md delete mode 100644 docs/product-advisories/18-Dec-2025 - Designing Explainable Triage and Proof‑Linked Evidence.md create mode 100644 docs/product-advisories/22-Dec-2026 - UI Patterns for Triage and Replay.md delete mode 100644 docs/product-advisories/archive rename docs/product-advisories/{ => archived}/17-Dec-2025 - Reachability Drift Detection.md (91%) rename docs/product-advisories/{ => archived}/19-Dec-2025 - Stella Ops candidate features mapped to moat strength.md (100%) rename docs/product-advisories/{ => archived/2025-12-22-binary-reachability}/20-Dec-2025 - Layered binary + call‑stack reachability.md (100%) create mode 100644 docs/product-advisories/archived/2025-12-22-explainable-triage/18-Dec-2025 - Designing Explainable Triage and Proof-Linked Evidence.md rename docs/product-advisories/{ => archived}/21-Dec-2025 - Designing Explainable Triage Workflows.md (100%) create mode 100644 docs/product-advisories/archived/22-Dec-2025 - Getting Distro Backport Logic Right.md rename docs/product-advisories/{ => archived/22-Dec-2025}/19-Dec-2025 - Benchmarking Container Scanners Against Stella Ops.md (100%) rename docs/product-advisories/{ => archived/22-Dec-2025}/21-Dec-2025 - Smart Diff - Reproducibility as a Feature.md (100%) create mode 100644 docs/product-advisories/archived/22-Dec-2026 - Building a Trust Lattice for VEX Sources.md create mode 100644 docs/reachability/cve-symbol-mapping.md create mode 100644 docs/reachability/replay-verification.md create mode 100644 docs/reachability/slice-schema.md create mode 100644 src/__Libraries/StellaOps.VersionComparison/Comparers/DebianVersionComparer.cs create mode 100644 
src/__Libraries/StellaOps.VersionComparison/Comparers/RpmVersionComparer.cs create mode 100644 src/__Libraries/StellaOps.VersionComparison/IVersionComparator.cs create mode 100644 src/__Libraries/StellaOps.VersionComparison/Models/ApkVersion.cs create mode 100644 src/__Libraries/StellaOps.VersionComparison/Models/DebianVersion.cs create mode 100644 src/__Libraries/StellaOps.VersionComparison/Models/RpmVersion.cs create mode 100644 src/__Libraries/StellaOps.VersionComparison/StellaOps.VersionComparison.csproj create mode 100644 src/__Libraries/__Tests/StellaOps.VersionComparison.Tests/DebianVersionComparerTests.cs create mode 100644 src/__Libraries/__Tests/StellaOps.VersionComparison.Tests/RpmVersionComparerTests.cs create mode 100644 src/__Libraries/__Tests/StellaOps.VersionComparison.Tests/StellaOps.VersionComparison.Tests.csproj diff --git a/docs/api/scanner-drift-api.md b/docs/api/scanner-drift-api.md new file mode 100644 index 000000000..c29a21b7b --- /dev/null +++ b/docs/api/scanner-drift-api.md @@ -0,0 +1,526 @@ +# Scanner Drift API Reference + +**Module:** Scanner +**Version:** 1.0 +**Base Path:** `/api/scanner` +**Last Updated:** 2025-12-22 + +--- + +## 1. Overview + +The Scanner Drift API provides endpoints for computing and retrieving reachability drift analysis between scans. Drift detection identifies when code changes create new paths to vulnerable sinks or mitigate existing risks. + +--- + +## 2. Authentication & Authorization + +### Required Scopes + +| Endpoint | Scope | +|----------|-------| +| Read drift results | `scanner:read` | +| Compute reachability | `scanner:write` | +| Admin operations | `scanner:admin` | + +### Headers + +```http +Authorization: Bearer +X-Tenant-Id: +``` + +--- + +## 3. Endpoints + +### 3.1 GET /scans/{scanId}/drift + +Retrieves drift analysis results comparing the specified scan against its base scan. 
+ +**Parameters:** + +| Name | In | Type | Required | Description | +|------|-----|------|----------|-------------| +| scanId | path | string | Yes | Head scan identifier | +| baseScanId | query | string | No | Base scan ID (defaults to previous scan) | +| language | query | string | No | Filter by language (dotnet, node, java, etc.) | + +**Response: 200 OK** + +```json +{ + "id": "550e8400-e29b-41d4-a716-446655440000", + "baseScanId": "abc123", + "headScanId": "def456", + "language": "dotnet", + "detectedAt": "2025-12-22T10:30:00Z", + "newlyReachableCount": 3, + "newlyUnreachableCount": 1, + "totalDriftCount": 4, + "hasMaterialDrift": true, + "resultDigest": "sha256:a1b2c3d4..." +} +``` + +**Response: 404 Not Found** + +```json +{ + "error": "DRIFT_NOT_FOUND", + "message": "No drift analysis found for scan def456" +} +``` + +--- + +### 3.2 GET /drift/{driftId}/sinks + +Retrieves individual drifted sinks with pagination. + +**Parameters:** + +| Name | In | Type | Required | Description | +|------|-----|------|----------|-------------| +| driftId | path | uuid | Yes | Drift result identifier | +| direction | query | string | No | Filter: `became_reachable` or `became_unreachable` | +| sinkCategory | query | string | No | Filter by sink category | +| offset | query | int | No | Pagination offset (default: 0) | +| limit | query | int | No | Page size (default: 100, max: 1000) | + +**Response: 200 OK** + +```json +{ + "items": [ + { + "id": "660e8400-e29b-41d4-a716-446655440001", + "sinkNodeId": "MyApp.Services.DbService.ExecuteQuery(string)", + "symbol": "DbService.ExecuteQuery", + "sinkCategory": "sql_raw", + "direction": "became_reachable", + "cause": { + "kind": "guard_removed", + "description": "Guard condition removed in AuthMiddleware.Validate", + "changedSymbol": "AuthMiddleware.Validate", + "changedFile": "src/Middleware/AuthMiddleware.cs", + "changedLine": 42, + "codeChangeId": "770e8400-e29b-41d4-a716-446655440002" + }, + "path": { + "entrypoint": { + 
"nodeId": "MyApp.Controllers.UserController.GetUser(int)", + "symbol": "UserController.GetUser", + "file": "src/Controllers/UserController.cs", + "line": 15 + }, + "sink": { + "nodeId": "MyApp.Services.DbService.ExecuteQuery(string)", + "symbol": "DbService.ExecuteQuery", + "file": "src/Services/DbService.cs", + "line": 88 + }, + "intermediateCount": 3, + "keyNodes": [ + { + "nodeId": "MyApp.Middleware.AuthMiddleware.Validate()", + "symbol": "AuthMiddleware.Validate", + "file": "src/Middleware/AuthMiddleware.cs", + "line": 42, + "isChanged": true, + "changeKind": "guard_changed" + } + ] + }, + "associatedVulns": [ + { + "cveId": "CVE-2024-12345", + "epss": 0.85, + "cvss": 9.8, + "vexStatus": "affected", + "packagePurl": "pkg:nuget/Dapper@2.0.123" + } + ] + } + ], + "totalCount": 3, + "offset": 0, + "limit": 100 +} +``` + +--- + +### 3.3 POST /scans/{scanId}/compute-reachability + +Triggers reachability computation for a scan. Idempotent - returns cached result if already computed. + +**Parameters:** + +| Name | In | Type | Required | Description | +|------|-----|------|----------|-------------| +| scanId | path | string | Yes | Scan identifier | + +**Request Body:** + +```json +{ + "languages": ["dotnet", "node"], + "baseScanId": "abc123", + "forceRecompute": false +} +``` + +**Response: 202 Accepted** + +```json +{ + "jobId": "880e8400-e29b-41d4-a716-446655440003", + "status": "queued", + "estimatedCompletionSeconds": 30 +} +``` + +**Response: 200 OK** (cached result) + +```json +{ + "jobId": "880e8400-e29b-41d4-a716-446655440003", + "status": "completed", + "driftResultId": "550e8400-e29b-41d4-a716-446655440000" +} +``` + +--- + +### 3.4 GET /scans/{scanId}/reachability/components + +Lists components with their reachability status. 
+ +**Parameters:** + +| Name | In | Type | Required | Description | +|------|-----|------|----------|-------------| +| scanId | path | string | Yes | Scan identifier | +| language | query | string | No | Filter by language | +| reachable | query | bool | No | Filter by reachability | +| offset | query | int | No | Pagination offset | +| limit | query | int | No | Page size | + +**Response: 200 OK** + +```json +{ + "items": [ + { + "purl": "pkg:nuget/Newtonsoft.Json@13.0.1", + "language": "dotnet", + "reachableSinkCount": 2, + "unreachableSinkCount": 5, + "totalSinkCount": 7, + "highestSeveritySink": "unsafe_deser", + "reachabilityGate": 5 + } + ], + "totalCount": 42, + "offset": 0, + "limit": 100 +} +``` + +--- + +### 3.5 GET /scans/{scanId}/reachability/findings + +Lists reachable vulnerable sinks with CVE associations. + +**Parameters:** + +| Name | In | Type | Required | Description | +|------|-----|------|----------|-------------| +| scanId | path | string | Yes | Scan identifier | +| minCvss | query | float | No | Minimum CVSS score | +| kevOnly | query | bool | No | Only KEV vulnerabilities | +| offset | query | int | No | Pagination offset | +| limit | query | int | No | Page size | + +**Response: 200 OK** + +```json +{ + "items": [ + { + "sinkNodeId": "MyApp.Services.CryptoService.Encrypt(string)", + "symbol": "CryptoService.Encrypt", + "sinkCategory": "crypto_weak", + "isReachable": true, + "shortestPathLength": 4, + "vulnerabilities": [ + { + "cveId": "CVE-2024-54321", + "cvss": 7.5, + "epss": 0.42, + "isKev": false, + "vexStatus": "affected" + } + ] + } + ], + "totalCount": 15, + "offset": 0, + "limit": 100 +} +``` + +--- + +### 3.6 GET /scans/{scanId}/reachability/explain + +Explains why a specific sink is reachable or unreachable. 
+ +**Parameters:** + +| Name | In | Type | Required | Description | +|------|-----|------|----------|-------------| +| scanId | path | string | Yes | Scan identifier | +| sinkNodeId | query | string | Yes | Sink node identifier | +| includeFullPath | query | bool | No | Include full path (default: false) | + +**Response: 200 OK** + +```json +{ + "sinkNodeId": "MyApp.Services.DbService.ExecuteQuery(string)", + "isReachable": true, + "reachabilityGate": 6, + "confidence": "confirmed", + "explanation": "Sink is reachable from 2 HTTP entrypoints via direct call paths", + "entrypoints": [ + { + "nodeId": "MyApp.Controllers.UserController.GetUser(int)", + "entrypointType": "http_handler", + "pathLength": 4 + }, + { + "nodeId": "MyApp.Controllers.AdminController.Query(string)", + "entrypointType": "http_handler", + "pathLength": 2 + } + ], + "shortestPath": { + "entrypoint": {...}, + "sink": {...}, + "intermediateCount": 1, + "keyNodes": [...] + }, + "fullPath": ["node1", "node2", "node3", "sink"] +} +``` + +--- + +## 4. 
Request/Response Models + +### 4.1 DriftDirection + +```typescript +enum DriftDirection { + became_reachable = "became_reachable", + became_unreachable = "became_unreachable" +} +``` + +### 4.2 DriftCauseKind + +```typescript +enum DriftCauseKind { + guard_removed = "guard_removed", + guard_added = "guard_added", + new_public_route = "new_public_route", + visibility_escalated = "visibility_escalated", + dependency_upgraded = "dependency_upgraded", + symbol_removed = "symbol_removed", + unknown = "unknown" +} +``` + +### 4.3 SinkCategory + +```typescript +enum SinkCategory { + cmd_exec = "cmd_exec", + unsafe_deser = "unsafe_deser", + sql_raw = "sql_raw", + ssrf = "ssrf", + file_write = "file_write", + path_traversal = "path_traversal", + template_injection = "template_injection", + crypto_weak = "crypto_weak", + authz_bypass = "authz_bypass", + ldap_injection = "ldap_injection", + xpath_injection = "xpath_injection", + xxe_injection = "xxe_injection", + code_injection = "code_injection", + log_injection = "log_injection", + reflection = "reflection", + open_redirect = "open_redirect" +} +``` + +### 4.4 CodeChangeKind + +```typescript +enum CodeChangeKind { + added = "added", + removed = "removed", + signature_changed = "signature_changed", + guard_changed = "guard_changed", + dependency_changed = "dependency_changed", + visibility_changed = "visibility_changed" +} +``` + +--- + +## 5. Error Codes + +| Code | HTTP Status | Description | +|------|-------------|-------------| +| `SCAN_NOT_FOUND` | 404 | Scan ID does not exist | +| `DRIFT_NOT_FOUND` | 404 | No drift analysis for this scan | +| `GRAPH_NOT_EXTRACTED` | 400 | Call graph not yet extracted | +| `LANGUAGE_NOT_SUPPORTED` | 400 | Language not supported for reachability | +| `COMPUTATION_IN_PROGRESS` | 409 | Reachability computation already running | +| `COMPUTATION_FAILED` | 500 | Reachability computation failed | +| `INVALID_SINK_ID` | 400 | Sink node ID not found in graph | + +--- + +## 6. 
Rate Limiting + +| Endpoint | Rate Limit | +|----------|------------| +| GET endpoints | 100/min | +| POST compute | 10/min | + +Rate limit headers: +```http +X-RateLimit-Limit: 100 +X-RateLimit-Remaining: 95 +X-RateLimit-Reset: 1703242800 +``` + +--- + +## 7. Examples + +### 7.1 cURL - Get Drift Results + +```bash +curl -X GET \ + 'https://api.stellaops.example/api/scanner/scans/def456/drift?language=dotnet' \ + -H 'Authorization: Bearer ' \ + -H 'X-Tenant-Id: ' +``` + +### 7.2 cURL - Compute Reachability + +```bash +curl -X POST \ + 'https://api.stellaops.example/api/scanner/scans/def456/compute-reachability' \ + -H 'Authorization: Bearer ' \ + -H 'X-Tenant-Id: ' \ + -H 'Content-Type: application/json' \ + -d '{ + "languages": ["dotnet"], + "baseScanId": "abc123" + }' +``` + +### 7.3 C# SDK + +```csharp +var client = new ScannerClient(options); + +// Get drift results +var drift = await client.GetDriftAsync("def456", language: "dotnet"); +Console.WriteLine($"Newly reachable: {drift.NewlyReachableCount}"); + +// Get drifted sinks +var sinks = await client.GetDriftedSinksAsync(drift.Id, + direction: DriftDirection.BecameReachable); + +foreach (var sink in sinks.Items) +{ + Console.WriteLine($"{sink.Symbol}: {sink.Cause.Description}"); +} +``` + +### 7.4 TypeScript SDK + +```typescript +import { ScannerClient } from '@stellaops/sdk'; + +const client = new ScannerClient({ baseUrl, token }); + +// Get drift results +const drift = await client.getDrift('def456', { language: 'dotnet' }); +console.log(`Newly reachable: ${drift.newlyReachableCount}`); + +// Explain a sink +const explanation = await client.explainReachability('def456', { + sinkNodeId: 'MyApp.Services.DbService.ExecuteQuery(string)', + includeFullPath: true +}); + +console.log(explanation.explanation); +``` + +--- + +## 8. Webhooks + +### 8.1 drift.computed + +Fired when drift analysis completes. 
+ +```json +{ + "event": "drift.computed", + "timestamp": "2025-12-22T10:30:00Z", + "data": { + "driftResultId": "550e8400-e29b-41d4-a716-446655440000", + "scanId": "def456", + "baseScanId": "abc123", + "newlyReachableCount": 3, + "newlyUnreachableCount": 1, + "hasMaterialDrift": true + } +} +``` + +### 8.2 drift.kev_reachable + +Fired when a KEV becomes reachable. + +```json +{ + "event": "drift.kev_reachable", + "timestamp": "2025-12-22T10:30:00Z", + "severity": "critical", + "data": { + "driftResultId": "550e8400-e29b-41d4-a716-446655440000", + "scanId": "def456", + "kevCveId": "CVE-2024-12345", + "sinkNodeId": "..." + } +} +``` + +--- + +## 9. References + +- **Architecture:** `docs/modules/scanner/reachability-drift.md` +- **Operations:** `docs/operations/reachability-drift-guide.md` +- **Source:** `src/Scanner/StellaOps.Scanner.WebService/Endpoints/ReachabilityDriftEndpoints.cs` diff --git a/docs/claims-index.md b/docs/claims-index.md new file mode 100644 index 000000000..1e691c27e --- /dev/null +++ b/docs/claims-index.md @@ -0,0 +1,173 @@ +# Stella Ops Claims Index + +This document provides a verifiable index of competitive claims. Each claim is linked to evidence and can be verified using the provided commands. + +> **Integrity**: This index is updated automatically by the benchmark CI workflow. Manual edits require PR approval. 
+ +--- + +## How to Verify Claims + +```bash +# Verify a specific claim +stella benchmark verify + +# Run full benchmark suite +stella benchmark run --competitors trivy,grype,syft + +# Generate updated claims from latest benchmark +stella benchmark claims --output docs/claims-index.md +``` + +--- + +## Claim Categories + +| Category | Prefix | Description | +|----------|--------|-------------| +| Determinism | DET-* | Reproducible, bit-identical outputs | +| Reachability | REACH-* | Call-path and exploitability analysis | +| Proofs | PROOF-* | Attestation and cryptographic evidence | +| Unknowns | UNK-* | Explicit uncertainty tracking | +| VEX | VEX-* | VEX handling and conflict resolution | +| Offline | OFFLINE-* | Air-gapped operation | +| SBOM | SBOM-* | SBOM fidelity and formats | +| Performance | PERF-* | Speed and scalability | + +--- + +## Active Claims + +### Determinism + +| Claim ID | Claim | Evidence | Status | +|----------|-------|----------|--------| +| DET-001 | Stella Ops produces bit-identical scan results across runs | `bench/determinism/results.json` | PENDING | +| DET-002 | Score replay produces identical verdicts from manifest | `bench/replay/results.json` | PENDING | +| DET-003 | SBOM generation is deterministic (stable ordering, canonical JSON) | `bench/sbom/determinism.json` | PENDING | + +### Reachability + +| Claim ID | Claim | Evidence | Status | +|----------|-------|----------|--------| +| REACH-001 | Stella Ops detects reachable vulnerabilities missed by Trivy | `bench/competitors/trivy-comparison.json` | PENDING | +| REACH-002 | Stella Ops eliminates X% false positives via unreachable path detection | `bench/reachability/fp-elimination.json` | PENDING | +| REACH-003 | Three-layer reachability (static + binary + runtime) provides higher confidence | `bench/reachability/3layer-corpus.json` | PENDING | + +### Proofs & Attestations + +| Claim ID | Claim | Evidence | Status | +|----------|-------|----------|--------| +| PROOF-001 | Every 
scan produces DSSE-signed attestation | `bench/attestation/coverage.json` | PENDING | +| PROOF-002 | Score proofs enable third-party verification | `bench/proofs/verification.json` | PENDING | +| PROOF-003 | Evidence chain links finding to source material | `bench/proofs/chain-integrity.json` | PENDING | + +### Unknowns + +| Claim ID | Claim | Evidence | Status | +|----------|-------|----------|--------| +| UNK-001 | Unknowns are first-class, not suppressed | `bench/unknowns/tracking.json` | PENDING | +| UNK-002 | Unknowns have budgets and decay policies | `bench/unknowns/budgets.json` | PENDING | +| UNK-003 | Unknowns escalate based on blast radius and age | `bench/unknowns/escalation.json` | PENDING | + +### VEX Handling + +| Claim ID | Claim | Evidence | Status | +|----------|-------|----------|--------| +| VEX-001 | Native VEX ingestion with formal reasoning | `bench/vex/ingestion.json` | PENDING | +| VEX-002 | Lattice merge resolves conflicting VEX from multiple sources | `bench/vex/conflict-resolution.json` | PENDING | +| VEX-003 | VEX status affects scoring deterministically | `bench/vex/scoring-impact.json` | PENDING | + +### Offline / Air-Gapped + +| Claim ID | Claim | Evidence | Status | +|----------|-------|----------|--------| +| OFFLINE-001 | Full scan + attest + verify with no network | `bench/offline/e2e.json` | PENDING | +| OFFLINE-002 | Knowledge snapshots are cryptographically bound to scans | `bench/offline/snapshots.json` | PENDING | +| OFFLINE-003 | Offline bundles include all required feeds | `bench/offline/bundle-completeness.json` | PENDING | + +### SBOM Fidelity + +| Claim ID | Claim | Evidence | Status | +|----------|-------|----------|--------| +| SBOM-001 | CycloneDX 1.6+ and SPDX 3.0.1 export | `bench/sbom/format-coverage.json` | PENDING | +| SBOM-002 | Binary provenance tracked (Build-ID, PE hash) | `bench/sbom/binary-provenance.json` | PENDING | +| SBOM-003 | Layer attribution for all components | `bench/sbom/layer-attribution.json` | 
PENDING | +| SBOM-004 | SBOM lineage DAG with semantic diffing | `bench/sbom/lineage.json` | PENDING | + +### Performance + +| Claim ID | Claim | Evidence | Status | +|----------|-------|----------|--------| +| PERF-001 | Scan latency < 30s p95 for 100k LOC | `bench/performance/latency.json` | PENDING | +| PERF-002 | 10k scans/day without degradation | `bench/performance/scale.json` | PENDING | +| PERF-003 | Incremental scans < 5s for minor changes | `bench/performance/incremental.json` | PENDING | + +--- + +## Competitor Comparison Matrix + +| Capability | Stella Ops | Trivy | Grype | Snyk | Anchore | +|------------|-----------|-------|-------|------|---------| +| SBOM Fidelity | HIGH | MEDIUM | MEDIUM | MEDIUM | HIGH | +| VEX Handling | NATIVE | PARTIAL | NONE | UNKNOWN | PARTIAL | +| Explainability | HIGH (with falsifiability) | LOW | LOW | MEDIUM | MEDIUM | +| Smart-Diff | SEMANTIC | NONE | NONE | NONE | POLICY | +| Call-Stack Reachability | 3-LAYER | NONE | NONE | NONE | NONE | +| Deterministic Scoring | PROVEN | MODERATE | MODERATE | PROPRIETARY | MODERATE | +| Unknowns State | FIRST-CLASS | NONE | NONE | PARTIAL | PARTIAL | +| Offline/Air-Gapped | FULL | AD-HOC | AD-HOC | UNKNOWN | ENTERPRISE | +| Provenance/Attestations | DSSE/in-toto | SBOM ONLY | SBOM ONLY | UNKNOWN | SBOM+in-toto | + +> **Note**: Comparison based on public documentation and benchmarks. Updated: PENDING + +--- + +## Evidence Links + +All evidence files are stored in `bench/` directory and versioned in Git. + +| Evidence Type | Location | Format | +|---------------|----------|--------| +| Benchmark results | `bench/competitors/` | JSON | +| Determinism tests | `bench/determinism/` | JSON | +| Reachability corpus | `bench/reachability/` | JSON + ground truth | +| Performance baselines | `bench/performance/` | JSON | +| Attestation samples | `bench/attestation/` | DSSE envelopes | + +--- + +## Updating Claims + +Claims are updated via: + +1. 
**Automated**: `benchmark-vs-competitors.yml` workflow runs weekly +2. **Manual**: PRs updating evidence require benchmark re-run +3. **Release**: All claims verified before release + +### Claim Lifecycle + +``` +PENDING → VERIFIED → PUBLISHED + ↓ + DISPUTED → RESOLVED +``` + +- **PENDING**: Claim defined, evidence not yet generated +- **VERIFIED**: Evidence generated and validated +- **PUBLISHED**: Included in marketing materials +- **DISPUTED**: External challenge received +- **RESOLVED**: Dispute addressed with updated evidence + +--- + +## Related Documentation + +- [Benchmark Architecture](modules/benchmark/architecture.md) +- [Sprint 7000.0001.0001 - Competitive Benchmarking](implplan/SPRINT_7000_0001_0001_competitive_benchmarking.md) +- [Testing Strategy](implplan/SPRINT_5100_SUMMARY.md) + +--- + +*Last Updated*: 2025-12-22 +*Next Review*: After Sprint 7000.0001.0001 completion diff --git a/docs/guides/compare-workflow-user-guide.md b/docs/guides/compare-workflow-user-guide.md new file mode 100644 index 000000000..ac123b474 --- /dev/null +++ b/docs/guides/compare-workflow-user-guide.md @@ -0,0 +1,363 @@ +# Compare Workflow User Guide + +**Version:** 1.0 +**Last Updated:** 2025-12-22 + +## Overview + +The Compare workflow in StellaOps enables you to analyze what changed between two container image versions from a security perspective. Instead of reviewing entire vulnerability lists, you focus on **material risk changes** — the delta that matters for security decisions. + +## When to Use Compare + +Use the Compare view when you need to: + +- **Evaluate a new release** before deploying to production +- **Understand risk delta** between current and previous versions +- **Investigate policy gate failures** to see what caused blocking +- **Review security posture changes** after dependency updates +- **Audit compliance** by verifying what changed and why + +## Accessing the Compare View + +### From Release Details + +1. 
Navigate to **Releases** → Select a release +2. Click the **Compare** button in the release header +3. The system automatically selects the recommended baseline + +### From Build/Artifact + +1. Navigate to **Scans** → Select a scan +2. Click **Compare with baseline** +3. Select a baseline from the recommended options + +### Direct URL + +``` +/compare/{currentDigest}/{baselineDigest} +``` + +## Understanding the Interface + +### Baseline Selector + +At the top of the Compare view, you'll see the baseline selector: + +``` +Comparing: [myapp:v2.1.0] → [Select baseline ▼] + ├── Last Green Build (Recommended) + ├── Previous Release (v2.0.0) + ├── Main Branch + └── Custom... +``` + +**Baseline Options:** +- **Last Green Build**: Most recent version that passed all security gates +- **Previous Release**: The version tagged before the current one +- **Main Branch**: Latest scan from the main/master branch +- **Custom**: Manually select any previous scan + +### Baseline Rationale + +Below the selector, you'll see why this baseline was chosen: + +> "Selected last prod release with Allowed verdict under policy P-2024-001." + +This helps auditors understand the comparison context. 
+ +### Delta Summary Strip + +The summary strip shows high-level counts: + +``` +┌────────────┬─────────────┬──────────────┬──────────────┬─────────────┐ +│ +5 added │ -3 removed │ ~2 changed │ Policy: v1.2 │ Feed: 2h ago│ +└────────────┴─────────────┴──────────────┴──────────────┴─────────────┘ +``` + +### Three-Pane Layout + +#### Left Pane: Categories + +Categories organize changes by type: + +| Category | Description | +|----------|-------------| +| **SBOM Changes** | Component additions, removals, version changes | +| **Reachability** | Functions becoming reachable/unreachable | +| **VEX Status** | Vulnerability status changes | +| **Policy** | Policy rule trigger changes | +| **Findings** | New or resolved vulnerabilities | +| **Unknowns** | New gaps in analysis | + +Click a category to filter the items pane. + +#### Middle Pane: Items + +Shows individual changes sorted by risk priority: + +``` +┌─────────────────────────────────────────────────────────────┐ +│ ⊕ CVE-2024-1234 · lodash@4.17.20 · +reachable [HIGH] │ +│ ⊕ CVE-2024-5678 · requests@2.28.0 · +KEV [CRITICAL]│ +│ ⊖ CVE-2024-9999 · urllib3@1.26.0 · -reachable [MEDIUM] │ +└─────────────────────────────────────────────────────────────┘ +``` + +**Change Types:** +- ⊕ Added (green): New in current version +- ⊖ Removed (red): Gone from current version +- ↔ Changed (yellow): Status or value changed + +#### Right Pane: Proof/Evidence + +When you select an item, the proof pane shows: + +1. **Witness Path**: Call path from entrypoint to vulnerable function +2. **VEX Merge**: How multiple VEX sources were combined +3. **Policy Rule**: Which rule triggered and why +4. **Envelope Hashes**: Cryptographic evidence for verification + +### Trust Indicators + +The trust indicators bar shows: + +| Indicator | Description | +|-----------|-------------| +| **Det. 
Hash** | Determinism hash for reproducibility | +| **Policy** | Policy version used for evaluation | +| **Feed** | Vulnerability feed snapshot timestamp | +| **Signature** | DSSE signature verification status | + +**Warning States:** +- ⚠️ **Stale Feed**: Vulnerability data > 24h old +- ⚠️ **Policy Drift**: Policy changed between scans +- 🔴 **Signature Invalid**: Verification failed + +### Actionables Panel + +The "What to do next" section provides prioritized recommendations: + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ What to do next: │ +│ 1. [CRITICAL] Upgrade lodash → 4.17.21 [Apply] │ +│ 2. [HIGH] Add VEX statement for urllib3 [Apply] │ +│ 3. [MEDIUM] Investigate new reachable path [Investigate] │ +│ 4. [LOW] Resolve unknown: missing SBOM [Investigate] │ +└─────────────────────────────────────────────────────────────────┘ +``` + +## Common Workflows + +### 1. Pre-Release Security Review + +**Goal**: Verify a release is safe to deploy + +1. Open the release in the UI +2. Click **Compare** (defaults to last green baseline) +3. Review the delta summary: + - New critical/high vulnerabilities? + - Reachability increases? + - Policy violations? +4. Examine each critical item: + - Check witness paths + - Review VEX status +5. Apply actionables or approve release + +### 2. Investigating a Blocked Release + +**Goal**: Understand why a release was blocked + +1. Open the blocked release +2. Look at the **Verdict** chip: `BLOCKED` +3. Click **Compare** to see what changed +4. Filter to **Policy** category +5. Select blocking rules to see: + - Which policy rule fired + - Evidence that triggered it + - Remediation options + +### 3. Dependency Update Impact + +**Goal**: Assess security impact of dependency updates + +1. Compare current branch to main +2. Filter to **SBOM Changes** +3. Review component version changes: + - Check if upgrades fix vulnerabilities + - Check if new vulnerabilities introduced +4. 
Filter to **Findings** for net impact + +### 4. Auditor Verification + +**Goal**: Verify security claims are accurate + +1. Open the Compare view +2. Check **Trust Indicators**: + - Signature valid? + - Feed current? + - Policy version expected? +3. Click **Copy Replay Command** +4. Run replay locally to verify determinism +5. Download **Evidence Pack** for records + +## Understanding Evidence + +### Witness Paths + +Witness paths show how vulnerable code is reachable: + +``` +main() [entrypoint] + ↓ +parseConfig() + ↓ +loadJson() + ↓ +yaml.load() [sink - CVE-2024-1234] + +Confidence: CONFIRMED +Gates: input_validation, sandboxing +``` + +**Confidence Tiers:** +- **CONFIRMED**: Call path verified through multiple sources +- **LIKELY**: High-confidence static analysis +- **PRESENT**: Function exists but reachability uncertain + +### VEX Merge Explanation + +When multiple VEX sources exist, the merge shows how they combined: + +``` +┌─────────────────────────────────────────────────────────┐ +│ VEX Status: NOT_AFFECTED │ +│ Strategy: priority │ +│ │ +│ Sources: │ +│ ★ [vendor] RedHat RHSA-2024:1234 - not_affected P1 │ +│ [distro] Ubuntu USN-5678-1 - affected P2 │ +│ [internal] Team assessment - not_affected P3 │ +│ │ +│ Resolution: Vendor claim takes priority │ +└─────────────────────────────────────────────────────────┘ +``` + +### Determinism Verification + +To verify a comparison is reproducible: + +1. Copy the replay command from Trust Indicators +2. Run locally: + ```bash + stellaops smart-diff replay \ + --base sha256:abc123... \ + --target sha256:def456... \ + --feed-snapshot sha256:feed789... \ + --policy sha256:policy012... + ``` +3. Compare the determinism hash + +## Role-Based Views + +### Developer View (Default) + +Focus: What do I need to fix? + +- **Default Tab**: Actionables +- **Visible**: Upgrade suggestions, witness paths +- **Hidden**: Detailed attestations, policy internals + +### Security View + +Focus: Are the security claims valid? 
+ +- **Default Tab**: Claims/VEX +- **Visible**: VEX merge, policy reasoning, claim sources +- **Hidden**: Low-level attestation details + +### Audit View + +Focus: Can I verify these claims? + +- **Default Tab**: Attestations +- **Visible**: Signatures, replay commands, evidence pack +- **Hidden**: Actionables (read-only mode) + +## Exporting Reports + +### JSON Export + +Click **Export → JSON** to download: +- Full delta with all items +- Evidence references +- Trust indicators +- Actionables + +### PDF Export + +Click **Export → PDF** for a formatted report including: +- Executive summary +- Delta breakdown by category +- Critical findings +- Remediation recommendations + +### SARIF Export + +Click **Export → SARIF** for CI/CD integration: +- SARIF 2.1.0 format +- Compatible with GitHub Security, Azure DevOps +- Includes rule IDs for automation + +## Keyboard Shortcuts + +| Key | Action | +|-----|--------| +| `Tab` | Move between panes | +| `↑/↓` | Navigate items | +| `Enter` | Select/expand item | +| `Esc` | Close expanded detail | +| `C` | Copy replay command | +| `E` | Export menu | + +## Troubleshooting + +### "No baseline available" + +The system couldn't find a suitable baseline because: +- This is the first scan of this image +- No previous scans passed policy gates + +**Solution**: Use "Custom" to manually select any previous scan. + +### "Stale feed warning" + +The vulnerability feed is more than 24 hours old. + +**Impact**: New CVEs may not be reflected in the comparison. + +**Solution**: +1. Trigger a feed refresh +2. Re-run comparison after refresh + +### "Signature verification failed" + +The DSSE envelope signature couldn't be verified. + +**Causes**: +- Key rotation occurred +- Attestation was modified +- Network issue fetching public key + +**Solution**: +1. Check if keys were recently rotated +2. Try offline verification with local key +3. 
Contact security team if persistent + +## Related Documentation + +- [Smart-Diff CLI Reference](../cli/smart-diff-cli.md) +- [Smart-Diff UI Architecture](../modules/web/smart-diff-ui-architecture.md) +- [SARIF Integration Guide](../ci/sarif-integration.md) +- [Deterministic Replay Specification](../replay/DETERMINISTIC_REPLAY.md) diff --git a/docs/implplan/SPRINT_2000_0003_0001_alpine_connector.md b/docs/implplan/SPRINT_2000_0003_0001_alpine_connector.md new file mode 100644 index 000000000..50ec7c8de --- /dev/null +++ b/docs/implplan/SPRINT_2000_0003_0001_alpine_connector.md @@ -0,0 +1,346 @@ +# Sprint 2000.0003.0001 · Alpine Connector and APK Version Comparator + +## Topic & Scope + +- Implement Alpine Linux advisory connector for Concelier. +- Implement APK version comparator following Alpine's versioning semantics. +- Integrate with existing distro connector framework. +- **Working directory:** `src/Concelier/__Libraries/StellaOps.Concelier.Connector.Distro.Alpine/` + +## Advisory Reference + +- **Source:** `docs/product-advisories/archived/22-Dec-2025 - Getting Distro Backport Logic Right.md` +- **Gap Identified:** Alpine/APK support explicitly recommended but not implemented anywhere in codebase or scheduled sprints. + +## Dependencies & Concurrency + +- **Upstream**: None (uses existing connector framework) +- **Downstream**: Scanner distro detection, BinaryIndex Alpine corpus (future) +- **Safe to parallelize with**: SPRINT_2000_0003_0002 (Version Tests) + +## Documentation Prerequisites + +- `docs/modules/concelier/architecture.md` +- `src/Concelier/__Libraries/StellaOps.Concelier.Connector.Distro.Debian/` (reference implementation) +- Alpine Linux secdb format: https://secdb.alpinelinux.org/ + +--- + +## Tasks + +### T1: Create APK Version Comparator + +**Assignee**: Concelier Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: — + +**Description**: +Implement Alpine APK version comparison semantics. 
APK versions follow a simplified EVR model with `-r` suffix.
+
+**Implementation Path**: `src/Concelier/__Libraries/StellaOps.Concelier.Merge/Comparers/ApkVersion.cs`
+
+**APK Version Format**:
+```
+<pkgver>-r<pkgrel>
+Examples:
+  1.2.3-r0
+  1.2.3_alpha-r1
+  1.2.3_pre2-r0
+```
+
+**APK Version Rules**:
+- Underscore suffixes sort: `_alpha` < `_beta` < `_pre` < `_rc` < (none) < `_p` (patch)
+- Numeric segments compare numerically
+- `-r<pkgrel>` is the package release number (like RPM release)
+- Letters in version compare lexicographically
+
+**Implementation**:
+```csharp
+namespace StellaOps.Concelier.Merge.Comparers;
+
+/// <summary>
+/// Compares Alpine APK package versions following apk-tools versioning rules.
+/// </summary>
+public sealed class ApkVersionComparer : IComparer<ApkVersion>, IComparer<string>
+{
+    public static readonly ApkVersionComparer Instance = new();
+
+    public int Compare(ApkVersion? x, ApkVersion? y)
+    {
+        if (x is null && y is null) return 0;
+        if (x is null) return -1;
+        if (y is null) return 1;
+
+        // Compare version part
+        var versionCmp = CompareVersionString(x.Version, y.Version);
+        if (versionCmp != 0) return versionCmp;
+
+        // Compare pkgrel
+        return x.PkgRel.CompareTo(y.PkgRel);
+    }
+
+    public int Compare(string? x, string? y)
+    {
+        if (!ApkVersion.TryParse(x, out var xVer))
+            return string.Compare(x, y, StringComparison.Ordinal);
+        if (!ApkVersion.TryParse(y, out var yVer))
+            return string.Compare(x, y, StringComparison.Ordinal);
+        return Compare(xVer, yVer);
+    }
+
+    private static int CompareVersionString(string a, string b)
+    {
+        // Implement APK version comparison:
+        // 1. Split into segments (numeric, alpha, suffix)
+        // 2. Compare segment by segment
+        // 3. Handle _alpha, _beta, _pre, _rc, _p suffixes
+        // ...
+    }
+
+    private static readonly Dictionary<string, int> SuffixOrder = new()
+    {
+        ["_alpha"] = -4,
+        ["_beta"] = -3,
+        ["_pre"] = -2,
+        ["_rc"] = -1,
+        [""] = 0,
+        ["_p"] = 1
+    };
+}
+
+public readonly record struct ApkVersion
+{
+    public required string Version { get; init; }
+    public required int PkgRel { get; init; }
+    public string? Suffix { get; init; }
+
+    public static bool TryParse(string? input, out ApkVersion result)
+    {
+        result = default;
+        if (string.IsNullOrWhiteSpace(input)) return false;
+
+        // Parse: <version>-r<pkgrel>
+        var rIndex = input.LastIndexOf("-r", StringComparison.Ordinal);
+        if (rIndex < 0)
+        {
+            result = new ApkVersion { Version = input, PkgRel = 0 };
+            return true;
+        }
+
+        var versionPart = input[..rIndex];
+        var pkgRelPart = input[(rIndex + 2)..];
+
+        if (!int.TryParse(pkgRelPart, out var pkgRel))
+            return false;
+
+        result = new ApkVersion { Version = versionPart, PkgRel = pkgRel };
+        return true;
+    }
+
+    public override string ToString() => $"{Version}-r{PkgRel}";
+}
+```
+
+**Acceptance Criteria**:
+- [ ] APK version parsing implemented
+- [ ] Suffix ordering (_alpha < _beta < _pre < _rc < none < _p)
+- [ ] PkgRel comparison working
+- [ ] Edge cases: versions with letters, multiple underscores
+- [ ] Unit tests with 30+ cases
+
+---
+
+### T2: Create Alpine SecDB Parser
+
+**Assignee**: Concelier Team
+**Story Points**: 3
+**Status**: TODO
+**Dependencies**: T1
+
+**Description**:
+Parse Alpine Linux security database format (JSON).
+ +**Implementation Path**: `src/Concelier/__Libraries/StellaOps.Concelier.Connector.Distro.Alpine/Internal/AlpineSecDbParser.cs` + +**SecDB Format** (from https://secdb.alpinelinux.org/): +```json +{ + "distroversion": "v3.20", + "reponame": "main", + "urlprefix": "https://secdb.alpinelinux.org/", + "packages": [ + { + "pkg": { + "name": "openssl", + "secfixes": { + "3.1.4-r0": ["CVE-2023-5678"], + "3.1.3-r0": ["CVE-2023-1234", "CVE-2023-5555"] + } + } + } + ] +} +``` + +**Acceptance Criteria**: +- [ ] Parse secdb JSON format +- [ ] Extract package name, version, CVEs +- [ ] Map to `AffectedVersionRange` with `RangeKind = "apk"` + +--- + +### T3: Implement AlpineConnector + +**Assignee**: Concelier Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: T1, T2 + +**Description**: +Implement the full Alpine advisory connector following existing distro connector patterns. + +**Implementation Path**: `src/Concelier/__Libraries/StellaOps.Concelier.Connector.Distro.Alpine/AlpineConnector.cs` + +**Project Structure**: +``` +StellaOps.Concelier.Connector.Distro.Alpine/ +├── StellaOps.Concelier.Connector.Distro.Alpine.csproj +├── AlpineConnector.cs +├── Configuration/ +│ └── AlpineOptions.cs +├── Internal/ +│ ├── AlpineSecDbParser.cs +│ └── AlpineMapper.cs +└── Dto/ + └── AlpineSecDbDto.cs +``` + +**Supported Releases**: +- v3.18, v3.19, v3.20 (latest stable) +- edge (rolling) + +**Acceptance Criteria**: +- [ ] Fetch secdb from https://secdb.alpinelinux.org/ +- [ ] Parse all branches (main, community) +- [ ] Map to Advisory model with `type: "apk"` +- [ ] Preserve native APK version in ranges +- [ ] Integration tests with real secdb fixtures + +--- + +### T4: Register Alpine Connector in DI + +**Assignee**: Concelier Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T3 + +**Description**: +Register Alpine connector in Concelier WebService and add configuration. 
+ +**Implementation Path**: `src/Concelier/StellaOps.Concelier.WebService/Extensions/ConnectorServiceExtensions.cs` + +**Configuration** (`etc/concelier.yaml`): +```yaml +concelier: + sources: + - name: alpine + kind: secdb + baseUrl: https://secdb.alpinelinux.org/ + signature: { type: none } + enabled: true + releases: [v3.18, v3.19, v3.20] +``` + +**Acceptance Criteria**: +- [ ] Connector registered via DI +- [ ] Configuration options working +- [ ] Health check includes Alpine source status + +--- + +### T5: Unit and Integration Tests + +**Assignee**: Concelier Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: T1-T4 + +**Test Matrix**: + +| Test Category | Count | Description | +|---------------|-------|-------------| +| APK Version Comparison | 30+ | Suffix ordering, pkgrel, edge cases | +| SecDB Parsing | 10+ | Real fixtures from secdb | +| Connector Integration | 5+ | End-to-end with mock HTTP | +| Golden Files | 3 | Per-release determinism | + +**Test Fixtures** (from real Alpine images): +``` +alpine:3.18 → apk info -v openssl → 3.1.4-r0 +alpine:3.19 → apk info -v curl → 8.5.0-r0 +alpine:3.20 → apk info -v zlib → 1.3.1-r0 +``` + +**Acceptance Criteria**: +- [ ] 30+ APK version comparison tests +- [ ] SecDB parsing tests with real fixtures +- [ ] Integration tests pass +- [ ] Golden file regression tests + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Concelier Team | Create APK Version Comparator | +| 2 | T2 | TODO | T1 | Concelier Team | Create Alpine SecDB Parser | +| 3 | T3 | TODO | T1, T2 | Concelier Team | Implement AlpineConnector | +| 4 | T4 | TODO | T3 | Concelier Team | Register Alpine Connector in DI | +| 5 | T5 | TODO | T1-T4 | Concelier Team | Unit and Integration Tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created 
from advisory gap analysis. Alpine/APK identified as critical missing distro support. | Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| SecDB over OVAL | Decision | Concelier Team | Alpine uses secdb JSON, not OVAL. Simpler to parse. | +| APK suffix ordering | Decision | Concelier Team | Follow apk-tools source for authoritative ordering | +| No GPG verification | Risk | Concelier Team | Alpine secdb is not signed. May add integrity check via HTTPS + known hash. | + +--- + +## Success Criteria + +- [ ] All 5 tasks marked DONE +- [ ] APK version comparator production-ready +- [ ] Alpine connector ingesting advisories +- [ ] 30+ version comparison tests passing +- [ ] Integration tests with real secdb +- [ ] `dotnet build` succeeds +- [ ] `dotnet test` succeeds with 100% pass rate + +--- + +## References + +- Advisory: `docs/product-advisories/archived/22-Dec-2025 - Getting Distro Backport Logic Right.md` +- Alpine SecDB: https://secdb.alpinelinux.org/ +- APK version comparison: https://gitlab.alpinelinux.org/alpine/apk-tools +- Existing Debian connector: `src/Concelier/__Libraries/StellaOps.Concelier.Connector.Distro.Debian/` + +--- + +*Document Version: 1.0.0* +*Created: 2025-12-22* diff --git a/docs/implplan/SPRINT_2000_0003_0002_distro_version_tests.md b/docs/implplan/SPRINT_2000_0003_0002_distro_version_tests.md new file mode 100644 index 000000000..21bdceb6a --- /dev/null +++ b/docs/implplan/SPRINT_2000_0003_0002_distro_version_tests.md @@ -0,0 +1,357 @@ +# Sprint 2000.0003.0002 · Comprehensive Distro Version Comparison Tests + +## Topic & Scope + +- Expand version comparator test coverage to 50-100 cases per distro. +- Create golden files for regression testing. +- Add real-image cross-check tests using container fixtures. 
+- **Working directory:** `src/Concelier/__Tests/StellaOps.Concelier.Merge.Tests/` + +## Advisory Reference + +- **Source:** `docs/product-advisories/archived/22-Dec-2025 - Getting Distro Backport Logic Right.md` +- **Gap Identified:** Current test coverage is 12 tests total (7 NEVRA, 5 EVR). Advisory recommends 50-100 per distro plus golden files and real-image cross-checks. + +## Dependencies & Concurrency + +- **Upstream**: None (tests existing code) +- **Downstream**: None +- **Safe to parallelize with**: SPRINT_2000_0003_0001 (Alpine Connector) + +## Documentation Prerequisites + +- `src/Concelier/__Libraries/StellaOps.Concelier.Merge/Comparers/Nevra.cs` +- `src/Concelier/__Libraries/StellaOps.Concelier.Merge/Comparers/DebianEvr.cs` +- RPM versioning: https://rpm.org/user_doc/versioning.html +- Debian policy: https://www.debian.org/doc/debian-policy/ch-controlfields.html#version + +--- + +## Tasks + +### T1: Expand NEVRA (RPM) Test Corpus + +**Assignee**: Concelier Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: — + +**Description**: +Create comprehensive test corpus for RPM NEVRA version comparison covering all edge cases. 
+ +**Implementation Path**: `src/Concelier/__Tests/StellaOps.Concelier.Merge.Tests/Comparers/NevraComparerTests.cs` + +**Test Categories** (minimum 50 cases): + +| Category | Cases | Examples | +|----------|-------|----------| +| Epoch precedence | 10 | `0:9.9-9` < `1:1.0-1`, missing epoch = 0 | +| Numeric version ordering | 10 | `1.2.3` < `1.2.10`, `1.9` < `1.10` | +| Alpha/numeric segments | 10 | `1.0a` < `1.0b`, `1.0` < `1.0a` | +| Tilde pre-releases | 10 | `1.0~rc1` < `1.0~rc2` < `1.0`, `1.0~` < `1.0` | +| Release qualifiers | 10 | `1.0-1.el8` < `1.0-1.el9`, `1.0-1.el8_5` < `1.0-2.el8` | +| Backport patterns | 10 | `1.0-1.el8` vs `1.0-1.el8_5.1` (security backport) | +| Architecture ordering | 5 | `x86_64` vs `aarch64` vs `noarch` | + +**Test Data Format** (table-driven): +```csharp +public static TheoryData NevraComparisonCases => new() +{ + // Epoch precedence + { "0:1.0-1.el8", "1:0.1-1.el8", -1 }, // Epoch wins + { "1.0-1.el8", "0:1.0-1.el8", 0 }, // Missing epoch = 0 + { "2:1.0-1", "1:9.9-9", 1 }, // Higher epoch wins + + // Numeric ordering + { "1.9-1", "1.10-1", -1 }, // 9 < 10 + { "1.02-1", "1.2-1", 0 }, // Leading zeros ignored + + // Tilde pre-releases + { "1.0~rc1-1", "1.0-1", -1 }, // Tilde sorts before release + { "1.0~alpha-1", "1.0~beta-1", -1 }, // Alpha < beta lexically + { "1.0~~-1", "1.0~-1", -1 }, // Double tilde < single + + // Release qualifiers (RHEL backports) + { "1.0-1.el8", "1.0-1.el8_5", -1 }, // Base < security update + { "1.0-1.el8_5", "1.0-1.el8_5.1", -1 }, // Incremental backport + { "1.0-1.el8", "1.0-1.el9", -1 }, // el8 < el9 + + // ... 
50+ more cases +}; + +[Theory] +[MemberData(nameof(NevraComparisonCases))] +public void Compare_NevraVersions_ReturnsExpectedOrder(string left, string right, int expected) +{ + var result = Math.Sign(NevraComparer.Instance.Compare(left, right)); + Assert.Equal(expected, result); +} +``` + +**Acceptance Criteria**: +- [ ] 50+ test cases for NEVRA comparison +- [ ] All edge cases from advisory covered (epochs, tildes, release qualifiers) +- [ ] Test data documented with comments explaining each case + +--- + +### T2: Expand Debian EVR Test Corpus + +**Assignee**: Concelier Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: — + +**Description**: +Create comprehensive test corpus for Debian EVR version comparison. + +**Implementation Path**: `src/Concelier/__Tests/StellaOps.Concelier.Merge.Tests/Comparers/DebianEvrComparerTests.cs` + +**Test Categories** (minimum 50 cases): + +| Category | Cases | Examples | +|----------|-------|----------| +| Epoch precedence | 10 | `1:1.0-1` > `0:9.9-9`, missing epoch = 0 | +| Upstream version | 10 | `1.2.3` < `1.2.10`, letter/number transitions | +| Tilde pre-releases | 10 | `1.0~rc1` < `1.0`, `2.0~beta` < `2.0~rc` | +| Debian revision | 10 | `1.0-1` < `1.0-2`, `1.0-1ubuntu1` patterns | +| Ubuntu specific | 10 | `1.0-1ubuntu0.1` backports, `1.0-1build1` rebuilds | +| Native packages | 5 | No revision (e.g., `1.0` vs `1.0-1`) | + +**Ubuntu Backport Patterns**: +```csharp +// Ubuntu security backports follow specific patterns +{ "1.0-1", "1.0-1ubuntu0.1", -1 }, // Security backport +{ "1.0-1ubuntu0.1", "1.0-1ubuntu0.2", -1 }, // Incremental backport +{ "1.0-1ubuntu1", "1.0-1ubuntu2", -1 }, // Ubuntu delta update +{ "1.0-1build1", "1.0-1build2", -1 }, // Rebuild +{ "1.0-1+deb12u1", "1.0-1+deb12u2", -1 }, // Debian stable update +``` + +**Acceptance Criteria**: +- [ ] 50+ test cases for Debian EVR comparison +- [ ] Ubuntu-specific patterns covered +- [ ] Debian stable update patterns (+debNuM) +- [ ] Test data documented 
with comments + +--- + +### T3: Create Golden Files for Regression Testing + +**Assignee**: Concelier Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1, T2 + +**Description**: +Create golden files that capture expected comparison results for regression testing. + +**Implementation Path**: `src/Concelier/__Tests/StellaOps.Concelier.Merge.Tests/Fixtures/Golden/` + +**Golden File Format** (NDJSON): +```json +{"left":"0:1.0-1.el8","right":"1:0.1-1.el8","expected":-1,"distro":"rpm","note":"epoch precedence"} +{"left":"1.0~rc1-1","right":"1.0-1","expected":-1,"distro":"rpm","note":"tilde pre-release"} +``` + +**Files**: +``` +Fixtures/Golden/ +├── rpm_version_comparison.golden.ndjson +├── deb_version_comparison.golden.ndjson +├── apk_version_comparison.golden.ndjson (after SPRINT_2000_0003_0001) +└── README.md (format documentation) +``` + +**Test Runner**: +```csharp +[Fact] +public async Task Compare_GoldenFile_AllCasesPass() +{ + var goldenPath = Path.Combine(TestContext.CurrentContext.TestDirectory, + "Fixtures", "Golden", "rpm_version_comparison.golden.ndjson"); + + var lines = await File.ReadAllLinesAsync(goldenPath); + var failures = new List(); + + foreach (var line in lines.Where(l => !string.IsNullOrWhiteSpace(l))) + { + var tc = JsonSerializer.Deserialize(line)!; + var actual = Math.Sign(NevraComparer.Instance.Compare(tc.Left, tc.Right)); + + if (actual != tc.Expected) + failures.Add($"FAIL: {tc.Left} vs {tc.Right}: expected {tc.Expected}, got {actual} ({tc.Note})"); + } + + Assert.Empty(failures); +} +``` + +**Acceptance Criteria**: +- [ ] Golden files created for RPM, Debian, APK +- [ ] 100+ cases per distro in golden files +- [ ] Golden file test runner implemented +- [ ] README documenting format and how to add cases + +--- + +### T4: Real Image Cross-Check Tests + +**Assignee**: Concelier Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: T1, T2 + +**Description**: +Create integration tests that pull real container images, 
extract package versions, and validate comparisons against known advisory data. + +**Implementation Path**: `src/Concelier/__Tests/StellaOps.Concelier.Integration.Tests/DistroVersionCrossCheckTests.cs` + +**Test Images**: +```csharp +public static TheoryData TestImages => new() +{ + { "registry.access.redhat.com/ubi9:latest", new[] { "openssl", "curl", "zlib" } }, + { "debian:12-slim", new[] { "openssl", "libcurl4", "zlib1g" } }, + { "ubuntu:22.04", new[] { "openssl", "curl", "zlib1g" } }, + { "alpine:3.20", new[] { "openssl", "curl", "zlib" } }, +}; +``` + +**Test Flow**: +1. Pull image using Testcontainers +2. Extract package versions (`rpm -q`, `dpkg-query -W`, `apk info -v`) +3. Look up known CVEs for those packages +4. Verify that version comparison correctly identifies fixed vs. vulnerable + +**Implementation**: +```csharp +[Theory] +[MemberData(nameof(TestImages))] +public async Task CrossCheck_RealImage_VersionComparisonCorrect(string image, string[] packages) +{ + await using var container = new ContainerBuilder() + .WithImage(image) + .WithCommand("sleep", "infinity") + .Build(); + + await container.StartAsync(); + + foreach (var pkg in packages) + { + // Extract installed version + var installedVersion = await ExtractPackageVersionAsync(container, pkg); + + // Get known advisory fixed version (from fixtures) + var advisory = GetTestAdvisory(pkg); + if (advisory == null) continue; + + // Compare using appropriate comparator + var comparer = GetComparerForImage(image); + var isFixed = comparer.Compare(installedVersion, advisory.FixedVersion) >= 0; + + // Verify against expected status + Assert.Equal(advisory.ExpectedFixed, isFixed); + } +} +``` + +**Test Fixtures** (known CVE data): +```json +{ + "package": "openssl", + "cve": "CVE-2023-5678", + "distro": "alpine", + "fixedVersion": "3.1.4-r0", + "vulnerableVersions": ["3.1.3-r0", "3.1.2-r0"] +} +``` + +**Acceptance Criteria**: +- [ ] Testcontainers integration working +- [ ] 4 distro images tested (UBI9, 
Debian 12, Ubuntu 22.04, Alpine 3.20) +- [ ] At least 3 packages per image validated +- [ ] CI-friendly (images cached, deterministic) + +--- + +### T5: Document Test Corpus and Contribution Guide + +**Assignee**: Concelier Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1-T4 + +**Description**: +Document the test corpus structure and how to add new test cases. + +**Implementation Path**: `src/Concelier/__Tests/StellaOps.Concelier.Merge.Tests/README.md` + +**Documentation Contents**: +- Test corpus structure +- How to add new version comparison cases +- Golden file format and tooling +- Real image cross-check setup +- Known edge cases and their rationale + +**Acceptance Criteria**: +- [ ] README created with complete documentation +- [ ] Examples for adding new test cases +- [ ] CI badge showing test coverage + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Concelier Team | Expand NEVRA (RPM) Test Corpus | +| 2 | T2 | TODO | — | Concelier Team | Expand Debian EVR Test Corpus | +| 3 | T3 | TODO | T1, T2 | Concelier Team | Create Golden Files for Regression Testing | +| 4 | T4 | TODO | T1, T2 | Concelier Team | Real Image Cross-Check Tests | +| 5 | T5 | TODO | T1-T4 | Concelier Team | Document Test Corpus and Contribution Guide | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from advisory gap analysis. Test coverage identified as insufficient (12 tests vs 300+ recommended). 
| Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Table-driven tests | Decision | Concelier Team | Use xUnit TheoryData for maintainability | +| Golden files in NDJSON | Decision | Concelier Team | Easy to diff, append, and parse | +| Testcontainers for real images | Decision | Concelier Team | CI-friendly, reproducible | +| Image pull latency | Risk | Concelier Team | Cache images in CI; use slim variants | + +--- + +## Success Criteria + +- [ ] All 5 tasks marked DONE +- [ ] 50+ NEVRA comparison tests +- [ ] 50+ Debian EVR comparison tests +- [ ] Golden files with 100+ cases per distro +- [ ] Real image cross-check tests passing +- [ ] Documentation complete +- [ ] `dotnet test` succeeds with 100% pass rate + +--- + +## References + +- Advisory: `docs/product-advisories/archived/22-Dec-2025 - Getting Distro Backport Logic Right.md` +- RPM versioning: https://rpm.org/user_doc/versioning.html +- Debian policy: https://www.debian.org/doc/debian-policy/ch-controlfields.html#version +- Existing tests: `src/Concelier/__Tests/StellaOps.Concelier.Merge.Tests/` + +--- + +*Document Version: 1.0.0* +*Created: 2025-12-22* diff --git a/docs/implplan/SPRINT_3500_0001_0001_deeper_moat_master.md b/docs/implplan/SPRINT_3500_0001_0001_deeper_moat_master.md index d58fcce5e..f2931353f 100644 --- a/docs/implplan/SPRINT_3500_0001_0001_deeper_moat_master.md +++ b/docs/implplan/SPRINT_3500_0001_0001_deeper_moat_master.md @@ -28,7 +28,7 @@ These features address gaps no competitor has filled per `docs/market/competitiv ## Source Documents -**Primary Advisory**: `docs/product-advisories/unprocessed/16-Dec-2025 - Building a Deeper Moat Beyond Reachability.md` +**Primary Advisory**: `docs/product-advisories/archived/16-Dec-2025 - Building a Deeper Moat Beyond Reachability.md` **Related Documentation**: - `docs/07_HIGH_LEVEL_ARCHITECTURE.md` — System topology, trust boundaries diff --git a/docs/implplan/SPRINT_3500_SUMMARY.md 
b/docs/implplan/SPRINT_3500_SUMMARY.md index c46eefea7..f4fff45b2 100644 --- a/docs/implplan/SPRINT_3500_SUMMARY.md +++ b/docs/implplan/SPRINT_3500_SUMMARY.md @@ -258,7 +258,7 @@ graph TD - [Scanner AGENTS Guide](../../src/Scanner/AGENTS_SCORE_PROOFS.md) ⭐ FOR AGENTS **Source Advisory**: -- [16-Dec-2025 - Building a Deeper Moat Beyond Reachability](../product-advisories/unprocessed/16-Dec-2025 - Building a Deeper Moat Beyond Reachability.md) +- [16-Dec-2025 - Building a Deeper Moat Beyond Reachability](../product-advisories/archived/16-Dec-2025 - Building a Deeper Moat Beyond Reachability.md) --- diff --git a/docs/implplan/SPRINT_3600_0004_0001_nodejs_babel_integration.md b/docs/implplan/SPRINT_3600_0004_0001_nodejs_babel_integration.md new file mode 100644 index 000000000..61935d5be --- /dev/null +++ b/docs/implplan/SPRINT_3600_0004_0001_nodejs_babel_integration.md @@ -0,0 +1,293 @@ +# SPRINT_3600_0004_0001 - Node.js Babel Integration + +**Status:** TODO +**Priority:** P1 - HIGH +**Module:** Scanner +**Working Directory:** `src/Scanner/__Libraries/StellaOps.Scanner.CallGraph/Extraction/Node/` +**Estimated Effort:** Medium +**Dependencies:** SPRINT_3600_0003_0001 (Drift Detection Engine) - DONE + +--- + +## Topic & Scope + +Implement full @babel/traverse integration for Node.js call graph extraction. The current `NodeCallGraphExtractor` is a skeleton/trace-based implementation. This sprint delivers production-grade AST analysis for JavaScript/TypeScript projects. 
+ +--- + +## Documentation Prerequisites + +- `docs/product-advisories/17-Dec-2025 - Reachability Drift Detection.md` (archived) +- `docs/modules/scanner/reachability-drift.md` +- `src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node/AGENTS.md` +- `bench/reachability-benchmark/README.md` + +--- + +## Wave Coordination + +Single wave with parallel tracks: +- Track A: Babel AST infrastructure +- Track B: Framework-specific entrypoint detection +- Track C: Sink detection patterns +- Track D: Edge extraction and call graph building + +--- + +## Interlocks + +- Must produce stable node IDs compatible with existing `CallGraphSnapshot` model +- Must align with `bench/reachability-benchmark/` Node.js test cases +- Must integrate with existing `ICallGraphExtractor` interface + +--- + +## Action Tracker + +| Date (UTC) | Action | Owner | Notes | +|---|---|---|---| +| 2025-12-22 | Created sprint from gap analysis | Agent | Initial | + +--- + +## 1. OBJECTIVE + +Deliver production-grade Node.js call graph extraction: +1. **Babel AST Parsing** - Full @babel/traverse integration +2. **Framework Entrypoints** - Express, Fastify, Koa, NestJS, Hapi detection +3. **Sink Detection** - JavaScript-specific dangerous APIs +4. **Edge Extraction** - Function calls, method invocations, dynamic imports + +--- + +## 2. 
TECHNICAL DESIGN + +### 2.1 Architecture + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ NodeCallGraphExtractor │ +├─────────────────────────────────────────────────────────────────┤ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────────────────┐ │ +│ │ BabelParser │ │ AstWalker │ │ CallGraphBuilder │ │ +│ │ (external) │ │ (traverse) │ │ (nodes, edges, sinks) │ │ +│ └─────────────┘ └─────────────┘ └─────────────────────────┘ │ +│ │ │ │ │ +│ ▼ ▼ ▼ │ +│ ┌─────────────────────────────────────────────────────────────┐│ +│ │ Framework Detectors ││ +│ │ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌───────┐ ││ +│ │ │ Express │ │ Fastify │ │ Koa │ │ NestJS │ │ Hapi │ ││ +│ │ └─────────┘ └─────────┘ └─────────┘ └─────────┘ └───────┘ ││ +│ └─────────────────────────────────────────────────────────────┘│ +│ │ │ +│ ▼ │ +│ ┌─────────────────────────────────────────────────────────────┐│ +│ │ Sink Matchers ││ +│ │ child_process.exec | fs.writeFile | eval | Function() ││ +│ │ http.request | crypto.createCipher | sql.query ││ +│ └─────────────────────────────────────────────────────────────┘│ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 2.2 External Tool Integration + +The extractor invokes an external Node.js tool for AST parsing: + +```bash +# Tool location: tools/stella-callgraph-node/ +npx stella-callgraph-node \ + --root /path/to/project \ + --output json \ + --include-tests false \ + --max-depth 100 +``` + +Output format (JSON): +```json +{ + "nodes": [ + { + "id": "src/controllers/user.js:UserController.getUser", + "symbol": "UserController.getUser", + "file": "src/controllers/user.js", + "line": 42, + "visibility": "public", + "isEntrypoint": true, + "entrypointType": "express_handler", + "isSink": false + } + ], + "edges": [ + { + "source": "src/controllers/user.js:UserController.getUser", + "target": "src/services/db.js:query", + "kind": "direct", + "callSite": "src/controllers/user.js:45" + } + ], + 
"entrypoints": ["src/controllers/user.js:UserController.getUser"], + "sinks": ["src/services/db.js:query"] +} +``` + +### 2.3 Framework Entrypoint Detection + +| Framework | Detection Pattern | Entrypoint Type | +|-----------|------------------|-----------------| +| Express | `app.get()`, `app.post()`, `router.use()` | `express_handler` | +| Fastify | `fastify.get()`, `fastify.route()` | `fastify_handler` | +| Koa | `router.get()`, middleware functions | `koa_handler` | +| NestJS | `@Get()`, `@Post()`, `@Controller()` | `nestjs_controller` | +| Hapi | `server.route()` | `hapi_handler` | +| Generic | `module.exports`, `export default` | `module_export` | + +### 2.4 Sink Detection Patterns + +```javascript +// Command Execution +child_process.exec() +child_process.spawn() +child_process.execSync() +require('child_process').exec() + +// SQL Injection +connection.query() // without parameterization +knex.raw() +sequelize.query() + +// File Operations +fs.writeFile() +fs.writeFileSync() +fs.appendFile() + +// Deserialization +JSON.parse() // with untrusted input +eval() +Function() +vm.runInContext() + +// SSRF +http.request() +https.request() +axios() // with user-controlled URL +fetch() + +// Crypto (weak) +crypto.createCipher() // deprecated +crypto.createDecipher() +``` + +### 2.5 Node ID Generation + +Stable, deterministic node IDs: + +```javascript +// Pattern: {relative_file}:{export_name}.{function_name} +// Examples: +"src/controllers/user.js:UserController.getUser" +"src/services/db.js:module.query" +"src/utils/crypto.js:default.encrypt" +``` + +--- + +## Delivery Tracker + +| # | Task ID | Status | Description | Notes | +|---|---------|--------|-------------|-------| +| 1 | NODE-001 | TODO | Create stella-callgraph-node tool scaffold | `tools/stella-callgraph-node/` | +| 2 | NODE-002 | TODO | Implement Babel parser integration | @babel/parser, @babel/traverse | +| 3 | NODE-003 | TODO | Implement AST walker for function declarations | FunctionDeclaration, 
ArrowFunction | +| 4 | NODE-004 | TODO | Implement call expression extraction | CallExpression, MemberExpression | +| 5 | NODE-005 | TODO | Implement Express entrypoint detection | app.get/post/put/delete patterns | +| 6 | NODE-006 | TODO | Implement Fastify entrypoint detection | fastify.route patterns | +| 7 | NODE-007 | TODO | Implement Koa entrypoint detection | router.get patterns | +| 8 | NODE-008 | TODO | Implement NestJS entrypoint detection | Decorator-based (@Get, @Post) | +| 9 | NODE-009 | TODO | Implement Hapi entrypoint detection | server.route patterns | +| 10 | NODE-010 | TODO | Implement sink detection (child_process) | exec, spawn, execSync | +| 11 | NODE-011 | TODO | Implement sink detection (SQL) | query, raw, knex | +| 12 | NODE-012 | TODO | Implement sink detection (fs) | writeFile, appendFile | +| 13 | NODE-013 | TODO | Implement sink detection (eval/Function) | Dynamic code execution | +| 14 | NODE-014 | TODO | Implement sink detection (http/fetch) | SSRF patterns | +| 15 | NODE-015 | TODO | Update NodeCallGraphExtractor to invoke tool | Process execution + JSON parsing | +| 16 | NODE-016 | TODO | Implement BabelResultParser | JSON to CallGraphSnapshot | +| 17 | NODE-017 | TODO | Unit tests for AST parsing | Various JS patterns | +| 18 | NODE-018 | TODO | Unit tests for entrypoint detection | All frameworks | +| 19 | NODE-019 | TODO | Unit tests for sink detection | All categories | +| 20 | NODE-020 | TODO | Integration tests with benchmark cases | `bench/reachability-benchmark/node/` | +| 21 | NODE-021 | TODO | Golden fixtures for determinism | Stable node IDs, edge ordering | +| 22 | NODE-022 | TODO | TypeScript support | .ts/.tsx file handling | +| 23 | NODE-023 | TODO | ESM/CommonJS module resolution | import/require handling | +| 24 | NODE-024 | TODO | Dynamic import detection | import() expressions | + +--- + +## 3. 
ACCEPTANCE CRITERIA + +### 3.1 AST Parsing +- [ ] Parses JavaScript files (.js, .mjs, .cjs) +- [ ] Parses TypeScript files (.ts, .tsx) +- [ ] Handles ESM imports/exports +- [ ] Handles CommonJS require/module.exports +- [ ] Handles dynamic imports + +### 3.2 Entrypoint Detection +- [ ] Detects Express route handlers +- [ ] Detects Fastify route handlers +- [ ] Detects Koa middleware/routes +- [ ] Detects NestJS controllers +- [ ] Detects Hapi routes +- [ ] Classifies entrypoint types correctly + +### 3.3 Sink Detection +- [ ] Detects command execution sinks +- [ ] Detects SQL injection sinks +- [ ] Detects file write sinks +- [ ] Detects eval/Function sinks +- [ ] Detects SSRF sinks +- [ ] Classifies sink categories correctly + +### 3.4 Call Graph Quality +- [ ] Produces stable, deterministic node IDs +- [ ] Correctly extracts call edges +- [ ] Handles method chaining +- [ ] Handles callback patterns +- [ ] Handles Promise chains + +### 3.5 Performance +- [ ] Parses 100K LOC project in < 60s +- [ ] Memory usage < 2GB for large projects + +--- + +## Decisions & Risks + +| ID | Decision | Rationale | +|----|----------|-----------| +| NODE-DEC-001 | External Node.js tool | Babel runs in Node.js; separate process avoids .NET interop complexity | +| NODE-DEC-002 | JSON output format | Simple, debuggable, compatible with existing parser infrastructure | +| NODE-DEC-003 | Framework-specific detectors | Different frameworks have different routing patterns | + +| ID | Risk | Mitigation | +|----|------|------------| +| NODE-RISK-001 | Dynamic dispatch hard to trace | Conservative analysis; mark as "dynamic" call kind | +| NODE-RISK-002 | Callback hell complexity | Limit depth; focus on direct calls first | +| NODE-RISK-003 | Monorepo/workspace support | Start with single-package; extend later | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|---|---|---| +| 2025-12-22 | Created sprint from gap analysis | Agent | + +--- + +## References + +- **Master Sprint**: 
`SPRINT_3600_0001_0001_reachability_drift_master.md` +- **Advisory**: `docs/product-advisories/archived/17-Dec-2025 - Reachability Drift Detection.md` +- **Babel Docs**: https://babeljs.io/docs/babel-traverse +- **Existing Extractor**: `src/Scanner/__Libraries/StellaOps.Scanner.CallGraph/Extraction/Node/` diff --git a/docs/implplan/SPRINT_3600_0005_0001_policy_ci_gate_integration.md b/docs/implplan/SPRINT_3600_0005_0001_policy_ci_gate_integration.md new file mode 100644 index 000000000..e61a9307c --- /dev/null +++ b/docs/implplan/SPRINT_3600_0005_0001_policy_ci_gate_integration.md @@ -0,0 +1,325 @@ +# SPRINT_3600_0005_0001 - Policy CI Gate Integration + +**Status:** TODO +**Priority:** P1 - HIGH +**Module:** Policy, Scanner, CLI +**Working Directory:** `src/Policy/StellaOps.Policy.Engine/Gates/` +**Estimated Effort:** Small +**Dependencies:** SPRINT_3600_0003_0001 (Drift Detection Engine) - DONE + +--- + +## Topic & Scope + +Integrate reachability drift detection with the Policy module's CI gate system. This enables automated PR/commit blocking based on new reachable paths to vulnerable sinks. Also implements exit code semantics for CLI integration. + +--- + +## Documentation Prerequisites + +- `docs/product-advisories/17-Dec-2025 - Reachability Drift Detection.md` (§6) +- `docs/modules/policy/architecture.md` +- `src/Policy/AGENTS.md` +- `src/Cli/AGENTS.md` + +--- + +## Wave Coordination + +Single wave: +1. Policy gate conditions for drift +2. Exit code implementation in CLI +3. VEX candidate auto-emission on drift + +--- + +## Interlocks + +- Must integrate with existing `PolicyGateEvaluator` +- Must integrate with existing `VexCandidateEmitter` in Scanner +- CLI exit codes must align with shell conventions (0=success, non-zero=action needed) + +--- + +## Action Tracker + +| Date (UTC) | Action | Owner | Notes | +|---|---|---|---| +| 2025-12-22 | Created sprint from gap analysis | Agent | Initial | + +--- + +## 1. 
OBJECTIVE + +Enable CI/CD pipelines to gate on reachability drift: +1. **Policy Gate Conditions** - Block PRs when new reachable paths to affected sinks detected +2. **Exit Codes** - Semantic exit codes for CLI tooling +3. **VEX Auto-Emission** - Generate VEX candidates when reachability changes + +--- + +## 2. TECHNICAL DESIGN + +### 2.1 Policy Gate Conditions + +Extend `PolicyGateEvaluator` with drift-aware conditions: + +```yaml +# Policy configuration (etc/policy.yaml) +smart_diff: + gates: + # Block: New reachable paths to affected sinks + - id: drift_block_affected + condition: "delta_reachable > 0 AND vex_status IN ['affected', 'under_investigation']" + action: block + message: "New reachable paths to vulnerable sinks detected" + severity: critical + + # Warn: New paths to any sink (informational) + - id: drift_warn_new_paths + condition: "delta_reachable > 0" + action: warn + message: "New reachable paths detected - review recommended" + severity: medium + + # Block: KEV now reachable + - id: drift_block_kev + condition: "delta_reachable > 0 AND is_kev = true" + action: block + message: "Known Exploited Vulnerability now reachable" + severity: critical + + # Auto-allow: VEX confirms not_affected + - id: drift_allow_mitigated + condition: "vex_status = 'not_affected' AND vex_justification IN ['component_not_present', 'vulnerable_code_not_in_execute_path']" + action: allow + auto_mitigate: true +``` + +### 2.2 Gate Evaluation Context + +```csharp +// File: src/Policy/StellaOps.Policy.Engine/Gates/DriftGateContext.cs + +namespace StellaOps.Policy.Engine.Gates; + +/// +/// Context for drift-aware gate evaluation. +/// +public sealed record DriftGateContext +{ + /// + /// Number of sinks that became reachable in this scan. + /// + public required int DeltaReachable { get; init; } + + /// + /// Number of sinks that became unreachable (mitigated). 
+ /// + public required int DeltaUnreachable { get; init; } + + /// + /// Whether any newly reachable sink is linked to a KEV. + /// + public required bool HasKevReachable { get; init; } + + /// + /// VEX status of newly reachable sinks. + /// + public required IReadOnlyList<string> NewlyReachableVexStatuses { get; init; } + + /// + /// Highest CVSS score among newly reachable sinks. + /// + public double? MaxCvss { get; init; } + + /// + /// Highest EPSS score among newly reachable sinks. + /// + public double? MaxEpss { get; init; } +} +``` + +### 2.3 Exit Code Semantics + +| Code | Meaning | Description | +|------|---------|-------------| +| 0 | Success, no drift | No material reachability changes detected | +| 1 | Success, info drift | New paths detected but not to affected sinks | +| 2 | Hardening regression | Previously mitigated paths now reachable again | +| 3 | KEV reachable | Known Exploited Vulnerability now reachable | +| 10 | Input error | Invalid scan ID, missing parameters | +| 11 | Analysis error | Call graph extraction failed | +| 12 | Storage error | Database/cache unavailable | +| 13 | Policy error | Gate evaluation failed | + +```csharp +// File: src/Cli/StellaOps.Cli/Commands/DriftExitCodes.cs + +namespace StellaOps.Cli.Commands; + +/// +/// Exit codes for drift analysis commands. 
+/// +public static class DriftExitCodes +{ + public const int Success = 0; + public const int InfoDrift = 1; + public const int HardeningRegression = 2; + public const int KevReachable = 3; + + public const int InputError = 10; + public const int AnalysisError = 11; + public const int StorageError = 12; + public const int PolicyError = 13; + + public static int FromDriftResult(ReachabilityDriftResult result, DriftGateContext context) + { + if (context.HasKevReachable) + return KevReachable; + + if (context.DeltaReachable > 0 && context.NewlyReachableVexStatuses.Contains("affected")) + return HardeningRegression; + + if (context.DeltaReachable > 0) + return InfoDrift; + + return Success; + } +} +``` + +### 2.4 VEX Candidate Auto-Emission + +When drift detection identifies that a sink became unreachable, automatically emit a VEX candidate: + +```csharp +// Integration point in ReachabilityDriftDetector + +public async Task<ReachabilityDriftResult> DetectWithVexEmissionAsync( + CallGraphSnapshot baseGraph, + CallGraphSnapshot headGraph, + IReadOnlyList<CodeChange> codeChanges, + CancellationToken cancellationToken = default) +{ + var result = Detect(baseGraph, headGraph, codeChanges); + + // Emit VEX candidates for newly unreachable sinks + foreach (var sink in result.NewlyUnreachable) + { + await _vexCandidateEmitter.EmitAsync(new VexCandidate + { + VulnerabilityId = sink.AssociatedVulns.FirstOrDefault()?.CveId, + ProductKey = sink.Path.Entrypoint.Package, + Status = "not_affected", + Justification = "vulnerable_code_not_in_execute_path", + Trigger = VexCandidateTrigger.SinkUnreachable, + Evidence = new VexEvidence + { + DriftResultId = result.Id, + SinkNodeId = sink.SinkNodeId, + Cause = sink.Cause.Description + } + }, cancellationToken); + } + + return result; +} +``` + +### 2.5 CLI Integration + +```bash +# Drift analysis with gate evaluation +stella scan drift \ + --base-scan abc123 \ + --head-scan def456 \ + --policy etc/policy.yaml \ + --output sarif + +# Exit code reflects gate decision +echo $? 
# 0, 1, 2, 3, or 10+ +``` + +--- + +## Delivery Tracker + +| # | Task ID | Status | Description | Notes | +|---|---------|--------|-------------|-------| +| 1 | GATE-001 | TODO | Create DriftGateContext model | Policy module | +| 2 | GATE-002 | TODO | Extend PolicyGateEvaluator with drift conditions | `delta_reachable`, `is_kev` | +| 3 | GATE-003 | TODO | Add drift gate configuration schema | YAML validation | +| 4 | GATE-004 | TODO | Create DriftExitCodes class | CLI module | +| 5 | GATE-005 | TODO | Implement exit code mapping logic | FromDriftResult | +| 6 | GATE-006 | TODO | Wire exit codes to `stella scan drift` command | CLI | +| 7 | GATE-007 | TODO | Integrate VEX candidate emission in drift detector | Scanner | +| 8 | GATE-008 | TODO | Add VexCandidateTrigger.SinkUnreachable | Extend enum | +| 9 | GATE-009 | TODO | Unit tests for drift gate evaluation | All conditions | +| 10 | GATE-010 | TODO | Unit tests for exit code mapping | All scenarios | +| 11 | GATE-011 | TODO | Integration tests for CLI exit codes | End-to-end | +| 12 | GATE-012 | TODO | Integration tests for VEX auto-emission | Drift -> VEX flow | +| 13 | GATE-013 | TODO | Update policy configuration schema | Add smart_diff.gates | +| 14 | GATE-014 | TODO | Document gate configuration options | In operations guide | + +--- + +## 3. 
ACCEPTANCE CRITERIA + +### 3.1 Policy Gates +- [ ] Evaluates `delta_reachable > 0` condition correctly +- [ ] Evaluates `is_kev = true` condition correctly +- [ ] Evaluates combined conditions (AND/OR) +- [ ] Returns correct gate action (block/warn/allow) +- [ ] Supports auto_mitigate flag + +### 3.2 Exit Codes +- [ ] Returns 0 for no drift +- [ ] Returns 1 for info-level drift +- [ ] Returns 2 for hardening regression +- [ ] Returns 3 for KEV reachable +- [ ] Returns 10+ for errors + +### 3.3 VEX Auto-Emission +- [ ] Emits VEX candidate when sink becomes unreachable +- [ ] Sets correct justification (`vulnerable_code_not_in_execute_path`) +- [ ] Links to drift result as evidence +- [ ] Does not emit for already-unreachable sinks + +### 3.4 CLI Integration +- [ ] `stella scan drift` command respects gates +- [ ] Exit code reflects gate decision +- [ ] SARIF output includes gate results + +--- + +## Decisions & Risks + +| ID | Decision | Rationale | +|----|----------|-----------| +| GATE-DEC-001 | Exit code 3 for KEV | KEV is highest severity, distinct from hardening regression | +| GATE-DEC-002 | Auto-emit VEX only for unreachable | Reachable sinks need human review | +| GATE-DEC-003 | Policy YAML for gate config | Consistent with existing policy configuration | + +| ID | Risk | Mitigation | +|----|------|------------| +| GATE-RISK-001 | False positive blocks | Warn-first approach; require explicit block config | +| GATE-RISK-002 | VEX spam on large diffs | Rate limit emission; batch by CVE | +| GATE-RISK-003 | Exit code conflicts | Document clearly; 10+ reserved for errors | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|---|---|---| +| 2025-12-22 | Created sprint from gap analysis | Agent | + +--- + +## References + +- **Drift Sprint**: `SPRINT_3600_0003_0001_drift_detection_engine.md` +- **Policy Module**: `src/Policy/StellaOps.Policy.Engine/` +- **CLI Module**: `src/Cli/StellaOps.Cli/` +- **VEX Emitter**: 
`src/Scanner/__Libraries/StellaOps.Scanner.SmartDiff/Detection/VexCandidateEmitter.cs` diff --git a/docs/implplan/SPRINT_3600_0006_0001_documentation_finalization.md b/docs/implplan/SPRINT_3600_0006_0001_documentation_finalization.md new file mode 100644 index 000000000..ba44bd747 --- /dev/null +++ b/docs/implplan/SPRINT_3600_0006_0001_documentation_finalization.md @@ -0,0 +1,224 @@ +# SPRINT_3600_0006_0001 - Documentation Finalization + +**Status:** TODO +**Priority:** P0 - CRITICAL +**Module:** Documentation +**Working Directory:** `docs/` +**Estimated Effort:** Medium +**Dependencies:** SPRINT_3600_0003_0001 (Drift Detection Engine) - DONE + +--- + +## Topic & Scope + +Finalize documentation for the Reachability Drift Detection feature set. This sprint creates architecture documentation, API reference, and operations guide. + +--- + +## Documentation Prerequisites + +- `docs/product-advisories/17-Dec-2025 - Reachability Drift Detection.md` (to be archived) +- `docs/implplan/SPRINT_3600_0002_0001_call_graph_infrastructure.md` +- `docs/implplan/SPRINT_3600_0003_0001_drift_detection_engine.md` +- Source code implementations in `src/Scanner/__Libraries/` + +--- + +## Wave Coordination + +Single wave: +1. Architecture documentation +2. API reference +3. Operations guide +4. Advisory archival + +--- + +## Interlocks + +- Must align with implemented code +- Must follow existing documentation patterns +- Must be validated against actual API responses + +--- + +## Action Tracker + +| Date (UTC) | Action | Owner | Notes | +|---|---|---|---| +| 2025-12-22 | Created sprint from gap analysis | Agent | Initial | + +--- + +## 1. OBJECTIVE + +Deliver comprehensive documentation: +1. **Architecture Doc** - Technical design, data flow, component interactions +2. **API Reference** - Endpoint specifications, request/response models +3. **Operations Guide** - Deployment, configuration, monitoring +4. **Advisory Archival** - Move processed advisory to archived folder + +--- + +## 2. 
DELIVERABLES + +### 2.1 Architecture Document + +**Location:** `docs/modules/scanner/reachability-drift.md` + +**Outline:** +1. Overview & Purpose +2. Key Concepts + - Call Graph + - Reachability Analysis + - Drift Detection + - Cause Attribution +3. Data Flow Diagram +4. Component Architecture + - Call Graph Extractors + - Reachability Analyzer + - Drift Detector + - Path Compressor + - Cause Explainer +5. Language Support Matrix +6. Storage Schema + - PostgreSQL tables + - Valkey caching +7. API Endpoints (summary) +8. Integration Points + - Policy module + - VEX emission + - Attestation +9. Performance Characteristics +10. References + +### 2.2 API Reference + +**Location:** `docs/api/scanner-drift-api.md` + +**Outline:** +1. Overview +2. Authentication & Authorization +3. Endpoints + - `GET /scans/{scanId}/drift` + - `GET /drift/{driftId}/sinks` + - `POST /scans/{scanId}/compute-reachability` + - `GET /scans/{scanId}/reachability/components` + - `GET /scans/{scanId}/reachability/findings` + - `GET /scans/{scanId}/reachability/explain` +4. Request/Response Models +5. Error Codes +6. Rate Limiting +7. Examples (curl, SDK) + +### 2.3 Operations Guide + +**Location:** `docs/operations/reachability-drift-guide.md` + +**Outline:** +1. Prerequisites +2. Configuration + - Scanner service + - Valkey cache + - Policy gates +3. Deployment Modes + - Standalone + - Kubernetes + - Air-gapped +4. Monitoring & Metrics + - Key metrics + - Grafana dashboards + - Alert thresholds +5. Troubleshooting +6. Performance Tuning +7. Backup & Recovery +8. 
Security Considerations + +--- + +## Delivery Tracker + +| # | Task ID | Status | Description | Notes | +|---|---------|--------|-------------|-------| +| 1 | DOC-001 | TODO | Create architecture doc structure | `docs/modules/scanner/reachability-drift.md` | +| 2 | DOC-002 | TODO | Write Overview & Purpose section | Architecture doc | +| 3 | DOC-003 | TODO | Write Key Concepts section | Architecture doc | +| 4 | DOC-004 | TODO | Create data flow diagram (Mermaid) | Architecture doc | +| 5 | DOC-005 | TODO | Write Component Architecture section | Architecture doc | +| 6 | DOC-006 | TODO | Write Language Support Matrix | Architecture doc | +| 7 | DOC-007 | TODO | Write Storage Schema section | Architecture doc | +| 8 | DOC-008 | TODO | Write Integration Points section | Architecture doc | +| 9 | DOC-009 | TODO | Create API reference structure | `docs/api/scanner-drift-api.md` | +| 10 | DOC-010 | TODO | Document GET /scans/{scanId}/drift | API reference | +| 11 | DOC-011 | TODO | Document GET /drift/{driftId}/sinks | API reference | +| 12 | DOC-012 | TODO | Document POST /scans/{scanId}/compute-reachability | API reference | +| 13 | DOC-013 | TODO | Document request/response models | API reference | +| 14 | DOC-014 | TODO | Add curl/SDK examples | API reference | +| 15 | DOC-015 | TODO | Create operations guide structure | `docs/operations/reachability-drift-guide.md` | +| 16 | DOC-016 | TODO | Write Configuration section | Operations guide | +| 17 | DOC-017 | TODO | Write Deployment Modes section | Operations guide | +| 18 | DOC-018 | TODO | Write Monitoring & Metrics section | Operations guide | +| 19 | DOC-019 | TODO | Write Troubleshooting section | Operations guide | +| 20 | DOC-020 | TODO | Update src/Scanner/AGENTS.md | Add final contract refs | +| 21 | DOC-021 | TODO | Archive advisory | Move to `docs/product-advisories/archived/` | +| 22 | DOC-022 | TODO | Update docs/README.md | Add links to new docs | +| 23 | DOC-023 | TODO | Peer review | Technical 
accuracy check | + +--- + +## 3. ACCEPTANCE CRITERIA + +### 3.1 Architecture Doc +- [ ] Covers all implemented components +- [ ] Data flow diagram is accurate +- [ ] Language support matrix is complete +- [ ] Storage schema matches migrations +- [ ] Integration points are documented + +### 3.2 API Reference +- [ ] All endpoints documented +- [ ] Request/response models are accurate +- [ ] Error codes are complete +- [ ] Examples are tested and working + +### 3.3 Operations Guide +- [ ] Configuration options are complete +- [ ] Deployment modes are documented +- [ ] Metrics are defined +- [ ] Troubleshooting covers common issues + +### 3.4 Archival +- [ ] Advisory moved to archived folder +- [ ] Links updated in sprint files +- [ ] No broken references + +--- + +## Decisions & Risks + +| ID | Decision | Rationale | +|----|----------|-----------| +| DOC-DEC-001 | Mermaid for diagrams | Renders in GitLab/GitHub, text-based | +| DOC-DEC-002 | Separate ops guide | Different audience than architecture | +| DOC-DEC-003 | Archive after docs complete | Ensure traceability | + +| ID | Risk | Mitigation | +|----|------|------------| +| DOC-RISK-001 | Docs become stale | Link to source code; version docs | +| DOC-RISK-002 | Missing edge cases | Review with QA team | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|---|---|---| +| 2025-12-22 | Created sprint from gap analysis | Agent | + +--- + +## References + +- **Call Graph Sprint**: `SPRINT_3600_0002_0001_call_graph_infrastructure.md` +- **Drift Sprint**: `SPRINT_3600_0003_0001_drift_detection_engine.md` +- **Advisory**: `docs/product-advisories/17-Dec-2025 - Reachability Drift Detection.md` diff --git a/docs/implplan/SPRINT_3800_0001_0001_binary_call_edge_enhancement.md b/docs/implplan/SPRINT_3800_0001_0001_binary_call_edge_enhancement.md new file mode 100644 index 000000000..2fd4d9e00 --- /dev/null +++ b/docs/implplan/SPRINT_3800_0001_0001_binary_call_edge_enhancement.md @@ -0,0 +1,218 @@ +# Sprint 
3800.0001.0001 · Binary Call-Edge Enhancement + +## Topic & Scope +- Enhance binary call graph extraction with disassembly-based call edge recovery. +- Implement indirect call resolution via PLT/IAT analysis. +- Add dynamic loading detection heuristics for `dlopen`/`LoadLibrary` patterns. +- **Working directory:** `src/Scanner/__Libraries/StellaOps.Scanner.CallGraph/Extraction/Binary/` + +## Dependencies & Concurrency +- **Upstream**: None (enhances existing `BinaryCallGraphExtractor`) +- **Downstream**: Sprint 3810 (CVE→Symbol Mapping) benefits from richer call graphs +- **Safe to parallelize with**: Sprint 3830 (VEX Integration), Sprint 3850 (CLI) + +## Documentation Prerequisites +- `docs/product-advisories/archived/2025-12-22-binary-reachability/20-Dec-2025 - Layered binary + call‑stack reachability.md` +- `docs/reachability/binary-reachability-schema.md` +- `src/Scanner/AGENTS.md` + +--- + +## Tasks + +### T1: Integrate iced-x86 for x86/x64 Disassembly + +**Assignee**: Scanner Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Add iced-x86 NuGet package for disassembling x86/x64 code sections to extract direct call instructions. + +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.CallGraph/Extraction/Binary/Disassembly/` + +**Acceptance Criteria**: +- [ ] Add `iced` NuGet package reference +- [ ] `X86Disassembler` class wrapping iced-x86 +- [ ] Extract CALL/JMP instructions from `.text` section +- [ ] Handle both 32-bit and 64-bit code +- [ ] Deterministic output (stable instruction ordering) + +--- + +### T2: Add Capstone Bindings for ARM64/Other Architectures + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Add Capstone disassembler bindings for ARM64 and other non-x86 architectures. 
+ +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.CallGraph/Extraction/Binary/Disassembly/` + +**Acceptance Criteria**: +- [ ] `CapstoneDisassembler` class for ARM64 +- [ ] Architecture detection from ELF/Mach-O headers +- [ ] Extract BL/BLR instructions for ARM64 +- [ ] Fallback to symbol-only analysis if arch unsupported + +--- + +### T3: Implement Direct Call Edge Extraction from .text + +**Assignee**: Scanner Team +**Story Points**: 8 +**Status**: TODO + +**Description**: +Extract direct call edges by disassembling `.text` section and resolving call targets. + +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.CallGraph/Extraction/Binary/` + +**Acceptance Criteria**: +- [ ] `DirectCallExtractor` class +- [ ] Parse call instruction operands to resolve target addresses +- [ ] Map addresses to symbols from symbol table +- [ ] Handle relative and absolute call addressing +- [ ] Create edges with `CallKind.Direct` and address-based `CallSite` +- [ ] Performance: <5s for typical 10MB binary + +**Edge Model**: +```csharp +new CallGraphEdge( + SourceId: $"native:{binary}/{caller_symbol}", + TargetId: $"native:{binary}/{callee_symbol}", + CallKind: CallKind.Direct, + CallSite: $"0x{instruction_address:X}" +) +``` + +--- + +### T4: PLT Stub → GOT Resolution for ELF + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Resolve PLT stubs to their GOT entries to determine actual call targets for ELF binaries. + +**Acceptance Criteria**: +- [ ] Parse `.plt` section entries +- [ ] Map PLT stubs to GOT slots +- [ ] Resolve GOT entries to symbol names via `.rela.plt` +- [ ] Create edges with `CallKind.Plt` type +- [ ] Handle lazy binding patterns + +--- + +### T5: IAT Thunk Resolution for PE + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Resolve Import Address Table thunks for PE binaries to connect call sites to imported functions. 
+ +**Acceptance Criteria**: +- [ ] Parse IAT from PE optional header +- [ ] Map thunk addresses to import names +- [ ] Create edges with `CallKind.Iat` type +- [ ] Handle delay-load imports + +--- + +### T6: Dynamic Loading Detection (dlopen/LoadLibrary) + +**Assignee**: Scanner Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Detect calls to dynamic loading functions and infer loaded library candidates. + +**Acceptance Criteria**: +- [ ] Detect calls to `dlopen`, `dlsym` (ELF) +- [ ] Detect calls to `LoadLibraryA/W`, `GetProcAddress` (PE) +- [ ] Extract string literal arguments where resolvable +- [ ] Create edges with `CallKind.Dynamic` and lower confidence +- [ ] Mark as `EdgeConfidence.Medium` for heuristic matches + +--- + +### T7: String Literal Analysis for Dynamic Library Candidates + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Analyze string literals near dynamic loading calls to infer library names. + +**Acceptance Criteria**: +- [ ] Extract `.rodata`/`.rdata` string references +- [ ] Correlate strings with `dlopen`/`LoadLibrary` call sites +- [ ] Match patterns: `lib*.so*`, `*.dll` +- [ ] Add inferred libs as `unknown` nodes with `is_dynamic=true` + +--- + +### T8: Update BinaryCallGraphExtractor Tests + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Add comprehensive tests for new call edge extraction capabilities. 
+ +**Implementation Path**: `src/Scanner/__Tests/StellaOps.Scanner.CallGraph.Tests/` + +**Acceptance Criteria**: +- [ ] Test fixtures for ELF x86_64, PE x64, Mach-O ARM64 +- [ ] Direct call extraction tests +- [ ] PLT/IAT resolution tests +- [ ] Dynamic loading detection tests +- [ ] Determinism tests (same binary → same edges) +- [ ] Golden output comparison + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Scanner Team | Integrate iced-x86 for x86/x64 Disassembly | +| 2 | T2 | TODO | — | Scanner Team | Add Capstone Bindings for ARM64 | +| 3 | T3 | TODO | T1, T2 | Scanner Team | Direct Call Edge Extraction from .text | +| 4 | T4 | TODO | T3 | Scanner Team | PLT Stub → GOT Resolution for ELF | +| 5 | T5 | TODO | T3 | Scanner Team | IAT Thunk Resolution for PE | +| 6 | T6 | TODO | T3 | Scanner Team | Dynamic Loading Detection | +| 7 | T7 | TODO | T6 | Scanner Team | String Literal Analysis | +| 8 | T8 | TODO | T1-T7 | Scanner Team | Update BinaryCallGraphExtractor Tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint file created from advisory gap analysis. 
| Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Disassembler choice | Decision | Scanner Team | iced-x86 for x86/x64 (pure .NET), Capstone for ARM64 | +| Performance budget | Risk | Scanner Team | Disassembly adds latency; cap at 5s for 10MB binary | +| Stripped binary handling | Decision | Scanner Team | Use address-based IDs when symbols unavailable | +| Conservative unknowns | Decision | Scanner Team | Mark unresolved indirect calls as Unknown edges | + +--- + +**Sprint Status**: TODO (0/8 tasks complete) diff --git a/docs/implplan/SPRINT_3800_SUMMARY.md b/docs/implplan/SPRINT_3800_SUMMARY.md new file mode 100644 index 000000000..d9238528f --- /dev/null +++ b/docs/implplan/SPRINT_3800_SUMMARY.md @@ -0,0 +1,120 @@ +# Sprint Epic 3800 · Layered Binary + Call-Stack Reachability + +## Overview + +This epic implements the two-stage reachability map as described in the product advisory "Layered binary + call‑stack reachability" (20-Dec-2025). It extends Stella Ops' reachability analysis with: + +1. **Deeper binary analysis** - Disassembly-based call edge extraction +2. **CVE→Symbol mapping** - Connect vulnerabilities to specific binary functions +3. **Attestable slices** - Minimal proof units for triage decisions +4. **Query & replay APIs** - On-demand reachability queries with verification +5. **VEX automation** - Auto-generate `code_not_reachable` justifications +6. **Runtime traces** - eBPF/ETW-based observed path evidence +7. 
**OCI storage & CLI** - Artifact management and command-line tools + +## Sprint Breakdown + +| Sprint | Topic | Tasks | Status | +|--------|-------|-------|--------| +| [3800.0001.0001](SPRINT_3800_0001_0001_binary_call_edge_enhancement.md) | Binary Call-Edge Enhancement | 8 | TODO | +| [3810.0001.0001](SPRINT_3810_0001_0001_cve_symbol_mapping_slice_format.md) | CVE→Symbol Mapping & Slice Format | 7 | TODO | +| [3820.0001.0001](SPRINT_3820_0001_0001_slice_query_replay_apis.md) | Slice Query & Replay APIs | 7 | TODO | +| [3830.0001.0001](SPRINT_3830_0001_0001_vex_integration_policy_binding.md) | VEX Integration & Policy Binding | 6 | TODO | +| [3840.0001.0001](SPRINT_3840_0001_0001_runtime_trace_merge.md) | Runtime Trace Merge | 7 | TODO | +| [3850.0001.0001](SPRINT_3850_0001_0001_oci_storage_cli.md) | OCI Storage & CLI | 8 | TODO | + +**Total Tasks**: 43 +**Status**: TODO (0/43 complete) + +## Recommended Execution Order + +``` +Sprint 3810 (CVE→Symbol + Slices) ─────────────────┐ + ├──► Sprint 3820 (Query APIs) ──► Sprint 3830 (VEX) +Sprint 3800 (Binary Enhancement) ──────────────────┘ + +Sprint 3850 (OCI + CLI) ─────────────────────────────► (parallel with 3830) + +Sprint 3840 (Runtime Traces) ────────────────────────► (optional, parallel with 3830-3850) +``` + +## Key Deliverables + +### Schemas & Contracts + +| Artifact | Location | Sprint | +|----------|----------|--------| +| Slice predicate schema | `docs/schemas/stellaops-slice.v1.schema.json` | 3810 | +| Slice OCI media type | `application/vnd.stellaops.slice.v1+json` | 3850 | +| Runtime event schema | `docs/schemas/runtime-call-event.schema.json` | 3840 | + +### APIs + +| Endpoint | Method | Description | Sprint | +|----------|--------|-------------|--------| +| `/api/slices/query` | POST | Query reachability for CVE/symbols | 3820 | +| `/api/slices/{digest}` | GET | Retrieve attested slice | 3820 | +| `/api/slices/replay` | POST | Verify slice reproducibility | 3820 | + +### CLI Commands + +| Command | 
Description | Sprint | +|---------|-------------|--------| +| `stella binary submit` | Submit binary graph | 3850 | +| `stella binary info` | Display graph info | 3850 | +| `stella binary symbols` | List symbols | 3850 | +| `stella binary verify` | Verify attestation | 3850 | + +### Documentation + +| Document | Location | Sprint | +|----------|----------|--------| +| Slice schema specification | `docs/reachability/slice-schema.md` | 3810 | +| CVE→Symbol mapping guide | `docs/reachability/cve-symbol-mapping.md` | 3810 | +| Replay verification guide | `docs/reachability/replay-verification.md` | 3820 | + +## Dependencies + +### External Libraries + +| Library | Purpose | Sprint | +|---------|---------|--------| +| iced-x86 | x86/x64 disassembly | 3800 | +| Capstone | ARM64 disassembly | 3800 | +| libbpf/cilium-ebpf | eBPF collector | 3840 | + +### Cross-Module Dependencies + +| From | To | Integration Point | +|------|-----|-------------------| +| Scanner | Concelier | Advisory feed for CVE→symbol mapping | +| Scanner | Attestor | DSSE signing for slices | +| Scanner | Excititor | Slice verdict consumption | +| Policy | Scanner | Unknowns budget enforcement | + +## Risk Register + +| Risk | Impact | Mitigation | Owner | +|------|--------|------------|-------| +| Disassembly performance | High | Cap at 5s per 10MB binary | Scanner Team | +| Missing CVE→symbol mappings | Medium | Fallback to package-level | Scanner Team | +| eBPF kernel compatibility | Medium | Require 5.8+, provide fallback | Platform Team | +| OCI registry compatibility | Low | Test against major registries | Scanner Team | + +## Success Metrics + +1. **Coverage**: >80% of binary CVEs have symbol-level mapping +2. **Performance**: Slice query <2s for typical graphs +3. **Accuracy**: Replay match rate >99.9% +4. 
**Adoption**: CLI commands used in >50% of offline deployments + +## Related Documentation + +- [Product Advisory](../product-advisories/archived/2025-12-22-binary-reachability/20-Dec-2025%20-%20Layered%20binary%20+%20call‑stack%20reachability.md) +- [Binary Reachability Schema](../reachability/binary-reachability-schema.md) +- [RichGraph Contract](../contracts/richgraph-v1.md) +- [Function-Level Evidence](../reachability/function-level-evidence.md) + +--- + +_Created: 2025-12-22. Owner: Scanner Guild._ diff --git a/docs/implplan/SPRINT_3810_0001_0001_cve_symbol_mapping_slice_format.md b/docs/implplan/SPRINT_3810_0001_0001_cve_symbol_mapping_slice_format.md new file mode 100644 index 000000000..fbd58bde7 --- /dev/null +++ b/docs/implplan/SPRINT_3810_0001_0001_cve_symbol_mapping_slice_format.md @@ -0,0 +1,262 @@ +# Sprint 3810.0001.0001 · CVE→Symbol Mapping & Slice Format + +## Topic & Scope +- Implement CVE to symbol/function mapping service for binary reachability queries. +- Define and implement the `ReachabilitySlice` schema as minimal attestable proof units. +- Create slice extraction logic to generate focused subgraphs for specific CVE queries. +- **Working directory:** `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/Slices/` + +## Dependencies & Concurrency +- **Upstream**: Benefits from Sprint 3800 (richer call edges) +- **Downstream**: Sprint 3820 (Query APIs) consumes slices +- **Safe to parallelize with**: Sprint 3800, Sprint 3830 + +## Documentation Prerequisites +- `docs/product-advisories/archived/2025-12-22-binary-reachability/20-Dec-2025 - Layered binary + call‑stack reachability.md` +- `docs/reachability/slice-schema.md` (created this sprint) +- `docs/modules/concelier/architecture.md` + +--- + +## Tasks + +### T1: Define ReachabilitySlice Schema (DSSE Predicate) + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Define the DSSE predicate schema for attestable reachability slices. 
+
+**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/Slices/`
+
+**Acceptance Criteria**:
+- [ ] `ReachabilitySlice` record with all required fields
+- [ ] JSON schema at `docs/schemas/stellaops-slice.v1.schema.json`
+- [ ] Predicate type URI: `https://stellaops.dev/predicates/reachability-slice/v1`
+- [ ] Fields: inputs, query, subgraph, verdict, manifest
+
+**Schema Spec**:
+```csharp
+public sealed record ReachabilitySlice
+{
+    [JsonPropertyName("_type")]
+    public string Type { get; init; } = "https://stellaops.dev/predicates/reachability-slice/v1";
+
+    [JsonPropertyName("inputs")]
+    public required SliceInputs Inputs { get; init; }
+
+    [JsonPropertyName("query")]
+    public required SliceQuery Query { get; init; }
+
+    [JsonPropertyName("subgraph")]
+    public required SliceSubgraph Subgraph { get; init; }
+
+    [JsonPropertyName("verdict")]
+    public required SliceVerdict Verdict { get; init; }
+
+    [JsonPropertyName("manifest")]
+    public required ScanManifest Manifest { get; init; }
+}
+
+public sealed record SliceQuery
+{
+    public string? CveId { get; init; }
+    public ImmutableArray<string> TargetSymbols { get; init; }
+    public ImmutableArray<string> Entrypoints { get; init; }
+    public string? PolicyHash { get; init; }
+}
+
+public sealed record SliceVerdict
+{
+    public required string Status { get; init; } // "reachable" | "unreachable" | "unknown"
+    public required double Confidence { get; init; }
+    public ImmutableArray<string> Reasons { get; init; }
+    public ImmutableArray<string> PathWitnesses { get; init; } // TODO: confirm witness element type
+}
+```
+
+---
+
+### T2: Concelier → Scanner Advisory Feed Integration
+
+**Assignee**: Scanner Team + Concelier Team
+**Story Points**: 5
+**Status**: TODO
+
+**Description**:
+Create integration layer to consume CVE advisory data from Concelier for symbol mapping.
+
+**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Advisory/`
+
+**Acceptance Criteria**:
+- [ ] `IAdvisoryClient` interface for Concelier queries
+- [ ] `AdvisoryClient` HTTP implementation
+- [ ] Query by CVE ID → get affected packages, functions, symbols
+- [ ] Cache advisory data with TTL (1 hour default)
+- [ ] Offline fallback to local advisory bundle
+
+---
+
+### T3: Vulnerability Surface Service for CVE → Symbols
+
+**Assignee**: Scanner Team
+**Story Points**: 8
+**Status**: TODO
+
+**Description**:
+Build service that maps CVE identifiers to affected binary symbols/functions.
+
+**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.VulnSurfaces/`
+
+**Acceptance Criteria**:
+- [ ] `IVulnSurfaceService` interface
+- [ ] `VulnSurfaceService` implementation
+- [ ] Query: CVE + PURL → list of affected symbols
+- [ ] Support for function-level granularity
+- [ ] Handle missing mappings gracefully (return all public symbols of package)
+- [ ] Integration with `StellaOps.Scanner.VulnSurfaces` existing code
+
+**Query Model**:
+```csharp
+public interface IVulnSurfaceService
+{
+    Task<VulnSurfaceResult> GetAffectedSymbolsAsync(
+        string cveId,
+        string purl,
+        CancellationToken ct = default);
+}
+
+public sealed record VulnSurfaceResult
+{
+    public required string CveId { get; init; }
+    public required string Purl { get; init; }
+    public required ImmutableArray<string> Symbols { get; init; }
+    public required string Source { get; init; } // "patch-diff" | "advisory" | "heuristic"
+    public required double Confidence { get; init; }
+}
+```
+
+---
+
+### T4: Slice Extractor (Subgraph from Full Graph)
+
+**Assignee**: Scanner Team
+**Story Points**: 5
+**Status**: TODO
+
+**Description**:
+Implement algorithm to extract minimal subgraph containing paths from entrypoints to target symbols.
+ +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/Slices/` + +**Acceptance Criteria**: +- [ ] `SliceExtractor` class +- [ ] Input: full RichGraph, query (target symbols, entrypoints) +- [ ] Output: minimal subgraph with only relevant nodes/edges +- [ ] BFS/DFS from targets to find all paths to entrypoints +- [ ] Include gate annotations on path edges +- [ ] Deterministic extraction (stable ordering) + +--- + +### T5: Slice DSSE Signing with Content-Addressed Storage + +**Assignee**: Scanner Team + Attestor Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Sign extracted slices as DSSE envelopes and store in CAS. + +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/Slices/` + +**Acceptance Criteria**: +- [ ] `SliceDsseSigner` using existing DSSE infrastructure +- [ ] Content-addressed storage: `cas://slices/{blake3-hash}` +- [ ] Slice digest computation (deterministic) +- [ ] Return `slice_digest` for retrieval + +--- + +### T6: Verdict Computation (Reachable/Unreachable/Unknown) + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Compute verdict for slice based on path analysis and unknowns. 
+ +**Acceptance Criteria**: +- [ ] `VerdictComputer` class +- [ ] "reachable": at least one path exists with high confidence +- [ ] "unreachable": no paths found and no unknowns on boundaries +- [ ] "unknown": unknowns present on potential paths +- [ ] Confidence score based on edge confidence aggregation +- [ ] Reason codes for verdict explanation + +**Verdict Rules**: +``` +reachable := path_exists AND min_path_confidence > 0.7 +unreachable := NOT path_exists AND unknown_count == 0 +unknown := path_exists AND (unknown_count > threshold OR min_confidence < 0.5) + OR NOT path_exists AND unknown_count > 0 +``` + +--- + +### T7: Slice Schema JSON Validation Tests + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO + +**Description**: +Create tests validating slice JSON against schema. + +**Implementation Path**: `src/Scanner/__Tests/StellaOps.Scanner.Reachability.Tests/Slices/` + +**Acceptance Criteria**: +- [ ] Schema validation tests +- [ ] Round-trip serialization tests +- [ ] Determinism tests (same query → same slice bytes) +- [ ] Golden output comparison + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Scanner Team | Define ReachabilitySlice Schema | +| 2 | T2 | TODO | — | Scanner + Concelier | Advisory Feed Integration | +| 3 | T3 | TODO | T2 | Scanner Team | Vulnerability Surface Service | +| 4 | T4 | TODO | T1 | Scanner Team | Slice Extractor | +| 5 | T5 | TODO | T1, T4 | Scanner + Attestor | Slice DSSE Signing | +| 6 | T6 | TODO | T4 | Scanner Team | Verdict Computation | +| 7 | T7 | TODO | T1-T6 | Scanner Team | Schema Validation Tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint file created from advisory gap analysis. 
| Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Slice granularity | Decision | Scanner Team | One slice per CVE+PURL query | +| Unknown handling | Decision | Scanner Team | Conservative: unknowns → unknown verdict | +| Cache TTL | Decision | Scanner Team | 1 hour for advisory data, configurable | +| Missing CVE→symbol mappings | Risk | Scanner Team | Fallback to package-level (all public symbols) | + +--- + +**Sprint Status**: TODO (0/7 tasks complete) diff --git a/docs/implplan/SPRINT_3820_0001_0001_slice_query_replay_apis.md b/docs/implplan/SPRINT_3820_0001_0001_slice_query_replay_apis.md new file mode 100644 index 000000000..2642c97ac --- /dev/null +++ b/docs/implplan/SPRINT_3820_0001_0001_slice_query_replay_apis.md @@ -0,0 +1,241 @@ +# Sprint 3820.0001.0001 · Slice Query & Replay APIs + +## Topic & Scope +- Implement query API for on-demand reachability slice generation. +- Implement slice retrieval by digest. +- Implement replay API with byte-for-byte verification. +- **Working directory:** `src/Scanner/StellaOps.Scanner.WebService/Endpoints/` and `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/Replay/` + +## Dependencies & Concurrency +- **Upstream**: Sprint 3810 (Slice Format) must be complete +- **Downstream**: Sprint 3830 (VEX Integration) consumes slice verdicts +- **Safe to parallelize with**: Sprint 3840 (Runtime Traces) + +## Documentation Prerequisites +- `docs/reachability/slice-schema.md` +- `docs/reachability/replay-verification.md` (created this sprint) +- `docs/api/scanner-api.md` + +--- + +## Tasks + +### T1: POST /api/slices/query Endpoint + +**Assignee**: Scanner Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Implement query endpoint that generates reachability slices on demand. 
+
+**Implementation Path**: `src/Scanner/StellaOps.Scanner.WebService/Endpoints/SliceEndpoints.cs`
+
+**Acceptance Criteria**:
+- [ ] `POST /api/slices/query` endpoint
+- [ ] Request body: `{ cve, symbols[], entrypoints[], policy?, scanId }`
+- [ ] Response: `{ sliceDigest, verdict, confidence, paths[], cacheHit }`
+- [ ] Generate slice using `SliceExtractor` from Sprint 3810
+- [ ] Sign and store slice in CAS
+- [ ] Return 202 Accepted for async generation of large slices
+
+**Request/Response Contracts**:
+```csharp
+public sealed record SliceQueryRequest
+{
+    public string? CveId { get; init; }
+    public ImmutableArray<string> Symbols { get; init; }
+    public ImmutableArray<string> Entrypoints { get; init; }
+    public string? PolicyHash { get; init; }
+    public required string ScanId { get; init; }
+}
+
+public sealed record SliceQueryResponse
+{
+    public required string SliceDigest { get; init; }
+    public required string Verdict { get; init; }
+    public required double Confidence { get; init; }
+    public ImmutableArray<string> PathWitnesses { get; init; } // TODO: confirm witness element type
+    public required bool CacheHit { get; init; }
+    public string? JobId { get; init; } // For async generation
+}
+```
+
+---
+
+### T2: GET /api/slices/{digest} Endpoint
+
+**Assignee**: Scanner Team
+**Story Points**: 3
+**Status**: TODO
+
+**Description**:
+Implement retrieval endpoint for attested slices by digest.
+
+**Implementation Path**: `src/Scanner/StellaOps.Scanner.WebService/Endpoints/SliceEndpoints.cs`
+
+**Acceptance Criteria**:
+- [ ] `GET /api/slices/{digest}` endpoint
+- [ ] Return DSSE envelope with slice predicate
+- [ ] Support `Accept: application/json` for JSON slice
+- [ ] Support `Accept: application/dsse+json` for DSSE envelope
+- [ ] 404 if slice not found in CAS
+
+---
+
+### T3: Slice Caching Layer with TTL
+
+**Assignee**: Scanner Team
+**Story Points**: 3
+**Status**: TODO
+
+**Description**:
+Implement caching for generated slices to avoid redundant computation.
+
+**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/Slices/`
+
+**Acceptance Criteria**:
+- [ ] `ISliceCache` interface
+- [ ] In-memory cache with configurable TTL (default 1 hour)
+- [ ] Cache key: hash of (scanId, query parameters)
+- [ ] Cache eviction on memory pressure
+- [ ] Metrics: cache hit/miss rate
+
+---
+
+### T4: POST /api/slices/replay Endpoint
+
+**Assignee**: Scanner Team
+**Story Points**: 5
+**Status**: TODO
+
+**Description**:
+Implement replay endpoint that recomputes a slice and verifies byte-for-byte match.
+
+**Implementation Path**: `src/Scanner/StellaOps.Scanner.WebService/Endpoints/SliceEndpoints.cs`
+
+**Acceptance Criteria**:
+- [ ] `POST /api/slices/replay` endpoint
+- [ ] Request body: `{ sliceDigest }`
+- [ ] Response: `{ match, originalDigest, recomputedDigest, diff? }`
+- [ ] Rehydrate inputs from CAS
+- [ ] Recompute slice with same parameters
+- [ ] Compare byte-for-byte
+
+**Response Contract**:
+```csharp
+public sealed record ReplayResponse
+{
+    public required bool Match { get; init; }
+    public required string OriginalDigest { get; init; }
+    public required string RecomputedDigest { get; init; }
+    public SliceDiff? Diff { get; init; } // Only if !Match
+}
+
+public sealed record SliceDiff
+{
+    public ImmutableArray<string> MissingNodes { get; init; }
+    public ImmutableArray<string> ExtraNodes { get; init; }
+    public ImmutableArray<string> MissingEdges { get; init; }
+    public ImmutableArray<string> ExtraEdges { get; init; }
+    public string? VerdictDiff { get; init; }
+}
+```
+
+---
+
+### T5: Replay Verification with Diff Output
+
+**Assignee**: Scanner Team
+**Story Points**: 3
+**Status**: TODO
+
+**Description**:
+Implement detailed diff computation when replay doesn't match.
+ +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/Replay/` + +**Acceptance Criteria**: +- [ ] `SliceDiffComputer` class +- [ ] Compare node sets (added/removed) +- [ ] Compare edge sets (added/removed) +- [ ] Compare verdicts +- [ ] Human-readable diff output +- [ ] Deterministic diff ordering + +--- + +### T6: Integration Tests for Slice Workflow + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +End-to-end tests for slice query and replay workflow. + +**Implementation Path**: `src/Scanner/__Tests/StellaOps.Scanner.WebService.Tests/Integration/` + +**Acceptance Criteria**: +- [ ] Query → retrieve → verify workflow test +- [ ] Replay match test +- [ ] Replay mismatch test (with tampered inputs) +- [ ] Cache hit test +- [ ] Async generation test for large slices + +--- + +### T7: OpenAPI Spec Updates + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO + +**Description**: +Update OpenAPI specification with new slice endpoints. 
+ +**Implementation Path**: `docs/api/openapi/scanner.yaml` + +**Acceptance Criteria**: +- [ ] Document `POST /api/slices/query` +- [ ] Document `GET /api/slices/{digest}` +- [ ] Document `POST /api/slices/replay` +- [ ] Request/response schemas +- [ ] Error responses + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | Sprint 3810 | Scanner Team | POST /api/slices/query Endpoint | +| 2 | T2 | TODO | T1 | Scanner Team | GET /api/slices/{digest} Endpoint | +| 3 | T3 | TODO | T1 | Scanner Team | Slice Caching Layer | +| 4 | T4 | TODO | T1, T2 | Scanner Team | POST /api/slices/replay Endpoint | +| 5 | T5 | TODO | T4 | Scanner Team | Replay Verification with Diff | +| 6 | T6 | TODO | T1-T5 | Scanner Team | Integration Tests | +| 7 | T7 | TODO | T1-T4 | Scanner Team | OpenAPI Spec Updates | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint file created from advisory gap analysis. 
| Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Async vs sync query | Decision | Scanner Team | Sync for small graphs (<10k nodes), async for larger | +| Cache eviction | Decision | Scanner Team | LRU with 1GB memory cap | +| Replay determinism | Risk | Scanner Team | Must ensure all inputs are CAS-addressed | +| Rate limiting | Decision | Scanner Team | 10 queries/min per tenant default | + +--- + +**Sprint Status**: TODO (0/7 tasks complete) diff --git a/docs/implplan/SPRINT_3830_0001_0001_vex_integration_policy_binding.md b/docs/implplan/SPRINT_3830_0001_0001_vex_integration_policy_binding.md new file mode 100644 index 000000000..6e25c7d7c --- /dev/null +++ b/docs/implplan/SPRINT_3830_0001_0001_vex_integration_policy_binding.md @@ -0,0 +1,234 @@ +# Sprint 3830.0001.0001 · VEX Integration & Policy Binding + +## Topic & Scope +- Connect reachability slices to VEX decision automation. +- Implement automatic `code_not_reachable` justification generation. +- Add policy binding to slices with strict/forward/any modes. +- Integrate unknowns budget enforcement into policy evaluation. +- **Working directory:** `src/Excititor/__Libraries/StellaOps.Excititor.Core/` and `src/Policy/__Libraries/` + +## Dependencies & Concurrency +- **Upstream**: Sprint 3810 (Slice Format), Sprint 3820 (Query APIs) +- **Downstream**: None (terminal feature sprint) +- **Safe to parallelize with**: Sprint 3840 (Runtime Traces), Sprint 3850 (CLI) + +## Documentation Prerequisites +- `docs/reachability/slice-schema.md` +- `docs/modules/excititor/architecture.md` +- `docs/modules/policy/architecture.md` + +--- + +## Tasks + +### T1: Excititor ← Slice Verdict Consumption + +**Assignee**: Excititor Team + Scanner Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Enable Excititor to consume slice verdicts and use them in VEX decisions. 
+ +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/Reachability/` + +**Acceptance Criteria**: +- [ ] `ISliceVerdictConsumer` interface +- [ ] `SliceVerdictConsumer` implementation +- [ ] Query Scanner slice API for CVE+PURL combinations +- [ ] Map slice verdicts to VEX status influence +- [ ] Cache verdicts per scan lifecycle + +**Integration Flow**: +``` +Finding (CVE+PURL) + → Query slice verdict + → If unreachable: suggest not_affected + → If reachable: maintain affected status + → If unknown: flag for manual triage +``` + +--- + +### T2: Auto-Generate code_not_reachable Justification + +**Assignee**: Excititor Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Automatically generate VEX justification when slice verdict is "unreachable". + +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/Justification/` + +**Acceptance Criteria**: +- [ ] `ReachabilityJustificationGenerator` class +- [ ] Generate `code_not_reachable` justification with evidence +- [ ] Include slice digest as evidence reference +- [ ] Include path analysis summary in justification text +- [ ] Support OpenVEX, CSAF, CycloneDX justification formats + +**Justification Template**: +```json +{ + "category": "code_not_reachable", + "details": "Static analysis determined no execution path from application entrypoints to vulnerable function.", + "evidence": { + "slice_digest": "blake3:abc123...", + "slice_uri": "cas://slices/blake3:abc123...", + "analyzer_version": "scanner.native:1.2.0", + "confidence": 0.95 + } +} +``` + +--- + +### T3: Policy Binding to Slices (strict/forward/any) + +**Assignee**: Policy Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Implement policy version binding for slices with validation modes. 
+ +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/Slices/` + +**Acceptance Criteria**: +- [ ] `PolicyBinding` record in slice schema +- [ ] `strict`: Slice invalid if policy changes +- [ ] `forward`: Slice valid with newer policy versions +- [ ] `any`: Slice valid with any policy version +- [ ] Policy hash computation from DSL +- [ ] Validation on slice retrieval + +**Binding Schema**: +```csharp +public sealed record PolicyBinding +{ + public required string PolicyDigest { get; init; } + public required string PolicyVersion { get; init; } + public required DateTimeOffset BoundAt { get; init; } + public required PolicyBindingMode Mode { get; init; } +} + +public enum PolicyBindingMode { Strict, Forward, Any } +``` + +--- + +### T4: Unknowns Budget Enforcement in Policy + +**Assignee**: Policy Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Enforce unknowns budget in policy evaluation for slice-based decisions. + +**Implementation Path**: `src/Policy/__Libraries/StellaOps.Policy.Engine/` + +**Acceptance Criteria**: +- [ ] `UnknownsBudget` policy rule type +- [ ] Configurable threshold per severity/category +- [ ] Block deployment if unknowns exceed budget +- [ ] Report unknowns count in policy evaluation result +- [ ] Support per-environment budgets + +**Policy Rule Example**: +```yaml +rules: + - id: unknowns-budget + type: unknowns_budget + config: + max_critical_unknowns: 0 + max_high_unknowns: 5 + max_medium_unknowns: 20 + fail_action: block +``` + +--- + +### T5: Feature Flag Gate Conditions in Verdicts + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Include feature flag gate information in slice verdicts. 
+ +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/Slices/` + +**Acceptance Criteria**: +- [ ] Detect feature flag gates on paths (from existing `FeatureFlagDetector`) +- [ ] Include gate conditions in verdict reasons +- [ ] Mark as "conditionally reachable" when gated +- [ ] Specify flag name/condition required for reachability + +**Verdict Extension**: +```csharp +public sealed record GatedPath +{ + public required string PathId { get; init; } + public required string GateType { get; init; } // "feature_flag", "config", "auth" + public required string GateCondition { get; init; } // "FEATURE_X=true" + public required bool GateSatisfied { get; init; } +} +``` + +--- + +### T6: VEX Export with Reachability Evidence + +**Assignee**: Excititor Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Include reachability evidence in VEX exports. + +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Formats.*/` + +**Acceptance Criteria**: +- [ ] OpenVEX: Include evidence in statement +- [ ] CSAF: Include in remediation section +- [ ] CycloneDX: Include in analysis metadata +- [ ] Link to slice URI for full evidence + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | Sprint 3820 | Excititor + Scanner | Slice Verdict Consumption | +| 2 | T2 | TODO | T1 | Excititor Team | Auto-Generate code_not_reachable | +| 3 | T3 | TODO | Sprint 3810 | Policy Team | Policy Binding to Slices | +| 4 | T4 | TODO | T3 | Policy Team | Unknowns Budget Enforcement | +| 5 | T5 | TODO | Sprint 3810 | Scanner Team | Feature Flag Gate Conditions | +| 6 | T6 | TODO | T1, T2 | Excititor Team | VEX Export with Evidence | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint file created from advisory gap analysis. 
| Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Auto-justification approval | Decision | Excititor Team | Auto-generated justifications require human approval by default | +| Policy binding default | Decision | Policy Team | Default to `strict` for production | +| Unknowns budget defaults | Decision | Policy Team | Critical=0, High=5, Medium=20 | +| Gated path confidence | Decision | Scanner Team | Gated paths get 0.5x confidence multiplier | + +--- + +**Sprint Status**: TODO (0/6 tasks complete) diff --git a/docs/implplan/SPRINT_3840_0001_0001_runtime_trace_merge.md b/docs/implplan/SPRINT_3840_0001_0001_runtime_trace_merge.md new file mode 100644 index 000000000..764287bf8 --- /dev/null +++ b/docs/implplan/SPRINT_3840_0001_0001_runtime_trace_merge.md @@ -0,0 +1,241 @@ +# Sprint 3840.0001.0001 · Runtime Trace Merge + +## Topic & Scope +- Implement runtime trace capture via eBPF (Linux) and ETW (Windows). +- Create trace ingestion service for merging observed paths with static analysis. +- Generate "observed path" slices with runtime evidence. +- **Working directory:** `src/Scanner/__Libraries/StellaOps.Scanner.Runtime/` and `src/Zastava/` + +## Dependencies & Concurrency +- **Upstream**: Sprint 3810 (Slice Format) for observed-path slices +- **Downstream**: Enhances Sprint 3830 (VEX Integration) with runtime confidence +- **Safe to parallelize with**: Sprint 3850 (CLI) + +## Documentation Prerequisites +- `docs/reachability/runtime-facts.md` +- `docs/reachability/runtime-static-union-schema.md` +- `docs/modules/zastava/architecture.md` + +--- + +## Tasks + +### T1: eBPF Collector Design (uprobe-based) + +**Assignee**: Scanner Team + Platform Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Design eBPF-based function tracing collector using uprobes. 
+ +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Runtime/Ebpf/` + +**Acceptance Criteria**: +- [ ] Design document for eBPF collector architecture +- [ ] uprobe attachment strategy for target functions +- [ ] Data format for captured events +- [ ] Ringbuffer configuration for event streaming +- [ ] Security model (CAP_BPF, CAP_PERFMON) +- [ ] Container namespace awareness + +**Event Schema**: +```csharp +public sealed record RuntimeCallEvent +{ + public required ulong Timestamp { get; init; } // nanoseconds since boot + public required uint Pid { get; init; } + public required uint Tid { get; init; } + public required ulong CallerAddress { get; init; } + public required ulong CalleeAddress { get; init; } + public required string CallerSymbol { get; init; } + public required string CalleeSymbol { get; init; } + public required string BinaryPath { get; init; } +} +``` + +--- + +### T2: Linux eBPF Collector Implementation + +**Assignee**: Platform Team +**Story Points**: 8 +**Status**: TODO + +**Description**: +Implement eBPF collector for Linux using libbpf or bpf2go. + +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Runtime/Ebpf/` + +**Acceptance Criteria**: +- [ ] eBPF program for uprobe tracing (BPF CO-RE) +- [ ] User-space loader and event reader +- [ ] Symbol resolution via /proc/kallsyms and binary symbols +- [ ] Ringbuffer-based event streaming +- [ ] Handle ASLR via /proc/pid/maps +- [ ] Graceful degradation without eBPF support + +**Technology Choice**: +- Use `bpf2go` for Go-based loader or libbpf-bootstrap +- Alternative: `cilium/ebpf` library + +--- + +### T3: ETW Collector for Windows + +**Assignee**: Platform Team +**Story Points**: 8 +**Status**: TODO + +**Description**: +Implement ETW-based function tracing for Windows. 
+ +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Runtime/Etw/` + +**Acceptance Criteria**: +- [ ] ETW session for CLR and native events +- [ ] Microsoft-Windows-DotNETRuntime provider subscription +- [ ] Stack walking for call chains +- [ ] Symbol resolution via DbgHelp +- [ ] Container-aware (process isolation) +- [ ] Admin privilege handling + +--- + +### T4: Trace Ingestion Service + +**Assignee**: Scanner Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Create service for ingesting runtime traces and storing in normalized format. + +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Runtime/Ingestion/` + +**Acceptance Criteria**: +- [ ] `ITraceIngestionService` interface +- [ ] `TraceIngestionService` implementation +- [ ] Accept events from eBPF/ETW collectors +- [ ] Normalize to common `RuntimeCallEvent` format +- [ ] Batch writes to storage +- [ ] Deduplication of repeated call patterns +- [ ] CAS storage for trace files + +--- + +### T5: Runtime → Static Graph Merge Algorithm + +**Assignee**: Scanner Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Implement algorithm to merge runtime observations with static call graphs. 
+ +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/Runtime/` + +**Acceptance Criteria**: +- [ ] `RuntimeStaticMerger` class +- [ ] Match runtime events to static graph nodes by symbol +- [ ] Add "observed" annotation to edges +- [ ] Add new edges for runtime-only paths (dynamic dispatch) +- [ ] Timestamp metadata for observation recency +- [ ] Confidence boost for observed paths + +**Merge Rules**: +``` +For each runtime edge (A → B): + If static edge exists: + Mark edge as "observed" + Add observation timestamp + Boost confidence to 1.0 + Else: + Add edge with origin="runtime" + Set confidence based on observation count +``` + +--- + +### T6: "Observed Path" Slice Generation + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Generate slices that include runtime-observed paths as evidence. + +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/Slices/` + +**Acceptance Criteria**: +- [ ] Include `observed_at` timestamps in slice edges +- [ ] New verdict: "observed_reachable" (highest confidence) +- [ ] Include observation count and recency +- [ ] Link to trace CAS artifacts + +**Observed Edge Extension**: +```csharp +public sealed record ObservedEdgeMetadata +{ + public required DateTimeOffset FirstObserved { get; init; } + public required DateTimeOffset LastObserved { get; init; } + public required int ObservationCount { get; init; } + public required string TraceDigest { get; init; } +} +``` + +--- + +### T7: Trace Retention and Pruning Policies + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO + +**Description**: +Implement retention policies for runtime trace data. 
+ +**Acceptance Criteria**: +- [ ] Configurable retention period (default 30 days) +- [ ] Automatic pruning of old traces +- [ ] Keep traces referenced by active slices +- [ ] Aggregation of old traces into summaries +- [ ] Storage quota enforcement + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Scanner + Platform | eBPF Collector Design | +| 2 | T2 | TODO | T1 | Platform Team | Linux eBPF Collector | +| 3 | T3 | TODO | — | Platform Team | ETW Collector for Windows | +| 4 | T4 | TODO | T2, T3 | Scanner Team | Trace Ingestion Service | +| 5 | T5 | TODO | T4, Sprint 3810 | Scanner Team | Runtime → Static Merge | +| 6 | T6 | TODO | T5 | Scanner Team | Observed Path Slices | +| 7 | T7 | TODO | T4 | Scanner Team | Trace Retention Policies | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint file created from advisory gap analysis. | Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| eBPF kernel version | Risk | Platform Team | Requires kernel 5.8+ for CO-RE; fallback needed for older | +| Performance overhead | Risk | Platform Team | Target <5% CPU overhead in production | +| Privacy/security | Decision | Platform Team | Traces contain execution paths; follow data retention policies | +| Windows container support | Risk | Platform Team | ETW in containers has limitations | + +--- + +**Sprint Status**: TODO (0/7 tasks complete) diff --git a/docs/implplan/SPRINT_3850_0001_0001_oci_storage_cli.md b/docs/implplan/SPRINT_3850_0001_0001_oci_storage_cli.md new file mode 100644 index 000000000..5372138f1 --- /dev/null +++ b/docs/implplan/SPRINT_3850_0001_0001_oci_storage_cli.md @@ -0,0 +1,308 @@ +# Sprint 3850.0001.0001 · OCI Storage & CLI + +## Topic & Scope +- Implement OCI artifact storage for reachability slices. 
+- Create `stella binary` CLI command group for binary reachability operations. +- **Working directory:** `src/Scanner/__Libraries/StellaOps.Scanner.Storage.Oci/` and `src/Cli/StellaOps.Cli/Commands/Binary/` + +## Dependencies & Concurrency +- **Upstream**: Sprint 3810 (Slice Format), Sprint 3820 (Query APIs) +- **Downstream**: None (terminal feature sprint) +- **Safe to parallelize with**: Sprint 3830, Sprint 3840 + +## Documentation Prerequisites +- `docs/reachability/binary-reachability-schema.md` (BR9 section) +- `docs/24_OFFLINE_KIT.md` +- `src/Cli/StellaOps.Cli/AGENTS.md` + +--- + +## Tasks + +### T1: OCI Manifest Builder for Slices + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Build OCI manifest structures for storing slices as OCI artifacts. + +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Storage.Oci/` + +**Acceptance Criteria**: +- [ ] `SliceOciManifestBuilder` class +- [ ] Media type: `application/vnd.stellaops.slice.v1+json` +- [ ] Include slice JSON as blob +- [ ] Include DSSE envelope as separate blob +- [ ] Annotations for query metadata + +**Manifest Structure**: +```json +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "artifactType": "application/vnd.stellaops.slice.v1+json", + "config": { + "mediaType": "application/vnd.stellaops.slice.config.v1+json", + "digest": "sha256:...", + "size": 123 + }, + "layers": [ + { + "mediaType": "application/vnd.stellaops.slice.v1+json", + "digest": "sha256:...", + "size": 45678, + "annotations": { + "org.stellaops.slice.cve": "CVE-2024-1234", + "org.stellaops.slice.verdict": "unreachable" + } + }, + { + "mediaType": "application/vnd.dsse+json", + "digest": "sha256:...", + "size": 2345 + } + ], + "annotations": { + "org.stellaops.slice.query.cve": "CVE-2024-1234", + "org.stellaops.slice.query.purl": "pkg:npm/lodash@4.17.21", + "org.stellaops.slice.created": "2025-12-22T10:00:00Z" + } +} +``` + +--- + +### T2: 
Registry Push Service (Harbor/Zot) + +**Assignee**: Scanner Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Implement service to push slice artifacts to OCI registries. + +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Storage.Oci/` + +**Acceptance Criteria**: +- [ ] `IOciPushService` interface +- [ ] `OciPushService` implementation +- [ ] Support basic auth and token auth +- [ ] Support Harbor, Zot, GHCR +- [ ] Referrer API support (OCI 1.1) +- [ ] Retry with exponential backoff +- [ ] Offline mode: save to local OCI layout + +**Push Flow**: +``` +1. Build manifest +2. Push blob: slice.json +3. Push blob: slice.dsse +4. Push config +5. Push manifest +6. (Optional) Create referrer to image +``` + +--- + +### T3: stella binary submit Command + +**Assignee**: CLI Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Implement CLI command to submit binary for reachability analysis. + +**Implementation Path**: `src/Cli/StellaOps.Cli/Commands/Binary/` + +**Acceptance Criteria**: +- [ ] `stella binary submit --graph <path> --binary <path>` +- [ ] Upload graph to Scanner API +- [ ] Upload binary for analysis (optional) +- [ ] Display submission status +- [ ] Return graph digest + +**Usage**: +```bash +# Submit pre-generated graph +stella binary submit --graph ./richgraph.json + +# Submit binary for analysis +stella binary submit --binary ./myapp --analyze + +# Submit with attestation +stella binary submit --graph ./richgraph.json --sign +``` + +--- + +### T4: stella binary info Command + +**Assignee**: CLI Team +**Story Points**: 2 +**Status**: TODO + +**Description**: +Implement CLI command to display binary graph information. 
+ +**Implementation Path**: `src/Cli/StellaOps.Cli/Commands/Binary/` + +**Acceptance Criteria**: +- [ ] `stella binary info --hash <graph-hash>` +- [ ] Display node/edge counts +- [ ] Display entrypoints +- [ ] Display build-ID and format +- [ ] Display attestation status +- [ ] JSON output option + +**Output Format**: +``` +Binary Graph: blake3:abc123... +Format: ELF x86_64 +Build-ID: gnu-build-id:5f0c7c3c... +Nodes: 1247 +Edges: 3891 +Entrypoints: 5 +Attestation: Signed (Rekor #12345678) +``` + +--- + +### T5: stella binary symbols Command + +**Assignee**: CLI Team +**Story Points**: 2 +**Status**: TODO + +**Description**: +Implement CLI command to list symbols from binary graph. + +**Implementation Path**: `src/Cli/StellaOps.Cli/Commands/Binary/` + +**Acceptance Criteria**: +- [ ] `stella binary symbols --hash <graph-hash>` +- [ ] Filter: `--stripped-only`, `--exported-only`, `--entrypoints-only` +- [ ] Search: `--search <pattern>` +- [ ] Pagination support +- [ ] JSON output option + +**Usage**: +```bash +# List all symbols +stella binary symbols --hash blake3:abc123... + +# List only stripped (heuristic) symbols +stella binary symbols --hash blake3:abc123... --stripped-only + +# Search for specific function +stella binary symbols --hash blake3:abc123... --search "ssl_*" +``` + +--- + +### T6: stella binary verify Command + +**Assignee**: CLI Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Implement CLI command to verify binary graph attestation. + +**Implementation Path**: `src/Cli/StellaOps.Cli/Commands/Binary/` + +**Acceptance Criteria**: +- [ ] `stella binary verify --graph <path> --dsse <path>` +- [ ] Verify DSSE signature +- [ ] Verify Rekor inclusion (if logged) +- [ ] Verify graph digest matches +- [ ] Display verification result +- [ ] Exit code: 0=valid, 1=invalid + +**Verification Flow**: +``` +1. Parse DSSE envelope +2. Verify signature against configured keys +3. Extract predicate, verify graph hash +4. (Optional) Verify Rekor inclusion proof +5. 
Report result +``` + +--- + +### T7: CLI Integration Tests + +**Assignee**: CLI Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Integration tests for binary CLI commands. + +**Implementation Path**: `src/Cli/StellaOps.Cli.Tests/` + +**Acceptance Criteria**: +- [ ] Submit command test with mock API +- [ ] Info command test +- [ ] Symbols command test with filters +- [ ] Verify command test (valid and invalid cases) +- [ ] Offline mode tests + +--- + +### T8: Documentation Updates + +**Assignee**: CLI Team +**Story Points**: 2 +**Status**: TODO + +**Description**: +Update CLI documentation with binary commands. + +**Implementation Path**: `docs/09_API_CLI_REFERENCE.md` + +**Acceptance Criteria**: +- [ ] Document all `stella binary` subcommands +- [ ] Usage examples +- [ ] Error codes and troubleshooting +- [ ] Link to binary reachability schema docs + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | Sprint 3810 | Scanner Team | OCI Manifest Builder | +| 2 | T2 | TODO | T1 | Scanner Team | Registry Push Service | +| 3 | T3 | TODO | T2 | CLI Team | stella binary submit | +| 4 | T4 | TODO | — | CLI Team | stella binary info | +| 5 | T5 | TODO | — | CLI Team | stella binary symbols | +| 6 | T6 | TODO | — | CLI Team | stella binary verify | +| 7 | T7 | TODO | T3-T6 | CLI Team | CLI Integration Tests | +| 8 | T8 | TODO | T3-T6 | CLI Team | Documentation Updates | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint file created from advisory gap analysis. 
| Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| OCI media types | Decision | Scanner Team | Use stellaops vendor prefix | +| Registry compatibility | Risk | Scanner Team | Test against Harbor, Zot, GHCR, ACR | +| Offline bundle format | Decision | CLI Team | Use OCI image layout for offline | +| Authentication | Decision | CLI Team | Support docker config.json and explicit creds | + +--- + +**Sprint Status**: TODO (0/8 tasks complete) diff --git a/docs/implplan/SPRINT_3900_0001_0001_exception_objects_schema_model.md b/docs/implplan/SPRINT_3900_0001_0001_exception_objects_schema_model.md index 5ed5e2920..24cea5bc5 100644 --- a/docs/implplan/SPRINT_3900_0001_0001_exception_objects_schema_model.md +++ b/docs/implplan/SPRINT_3900_0001_0001_exception_objects_schema_model.md @@ -12,7 +12,7 @@ - **Safe to parallelize with**: Unrelated epics ## Documentation Prerequisites -- `docs/product-advisories/unprocessed/moats/20-Dec-2025 - Moat Explanation - Exception management as auditable objects.md` +- `docs/product-advisories/archived/20-Dec-2025 - Moat Explanation - Exception management as auditable objects.md` - `docs/modules/policy/architecture.md` - `docs/db/SPECIFICATION.md` diff --git a/docs/implplan/SPRINT_3900_0003_0001_exploit_path_inbox_proof_bundles.md b/docs/implplan/SPRINT_3900_0003_0001_exploit_path_inbox_proof_bundles.md new file mode 100644 index 000000000..8f441ddff --- /dev/null +++ b/docs/implplan/SPRINT_3900_0003_0001_exploit_path_inbox_proof_bundles.md @@ -0,0 +1,1298 @@ +# Sprint 3900.0003.0001 · Exploit Path Inbox & Proof Bundles + +## Topic & Scope + +- Implement triage inbox grouped by exploit paths instead of individual findings +- Create proof bundle API aggregating reach subgraph + symbol map + VEX claims +- Enable analysts to triage entire attack chains, not isolated CVEs + +**Working directory:** `src/Scanner/__Libraries/StellaOps.Scanner.Triage/`, 
`src/Web/StellaOps.Web/src/app/features/triage/` + +## Dependencies & Concurrency + +- **Upstream**: SPRINT_3900_0002_0002 (Exception UI Wiring) — IN PROGRESS +- **Upstream**: SPRINT_4200_0001_0001 (Triage REST API) — provides base triage endpoints +- **Downstream**: SPRINT_3900_0003_0002 (Recheck Policy & Evidence Hooks) +- **Safe to parallelize with**: SPRINT_4100_0003_0001 (Snapshot Merge Preview) + +## Documentation Prerequisites + +- `docs/product-advisories/22-Dec-2026 - UI Patterns for Triage and Replay.md` (source advisory) +- `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/AGENTS.md` +- `src/Web/StellaOps.Web/src/app/features/triage/` existing components +- `docs/modules/triage/exploit-path-inbox.md` (created with this sprint) + +--- + +## Problem Analysis + +Current triage workspace groups findings by CVE/package, requiring analysts to mentally correlate: +- Which packages are reachable from entry points +- Which runtime paths lead to vulnerable symbols +- Which VEX statements apply to this specific context + +The advisory specifies inbox grouping by **exploit path**: `(artifact → package → vulnerable symbol → runtime path)`. This allows: +1. Triage entire attack chains at once +2. See full evidence for each path +3. Apply exceptions to paths, not just CVEs + +--- + +## Tasks + +### T1: Define ExploitPath Domain Model + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: — + +**Description**: +Create the domain model for exploit paths that aggregates reachability evidence. + +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Triage/Models/ExploitPath.cs` + +**Model Definition**: +```csharp +namespace StellaOps.Scanner.Triage.Models; + +/// +/// Represents a complete exploit path from artifact entry point to vulnerable symbol. +/// Groups related findings that share the same attack chain. 
+/// +public sealed record ExploitPath +{ + /// + /// Stable deterministic ID for this path: hash(artifact + package + symbol + entryPoint). + /// + public required string PathId { get; init; } + + /// + /// Artifact containing the vulnerable code. + /// + public required string ArtifactDigest { get; init; } + + /// + /// Package containing the vulnerability. + /// + public required PackageRef Package { get; init; } + + /// + /// The vulnerable symbol (function, method, class). + /// + public required VulnerableSymbol Symbol { get; init; } + + /// + /// Entry point from which this path is reachable. + /// + public required EntryPoint EntryPoint { get; init; } + + /// + /// All CVEs affecting this path. + /// + public required ImmutableArray CveIds { get; init; } + + /// + /// Reachability status from lattice. + /// + public required ReachabilityStatus Reachability { get; init; } + + /// + /// Aggregated CVSS/EPSS scores for this path. + /// + public required PathRiskScore RiskScore { get; init; } + + /// + /// Evidence supporting this path. + /// + public required PathEvidence Evidence { get; init; } + + /// + /// Active exceptions applying to this path. + /// + public ImmutableArray ActiveExceptions { get; init; } = []; + + /// + /// Whether this path is "quiet" (all findings suppressed by exceptions/VEX). + /// + public bool IsQuiet => ActiveExceptions.Length > 0 || Evidence.VexStatus == VexStatus.NotAffected; + + /// + /// When this path was first detected. + /// + public required DateTimeOffset FirstSeenAt { get; init; } + + /// + /// When this path was last updated. + /// + public required DateTimeOffset LastUpdatedAt { get; init; } +} + +public sealed record PackageRef( + string Purl, + string Name, + string Version, + string? Ecosystem); + +public sealed record VulnerableSymbol( + string FullyQualifiedName, + string? SourceFile, + int? LineNumber, + string? 
Language); + +public sealed record EntryPoint( + string Name, + string Type, // http, grpc, cli, scheduled, etc. + string? Path); + +public sealed record PathRiskScore( + decimal AggregatedCvss, + decimal MaxEpss, + int CriticalCount, + int HighCount, + int MediumCount, + int LowCount); + +public sealed record PathEvidence( + ReachabilityLatticeState LatticeState, + VexStatus VexStatus, + decimal Confidence, + ImmutableArray Items); + +public sealed record EvidenceItem( + string Type, // static-path, runtime-hit, feature-flag, backport, vex-claim + string Source, + string Description, + decimal Weight); + +public sealed record ExceptionRef( + string ExceptionId, + string Reason, + DateTimeOffset ExpiresAt); + +public enum ReachabilityStatus +{ + Unknown, + StaticallyReachable, + RuntimeConfirmed, + Unreachable, + Contested +} +``` + +**Acceptance Criteria**: +- [ ] `ExploitPath` record with stable deterministic ID +- [ ] Aggregates CVEs, reachability, VEX status +- [ ] `IsQuiet` property for filtering +- [ ] Evidence model with items and weights +- [ ] Exception refs for active suppressions + +--- + +### T2: Implement ExploitPathGroupingService + +**Assignee**: Scanner Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Service that groups findings into exploit paths based on reachability graph. + +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Triage/Services/ExploitPathGroupingService.cs` + +**Implementation**: +```csharp +namespace StellaOps.Scanner.Triage.Services; + +/// +/// Groups findings into exploit paths by correlating reachability data. 
+/// +public sealed class ExploitPathGroupingService : IExploitPathGroupingService +{ + private readonly IReachabilityQueryService _reachability; + private readonly IVexDecisionService _vexService; + private readonly IExceptionEvaluator _exceptionEvaluator; + private readonly ILogger _logger; + + public ExploitPathGroupingService( + IReachabilityQueryService reachability, + IVexDecisionService vexService, + IExceptionEvaluator exceptionEvaluator, + ILogger logger) + { + _reachability = reachability; + _vexService = vexService; + _exceptionEvaluator = exceptionEvaluator; + _logger = logger; + } + + /// + /// Groups findings for an artifact into exploit paths. + /// + public async Task> GroupFindingsAsync( + string artifactDigest, + IReadOnlyList findings, + CancellationToken ct = default) + { + _logger.LogInformation( + "Grouping {Count} findings into exploit paths for {Artifact}", + findings.Count, artifactDigest); + + // Step 1: Get reachability graph for artifact + var reachGraph = await _reachability.GetReachGraphAsync(artifactDigest, ct); + if (reachGraph is null) + { + _logger.LogWarning("No reachability graph for {Artifact}, using finding-based grouping", artifactDigest); + return GroupByFindingFallback(artifactDigest, findings); + } + + // Step 2: Map findings to vulnerable symbols + var symbolFindings = MapFindingsToSymbols(findings, reachGraph); + + // Step 3: Group by (package, symbol, entryPoint) tuple + var pathGroups = symbolFindings + .GroupBy(sf => (sf.Package.Purl, sf.Symbol.FullyQualifiedName, sf.EntryPoint.Name)) + .ToList(); + + // Step 4: Build ExploitPath for each group + var paths = new List(); + foreach (var group in pathGroups) + { + var path = await BuildExploitPathAsync( + artifactDigest, + group.Key, + group.ToList(), + reachGraph, + ct); + paths.Add(path); + } + + // Step 5: Sort by risk (highest first) + return paths + .OrderByDescending(p => p.RiskScore.AggregatedCvss) + .ThenByDescending(p => p.RiskScore.MaxEpss) + .ThenBy(p => 
p.IsQuiet) // Non-quiet first + .ToList(); + } + + /// + /// Generates stable deterministic path ID. + /// + public static string GeneratePathId( + string artifactDigest, + string packagePurl, + string symbolName, + string entryPointName) + { + var input = $"{artifactDigest}|{packagePurl}|{symbolName}|{entryPointName}"; + var hash = SHA256.HashData(Encoding.UTF8.GetBytes(input)); + return $"path:{Convert.ToHexString(hash)[..16].ToLowerInvariant()}"; + } + + private async Task BuildExploitPathAsync( + string artifactDigest, + (string Purl, string Symbol, string EntryPoint) key, + List symbolFindings, + ReachabilityGraph reachGraph, + CancellationToken ct) + { + var pathId = GeneratePathId(artifactDigest, key.Purl, key.Symbol, key.EntryPoint); + var first = symbolFindings[0]; + + // Aggregate CVEs + var cveIds = symbolFindings + .SelectMany(sf => sf.Finding.CveIds) + .Distinct() + .OrderBy(x => x) + .ToImmutableArray(); + + // Get VEX status for this path + var vexStatus = await _vexService.GetStatusForPathAsync( + artifactDigest, key.Purl, cveIds, ct); + + // Get active exceptions + var exceptions = await _exceptionEvaluator.GetActiveExceptionsForPathAsync( + pathId, cveIds, ct); + + // Calculate risk score + var riskScore = CalculateRiskScore(symbolFindings); + + // Build evidence + var evidence = BuildEvidence(symbolFindings, reachGraph, vexStatus); + + // Determine reachability status + var reachStatus = DetermineReachabilityStatus(evidence.LatticeState); + + return new ExploitPath + { + PathId = pathId, + ArtifactDigest = artifactDigest, + Package = first.Package, + Symbol = first.Symbol, + EntryPoint = first.EntryPoint, + CveIds = cveIds, + Reachability = reachStatus, + RiskScore = riskScore, + Evidence = evidence, + ActiveExceptions = exceptions.Select(e => new ExceptionRef( + e.ExceptionId, + e.Reason, + e.ExpiresAt)).ToImmutableArray(), + FirstSeenAt = symbolFindings.Min(sf => sf.Finding.FirstSeenAt), + LastUpdatedAt = DateTimeOffset.UtcNow + }; + } + + 
private static PathRiskScore CalculateRiskScore(List findings) + { + var allFindings = findings.Select(sf => sf.Finding).ToList(); + return new PathRiskScore( + AggregatedCvss: allFindings.Max(f => f.CvssScore), + MaxEpss: allFindings.Max(f => f.EpssScore), + CriticalCount: allFindings.Count(f => f.Severity == Severity.Critical), + HighCount: allFindings.Count(f => f.Severity == Severity.High), + MediumCount: allFindings.Count(f => f.Severity == Severity.Medium), + LowCount: allFindings.Count(f => f.Severity == Severity.Low)); + } + + private PathEvidence BuildEvidence( + List findings, + ReachabilityGraph reachGraph, + VexStatusResult vexStatus) + { + var items = new List(); + + // Add reachability evidence + foreach (var sf in findings) + { + var paths = reachGraph.GetPathsTo(sf.Symbol.FullyQualifiedName); + foreach (var path in paths) + { + items.Add(new EvidenceItem( + Type: path.IsRuntimeObserved ? "runtime-hit" : "static-path", + Source: "reachability-analyzer", + Description: $"Path from {path.EntryPoint} to {path.TargetSymbol}", + Weight: path.IsRuntimeObserved ? 0.9m : 0.6m)); + } + } + + // Add VEX evidence + if (vexStatus.HasStatement) + { + items.Add(new EvidenceItem( + Type: "vex-claim", + Source: vexStatus.Source ?? 
"unknown", + Description: $"VEX status: {vexStatus.Status}", + Weight: vexStatus.TrustWeight)); + } + + // Determine lattice state + var latticeState = ComputeLatticeState(items); + + return new PathEvidence( + LatticeState: latticeState, + VexStatus: vexStatus.Status, + Confidence: items.Sum(i => i.Weight) / Math.Max(items.Count, 1), + Items: items.ToImmutableArray()); + } + + private static ReachabilityLatticeState ComputeLatticeState(List items) + { + var hasRuntimeHit = items.Any(i => i.Type == "runtime-hit"); + var hasStaticPath = items.Any(i => i.Type == "static-path"); + + if (hasRuntimeHit) + return ReachabilityLatticeState.RuntimeObserved; + if (hasStaticPath) + return ReachabilityLatticeState.StaticallyReachable; + return ReachabilityLatticeState.Unknown; + } + + private static ReachabilityStatus DetermineReachabilityStatus(ReachabilityLatticeState state) + { + return state switch + { + ReachabilityLatticeState.RuntimeObserved => ReachabilityStatus.RuntimeConfirmed, + ReachabilityLatticeState.StaticallyReachable => ReachabilityStatus.StaticallyReachable, + ReachabilityLatticeState.Unreachable => ReachabilityStatus.Unreachable, + ReachabilityLatticeState.Contested => ReachabilityStatus.Contested, + _ => ReachabilityStatus.Unknown + }; + } + + private IReadOnlyList GroupByFindingFallback( + string artifactDigest, + IReadOnlyList findings) + { + // Fallback: group by package only when no reachability data + return findings + .GroupBy(f => f.PackagePurl) + .Select(g => new ExploitPath + { + PathId = GeneratePathId(artifactDigest, g.Key, "unknown", "unknown"), + ArtifactDigest = artifactDigest, + Package = new PackageRef(g.Key, g.First().PackageName, g.First().PackageVersion, null), + Symbol = new VulnerableSymbol("unknown", null, null, null), + EntryPoint = new EntryPoint("unknown", "unknown", null), + CveIds = g.SelectMany(f => f.CveIds).Distinct().ToImmutableArray(), + Reachability = ReachabilityStatus.Unknown, + RiskScore = new PathRiskScore( + g.Max(f => 
f.CvssScore), + g.Max(f => f.EpssScore), + g.Count(f => f.Severity == Severity.Critical), + g.Count(f => f.Severity == Severity.High), + g.Count(f => f.Severity == Severity.Medium), + g.Count(f => f.Severity == Severity.Low)), + Evidence = new PathEvidence( + ReachabilityLatticeState.Unknown, + VexStatus.UnderInvestigation, + 0m, + []), + FirstSeenAt = g.Min(f => f.FirstSeenAt), + LastUpdatedAt = DateTimeOffset.UtcNow + }) + .ToList(); + } +} + +public interface IExploitPathGroupingService +{ + Task> GroupFindingsAsync( + string artifactDigest, + IReadOnlyList findings, + CancellationToken ct = default); +} + +internal sealed record SymbolFinding( + Finding Finding, + PackageRef Package, + VulnerableSymbol Symbol, + EntryPoint EntryPoint); +``` + +**Acceptance Criteria**: +- [ ] Groups findings by (package, symbol, entryPoint) +- [ ] Generates stable deterministic path IDs +- [ ] Correlates reachability evidence +- [ ] Integrates VEX status +- [ ] Includes active exceptions +- [ ] Fallback when no reachability data +- [ ] Sorted by risk score + +--- + +### T3: Create Triage Inbox API Endpoint + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1, T2 + +**Description**: +Add `GET /triage/inbox` endpoint returning exploit paths with filtering. 
+ +**Implementation Path**: `src/Scanner/StellaOps.Scanner.WebService/Endpoints/TriageInboxEndpoints.cs` + +**Implementation**: +```csharp +namespace StellaOps.Scanner.WebService.Endpoints; + +public static class TriageInboxEndpoints +{ + public static IEndpointRouteBuilder MapTriageInboxEndpoints(this IEndpointRouteBuilder routes) + { + var group = routes.MapGroup("/api/v1/triage") + .WithTags("Triage") + .RequireAuthorization(); + + group.MapGet("/inbox", HandleGetInboxAsync) + .WithName("GetTriageInbox") + .WithSummary("Get triage inbox grouped by exploit paths") + .Produces() + .Produces(StatusCodes.Status400BadRequest); + + group.MapGet("/paths/{pathId}", HandleGetPathAsync) + .WithName("GetExploitPath") + .WithSummary("Get exploit path details") + .Produces() + .Produces(StatusCodes.Status404NotFound); + + return routes; + } + + private static async Task HandleGetInboxAsync( + [AsParameters] TriageInboxQuery query, + IExploitPathGroupingService groupingService, + IFindingsRepository findingsRepo, + CancellationToken ct) + { + // Validate scope + if (string.IsNullOrEmpty(query.ArtifactDigest) && string.IsNullOrEmpty(query.Environment)) + { + return Results.BadRequest("Either artifactDigest or environment scope required"); + } + + // Get findings for scope + var findings = await findingsRepo.GetFindingsAsync( + artifactDigest: query.ArtifactDigest, + environment: query.Environment, + ct: ct); + + // Group into exploit paths + var artifactDigest = query.ArtifactDigest ?? findings.FirstOrDefault()?.ArtifactDigest ?? 
""; + var paths = await groupingService.GroupFindingsAsync(artifactDigest, findings, ct); + + // Apply filters + if (query.Quiet.HasValue) + { + paths = paths.Where(p => p.IsQuiet == query.Quiet.Value).ToList(); + } + + if (query.MinCvss.HasValue) + { + paths = paths.Where(p => p.RiskScore.AggregatedCvss >= query.MinCvss.Value).ToList(); + } + + if (query.Reachability.HasValue) + { + paths = paths.Where(p => p.Reachability == query.Reachability.Value).ToList(); + } + + // Paginate + var total = paths.Count; + var paged = paths + .Skip(query.Offset) + .Take(query.Limit) + .ToList(); + + return Results.Ok(new TriageInboxResponse + { + Paths = paged.Select(MapToDto).ToList(), + TotalCount = total, + QuietCount = paths.Count(p => p.IsQuiet), + ActiveCount = paths.Count(p => !p.IsQuiet), + Offset = query.Offset, + Limit = query.Limit + }); + } + + private static async Task HandleGetPathAsync( + string pathId, + IExploitPathRepository pathRepo, + CancellationToken ct) + { + var path = await pathRepo.GetByIdAsync(pathId, ct); + if (path is null) + { + return Results.NotFound($"Exploit path {pathId} not found"); + } + + return Results.Ok(MapToDetailDto(path)); + } + + private static ExploitPathDto MapToDto(ExploitPath path) => new() + { + PathId = path.PathId, + Package = new PackageRefDto(path.Package.Purl, path.Package.Name, path.Package.Version), + Symbol = path.Symbol.FullyQualifiedName, + EntryPoint = path.EntryPoint.Name, + CveIds = path.CveIds.ToList(), + Reachability = path.Reachability.ToString(), + RiskScore = new RiskScoreDto( + path.RiskScore.AggregatedCvss, + path.RiskScore.MaxEpss, + path.RiskScore.CriticalCount, + path.RiskScore.HighCount), + IsQuiet = path.IsQuiet, + ExceptionCount = path.ActiveExceptions.Length, + LastUpdatedAt = path.LastUpdatedAt + }; + + private static ExploitPathDetailDto MapToDetailDto(ExploitPath path) => new() + { + PathId = path.PathId, + ArtifactDigest = path.ArtifactDigest, + Package = new PackageRefDto(path.Package.Purl, 
path.Package.Name, path.Package.Version), + Symbol = new SymbolDto( + path.Symbol.FullyQualifiedName, + path.Symbol.SourceFile, + path.Symbol.LineNumber, + path.Symbol.Language), + EntryPoint = new EntryPointDto( + path.EntryPoint.Name, + path.EntryPoint.Type, + path.EntryPoint.Path), + CveIds = path.CveIds.ToList(), + Reachability = path.Reachability.ToString(), + RiskScore = new RiskScoreDto( + path.RiskScore.AggregatedCvss, + path.RiskScore.MaxEpss, + path.RiskScore.CriticalCount, + path.RiskScore.HighCount), + Evidence = new EvidenceDto( + path.Evidence.LatticeState.ToString(), + path.Evidence.VexStatus.ToString(), + path.Evidence.Confidence, + path.Evidence.Items.Select(i => new EvidenceItemDto(i.Type, i.Source, i.Description, i.Weight)).ToList()), + ActiveExceptions = path.ActiveExceptions.Select(e => new ExceptionRefDto(e.ExceptionId, e.Reason, e.ExpiresAt)).ToList(), + IsQuiet = path.IsQuiet, + FirstSeenAt = path.FirstSeenAt, + LastUpdatedAt = path.LastUpdatedAt + }; +} + +public sealed record TriageInboxQuery( + string? ArtifactDigest, + string? Environment, + bool? Quiet, + decimal? MinCvss, + ReachabilityStatus? 
Reachability, + int Offset = 0, + int Limit = 50); + +public sealed record TriageInboxResponse +{ + public required List Paths { get; init; } + public required int TotalCount { get; init; } + public required int QuietCount { get; init; } + public required int ActiveCount { get; init; } + public required int Offset { get; init; } + public required int Limit { get; init; } +} + +// DTOs omitted for brevity - see contracts file +``` + +**Acceptance Criteria**: +- [ ] `GET /triage/inbox` returns exploit paths +- [ ] Scope filtering by artifact or environment +- [ ] `quiet=true/false` filter for suppressed paths +- [ ] Pagination with offset/limit +- [ ] Returns counts (total, quiet, active) +- [ ] Tenant isolation via authorization + +--- + +### T4: Create Proof Bundle API Endpoint + +**Assignee**: Scanner Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: T1, T2 + +**Description**: +Add `GET /triage/paths/{pathId}/proof` endpoint returning complete proof bundle. + +**Implementation Path**: `src/Scanner/StellaOps.Scanner.WebService/Endpoints/ProofBundleEndpoints.cs` + +**Implementation**: +```csharp +namespace StellaOps.Scanner.WebService.Endpoints; + +public static class ProofBundleEndpoints +{ + public static IEndpointRouteBuilder MapProofBundleEndpoints(this IEndpointRouteBuilder routes) + { + var group = routes.MapGroup("/api/v1/triage/paths") + .WithTags("Triage") + .RequireAuthorization(); + + group.MapGet("/{pathId}/proof", HandleGetProofBundleAsync) + .WithName("GetProofBundle") + .WithSummary("Get proof bundle for exploit path with reach subgraph, symbol map, and VEX claims") + .Produces() + .Produces(StatusCodes.Status404NotFound); + + group.MapGet("/{pathId}/proof/export", HandleExportProofBundleAsync) + .WithName("ExportProofBundle") + .WithSummary("Export proof bundle as JSON file") + .Produces(contentType: "application/json") + .Produces(StatusCodes.Status404NotFound); + + return routes; + } + + private static async Task 
HandleGetProofBundleAsync( + string pathId, + IProofBundleService bundleService, + CancellationToken ct) + { + var bundle = await bundleService.GetProofBundleAsync(pathId, ct); + if (bundle is null) + { + return Results.NotFound($"Proof bundle for path {pathId} not found"); + } + + return Results.Ok(MapToResponse(bundle)); + } + + private static async Task HandleExportProofBundleAsync( + string pathId, + IProofBundleService bundleService, + CancellationToken ct) + { + var bundle = await bundleService.GetProofBundleAsync(pathId, ct); + if (bundle is null) + { + return Results.NotFound($"Proof bundle for path {pathId} not found"); + } + + var json = ProofBundleSerializer.Serialize(bundle); + var bytes = Encoding.UTF8.GetBytes(json); + return Results.File(bytes, "application/json", $"proof-{pathId}.json"); + } + + private static ProofBundleResponse MapToResponse(ProofBundle bundle) => new() + { + PathId = bundle.PathId, + GeneratedAt = bundle.GeneratedAt, + ReachSubgraph = new ReachSubgraphDto + { + Nodes = bundle.ReachSubgraph.Nodes.Select(n => new GraphNodeDto( + n.Id, n.Label, n.Type, n.Depth, n.IsVulnerable, n.IsEntryPoint)).ToList(), + Edges = bundle.ReachSubgraph.Edges.Select(e => new GraphEdgeDto( + e.SourceId, e.TargetId, e.Label, e.Weight)).ToList(), + EntryPointId = bundle.ReachSubgraph.EntryPointId, + VulnerableSymbolId = bundle.ReachSubgraph.VulnerableSymbolId, + TotalNodes = bundle.ReachSubgraph.Nodes.Count, + TotalEdges = bundle.ReachSubgraph.Edges.Count + }, + SymbolMap = new SymbolMapDto + { + Symbols = bundle.SymbolMap.Symbols.Select(s => new SymbolEntryDto( + s.Id, + s.FullyQualifiedName, + s.SourceFile, + s.LineNumber, + s.Language, + s.Signature)).ToList(), + SourceFiles = bundle.SymbolMap.SourceFiles.ToList() + }, + VexClaims = bundle.VexClaims.Select(v => new VexClaimDto + { + CveId = v.CveId, + Status = v.Status.ToString(), + Justification = v.Justification, + Source = v.Source, + TrustScore = v.TrustScore, + Timestamp = v.Timestamp, + 
SignatureValid = v.SignatureValid + }).ToList(), + TrustScores = new TrustScoresDto + { + ReachabilityConfidence = bundle.TrustScores.ReachabilityConfidence, + VexConfidence = bundle.TrustScores.VexConfidence, + OverallConfidence = bundle.TrustScores.OverallConfidence, + EvidenceCount = bundle.TrustScores.EvidenceCount + }, + BundleDigest = bundle.BundleDigest + }; +} + +/// +/// Service for assembling proof bundles. +/// +public sealed class ProofBundleService : IProofBundleService +{ + private readonly IExploitPathRepository _pathRepo; + private readonly IReachabilityQueryService _reachability; + private readonly ISymbolMapService _symbolMap; + private readonly IVexClaimService _vexClaims; + private readonly ILogger _logger; + + public ProofBundleService( + IExploitPathRepository pathRepo, + IReachabilityQueryService reachability, + ISymbolMapService symbolMap, + IVexClaimService vexClaims, + ILogger logger) + { + _pathRepo = pathRepo; + _reachability = reachability; + _symbolMap = symbolMap; + _vexClaims = vexClaims; + _logger = logger; + } + + public async Task GetProofBundleAsync(string pathId, CancellationToken ct) + { + var path = await _pathRepo.GetByIdAsync(pathId, ct); + if (path is null) + { + return null; + } + + _logger.LogInformation("Building proof bundle for path {PathId}", pathId); + + // Get reach subgraph (pruned to relevant nodes) + var subgraph = await _reachability.GetSubgraphAsync( + path.ArtifactDigest, + path.EntryPoint.Name, + path.Symbol.FullyQualifiedName, + ct); + + // Get symbol map for all symbols in subgraph + var symbolIds = subgraph.Nodes.Select(n => n.Id).ToList(); + var symbolMap = await _symbolMap.GetSymbolMapAsync(path.ArtifactDigest, symbolIds, ct); + + // Get VEX claims for all CVEs + var vexClaims = await _vexClaims.GetClaimsAsync(path.ArtifactDigest, path.CveIds, ct); + + // Calculate trust scores + var trustScores = CalculateTrustScores(path, subgraph, vexClaims); + + var bundle = new ProofBundle + { + PathId = pathId, + 
GeneratedAt = DateTimeOffset.UtcNow, + ReachSubgraph = subgraph, + SymbolMap = symbolMap, + VexClaims = vexClaims.ToImmutableArray(), + TrustScores = trustScores + }; + + // Compute digest for integrity + bundle = bundle with { BundleDigest = ComputeBundleDigest(bundle) }; + + return bundle; + } + + private static TrustScores CalculateTrustScores( + ExploitPath path, + ReachSubgraph subgraph, + IReadOnlyList vexClaims) + { + var reachConfidence = subgraph.Edges.Any(e => e.Weight > 0.8m) ? 0.9m : 0.6m; + var vexConfidence = vexClaims.Any() ? vexClaims.Max(v => v.TrustScore) : 0m; + var evidenceCount = path.Evidence.Items.Length; + + return new TrustScores( + ReachabilityConfidence: reachConfidence, + VexConfidence: vexConfidence, + OverallConfidence: (reachConfidence + vexConfidence) / 2, + EvidenceCount: evidenceCount); + } + + private static string ComputeBundleDigest(ProofBundle bundle) + { + var json = ProofBundleSerializer.SerializeForDigest(bundle); + var hash = SHA256.HashData(Encoding.UTF8.GetBytes(json)); + return $"sha256:{Convert.ToHexString(hash).ToLowerInvariant()}"; + } +} + +public interface IProofBundleService +{ + Task GetProofBundleAsync(string pathId, CancellationToken ct); +} +``` + +**Acceptance Criteria**: +- [ ] `GET /triage/paths/{pathId}/proof` returns proof bundle +- [ ] Bundle includes reach subgraph (nodes + edges) +- [ ] Bundle includes symbol map with source locations +- [ ] Bundle includes VEX claims with trust scores +- [ ] Bundle has computed digest for integrity +- [ ] Export endpoint for JSON download +- [ ] Logged for audit + +--- + +### T5: Create Angular Triage Inbox Component + +**Assignee**: Frontend Team +**Story Points**: 8 +**Status**: TODO +**Dependencies**: T3, T4 + +**Description**: +Implement 3-pane triage inbox UI with exploit path list, details, and proof viewer. 
+ +**Implementation Path**: `src/Web/StellaOps.Web/src/app/features/triage/inbox/` + +**Component Structure**: +``` +inbox/ +├── triage-inbox.component.ts # Main container +├── triage-inbox.component.html +├── triage-inbox.component.scss +├── components/ +│ ├── path-list/ +│ │ ├── path-list.component.ts # Left pane - path list +│ │ └── path-list.component.html +│ ├── path-detail/ +│ │ ├── path-detail.component.ts # Center pane - path details +│ │ └── path-detail.component.html +│ └── proof-viewer/ +│ ├── proof-viewer.component.ts # Right pane - proof bundle +│ ├── reach-graph.component.ts # Subgraph visualization +│ ├── symbol-map.component.ts # Symbol map with source links +│ └── vex-claims.component.ts # VEX claims with trust badges +├── models/ +│ └── inbox.models.ts +└── services/ + └── inbox.service.ts +``` + +**Key Features**: +1. **Left Pane (Path List)**: + - Exploit paths sorted by risk + - Quiet/Active toggle filter + - Search by CVE, package, symbol + - Risk score badges (Critical/High/Medium/Low) + - Reachability status icons + +2. **Center Pane (Path Detail)**: + - CVE list with CVSS/EPSS scores + - Package and symbol information + - Entry point context + - Active exceptions list + - "Create Exception" button + +3. 
**Right Pane (Proof Viewer)**: + - Collapsible reach subgraph (Cytoscape.js) + - Symbol map with source file links + - VEX claims with trust score badges + - "Export Proof" button + +**Acceptance Criteria**: +- [ ] 3-pane responsive layout +- [ ] Path list with filtering and sorting +- [ ] Path detail view with CVE/package/symbol info +- [ ] Proof viewer with reach graph visualization +- [ ] Symbol map with source navigation +- [ ] VEX claims with verification badges +- [ ] Quiet/Active toggle +- [ ] Export proof bundle +- [ ] Loading states and error handling + +--- + +### T6: Add Inbox API Models and Service + +**Assignee**: Frontend Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T3, T4 + +**Description**: +TypeScript models and service for inbox API integration. + +**Implementation Path**: `src/Web/StellaOps.Web/src/app/core/api/inbox.models.ts` + +**Implementation**: +```typescript +// inbox.models.ts +export interface ExploitPath { + pathId: string; + package: PackageRef; + symbol: string; + entryPoint: string; + cveIds: string[]; + reachability: ReachabilityStatus; + riskScore: RiskScore; + isQuiet: boolean; + exceptionCount: number; + lastUpdatedAt: string; +} + +export interface PackageRef { + purl: string; + name: string; + version: string; +} + +export interface RiskScore { + aggregatedCvss: number; + maxEpss: number; + criticalCount: number; + highCount: number; +} + +export type ReachabilityStatus = + | 'Unknown' + | 'StaticallyReachable' + | 'RuntimeConfirmed' + | 'Unreachable' + | 'Contested'; + +export interface TriageInboxResponse { + paths: ExploitPath[]; + totalCount: number; + quietCount: number; + activeCount: number; + offset: number; + limit: number; +} + +export interface ProofBundle { + pathId: string; + generatedAt: string; + reachSubgraph: ReachSubgraph; + symbolMap: SymbolMap; + vexClaims: VexClaim[]; + trustScores: TrustScores; + bundleDigest: string; +} + +export interface ReachSubgraph { + nodes: GraphNode[]; + edges: 
GraphEdge[]; + entryPointId: string; + vulnerableSymbolId: string; + totalNodes: number; + totalEdges: number; +} + +export interface GraphNode { + id: string; + label: string; + type: string; + depth: number; + isVulnerable: boolean; + isEntryPoint: boolean; +} + +export interface GraphEdge { + sourceId: string; + targetId: string; + label: string; + weight: number; +} + +export interface SymbolMap { + symbols: SymbolEntry[]; + sourceFiles: string[]; +} + +export interface SymbolEntry { + id: string; + fullyQualifiedName: string; + sourceFile: string | null; + lineNumber: number | null; + language: string | null; + signature: string | null; +} + +export interface VexClaim { + cveId: string; + status: string; + justification: string | null; + source: string; + trustScore: number; + timestamp: string; + signatureValid: boolean; +} + +export interface TrustScores { + reachabilityConfidence: number; + vexConfidence: number; + overallConfidence: number; + evidenceCount: number; +} +``` + +**Acceptance Criteria**: +- [ ] All TypeScript interfaces defined +- [ ] Matches API response contracts +- [ ] Exported from barrel file + +--- + +### T7: Unit and Integration Tests + +**Assignee**: Scanner Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: T1-T6 + +**Description**: +Comprehensive tests for exploit path grouping and proof bundles. 
+ +**Test Cases**: +```csharp +public class ExploitPathGroupingServiceTests +{ + [Fact] + public async Task GroupFindings_WithReachabilityData_GroupsByPath() + { + // Arrange + var findings = CreateFindingsWithSharedPath(); + var service = CreateService(); + + // Act + var paths = await service.GroupFindingsAsync("sha256:abc", findings); + + // Assert + paths.Should().HaveCount(1); + paths[0].CveIds.Should().HaveCount(3); + paths[0].PathId.Should().NotBeNullOrEmpty(); + } + + [Fact] + public async Task GroupFindings_NoReachability_FallsBackToPackageGrouping() + { + // Arrange + var findings = CreateFindings(); + var service = CreateServiceWithNoReachData(); + + // Act + var paths = await service.GroupFindingsAsync("sha256:abc", findings); + + // Assert + paths.Should().NotBeEmpty(); + paths.Should().OnlyContain(p => p.Reachability == ReachabilityStatus.Unknown); + } + + [Fact] + public void GeneratePathId_SameInputs_ProducesSameId() + { + var id1 = ExploitPathGroupingService.GeneratePathId("sha256:a", "pkg:npm/lodash@4.0", "eval", "http"); + var id2 = ExploitPathGroupingService.GeneratePathId("sha256:a", "pkg:npm/lodash@4.0", "eval", "http"); + + id1.Should().Be(id2); + } + + [Fact] + public void GeneratePathId_DifferentInputs_ProducesDifferentIds() + { + var id1 = ExploitPathGroupingService.GeneratePathId("sha256:a", "pkg:npm/lodash@4.0", "eval", "http"); + var id2 = ExploitPathGroupingService.GeneratePathId("sha256:a", "pkg:npm/lodash@4.0", "exec", "http"); + + id1.Should().NotBe(id2); + } +} + +public class ProofBundleServiceTests +{ + [Fact] + public async Task GetProofBundle_ValidPath_ReturnsCompleteBundle() + { + // Arrange + var pathId = "path:abc123"; + var service = CreateService(); + + // Act + var bundle = await service.GetProofBundleAsync(pathId, CancellationToken.None); + + // Assert + bundle.Should().NotBeNull(); + bundle!.ReachSubgraph.Nodes.Should().NotBeEmpty(); + bundle.SymbolMap.Symbols.Should().NotBeEmpty(); + 
bundle.BundleDigest.Should().StartWith("sha256:"); + } + + [Fact] + public async Task GetProofBundle_InvalidPath_ReturnsNull() + { + var service = CreateService(); + + var bundle = await service.GetProofBundleAsync("nonexistent", CancellationToken.None); + + bundle.Should().BeNull(); + } + + [Fact] + public async Task GetProofBundle_SamePath_ProducesSameDigest() + { + var service = CreateService(); + + var bundle1 = await service.GetProofBundleAsync("path:abc", CancellationToken.None); + var bundle2 = await service.GetProofBundleAsync("path:abc", CancellationToken.None); + + bundle1!.BundleDigest.Should().Be(bundle2!.BundleDigest); + } +} + +public class TriageInboxEndpointsTests : IClassFixture +{ + [Fact] + public async Task GetInbox_WithScope_ReturnsExploitPaths() + { + var client = _factory.CreateClient(); + + var response = await client.GetAsync("/api/v1/triage/inbox?artifactDigest=sha256:test"); + + response.StatusCode.Should().Be(HttpStatusCode.OK); + var result = await response.Content.ReadFromJsonAsync(); + result!.Paths.Should().NotBeNull(); + } + + [Fact] + public async Task GetInbox_NoScope_Returns400() + { + var client = _factory.CreateClient(); + + var response = await client.GetAsync("/api/v1/triage/inbox"); + + response.StatusCode.Should().Be(HttpStatusCode.BadRequest); + } + + [Fact] + public async Task GetProofBundle_ValidPath_ReturnsBundle() + { + var client = _factory.CreateClient(); + await SeedTestPathAsync(); + + var response = await client.GetAsync("/api/v1/triage/paths/path:test123/proof"); + + response.StatusCode.Should().Be(HttpStatusCode.OK); + var bundle = await response.Content.ReadFromJsonAsync(); + bundle!.ReachSubgraph.Should().NotBeNull(); + } +} +``` + +**Acceptance Criteria**: +- [ ] ExploitPathGroupingService unit tests +- [ ] ProofBundleService unit tests +- [ ] Deterministic path ID tests +- [ ] API endpoint integration tests +- [ ] ≥80% code coverage + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | 
Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Scanner Team | Define ExploitPath Domain Model | +| 2 | T2 | TODO | T1 | Scanner Team | Implement ExploitPathGroupingService | +| 3 | T3 | TODO | T1, T2 | Scanner Team | Create Triage Inbox API Endpoint | +| 4 | T4 | TODO | T1, T2 | Scanner Team | Create Proof Bundle API Endpoint | +| 5 | T5 | TODO | T3, T4 | Frontend Team | Create Angular Triage Inbox Component | +| 6 | T6 | TODO | T3, T4 | Frontend Team | Add Inbox API Models and Service | +| 7 | T7 | TODO | T1-T6 | Scanner Team | Unit and Integration Tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from gap analysis of "UI Patterns for Triage and Replay" advisory. Addresses Gaps 1 (Exploit Path Inbox) and 2 (Proof Bundle API). | Claude | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Path ID generation | Decision | Scanner Team | SHA-256 hash of (artifact, purl, symbol, entryPoint) for determinism | +| Fallback grouping | Decision | Scanner Team | Group by package when no reachability data | +| Graph visualization | Decision | Frontend Team | Use Cytoscape.js for reach subgraph | +| Proof bundle digest | Decision | Scanner Team | SHA-256 of canonical JSON for integrity | + +--- + +## Success Criteria + +- [ ] All 7 tasks marked DONE +- [ ] Inbox groups findings by exploit path +- [ ] Proof bundle includes reach subgraph + symbol map + VEX claims +- [ ] 3-pane UI functional with filtering +- [ ] Path IDs are deterministic and stable +- [ ] ≥80% test coverage +- [ ] `dotnet build` succeeds +- [ ] `dotnet test` succeeds diff --git a/docs/implplan/SPRINT_3900_0003_0002_recheck_policy_evidence_hooks.md b/docs/implplan/SPRINT_3900_0003_0002_recheck_policy_evidence_hooks.md new file mode 100644 index 000000000..0ba0985b4 --- /dev/null +++ 
b/docs/implplan/SPRINT_3900_0003_0002_recheck_policy_evidence_hooks.md @@ -0,0 +1,1521 @@ +# Sprint 3900.0003.0002 · Recheck Policy & Evidence Hooks + +## Topic & Scope + +- Extend exception model with recheck policies that auto-invalidate exceptions on condition changes +- Implement evidence hooks requiring specific attestations before exception approval +- Integrate with build gates to fail on triggered recheck conditions +- Enable evidence-driven exception lifecycle + +**Working directory:** `src/Policy/__Libraries/StellaOps.Policy.Exceptions/`, `src/Web/StellaOps.Web/src/app/features/exceptions/` + +## Dependencies & Concurrency + +- **Upstream**: SPRINT_3900_0002_0002 (Exception UI Wiring) — IN PROGRESS +- **Upstream**: SPRINT_3900_0003_0001 (Exploit Path Inbox) — provides path context +- **Downstream**: Build gate integration (CI/CD pipelines) +- **Safe to parallelize with**: SPRINT_4100_0003_0001 (Snapshot Merge Preview) + +## Documentation Prerequisites + +- `docs/product-advisories/22-Dec-2026 - UI Patterns for Triage and Replay.md` (source advisory) +- `src/Policy/__Libraries/StellaOps.Policy.Exceptions/AGENTS.md` +- `docs/modules/policy/recheck-policy.md` (created with this sprint) +- `docs/modules/policy/evidence-hooks.md` (created with this sprint) + +--- + +## Problem Analysis + +Current exception system has: +- Expiry dates (time-based invalidation) +- Scope rules (CVE, package, image, environment) +- Approval workflow (draft → pending → approved → active) + +**Missing**: +1. **Recheck Policy**: Conditions that auto-invalidate exceptions (e.g., EPSS rises above threshold, reachability graph changes) +2. **Evidence Hooks**: Required attestations before approval (feature flag disabled, backport applied, compensating control) +3. 
**Build Gate Integration**: CI/CD fails if recheck policy triggered + +The advisory specifies: +```yaml +recheck_policy: + ReachGraphChange: Block + EPSSAbove: 0.5 + EnvScope: [prod, staging] + UnknownsAbove: 10 +``` + +And evidence hooks: +``` +- Feature flag off in prod (require-evidence: feature-flag-attestation) +- Backport PR merged (require-evidence: backport-attestation) +- Compensating control in place (require-evidence: control-attestation) +``` + +--- + +## Tasks + +### T1: Define RecheckPolicy Model + +**Assignee**: Policy Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: — + +**Description**: +Create the recheck policy model with condition types. + +**Implementation Path**: `src/Policy/__Libraries/StellaOps.Policy.Exceptions/Models/RecheckPolicy.cs` + +**Model Definition**: +```csharp +namespace StellaOps.Policy.Exceptions.Models; + +/// +/// Policy defining conditions that trigger exception re-evaluation. +/// When any condition is met, the exception may be invalidated or flagged. +/// +public sealed record RecheckPolicy +{ + /// + /// Unique identifier for this policy configuration. + /// + public required string PolicyId { get; init; } + + /// + /// Human-readable name for this policy. + /// + public required string Name { get; init; } + + /// + /// Conditions that trigger recheck. + /// + public required ImmutableArray Conditions { get; init; } + + /// + /// Default action when any condition is triggered. + /// + public required RecheckAction DefaultAction { get; init; } + + /// + /// Whether this policy is active. + /// + public bool IsActive { get; init; } = true; + + /// + /// When this policy was created. + /// + public required DateTimeOffset CreatedAt { get; init; } +} + +/// +/// A single condition that triggers exception re-evaluation. +/// +public sealed record RecheckCondition +{ + /// + /// Type of condition to check. 
+ /// + public required RecheckConditionType Type { get; init; } + + /// + /// Threshold value (interpretation depends on Type). + /// + public decimal? Threshold { get; init; } + + /// + /// Environment scopes where this condition applies. + /// + public ImmutableArray EnvironmentScope { get; init; } = []; + + /// + /// Action to take when this specific condition is triggered. + /// If null, uses policy's DefaultAction. + /// + public RecheckAction? Action { get; init; } + + /// + /// Human-readable description of this condition. + /// + public string? Description { get; init; } +} + +/// +/// Types of recheck conditions. +/// +public enum RecheckConditionType +{ + /// + /// Reachability graph changes (new paths discovered). + /// + ReachGraphChange, + + /// + /// EPSS score exceeds threshold. + /// + EPSSAbove, + + /// + /// CVSS score exceeds threshold. + /// + CVSSAbove, + + /// + /// Unknown budget exceeds threshold. + /// + UnknownsAbove, + + /// + /// New CVE added to same package. + /// + NewCVEInPackage, + + /// + /// KEV (Known Exploited Vulnerability) flag set. + /// + KEVFlagged, + + /// + /// Exception nearing expiry (days before). + /// + ExpiryWithin, + + /// + /// VEX status changes (e.g., from NotAffected to Affected). + /// + VEXStatusChange, + + /// + /// Package version changes. + /// + PackageVersionChange +} + +/// +/// Action to take when recheck condition is triggered. +/// +public enum RecheckAction +{ + /// + /// Log warning but allow exception to remain active. + /// + Warn, + + /// + /// Require manual re-approval of exception. + /// + RequireReapproval, + + /// + /// Automatically revoke the exception. + /// + Revoke, + + /// + /// Block build/deployment pipeline. + /// + Block +} + +/// +/// Result of evaluating recheck conditions against an exception. +/// +public sealed record RecheckEvaluationResult +{ + /// + /// Whether any conditions were triggered. 
+ /// + public required bool IsTriggered { get; init; } + + /// + /// List of triggered conditions with details. + /// + public required ImmutableArray TriggeredConditions { get; init; } + + /// + /// Recommended action based on triggered conditions. + /// + public required RecheckAction? RecommendedAction { get; init; } + + /// + /// When this evaluation was performed. + /// + public required DateTimeOffset EvaluatedAt { get; init; } + + /// + /// Human-readable summary. + /// + public string Summary => IsTriggered + ? $"{TriggeredConditions.Length} condition(s) triggered: {string.Join(", ", TriggeredConditions.Select(t => t.Type))}" + : "No conditions triggered"; +} + +/// +/// Details of a triggered recheck condition. +/// +public sealed record TriggeredCondition( + RecheckConditionType Type, + string Description, + decimal? CurrentValue, + decimal? ThresholdValue, + RecheckAction Action); +``` + +**Acceptance Criteria**: +- [ ] `RecheckPolicy` record with conditions +- [ ] All condition types from advisory +- [ ] Action types (Warn, RequireReapproval, Revoke, Block) +- [ ] Environment scope support +- [ ] Evaluation result model + +--- + +### T2: Extend ExceptionObject with RecheckPolicy + +**Assignee**: Policy Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Add recheck policy reference to the exception model. + +**Implementation Path**: `src/Policy/__Libraries/StellaOps.Policy.Exceptions/Models/ExceptionObject.cs` (modify) + +**Changes**: +```csharp +public sealed record ExceptionObject +{ + // ... existing fields ... + + /// + /// Recheck policy that governs automatic re-evaluation. + /// If null, exception is only invalidated by expiry. + /// + public RecheckPolicy? RecheckPolicy { get; init; } + + /// + /// Result of last recheck evaluation. + /// + public RecheckEvaluationResult? LastRecheckResult { get; init; } + + /// + /// When recheck was last evaluated. + /// + public DateTimeOffset? 
LastRecheckAt { get; init; } + + /// + /// Whether this exception is blocked by recheck policy. + /// + public bool IsBlockedByRecheck => + LastRecheckResult?.IsTriggered == true && + LastRecheckResult.RecommendedAction == RecheckAction.Block; + + /// + /// Whether this exception requires re-approval. + /// + public bool RequiresReapproval => + LastRecheckResult?.IsTriggered == true && + LastRecheckResult.RecommendedAction == RecheckAction.RequireReapproval; +} +``` + +**Migration**: Add columns to `policy.exceptions` table: +```sql +ALTER TABLE policy.exceptions ADD COLUMN IF NOT EXISTS recheck_policy_id TEXT; +ALTER TABLE policy.exceptions ADD COLUMN IF NOT EXISTS last_recheck_result JSONB; +ALTER TABLE policy.exceptions ADD COLUMN IF NOT EXISTS last_recheck_at TIMESTAMPTZ; +``` + +**Acceptance Criteria**: +- [ ] `RecheckPolicy` field added to `ExceptionObject` +- [ ] Last recheck result tracked +- [ ] `IsBlockedByRecheck` computed property +- [ ] `RequiresReapproval` computed property +- [ ] Database migration created + +--- + +### T3: Define EvidenceHook Model + +**Assignee**: Policy Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: — + +**Description**: +Create evidence hook model requiring attestations before exception approval. + +**Implementation Path**: `src/Policy/__Libraries/StellaOps.Policy.Exceptions/Models/EvidenceHook.cs` + +**Model Definition**: +```csharp +namespace StellaOps.Policy.Exceptions.Models; + +/// +/// Evidence hook requiring specific attestations before exception approval. +/// +public sealed record EvidenceHook +{ + /// + /// Unique identifier for this hook. + /// + public required string HookId { get; init; } + + /// + /// Type of evidence required. + /// + public required EvidenceType Type { get; init; } + + /// + /// Human-readable description of the requirement. + /// + public required string Description { get; init; } + + /// + /// Whether this evidence is mandatory for approval. 
+ /// + public bool IsMandatory { get; init; } = true; + + /// + /// Schema or predicate type for validation. + /// + public string? ValidationSchema { get; init; } + + /// + /// Maximum age of evidence (for freshness validation). + /// + public TimeSpan? MaxAge { get; init; } + + /// + /// Required trust score for evidence source. + /// + public decimal? MinTrustScore { get; init; } +} + +/// +/// Types of evidence that can be required. +/// +public enum EvidenceType +{ + /// + /// Feature flag is disabled in target environment. + /// + FeatureFlagDisabled, + + /// + /// Backport PR has been merged. + /// + BackportMerged, + + /// + /// Compensating control attestation. + /// + CompensatingControl, + + /// + /// Security review completed. + /// + SecurityReview, + + /// + /// Runtime mitigation in place. + /// + RuntimeMitigation, + + /// + /// WAF rule deployed. + /// + WAFRuleDeployed, + + /// + /// Custom attestation type. + /// + CustomAttestation +} + +/// +/// Evidence submitted to satisfy a hook. +/// +public sealed record SubmittedEvidence +{ + /// + /// Unique identifier for this evidence submission. + /// + public required string EvidenceId { get; init; } + + /// + /// Hook this evidence satisfies. + /// + public required string HookId { get; init; } + + /// + /// Type of evidence. + /// + public required EvidenceType Type { get; init; } + + /// + /// Reference to the evidence (URL, attestation ID, etc.). + /// + public required string Reference { get; init; } + + /// + /// Evidence content or payload. + /// + public string? Content { get; init; } + + /// + /// DSSE envelope if signed. + /// + public string? DsseEnvelope { get; init; } + + /// + /// Whether signature was verified. + /// + public bool SignatureVerified { get; init; } + + /// + /// Trust score of evidence source. + /// + public decimal TrustScore { get; init; } + + /// + /// When evidence was submitted. 
+ /// + public required DateTimeOffset SubmittedAt { get; init; } + + /// + /// Who submitted the evidence. + /// + public required string SubmittedBy { get; init; } + + /// + /// Validation status. + /// + public required EvidenceValidationStatus ValidationStatus { get; init; } + + /// + /// Validation error if any. + /// + public string? ValidationError { get; init; } +} + +/// +/// Status of evidence validation. +/// +public enum EvidenceValidationStatus +{ + Pending, + Valid, + Invalid, + Expired, + InsufficientTrust +} + +/// +/// Registry of required evidence hooks for an exception type. +/// +public sealed record EvidenceRequirements +{ + /// + /// Required evidence hooks. + /// + public required ImmutableArray Hooks { get; init; } + + /// + /// Evidence submitted so far. + /// + public ImmutableArray SubmittedEvidence { get; init; } = []; + + /// + /// Whether all mandatory evidence is satisfied. + /// + public bool IsSatisfied => Hooks + .Where(h => h.IsMandatory) + .All(h => SubmittedEvidence.Any(e => + e.HookId == h.HookId && + e.ValidationStatus == EvidenceValidationStatus.Valid)); + + /// + /// Missing mandatory evidence. + /// + public ImmutableArray MissingEvidence => Hooks + .Where(h => h.IsMandatory) + .Where(h => !SubmittedEvidence.Any(e => + e.HookId == h.HookId && + e.ValidationStatus == EvidenceValidationStatus.Valid)) + .ToImmutableArray(); +} +``` + +**Acceptance Criteria**: +- [ ] `EvidenceHook` model with all types +- [ ] `SubmittedEvidence` for tracking submissions +- [ ] `EvidenceRequirements` with satisfaction check +- [ ] Mandatory vs optional hooks +- [ ] Trust score requirement +- [ ] Max age for freshness + +--- + +### T4: Implement RecheckEvaluationService + +**Assignee**: Policy Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: T1, T2 + +**Description**: +Service that evaluates recheck conditions against current state. 
+
+**Implementation Path**: `src/Policy/__Libraries/StellaOps.Policy.Exceptions/Services/RecheckEvaluationService.cs`
+
+**Implementation**:
+```csharp
+namespace StellaOps.Policy.Exceptions.Services;
+
+/// <summary>
+/// Evaluates recheck conditions against current vulnerability state.
+/// </summary>
+public sealed class RecheckEvaluationService : IRecheckEvaluationService
+{
+    private readonly IEPSSService _epssService;
+    private readonly ICVSSService _cvssService;
+    private readonly IReachabilityQueryService _reachability;
+    private readonly IVexDecisionService _vexService;
+    private readonly IUnknownsBudgetService _unknownsService;
+    private readonly IKEVService _kevService;
+    private readonly ILogger<RecheckEvaluationService> _logger;
+
+    public RecheckEvaluationService(
+        IEPSSService epssService,
+        ICVSSService cvssService,
+        IReachabilityQueryService reachability,
+        IVexDecisionService vexService,
+        IUnknownsBudgetService unknownsService,
+        IKEVService kevService,
+        ILogger<RecheckEvaluationService> logger)
+    {
+        _epssService = epssService;
+        _cvssService = cvssService;
+        _reachability = reachability;
+        _vexService = vexService;
+        _unknownsService = unknownsService;
+        _kevService = kevService;
+        _logger = logger;
+    }
+
+    ///
+    /// Evaluates all recheck conditions for an exception.
+ /// + public async Task EvaluateAsync( + ExceptionObject exception, + EvaluationContext context, + CancellationToken ct = default) + { + if (exception.RecheckPolicy is null) + { + return new RecheckEvaluationResult + { + IsTriggered = false, + TriggeredConditions = [], + RecommendedAction = null, + EvaluatedAt = DateTimeOffset.UtcNow + }; + } + + _logger.LogInformation( + "Evaluating recheck policy {PolicyId} for exception {ExceptionId}", + exception.RecheckPolicy.PolicyId, exception.ExceptionId); + + var triggered = new List(); + + foreach (var condition in exception.RecheckPolicy.Conditions) + { + // Check environment scope + if (condition.EnvironmentScope.Length > 0 && + !condition.EnvironmentScope.Contains(context.Environment)) + { + continue; + } + + var result = await EvaluateConditionAsync(condition, exception, context, ct); + if (result is not null) + { + triggered.Add(result); + } + } + + var isTriggered = triggered.Count > 0; + var recommendedAction = isTriggered + ? triggered.Max(t => t.Action) + : (RecheckAction?)null; + + return new RecheckEvaluationResult + { + IsTriggered = isTriggered, + TriggeredConditions = triggered.ToImmutableArray(), + RecommendedAction = recommendedAction, + EvaluatedAt = DateTimeOffset.UtcNow + }; + } + + private async Task EvaluateConditionAsync( + RecheckCondition condition, + ExceptionObject exception, + EvaluationContext context, + CancellationToken ct) + { + return condition.Type switch + { + RecheckConditionType.EPSSAbove => await EvaluateEPSSAsync(condition, exception, ct), + RecheckConditionType.CVSSAbove => await EvaluateCVSSAsync(condition, exception, ct), + RecheckConditionType.ReachGraphChange => await EvaluateReachGraphAsync(condition, exception, context, ct), + RecheckConditionType.UnknownsAbove => await EvaluateUnknownsAsync(condition, context, ct), + RecheckConditionType.KEVFlagged => await EvaluateKEVAsync(condition, exception, ct), + RecheckConditionType.VEXStatusChange => await 
EvaluateVEXChangeAsync(condition, exception, ct), + RecheckConditionType.ExpiryWithin => EvaluateExpiry(condition, exception), + RecheckConditionType.NewCVEInPackage => await EvaluateNewCVEAsync(condition, exception, ct), + RecheckConditionType.PackageVersionChange => await EvaluateVersionChangeAsync(condition, exception, context, ct), + _ => null + }; + } + + private async Task EvaluateEPSSAsync( + RecheckCondition condition, + ExceptionObject exception, + CancellationToken ct) + { + if (condition.Threshold is null) return null; + + var maxEpss = 0m; + foreach (var cveId in exception.Scope.CveIds) + { + var epss = await _epssService.GetScoreAsync(cveId, ct); + if (epss > maxEpss) maxEpss = epss; + } + + if (maxEpss > condition.Threshold) + { + return new TriggeredCondition( + Type: RecheckConditionType.EPSSAbove, + Description: $"EPSS score {maxEpss:P1} exceeds threshold {condition.Threshold:P1}", + CurrentValue: maxEpss, + ThresholdValue: condition.Threshold, + Action: condition.Action ?? RecheckAction.Warn); + } + + return null; + } + + private async Task EvaluateCVSSAsync( + RecheckCondition condition, + ExceptionObject exception, + CancellationToken ct) + { + if (condition.Threshold is null) return null; + + var maxCvss = 0m; + foreach (var cveId in exception.Scope.CveIds) + { + var cvss = await _cvssService.GetScoreAsync(cveId, ct); + if (cvss > maxCvss) maxCvss = cvss; + } + + if (maxCvss > condition.Threshold) + { + return new TriggeredCondition( + Type: RecheckConditionType.CVSSAbove, + Description: $"CVSS score {maxCvss:F1} exceeds threshold {condition.Threshold:F1}", + CurrentValue: maxCvss, + ThresholdValue: condition.Threshold, + Action: condition.Action ?? 
RecheckAction.Warn); + } + + return null; + } + + private async Task EvaluateReachGraphAsync( + RecheckCondition condition, + ExceptionObject exception, + EvaluationContext context, + CancellationToken ct) + { + // Check if reach graph has changed since exception was created + var currentGraph = await _reachability.GetGraphHashAsync(context.ArtifactDigest, ct); + var originalGraph = exception.Metadata?.GetValueOrDefault("reach_graph_hash"); + + if (currentGraph != originalGraph) + { + return new TriggeredCondition( + Type: RecheckConditionType.ReachGraphChange, + Description: "Reachability graph has changed since exception was created", + CurrentValue: null, + ThresholdValue: null, + Action: condition.Action ?? RecheckAction.RequireReapproval); + } + + return null; + } + + private async Task EvaluateUnknownsAsync( + RecheckCondition condition, + EvaluationContext context, + CancellationToken ct) + { + if (condition.Threshold is null) return null; + + var unknowns = await _unknownsService.GetCountAsync(context.ArtifactDigest, ct); + + if (unknowns > condition.Threshold) + { + return new TriggeredCondition( + Type: RecheckConditionType.UnknownsAbove, + Description: $"Unknown count {unknowns} exceeds threshold {condition.Threshold}", + CurrentValue: unknowns, + ThresholdValue: condition.Threshold, + Action: condition.Action ?? RecheckAction.Block); + } + + return null; + } + + private async Task EvaluateKEVAsync( + RecheckCondition condition, + ExceptionObject exception, + CancellationToken ct) + { + foreach (var cveId in exception.Scope.CveIds) + { + var isKev = await _kevService.IsKnownExploitedAsync(cveId, ct); + if (isKev) + { + return new TriggeredCondition( + Type: RecheckConditionType.KEVFlagged, + Description: $"{cveId} is now in CISA KEV catalog", + CurrentValue: 1, + ThresholdValue: 0, + Action: condition.Action ?? 
RecheckAction.Block); + } + } + + return null; + } + + private async Task EvaluateVEXChangeAsync( + RecheckCondition condition, + ExceptionObject exception, + CancellationToken ct) + { + var originalVex = exception.Metadata?.GetValueOrDefault("original_vex_status"); + if (originalVex is null) return null; + + foreach (var cveId in exception.Scope.CveIds) + { + var currentVex = await _vexService.GetStatusAsync(cveId, ct); + if (currentVex.ToString() != originalVex) + { + return new TriggeredCondition( + Type: RecheckConditionType.VEXStatusChange, + Description: $"VEX status changed from {originalVex} to {currentVex}", + CurrentValue: null, + ThresholdValue: null, + Action: condition.Action ?? RecheckAction.RequireReapproval); + } + } + + return null; + } + + private TriggeredCondition? EvaluateExpiry( + RecheckCondition condition, + ExceptionObject exception) + { + if (condition.Threshold is null || exception.ExpiresAt is null) return null; + + var daysUntilExpiry = (exception.ExpiresAt.Value - DateTimeOffset.UtcNow).TotalDays; + + if (daysUntilExpiry <= (double)condition.Threshold) + { + return new TriggeredCondition( + Type: RecheckConditionType.ExpiryWithin, + Description: $"Exception expires in {daysUntilExpiry:F0} days", + CurrentValue: (decimal)daysUntilExpiry, + ThresholdValue: condition.Threshold, + Action: condition.Action ?? RecheckAction.Warn); + } + + return null; + } + + // Additional evaluation methods omitted for brevity... 
+}
+
+public interface IRecheckEvaluationService
+{
+    Task<RecheckEvaluationResult> EvaluateAsync(
+        ExceptionObject exception,
+        EvaluationContext context,
+        CancellationToken ct = default);
+}
+
+public sealed record EvaluationContext(
+    string ArtifactDigest,
+    string Environment,
+    DateTimeOffset EvaluationTime);
+```
+
+**Acceptance Criteria**:
+- [ ] Evaluates all condition types
+- [ ] Environment scope filtering
+- [ ] Returns triggered conditions with details
+- [ ] Determines recommended action
+- [ ] Logging for audit
+
+---
+
+### T5: Implement EvidenceRequirementValidator
+
+**Assignee**: Policy Team
+**Story Points**: 5
+**Status**: TODO
+**Dependencies**: T3
+
+**Description**:
+Service that validates evidence requirements before exception approval.
+
+**Implementation Path**: `src/Policy/__Libraries/StellaOps.Policy.Exceptions/Services/EvidenceRequirementValidator.cs`
+
+**Implementation**:
+```csharp
+namespace StellaOps.Policy.Exceptions.Services;
+
+/// <summary>
+/// Validates that all required evidence is present before exception approval.
+/// </summary>
+public sealed class EvidenceRequirementValidator : IEvidenceRequirementValidator
+{
+    private readonly IEvidenceHookRegistry _hookRegistry;
+    private readonly IAttestationVerifier _attestationVerifier;
+    private readonly ITrustScoreService _trustScoreService;
+    private readonly ILogger<EvidenceRequirementValidator> _logger;
+
+    public EvidenceRequirementValidator(
+        IEvidenceHookRegistry hookRegistry,
+        IAttestationVerifier attestationVerifier,
+        ITrustScoreService trustScoreService,
+        ILogger<EvidenceRequirementValidator> logger)
+    {
+        _hookRegistry = hookRegistry;
+        _attestationVerifier = attestationVerifier;
+        _trustScoreService = trustScoreService;
+        _logger = logger;
+    }
+
+    ///
+    /// Validates that an exception can be approved based on evidence requirements.
+ /// + public async Task ValidateForApprovalAsync( + ExceptionObject exception, + CancellationToken ct = default) + { + _logger.LogInformation( + "Validating evidence requirements for exception {ExceptionId}", + exception.ExceptionId); + + // Get required hooks for this exception type + var requiredHooks = await _hookRegistry.GetRequiredHooksAsync( + exception.Type, exception.Scope, ct); + + if (requiredHooks.Length == 0) + { + return new EvidenceValidationResult + { + IsValid = true, + MissingEvidence = [], + InvalidEvidence = [], + Message = "No evidence requirements for this exception type" + }; + } + + var missingEvidence = new List(); + var invalidEvidence = new List<(EvidenceHook Hook, SubmittedEvidence Evidence, string Error)>(); + var validEvidence = new List(); + + foreach (var hook in requiredHooks.Where(h => h.IsMandatory)) + { + var submitted = exception.EvidenceRequirements?.SubmittedEvidence + .FirstOrDefault(e => e.HookId == hook.HookId); + + if (submitted is null) + { + missingEvidence.Add(hook); + continue; + } + + // Validate the evidence + var validation = await ValidateEvidenceAsync(hook, submitted, ct); + if (!validation.IsValid) + { + invalidEvidence.Add((hook, submitted, validation.Error!)); + } + else + { + validEvidence.Add(submitted); + } + } + + var isValid = missingEvidence.Count == 0 && invalidEvidence.Count == 0; + + return new EvidenceValidationResult + { + IsValid = isValid, + MissingEvidence = missingEvidence.ToImmutableArray(), + InvalidEvidence = invalidEvidence.Select(e => new InvalidEvidenceEntry( + e.Hook.HookId, e.Evidence.EvidenceId, e.Error)).ToImmutableArray(), + ValidEvidence = validEvidence.ToImmutableArray(), + Message = isValid + ? "All evidence requirements satisfied" + : BuildValidationMessage(missingEvidence, invalidEvidence) + }; + } + + private async Task<(bool IsValid, string? 
Error)> ValidateEvidenceAsync( + EvidenceHook hook, + SubmittedEvidence evidence, + CancellationToken ct) + { + // Check freshness + if (hook.MaxAge.HasValue) + { + var age = DateTimeOffset.UtcNow - evidence.SubmittedAt; + if (age > hook.MaxAge.Value) + { + return (false, $"Evidence is stale (age: {age.TotalHours:F0}h, max: {hook.MaxAge.Value.TotalHours:F0}h)"); + } + } + + // Check trust score + if (hook.MinTrustScore.HasValue) + { + var trustScore = await _trustScoreService.GetScoreAsync(evidence.Reference, ct); + if (trustScore < hook.MinTrustScore.Value) + { + return (false, $"Evidence trust score {trustScore:P0} below minimum {hook.MinTrustScore:P0}"); + } + } + + // Verify signature if DSSE envelope present + if (evidence.DsseEnvelope is not null) + { + var verification = await _attestationVerifier.VerifyAsync(evidence.DsseEnvelope, ct); + if (!verification.IsValid) + { + return (false, $"Signature verification failed: {verification.Error}"); + } + } + + // Validate against schema if specified + if (hook.ValidationSchema is not null && evidence.Content is not null) + { + var schemaValid = ValidateSchema(evidence.Content, hook.ValidationSchema); + if (!schemaValid.IsValid) + { + return (false, $"Schema validation failed: {schemaValid.Error}"); + } + } + + return (true, null); + } + + private static (bool IsValid, string? Error) ValidateSchema(string content, string schema) + { + // Schema validation implementation + // Could use JSON Schema, custom predicates, etc. 
+ return (true, null); + } + + private static string BuildValidationMessage( + List missing, + List<(EvidenceHook Hook, SubmittedEvidence Evidence, string Error)> invalid) + { + var parts = new List(); + + if (missing.Count > 0) + { + parts.Add($"Missing evidence: {string.Join(", ", missing.Select(h => h.Type))}"); + } + + if (invalid.Count > 0) + { + parts.Add($"Invalid evidence: {string.Join(", ", invalid.Select(e => $"{e.Hook.Type}: {e.Error}"))}"); + } + + return string.Join("; ", parts); + } +} + +public interface IEvidenceRequirementValidator +{ + Task ValidateForApprovalAsync( + ExceptionObject exception, + CancellationToken ct = default); +} + +public sealed record EvidenceValidationResult +{ + public required bool IsValid { get; init; } + public required ImmutableArray MissingEvidence { get; init; } + public required ImmutableArray InvalidEvidence { get; init; } + public ImmutableArray ValidEvidence { get; init; } = []; + public required string Message { get; init; } +} + +public sealed record InvalidEvidenceEntry(string HookId, string EvidenceId, string Error); +``` + +**Acceptance Criteria**: +- [ ] Validates all mandatory evidence present +- [ ] Checks evidence freshness (MaxAge) +- [ ] Checks trust score +- [ ] Verifies DSSE signatures +- [ ] Schema validation support +- [ ] Clear error messages for missing/invalid evidence + +--- + +### T6: Integrate with Build Gate + +**Assignee**: Policy Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T4, T5 + +**Description**: +Integrate recheck evaluation with CI/CD build gate. + +**Implementation Path**: `src/Policy/StellaOps.Policy.Engine/BuildGate/ExceptionRecheckGate.cs` + +**Implementation**: +```csharp +namespace StellaOps.Policy.Engine.BuildGate; + +/// +/// Build gate that checks recheck policies before allowing deployment. 
+/// +public sealed class ExceptionRecheckGate : IBuildGate +{ + private readonly IExceptionEvaluator _exceptionEvaluator; + private readonly IRecheckEvaluationService _recheckService; + private readonly ILogger _logger; + + public string GateName => "exception-recheck"; + public int Priority => 100; // Run after other gates + + public async Task EvaluateAsync( + BuildGateContext context, + CancellationToken ct = default) + { + _logger.LogInformation( + "Evaluating exception recheck gate for artifact {Artifact}", + context.ArtifactDigest); + + // Get all active exceptions for this artifact + var exceptions = await _exceptionEvaluator.GetActiveExceptionsAsync( + context.ArtifactDigest, ct); + + var blockers = new List(); + var warnings = new List(); + + foreach (var exception in exceptions) + { + if (exception.RecheckPolicy is null) continue; + + var evalContext = new EvaluationContext( + context.ArtifactDigest, + context.Environment, + DateTimeOffset.UtcNow); + + var result = await _recheckService.EvaluateAsync(exception, evalContext, ct); + + if (result.IsTriggered) + { + foreach (var triggered in result.TriggeredConditions) + { + var message = $"Exception {exception.ExceptionId}: {triggered.Description}"; + + if (triggered.Action == RecheckAction.Block) + { + blockers.Add(message); + } + else if (triggered.Action == RecheckAction.Warn) + { + warnings.Add(message); + } + } + } + } + + if (blockers.Count > 0) + { + return new BuildGateResult + { + Passed = false, + GateName = GateName, + Message = $"Recheck policy blocking: {string.Join("; ", blockers)}", + Blockers = blockers.ToImmutableArray(), + Warnings = warnings.ToImmutableArray() + }; + } + + return new BuildGateResult + { + Passed = true, + GateName = GateName, + Message = warnings.Count > 0 + ? 
$"Passed with {warnings.Count} warning(s)" + : "All exception recheck policies satisfied", + Blockers = [], + Warnings = warnings.ToImmutableArray() + }; + } +} + +public interface IBuildGate +{ + string GateName { get; } + int Priority { get; } + Task EvaluateAsync(BuildGateContext context, CancellationToken ct = default); +} + +public sealed record BuildGateContext( + string ArtifactDigest, + string Environment, + string? Branch, + string? PipelineId); + +public sealed record BuildGateResult +{ + public required bool Passed { get; init; } + public required string GateName { get; init; } + public required string Message { get; init; } + public required ImmutableArray Blockers { get; init; } + public required ImmutableArray Warnings { get; init; } +} +``` + +**Acceptance Criteria**: +- [ ] Evaluates recheck policies on build gate +- [ ] Returns FAIL if Block action triggered +- [ ] Returns warnings for Warn actions +- [ ] Clear message with triggered conditions +- [ ] Integrates with existing build gate pipeline + +--- + +### T7: Update Exception Modal UI + +**Assignee**: Frontend Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: T1, T3 + +**Description**: +Update exception creation modal with recheck policy and evidence requirements. + +**Implementation Path**: `src/Web/StellaOps.Web/src/app/features/exceptions/exception-wizard.component.ts` (modify) + +**UI Changes**: +1. **Recheck Policy Step**: + - Condition type selector (EPSS Above, Reach Graph Change, etc.) + - Threshold input for numeric conditions + - Environment scope multi-select + - Action selector (Warn, Require Reapproval, Block) + +2. **Evidence Requirements Step**: + - List of required evidence hooks + - Upload/link evidence for each hook + - Validation status badges + - Missing evidence warnings + +3. 
**Summary Step**: + - Show recheck policy summary + - Show evidence status (satisfied/missing) + - Block submit if mandatory evidence missing + +**Component Updates**: +```typescript +// exception-wizard.component.ts additions + +interface RecheckConditionForm { + type: RecheckConditionType; + threshold: number | null; + environmentScope: string[]; + action: RecheckAction; +} + +interface EvidenceSubmission { + hookId: string; + type: EvidenceType; + reference: string; + content: string | null; + file: File | null; +} + +// Add to wizard steps +readonly steps = [ + 'scope', + 'reason', + 'expiry', + 'recheck-policy', // NEW + 'evidence', // NEW + 'review' +]; + +// Validation +get canSubmit(): boolean { + return this.isScopeValid && + this.isReasonValid && + this.isExpiryValid && + this.isRecheckPolicyValid && + this.isEvidenceSatisfied; // NEW - block if evidence missing +} + +get isEvidenceSatisfied(): boolean { + const required = this.evidenceRequirements.filter(e => e.isMandatory); + return required.every(r => + this.submittedEvidence.some(s => + s.hookId === r.hookId && s.validationStatus === 'Valid')); +} +``` + +**Acceptance Criteria**: +- [ ] Recheck policy configuration step added +- [ ] Evidence requirements step added +- [ ] Missing evidence blocks submission +- [ ] Validation status shown for each evidence +- [ ] Clear UX for adding/uploading evidence +- [ ] Summary shows complete configuration + +--- + +### T8: Database Migration and Tests + +**Assignee**: Policy Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1-T7 + +**Description**: +Database migration and comprehensive tests. 
+ +**Migration**: `src/Policy/__Libraries/StellaOps.Policy.Storage.Postgres/Migrations/010_recheck_evidence.sql` + +```sql +-- Recheck policy storage +CREATE TABLE IF NOT EXISTS policy.recheck_policies ( + policy_id TEXT PRIMARY KEY, + name TEXT NOT NULL, + conditions JSONB NOT NULL, + default_action TEXT NOT NULL, + is_active BOOLEAN NOT NULL DEFAULT true, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +-- Evidence hooks registry +CREATE TABLE IF NOT EXISTS policy.evidence_hooks ( + hook_id TEXT PRIMARY KEY, + type TEXT NOT NULL, + description TEXT NOT NULL, + is_mandatory BOOLEAN NOT NULL DEFAULT true, + validation_schema TEXT, + max_age_seconds BIGINT, + min_trust_score DECIMAL(5,4), + created_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +-- Submitted evidence +CREATE TABLE IF NOT EXISTS policy.submitted_evidence ( + evidence_id TEXT PRIMARY KEY, + exception_id TEXT NOT NULL REFERENCES policy.exceptions(exception_id), + hook_id TEXT NOT NULL REFERENCES policy.evidence_hooks(hook_id), + type TEXT NOT NULL, + reference TEXT NOT NULL, + content TEXT, + dsse_envelope TEXT, + signature_verified BOOLEAN NOT NULL DEFAULT false, + trust_score DECIMAL(5,4) NOT NULL DEFAULT 0, + submitted_at TIMESTAMPTZ NOT NULL DEFAULT now(), + submitted_by TEXT NOT NULL, + validation_status TEXT NOT NULL DEFAULT 'Pending', + validation_error TEXT +); + +-- Add recheck columns to exceptions +ALTER TABLE policy.exceptions ADD COLUMN IF NOT EXISTS recheck_policy_id TEXT REFERENCES policy.recheck_policies(policy_id); +ALTER TABLE policy.exceptions ADD COLUMN IF NOT EXISTS last_recheck_result JSONB; +ALTER TABLE policy.exceptions ADD COLUMN IF NOT EXISTS last_recheck_at TIMESTAMPTZ; + +-- Index for efficient queries +CREATE INDEX IF NOT EXISTS idx_submitted_evidence_exception ON policy.submitted_evidence(exception_id); +CREATE INDEX IF NOT EXISTS idx_submitted_evidence_hook ON policy.submitted_evidence(hook_id); +CREATE INDEX IF NOT 
EXISTS idx_exceptions_recheck ON policy.exceptions(recheck_policy_id) WHERE recheck_policy_id IS NOT NULL; +``` + +**Test Cases**: +```csharp +public class RecheckEvaluationServiceTests +{ + [Fact] + public async Task Evaluate_EPSSAboveThreshold_TriggersCondition() + { + // Arrange + var exception = CreateExceptionWithRecheckPolicy( + RecheckConditionType.EPSSAbove, threshold: 0.5m); + _epssService.Setup(x => x.GetScoreAsync(It.IsAny(), It.IsAny())) + .ReturnsAsync(0.7m); + + // Act + var result = await _service.EvaluateAsync(exception, CreateContext()); + + // Assert + result.IsTriggered.Should().BeTrue(); + result.TriggeredConditions.Should().ContainSingle(c => c.Type == RecheckConditionType.EPSSAbove); + } + + [Fact] + public async Task Evaluate_KEVFlagged_BlocksException() + { + // Arrange + var exception = CreateExceptionWithRecheckPolicy( + RecheckConditionType.KEVFlagged, action: RecheckAction.Block); + _kevService.Setup(x => x.IsKnownExploitedAsync(It.IsAny(), It.IsAny())) + .ReturnsAsync(true); + + // Act + var result = await _service.EvaluateAsync(exception, CreateContext()); + + // Assert + result.IsTriggered.Should().BeTrue(); + result.RecommendedAction.Should().Be(RecheckAction.Block); + } +} + +public class EvidenceRequirementValidatorTests +{ + [Fact] + public async Task Validate_MissingMandatoryEvidence_ReturnsInvalid() + { + // Arrange + var exception = CreateExceptionWithMissingEvidence(); + + // Act + var result = await _validator.ValidateForApprovalAsync(exception); + + // Assert + result.IsValid.Should().BeFalse(); + result.MissingEvidence.Should().NotBeEmpty(); + } + + [Fact] + public async Task Validate_StaleEvidence_ReturnsInvalid() + { + // Arrange + var exception = CreateExceptionWithStaleEvidence(); + + // Act + var result = await _validator.ValidateForApprovalAsync(exception); + + // Assert + result.IsValid.Should().BeFalse(); + result.InvalidEvidence.Should().ContainSingle(e => e.Error.Contains("stale")); + } + + [Fact] + public async 
Task Validate_AllEvidenceSatisfied_ReturnsValid() + { + // Arrange + var exception = CreateExceptionWithAllEvidence(); + + // Act + var result = await _validator.ValidateForApprovalAsync(exception); + + // Assert + result.IsValid.Should().BeTrue(); + } +} + +public class ExceptionRecheckGateTests +{ + [Fact] + public async Task Evaluate_BlockingCondition_FailsGate() + { + // Arrange + var context = new BuildGateContext("sha256:test", "prod", "main", "pipeline-123"); + + // Act + var result = await _gate.EvaluateAsync(context); + + // Assert + result.Passed.Should().BeFalse(); + result.Blockers.Should().NotBeEmpty(); + } +} +``` + +**Acceptance Criteria**: +- [ ] Migration creates all tables and indexes +- [ ] RecheckEvaluationService tests +- [ ] EvidenceRequirementValidator tests +- [ ] Build gate integration tests +- [ ] ≥80% code coverage + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Policy Team | Define RecheckPolicy Model | +| 2 | T2 | TODO | T1 | Policy Team | Extend ExceptionObject with RecheckPolicy | +| 3 | T3 | TODO | — | Policy Team | Define EvidenceHook Model | +| 4 | T4 | TODO | T1, T2 | Policy Team | Implement RecheckEvaluationService | +| 5 | T5 | TODO | T3 | Policy Team | Implement EvidenceRequirementValidator | +| 6 | T6 | TODO | T4, T5 | Policy Team | Integrate with Build Gate | +| 7 | T7 | TODO | T1, T3 | Frontend Team | Update Exception Modal UI | +| 8 | T8 | TODO | T1-T7 | Policy Team | Database Migration and Tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from gap analysis of "UI Patterns for Triage and Replay" advisory. Addresses Gaps 3 (Recheck Policy) and 4 (Evidence Hooks). 
| Claude | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Recheck condition types | Decision | Policy Team | 9 types covering EPSS, CVSS, KEV, reach graph, VEX, unknowns | +| Evidence hook types | Decision | Policy Team | 7 types covering feature flags, backports, controls, reviews | +| Build gate integration | Decision | Policy Team | Block action stops deployment | +| Trust score threshold | Decision | Policy Team | Configurable per hook, default 0 | + +--- + +## Success Criteria + +- [ ] All 8 tasks marked DONE +- [ ] Recheck policy evaluates all conditions +- [ ] Evidence requirements block approval if unsatisfied +- [ ] Build gate fails on Block conditions +- [ ] UI shows recheck policy and evidence status +- [ ] ≥80% test coverage +- [ ] `dotnet build` succeeds +- [ ] `dotnet test` succeeds diff --git a/docs/implplan/SPRINT_4000_0002_0001_backport_ux.md b/docs/implplan/SPRINT_4000_0002_0001_backport_ux.md new file mode 100644 index 000000000..fb8b9e357 --- /dev/null +++ b/docs/implplan/SPRINT_4000_0002_0001_backport_ux.md @@ -0,0 +1,412 @@ +# Sprint 4000.0002.0001 · Backport Explainability UX + +## Topic & Scope + +- Add "Compared with" indicator to vulnerability findings showing which comparator was used. +- Implement "Why Fixed" popover showing version comparison steps. +- Display evidence trail for backport determinations. +- **Working directory:** `src/Web/StellaOps.Web/` (Angular UI) + +## Advisory Reference + +- **Source:** `docs/product-advisories/archived/22-Dec-2025 - Getting Distro Backport Logic Right.md` +- **Gap Identified:** Advisory recommends UX showing "Compared with: RPM EVR / dpkg rules" and "why fixed" popover. No UI work was scheduled. 
+ +## Dependencies & Concurrency + +- **Upstream**: SPRINT_2000_0003_0001 (Alpine comparator), existing version comparators +- **Downstream**: None +- **Safe to parallelize with**: Backend sprints + +## Documentation Prerequisites + +- `docs/modules/ui/architecture.md` +- `docs/modules/scanner/architecture.md` (findings model) +- `docs/product-advisories/archived/22-Dec-2025 - Getting Distro Backport Logic Right.md` + +--- + +## Tasks + +### T1: Extend Findings API Response + +**Assignee**: Backend Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: — + +**Description**: +Extend the vulnerability findings API to include version comparison metadata. + +**Implementation Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Models/Findings/VersionComparisonEvidence.cs` + +**New Fields**: +```csharp +public sealed record VersionComparisonEvidence +{ + /// + /// Comparator algorithm used (rpm-evr, dpkg, apk, semver). + /// + public required string Comparator { get; init; } + + /// + /// Installed version in native format. + /// + public required string InstalledVersion { get; init; } + + /// + /// Fixed version threshold from advisory. + /// + public required string FixedVersion { get; init; } + + /// + /// Whether the installed version is >= fixed. + /// + public required bool IsFixed { get; init; } + + /// + /// Human-readable proof lines showing comparison steps. + /// + public ImmutableArray ProofLines { get; init; } = []; + + /// + /// Advisory source (DSA-1234, RHSA-2025:1234, USN-1234-1). + /// + public string? 
AdvisorySource { get; init; } +} +``` + +**API Response** (`GET /api/v1/scans/{id}/findings/{findingId}`): +```json +{ + "findingId": "...", + "cveId": "CVE-2025-12345", + "package": "openssl", + "installedVersion": "1:1.1.1k-1+deb11u1", + "severity": "HIGH", + "status": "fixed", + "versionComparison": { + "comparator": "dpkg", + "installedVersion": "1:1.1.1k-1+deb11u1", + "fixedVersion": "1:1.1.1k-1+deb11u2", + "isFixed": false, + "proofLines": [ + "Epoch: 1 == 1 (equal)", + "Upstream: 1.1.1k == 1.1.1k (equal)", + "Revision: 1+deb11u1 < 1+deb11u2 (VULNERABLE)" + ], + "advisorySource": "DSA-5678-1" + } +} +``` + +**Acceptance Criteria**: +- [ ] VersionComparisonEvidence model created +- [ ] API response includes comparison metadata +- [ ] ProofLines generated by comparators + +--- + +### T2: Update Version Comparators to Emit Proof Lines + +**Assignee**: Concelier Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Extend version comparators to optionally emit human-readable proof lines. + +**Implementation Path**: `src/Concelier/__Libraries/StellaOps.Concelier.Merge/Comparers/` + +**Interface Extension**: +```csharp +public interface IVersionComparator +{ + int Compare(string? left, string? right); + + /// + /// Compare with proof generation for explainability. + /// + VersionComparisonResult CompareWithProof(string? left, string? 
right); +} + +public sealed record VersionComparisonResult( + int Comparison, + ImmutableArray ProofLines); +``` + +**Example Proof Lines (RPM)**: +``` +Epoch: 0 < 1 (left is older) +``` +``` +Epoch: 1 == 1 (equal) +Version segment 1: 1 == 1 (equal) +Version segment 2: 2 < 3 (left is older) +Result: VULNERABLE (installed < fixed) +``` + +**Example Proof Lines (Debian)**: +``` +Epoch: 1 == 1 (equal) +Upstream version: 1.1.1k == 1.1.1k (equal) +Debian revision: 1+deb11u1 < 1+deb11u2 (left is older) +Result: VULNERABLE (installed < fixed) +``` + +**Acceptance Criteria**: +- [ ] NEVRA comparator emits proof lines +- [ ] DebianEvr comparator emits proof lines +- [ ] APK comparator emits proof lines (after SPRINT_2000_0003_0001) +- [ ] Unit tests verify proof line content + +--- + +### T3: Create "Compared With" Badge Component + +**Assignee**: UI Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Create Angular component showing which comparator was used. + +**Implementation Path**: `src/Web/StellaOps.Web/src/app/shared/components/comparator-badge/` + +**Component**: +```typescript +// comparator-badge.component.ts +@Component({ + selector: 'app-comparator-badge', + template: ` + + compare_arrows + {{ comparatorLabel }} + + `, + styles: [` + .comparator-badge { + display: inline-flex; + align-items: center; + gap: 4px; + padding: 2px 8px; + border-radius: 4px; + font-size: 12px; + font-weight: 500; + } + .comparator-rpm { background: #fee2e2; color: #991b1b; } + .comparator-dpkg { background: #fef3c7; color: #92400e; } + .comparator-apk { background: #d1fae5; color: #065f46; } + .comparator-semver { background: #e0e7ff; color: #3730a3; } + `] +}) +export class ComparatorBadgeComponent { + @Input() comparator!: string; + + get comparatorLabel(): string { + switch (this.comparator) { + case 'rpm-evr': return 'RPM EVR'; + case 'dpkg': return 'dpkg'; + case 'apk': return 'APK'; + case 'semver': return 'SemVer'; + default: return 
this.comparator;
+    }
+  }
+
+  get comparatorClass(): string {
+    // NOTE: split('-')[0] maps 'rpm-evr' to the '.comparator-rpm' CSS class above;
+    // a plain replace('-', '') would yield 'comparator-rpmevr', matching no defined style.
+    return `comparator-${this.comparator.split('-')[0]}`;
+  }
+}
+```
+
+**Usage in Findings Table**:
+```html
+<td>
+  <span>{{ finding.installedVersion }}</span>
+  <app-comparator-badge
+    [comparator]="finding.versionComparison.comparator">
+  </app-comparator-badge>
+</td>
+```
+
+**Acceptance Criteria**:
+- [ ] Component created with distro-specific styling
+- [ ] Badge shows comparator type (RPM EVR, dpkg, APK, SemVer)
+- [ ] Accessible (ARIA labels)
+
+---
+
+### T4: Create "Why Fixed/Vulnerable" Popover
+
+**Assignee**: UI Team
+**Story Points**: 5
+**Status**: TODO
+**Dependencies**: T1, T2, T3
+
+**Description**:
+Create popover showing version comparison steps for explainability.
+
+**Implementation Path**: `src/Web/StellaOps.Web/src/app/shared/components/version-proof-popover/`
+
+**Component**:
+```typescript
+// version-proof-popover.component.ts
+@Component({
+  selector: 'app-version-proof-popover',
+  template: `
+
+
+
+    <div class="proof-header" [class.is-fixed]="isFixed">
+      <mat-icon>{{ isFixed ? 'check_circle' : 'error' }}</mat-icon>
+      <span>{{ isFixed ? 'Fixed' : 'Vulnerable' }}</span>
+    </div>
+
+    <div class="proof-versions">
+      <div class="version-row">
+        <span class="label">Installed:</span>
+        <code>{{ installedVersion }}</code>
+      </div>
+      <div class="version-row">
+        <span class="label">Fixed in:</span>
+        <code>{{ fixedVersion }}</code>
+      </div>
+    </div>
+
+    <div class="proof-steps">
+      <span class="label">Comparison steps:</span>
+      <ol>
+        <li *ngFor="let line of proofLines">{{ line }}</li>
+      </ol>
+    </div>
+
+    <div class="proof-source" *ngIf="advisorySource">
+      <mat-icon>source</mat-icon>
+      <span>Source: {{ advisorySource }}</span>
+    </div>
+ ` +}) +export class VersionProofPopoverComponent { + @Input() comparison!: VersionComparisonEvidence; + + get isFixed(): boolean { return this.comparison.isFixed; } + get installedVersion(): string { return this.comparison.installedVersion; } + get fixedVersion(): string { return this.comparison.fixedVersion; } + get proofLines(): string[] { return this.comparison.proofLines; } + get advisorySource(): string | undefined { return this.comparison.advisorySource; } +} +``` + +**Popover Content Example**: +``` +┌─────────────────────────────────────┐ +│ ⚠ Vulnerable │ +├─────────────────────────────────────┤ +│ Installed: 1:1.1.1k-1+deb11u1 │ +│ Fixed in: 1:1.1.1k-1+deb11u2 │ +├─────────────────────────────────────┤ +│ Comparison steps: │ +│ 1. Epoch: 1 == 1 (equal) │ +│ 2. Upstream: 1.1.1k == 1.1.1k │ +│ 3. Revision: 1+deb11u1 < 1+deb11u2 │ +│ (VULNERABLE) │ +├─────────────────────────────────────┤ +│ 📄 Source: DSA-5678-1 │ +└─────────────────────────────────────┘ +``` + +**Acceptance Criteria**: +- [ ] Popover shows installed vs fixed versions +- [ ] Step-by-step comparison proof displayed +- [ ] Advisory source linked +- [ ] Accessible keyboard navigation + +--- + +### T5: Integration and E2E Tests + +**Assignee**: UI Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1-T4 + +**Description**: +Add integration tests for the new UI components. 
+ +**Test Cases**: +- [ ] ComparatorBadge renders correctly for all comparator types +- [ ] VersionProofPopover opens and displays proof lines +- [ ] Findings table shows comparison metadata +- [ ] E2E test: click proof popover, verify content + +**Acceptance Criteria**: +- [ ] Unit tests for components +- [ ] E2E test with Playwright/Cypress +- [ ] Accessibility audit passes + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Backend Team | Extend Findings API Response | +| 2 | T2 | TODO | T1 | Concelier Team | Update Version Comparators to Emit Proof Lines | +| 3 | T3 | TODO | T1 | UI Team | Create "Compared With" Badge Component | +| 4 | T4 | TODO | T1, T2, T3 | UI Team | Create "Why Fixed/Vulnerable" Popover | +| 5 | T5 | TODO | T1-T4 | UI Team | Integration and E2E Tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from advisory gap analysis. UX explainability identified as missing. 
| Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Proof lines in API response | Decision | Backend Team | Include in standard findings response, not separate endpoint | +| Comparator badge styling | Decision | UI Team | Distro-specific colors for quick visual identification | +| Popover vs modal | Decision | UI Team | Popover for quick glance; modal would interrupt workflow | + +--- + +## Success Criteria + +- [ ] All 5 tasks marked DONE +- [ ] Comparator badge visible on findings +- [ ] Why Fixed popover shows proof steps +- [ ] E2E tests passing +- [ ] Accessibility audit passes +- [ ] `ng build` succeeds +- [ ] `ng test` succeeds + +--- + +## References + +- Advisory: `docs/product-advisories/archived/22-Dec-2025 - Getting Distro Backport Logic Right.md` +- Angular Material: https://material.angular.io/ +- Findings API: `docs/api/scanner-findings.yaml` +- UI Architecture: `docs/modules/ui/architecture.md` + +--- + +*Document Version: 1.0.0* +*Created: 2025-12-22* diff --git a/docs/implplan/SPRINT_4200_0002_0003_delta_compare_view.md b/docs/implplan/SPRINT_4200_0002_0003_delta_compare_view.md index d1022518d..0dd7c7677 100644 --- a/docs/implplan/SPRINT_4200_0002_0003_delta_compare_view.md +++ b/docs/implplan/SPRINT_4200_0002_0003_delta_compare_view.md @@ -753,6 +753,585 @@ export class CompareExportService { --- +### T9: Baseline Rationale Display + +**Assignee**: UI Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T2 + +**Description**: +Show why the baseline was selected (auditor-friendly explanation). + +**Implementation Path**: Add to baseline selector component + +```typescript +// baseline-rationale.component.ts +@Component({ + selector: 'stella-baseline-rationale', + standalone: true, + imports: [CommonModule, MatIconModule, MatTooltipModule], + template: ` +
+ info + {{ rationale() }} + +
+ ` +}) +export class BaselineRationaleComponent { + rationale = input(); + + // Example rationales: + // "Selected last prod release with Allowed verdict under policy P-2024-001." + // "Auto-selected: most recent green build on main branch (2h ago)." + // "User override: manually selected v1.4.2 as comparison baseline." +} +``` + +**Acceptance Criteria**: +- [ ] Shows rationale text below baseline selector +- [ ] Explains why baseline was auto-selected +- [ ] Shows different message for manual override +- [ ] Click opens detailed selection log + +--- + +### T10: Actionables Section ("What to do next") + +**Assignee**: UI Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1, Backend API (Sprint 4200.0002.0006) + +**Description**: +Show structured recommendations for addressing delta findings. + +**Implementation Path**: `actionables-panel.component.ts` (new file) + +```typescript +export interface Actionable { + id: string; + type: 'upgrade' | 'patch' | 'vex' | 'config' | 'investigate'; + priority: 'critical' | 'high' | 'medium' | 'low'; + title: string; + description: string; + component?: string; + targetVersion?: string; + cveIds?: string[]; + estimatedEffort?: string; +} + +@Component({ + selector: 'stella-actionables-panel', + standalone: true, + imports: [CommonModule, MatListModule, MatChipsModule, MatIconModule, MatButtonModule], + template: ` +
+

+ task_alt + What to do next +

+ + + + + {{ getActionIcon(action.type) }} + +
+ {{ action.title }} + + {{ action.priority }} + +
+
{{ action.description }}
+ +
+
+ +
+ check_circle +

No immediate actions required

+
+
+ ` +}) +export class ActionablesPanelComponent { + actionables = input([]); + + getActionIcon(type: string): string { + const icons: Record = { + upgrade: 'upgrade', + patch: 'build', + vex: 'description', + config: 'settings', + investigate: 'search' + }; + return icons[type] || 'task'; + } +} +``` + +**Acceptance Criteria**: +- [ ] Shows prioritized list of actionables +- [ ] Supports upgrade, patch, VEX, config, investigate types +- [ ] Priority chips with color coding +- [ ] Apply button triggers action workflow +- [ ] Empty state when no actions needed + +--- + +### T11: Determinism Trust Indicators + +**Assignee**: UI Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Display determinism hash, policy version, feed snapshot, signature status. + +**Implementation Path**: `trust-indicators.component.ts` (new file) + +```typescript +export interface TrustIndicators { + determinismHash: string; + policyVersion: string; + policyHash: string; + feedSnapshotTimestamp: Date; + feedSnapshotHash: string; + signatureStatus: 'valid' | 'invalid' | 'missing' | 'pending'; + signerIdentity?: string; +} + +@Component({ + selector: 'stella-trust-indicators', + standalone: true, + imports: [CommonModule, MatChipsModule, MatIconModule, MatTooltipModule], + template: ` +
+ +
+ warning + Verification {{ indicators()?.signatureStatus }}: Some actions may be restricted +
+ +
+
+ fingerprint + Det. Hash: + {{ indicators()?.determinismHash | slice:0:12 }}... + +
+ +
+ policy + Policy: + {{ indicators()?.policyVersion }} +
+ +
+ {{ isFeedStale() ? 'warning' : 'cloud_done' }} + Feed: + {{ indicators()?.feedSnapshotTimestamp | date:'short' }} + ({{ age }}) +
+ +
+ {{ getSignatureIcon() }} + Signature: + {{ indicators()?.signatureStatus }} +
+
+
+ ` +}) +export class TrustIndicatorsComponent { + indicators = input(); + + feedStaleThresholdHours = 24; + + isFeedStale(): boolean { + const ts = this.indicators()?.feedSnapshotTimestamp; + if (!ts) return true; + const age = Date.now() - new Date(ts).getTime(); + return age > this.feedStaleThresholdHours * 60 * 60 * 1000; + } +} +``` + +**Acceptance Criteria**: +- [ ] Shows determinism hash with copy button +- [ ] Shows policy version +- [ ] Shows feed snapshot timestamp with age +- [ ] Shows signature verification status +- [ ] Degraded banner when signature invalid/missing +- [ ] Stale feed warning when > 24h old + +--- + +### T12: Witness Path Visualization + +**Assignee**: UI Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T6 + +**Description**: +Show minimal call path from entrypoint to vulnerable sink. + +**Implementation Path**: Add to proof pane + +```typescript +export interface WitnessPath { + id: string; + entrypoint: string; + sink: string; + nodes: WitnessNode[]; + confidence: 'confirmed' | 'likely' | 'present'; + gates: string[]; +} + +export interface WitnessNode { + method: string; + file?: string; + line?: number; + isEntrypoint?: boolean; + isSink?: boolean; +} + +@Component({ + selector: 'stella-witness-path', + standalone: true, + imports: [CommonModule, MatIconModule, MatButtonModule], + template: ` +
+
+ + {{ path()?.confidence }} + + +
+ +
+ +
+
+ login + dangerous + arrow_downward +
+
+ {{ node.method }} + + {{ node.file }}:{{ node.line }} + +
+
+
+
+ +
+ ... {{ hiddenCount() }} more nodes ... +
+
+ +
+ Gates: + {{ gate }} +
+
+ ` +}) +export class WitnessPathComponent { + path = input(); + expanded = signal(false); + + visibleNodes = computed(() => { + const nodes = this.path()?.nodes || []; + if (this.expanded() || nodes.length <= 5) return nodes; + // Show first 2 and last 2 + return [...nodes.slice(0, 2), ...nodes.slice(-2)]; + }); + + hiddenCount = computed(() => { + const total = this.path()?.nodes?.length || 0; + return this.expanded() ? 0 : Math.max(0, total - 4); + }); +} +``` + +**Acceptance Criteria**: +- [ ] Shows entrypoint → sink path +- [ ] Collapsible for long paths (> 5 nodes) +- [ ] Shows confidence tier +- [ ] Shows gates (security controls) +- [ ] Expand-on-demand for full path + +--- + +### T13: VEX Claim Merge Explanation + +**Assignee**: UI Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T6 + +**Description**: +Show VEX claim sources and how they were merged. + +**Implementation Path**: `vex-merge-explanation.component.ts` (new file) + +```typescript +export interface VexClaimSource { + source: 'vendor' | 'distro' | 'internal' | 'community'; + document: string; + status: string; + justification?: string; + timestamp: Date; + priority: number; +} + +export interface VexMergeResult { + finalStatus: string; + sources: VexClaimSource[]; + mergeStrategy: 'priority' | 'latest' | 'conservative'; + conflictResolution?: string; +} + +@Component({ + selector: 'stella-vex-merge-explanation', + standalone: true, + imports: [CommonModule, MatIconModule, MatExpansionModule], + template: ` + + + + merge + VEX Status: {{ result()?.finalStatus }} + + + {{ result()?.sources?.length }} sources merged + + + +
+
+ Strategy: {{ result()?.mergeStrategy }} + + ({{ result()?.conflictResolution }}) + +
+ +
+
+
+ {{ getSourceIcon(src.source) }} + {{ src.source }} + {{ src.status }} + P{{ src.priority }} +
+
+ {{ src.document }} + {{ src.timestamp | date:'short' }} +
+
+ {{ src.justification }} +
+
+
+
+
+ ` +}) +export class VexMergeExplanationComponent { + result = input(); + + getSourceIcon(source: string): string { + const icons: Record = { + vendor: 'business', + distro: 'dns', + internal: 'home', + community: 'groups' + }; + return icons[source] || 'source'; + } +} +``` + +**Acceptance Criteria**: +- [ ] Shows final merged VEX status +- [ ] Lists all source documents +- [ ] Shows merge strategy used +- [ ] Highlights winning source +- [ ] Shows conflict resolution if any + +--- + +### T14: Role-Based Default Views + +**Assignee**: UI Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1, Authority integration + +**Description**: +Show different default tabs/content based on user persona. + +**Implementation Path**: Add role detection to compare-view.component.ts + +```typescript +type UserRole = 'developer' | 'security' | 'audit'; + +// Role-based defaults +const ROLE_DEFAULTS: Record = { + developer: { + defaultTab: 'actionables', + showFeatures: ['actionables', 'witness-paths', 'upgrade-suggestions'] + }, + security: { + defaultTab: 'claims', + showFeatures: ['vex-merge', 'policy-reasoning', 'claim-sources', 'actionables'] + }, + audit: { + defaultTab: 'attestations', + showFeatures: ['signatures', 'replay', 'evidence-pack', 'envelope-hashes'] + } +}; + +// In component: +userRole = signal('developer'); +roleDefaults = computed(() => ROLE_DEFAULTS[this.userRole()]); + +ngOnInit() { + this.authService.getCurrentUserRoles().subscribe(roles => { + if (roles.includes('auditor')) this.userRole.set('audit'); + else if (roles.includes('security')) this.userRole.set('security'); + else this.userRole.set('developer'); + }); +} +``` + +**Acceptance Criteria**: +- [ ] Detects user role from Authority +- [ ] Sets default tab based on role +- [ ] Shows/hides features based on role +- [ ] Developer: actionables first +- [ ] Security: claims/merge first +- [ ] Audit: signatures/replay first + +--- + +### T15: Feed Staleness Warning + +**Assignee**: UI Team 
+**Story Points**: 1 +**Status**: TODO +**Dependencies**: T11 + +**Description**: +Alert banner when vulnerability feed snapshot is stale. + +**Implementation**: Included in T11 TrustIndicatorsComponent with `isFeedStale()` check. + +**Acceptance Criteria**: +- [ ] Warning icon when feed > 24h old +- [ ] Shows feed age in human-readable format +- [ ] Tooltip explains staleness implications +- [ ] Configurable threshold + +--- + +### T16: Policy Drift Indicator + +**Assignee**: UI Team +**Story Points**: 1 +**Status**: TODO +**Dependencies**: T11 + +**Description**: +Show if policy changed between base and head scans. + +**Implementation Path**: Add to trust-indicators.component.ts + +```typescript +export interface PolicyDrift { + basePolicy: { version: string; hash: string }; + headPolicy: { version: string; hash: string }; + hasDrift: boolean; + driftSummary?: string; +} + +// Add to template: +
+ warning + Policy changed between scans + +
+``` + +**Acceptance Criteria**: +- [ ] Detects policy version/hash mismatch +- [ ] Shows warning banner +- [ ] Links to policy diff view +- [ ] Explains impact on comparison + +--- + +### T17: Replay Command Display + +**Assignee**: UI Team +**Story Points**: 1 +**Status**: TODO +**Dependencies**: T11 + +**Description**: +Copy-to-clipboard button for replay command to verify determinism. + +**Implementation Path**: Add to trust-indicators.component.ts + +```typescript +// Add to template: +
+ +
+ +// In component: +copyReplayCommand(): void { + const cmd = `stellaops smart-diff replay \\ + --base ${this.baseDigest()} \\ + --target ${this.headDigest()} \\ + --feed-snapshot ${this.indicators()?.feedSnapshotHash} \\ + --policy ${this.indicators()?.policyHash}`; + + navigator.clipboard.writeText(cmd); + this.snackBar.open('Replay command copied', 'OK', { duration: 2000 }); +} +``` + +**Acceptance Criteria**: +- [ ] Button copies CLI command +- [ ] Command includes all determinism inputs +- [ ] Snackbar confirms copy +- [ ] Works across browsers + +--- + ## Delivery Tracker | # | Task ID | Status | Dependency | Owners | Task Definition | @@ -765,6 +1344,15 @@ export class CompareExportService { | 6 | T6 | TODO | T1, T5 | UI Team | Proof pane | | 7 | T7 | TODO | T6 | UI Team | Before/After toggle | | 8 | T8 | TODO | T1 | UI Team | Export delta report | +| 9 | T9 | TODO | T2 | UI Team | Baseline rationale display | +| 10 | T10 | TODO | T1, Backend | UI Team | Actionables section ("What to do next") | +| 11 | T11 | TODO | T1 | UI Team | Determinism trust indicators | +| 12 | T12 | TODO | T6 | UI Team | Witness path visualization | +| 13 | T13 | TODO | T6 | UI Team | VEX claim merge explanation | +| 14 | T14 | TODO | T1, Authority | UI Team | Role-based default views | +| 15 | T15 | TODO | T11 | UI Team | Feed staleness warning | +| 16 | T16 | TODO | T11 | UI Team | Policy drift indicator | +| 17 | T17 | TODO | T11 | UI Team | Replay command display | --- @@ -773,6 +1361,7 @@ export class CompareExportService { | Date (UTC) | Update | Owner | |------------|--------|-------| | 2025-12-21 | Sprint created from UX Gap Analysis. Smart-Diff UI identified as key comparison feature. | Claude | +| 2025-12-22 | Sprint amended with 9 new tasks (T9-T17) from advisory "21-Dec-2025 - Smart Diff - Reproducibility as a Feature.md". 
Added baseline rationale, actionables, trust indicators, witness paths, VEX merge explanation, role-based views, feed staleness, policy drift, replay command. | Claude | --- @@ -784,16 +1373,40 @@ export class CompareExportService { | Baseline presets | Decision | UI Team | Last green, previous release, main, custom | | View modes | Decision | UI Team | Side-by-side and unified diff | | Categories | Decision | UI Team | SBOM, Reachability, VEX, Policy, Findings, Unknowns | +| Baseline rationale | Decision | UI Team | Show auditor-friendly explanation of baseline selection | +| Trust indicators | Decision | UI Team | Show determinism hash, policy version, feed snapshot, signature | +| Role-based defaults | Decision | UI Team | Dev→actionables, Security→claims, Audit→signatures | +| Feed staleness threshold | Decision | UI Team | 24h default, configurable | +| Witness path collapse | Decision | UI Team | Collapse paths > 5 nodes, show first 2 + last 2 | + +--- + +## Dependencies + +| Dependency | Sprint | Status | Notes | +|------------|--------|--------|-------| +| Baseline Selection API | 4200.0002.0006 | TODO | Backend API for recommended baselines with rationale | +| Actionables Engine API | 4200.0002.0006 | TODO | Backend API for generating remediation recommendations | +| Authority Role API | Authority | EXISTS | User role detection for role-based views | +| Smart-Diff Backend | 3500 | DONE | Core smart-diff computation | --- ## Success Criteria -- [ ] All 8 tasks marked DONE -- [ ] Baseline can be selected +- [ ] All 17 tasks marked DONE +- [ ] Baseline can be selected with rationale displayed - [ ] Delta summary shows counts - [ ] Three-pane layout works - [ ] Evidence accessible for each change - [ ] Export works (JSON/PDF) +- [ ] Actionables section shows recommendations +- [ ] Trust indicators visible (hash, policy, feed, signature) +- [ ] Witness paths render with collapse/expand +- [ ] VEX merge explanation shows sources +- [ ] Role-based default views 
work +- [ ] Feed staleness warning appears when > 24h +- [ ] Policy drift indicator shows when policy changed +- [ ] Replay command copyable - [ ] `ng build` succeeds - [ ] `ng test` succeeds diff --git a/docs/implplan/SPRINT_4200_0002_0006_delta_compare_api.md b/docs/implplan/SPRINT_4200_0002_0006_delta_compare_api.md new file mode 100644 index 000000000..f49e7b0eb --- /dev/null +++ b/docs/implplan/SPRINT_4200_0002_0006_delta_compare_api.md @@ -0,0 +1,884 @@ +# Sprint 4200.0002.0006 · Delta Compare Backend API + +## Topic & Scope + +Backend API endpoints to support the Delta/Compare View UI (Sprint 4200.0002.0003). Provides baseline selection with rationale, actionables generation, and trust indicator data. + +**Working directory:** `src/Scanner/StellaOps.Scanner.WebService/` + +**Source Advisory**: `docs/product-advisories/21-Dec-2025 - Smart Diff - Reproducibility as a Feature.md` + +## Dependencies & Concurrency + +- **Upstream**: Sprint 3500 (Smart-Diff core implementation) - DONE +- **Downstream**: Sprint 4200.0002.0003 (Delta Compare View UI) +- **Safe to parallelize with**: Sprint 4200.0002.0004 (CLI Compare) + +## Documentation Prerequisites + +- `src/Scanner/AGENTS.md` +- `docs/modules/scanner/architecture.md` +- `docs/product-advisories/21-Dec-2025 - Smart Diff - Reproducibility as a Feature.md` + +--- + +## Tasks + +### T1: Baseline Selection API + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: — + +**Description**: +API endpoint to get recommended baselines with rationale for a given artifact. 
+ +**Implementation Path**: `Endpoints/BaselineEndpoints.cs` (new file) + +```csharp +// BaselineEndpoints.cs +using Microsoft.AspNetCore.Http.HttpResults; +using StellaOps.Scanner.Core.Models; + +namespace StellaOps.Scanner.WebService.Endpoints; + +public static class BaselineEndpoints +{ + public static void MapBaselineEndpoints(this IEndpointRouteBuilder routes) + { + var group = routes.MapGroup("/api/v1/baselines") + .WithTags("Baselines"); + + group.MapGet("/recommendations/{artifactDigest}", GetRecommendedBaselines) + .WithName("GetRecommendedBaselines") + .WithSummary("Get recommended baselines for an artifact") + .Produces(StatusCodes.Status200OK); + + group.MapGet("/rationale/{baseDigest}/{headDigest}", GetBaselineRationale) + .WithName("GetBaselineRationale") + .WithSummary("Get rationale for a specific baseline selection") + .Produces(StatusCodes.Status200OK); + } + + private static async Task> GetRecommendedBaselines( + string artifactDigest, + [AsParameters] BaselineQuery query, + IBaselineService baselineService, + CancellationToken ct) + { + var recommendations = await baselineService.GetRecommendationsAsync( + artifactDigest, + query.Environment, + query.PolicyId, + ct); + + return TypedResults.Ok(new BaselineRecommendationsResponse + { + ArtifactDigest = artifactDigest, + Recommendations = recommendations, + GeneratedAt = DateTime.UtcNow + }); + } + + private static async Task> GetBaselineRationale( + string baseDigest, + string headDigest, + IBaselineService baselineService, + CancellationToken ct) + { + var rationale = await baselineService.GetRationaleAsync(baseDigest, headDigest, ct); + + return TypedResults.Ok(rationale); + } +} + +public record BaselineQuery +{ + public string? Environment { get; init; } + public string? 
PolicyId { get; init; } +} + +public record BaselineRecommendation +{ + public required string Id { get; init; } + public required string Type { get; init; } // "last-green", "previous-release", "main-branch", "custom" + public required string Label { get; init; } + public required string Digest { get; init; } + public required DateTime Timestamp { get; init; } + public required string Rationale { get; init; } + public string? VerdictStatus { get; init; } // "allowed", "blocked", "warn" + public string? PolicyVersion { get; init; } + public bool IsDefault { get; init; } +} + +public record BaselineRecommendationsResponse +{ + public required string ArtifactDigest { get; init; } + public required IReadOnlyList Recommendations { get; init; } + public required DateTime GeneratedAt { get; init; } +} + +public record BaselineRationaleResponse +{ + public required string BaseDigest { get; init; } + public required string HeadDigest { get; init; } + public required string SelectionType { get; init; } + public required string Rationale { get; init; } + public required string DetailedExplanation { get; init; } + public IReadOnlyList? SelectionCriteria { get; init; } + public DateTime? BaseTimestamp { get; init; } + public DateTime? HeadTimestamp { get; init; } +} +``` + +**Service Implementation**: `Services/BaselineService.cs` (new file) + +```csharp +// BaselineService.cs +namespace StellaOps.Scanner.WebService.Services; + +public interface IBaselineService +{ + Task> GetRecommendationsAsync( + string artifactDigest, + string? environment, + string? 
policyId, + CancellationToken ct); + + Task GetRationaleAsync( + string baseDigest, + string headDigest, + CancellationToken ct); +} + +public class BaselineService : IBaselineService +{ + private readonly IScanRepository _scanRepo; + private readonly IPolicyGateService _policyService; + + public BaselineService(IScanRepository scanRepo, IPolicyGateService policyService) + { + _scanRepo = scanRepo; + _policyService = policyService; + } + + public async Task> GetRecommendationsAsync( + string artifactDigest, + string? environment, + string? policyId, + CancellationToken ct) + { + var recommendations = new List(); + + // 1. Last green verdict in same environment + var lastGreen = await _scanRepo.GetLastGreenVerdictAsync( + artifactDigest, environment, policyId, ct); + if (lastGreen != null) + { + recommendations.Add(new BaselineRecommendation + { + Id = "last-green", + Type = "last-green", + Label = "Last Green Build", + Digest = lastGreen.Digest, + Timestamp = lastGreen.CompletedAt, + Rationale = $"Selected last prod release with Allowed verdict under policy {lastGreen.PolicyVersion}.", + VerdictStatus = "allowed", + PolicyVersion = lastGreen.PolicyVersion, + IsDefault = true + }); + } + + // 2. Previous release tag + var previousRelease = await _scanRepo.GetPreviousReleaseAsync(artifactDigest, ct); + if (previousRelease != null) + { + recommendations.Add(new BaselineRecommendation + { + Id = "previous-release", + Type = "previous-release", + Label = $"Previous Release ({previousRelease.Tag})", + Digest = previousRelease.Digest, + Timestamp = previousRelease.ReleasedAt, + Rationale = $"Previous release tag: {previousRelease.Tag}", + VerdictStatus = previousRelease.VerdictStatus, + IsDefault = lastGreen == null + }); + } + + // 3. 
Parent commit / merge-base + var parentCommit = await _scanRepo.GetParentCommitScanAsync(artifactDigest, ct); + if (parentCommit != null) + { + recommendations.Add(new BaselineRecommendation + { + Id = "parent-commit", + Type = "main-branch", + Label = "Parent Commit", + Digest = parentCommit.Digest, + Timestamp = parentCommit.CompletedAt, + Rationale = $"Parent commit on main branch: {parentCommit.CommitSha[..8]}", + VerdictStatus = parentCommit.VerdictStatus, + IsDefault = false + }); + } + + return recommendations; + } + + public async Task GetRationaleAsync( + string baseDigest, + string headDigest, + CancellationToken ct) + { + var baseScan = await _scanRepo.GetByDigestAsync(baseDigest, ct); + var headScan = await _scanRepo.GetByDigestAsync(headDigest, ct); + + var selectionType = DetermineSelectionType(baseScan, headScan); + var rationale = GenerateRationale(selectionType, baseScan, headScan); + var explanation = GenerateDetailedExplanation(selectionType, baseScan, headScan); + + return new BaselineRationaleResponse + { + BaseDigest = baseDigest, + HeadDigest = headDigest, + SelectionType = selectionType, + Rationale = rationale, + DetailedExplanation = explanation, + SelectionCriteria = GetSelectionCriteria(selectionType), + BaseTimestamp = baseScan?.CompletedAt, + HeadTimestamp = headScan?.CompletedAt + }; + } + + private static string DetermineSelectionType(Scan? baseScan, Scan? headScan) + { + // Logic to determine how baseline was selected + if (baseScan?.VerdictStatus == "allowed") return "last-green"; + if (baseScan?.ReleaseTag != null) return "previous-release"; + return "manual"; + } + + private static string GenerateRationale(string type, Scan? baseScan, Scan? 
headScan) + { + return type switch + { + "last-green" => $"Selected last prod release with Allowed verdict under policy {baseScan?.PolicyVersion}.", + "previous-release" => $"Selected previous release: {baseScan?.ReleaseTag}", + "manual" => "User manually selected this baseline for comparison.", + _ => "Baseline selected for comparison." + }; + } + + private static string GenerateDetailedExplanation(string type, Scan? baseScan, Scan? headScan) + { + return type switch + { + "last-green" => $"This baseline was automatically selected because it represents the most recent scan " + + $"that received an 'Allowed' verdict under the current policy. This ensures you're " + + $"comparing against a known-good state that passed all security gates.", + "previous-release" => $"This baseline corresponds to the previous release tag in your version history. " + + $"Comparing against the previous release helps identify what changed between versions.", + _ => "This baseline was manually selected for comparison." + }; + } + + private static IReadOnlyList GetSelectionCriteria(string type) + { + return type switch + { + "last-green" => new[] { "Verdict = Allowed", "Same environment", "Most recent" }, + "previous-release" => new[] { "Has release tag", "Previous in version order" }, + _ => Array.Empty() + }; + } +} +``` + +**Acceptance Criteria**: +- [ ] GET /api/v1/baselines/recommendations/{artifactDigest} returns baseline options +- [ ] GET /api/v1/baselines/rationale/{baseDigest}/{headDigest} returns selection rationale +- [ ] Recommendations sorted by relevance +- [ ] Rationale includes auditor-friendly explanation +- [ ] Deterministic output (same inputs → same recommendations) + +--- + +### T2: Delta Computation API + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +API endpoint to compute delta verdict between two scans. 
+ +**Implementation Path**: `Endpoints/DeltaEndpoints.cs` (new file) + +```csharp +// DeltaEndpoints.cs +namespace StellaOps.Scanner.WebService.Endpoints; + +public static class DeltaEndpoints +{ + public static void MapDeltaEndpoints(this IEndpointRouteBuilder routes) + { + var group = routes.MapGroup("/api/v1/delta") + .WithTags("Delta"); + + group.MapPost("/compute", ComputeDelta) + .WithName("ComputeDelta") + .WithSummary("Compute delta verdict between two artifacts") + .Produces(StatusCodes.Status200OK) + .Produces(StatusCodes.Status202Accepted); + + group.MapGet("/{deltaId}", GetDelta) + .WithName("GetDelta") + .WithSummary("Get computed delta by ID") + .Produces(StatusCodes.Status200OK); + + group.MapGet("/{deltaId}/trust-indicators", GetTrustIndicators) + .WithName("GetDeltaTrustIndicators") + .WithSummary("Get trust indicators for a delta") + .Produces(StatusCodes.Status200OK); + } + + private static async Task, Accepted>> ComputeDelta( + DeltaComputeRequest request, + IDeltaService deltaService, + CancellationToken ct) + { + // Check if already computed + var existing = await deltaService.GetExistingDeltaAsync( + request.BaseVerdictHash, + request.HeadVerdictHash, + request.PolicyHash, + ct); + + if (existing != null) + { + return TypedResults.Ok(existing); + } + + // Start computation + var pending = await deltaService.StartComputationAsync(request, ct); + return TypedResults.Accepted($"/api/v1/delta/{pending.DeltaId}", pending); + } + + private static async Task> GetDelta( + string deltaId, + IDeltaService deltaService, + CancellationToken ct) + { + var delta = await deltaService.GetByIdAsync(deltaId, ct); + return TypedResults.Ok(delta); + } + + private static async Task> GetTrustIndicators( + string deltaId, + IDeltaService deltaService, + CancellationToken ct) + { + var indicators = await deltaService.GetTrustIndicatorsAsync(deltaId, ct); + return TypedResults.Ok(indicators); + } +} + +public record DeltaComputeRequest +{ + public required string 
BaseVerdictHash { get; init; } + public required string HeadVerdictHash { get; init; } + public required string PolicyHash { get; init; } +} + +public record DeltaVerdictResponse +{ + public required string DeltaId { get; init; } + public required string Status { get; init; } // "pending", "computing", "complete", "failed" + public required string BaseDigest { get; init; } + public required string HeadDigest { get; init; } + public DeltaSummary? Summary { get; init; } + public IReadOnlyList? Categories { get; init; } + public IReadOnlyList? Items { get; init; } + public TrustIndicatorsResponse? TrustIndicators { get; init; } + public DateTime ComputedAt { get; init; } +} + +public record DeltaSummary +{ + public int TotalAdded { get; init; } + public int TotalRemoved { get; init; } + public int TotalChanged { get; init; } + public int NewExploitableVulns { get; init; } + public int ReachabilityFlips { get; init; } + public int VexClaimFlips { get; init; } + public int ComponentChanges { get; init; } +} + +public record DeltaCategory +{ + public required string Id { get; init; } + public required string Name { get; init; } + public required string Icon { get; init; } + public int Added { get; init; } + public int Removed { get; init; } + public int Changed { get; init; } +} + +public record DeltaItem +{ + public required string Id { get; init; } + public required string Category { get; init; } + public required string ChangeType { get; init; } // "added", "removed", "changed" + public required string Title { get; init; } + public string? Severity { get; init; } + public string? BeforeValue { get; init; } + public string? 
AfterValue { get; init; } + public double Priority { get; init; } +} + +public record TrustIndicatorsResponse +{ + public required string DeterminismHash { get; init; } + public required string PolicyVersion { get; init; } + public required string PolicyHash { get; init; } + public required DateTime FeedSnapshotTimestamp { get; init; } + public required string FeedSnapshotHash { get; init; } + public required string SignatureStatus { get; init; } // "valid", "invalid", "missing", "pending" + public string? SignerIdentity { get; init; } + public PolicyDrift? PolicyDrift { get; init; } +} + +public record PolicyDrift +{ + public required string BasePolicyVersion { get; init; } + public required string BasePolicyHash { get; init; } + public required string HeadPolicyVersion { get; init; } + public required string HeadPolicyHash { get; init; } + public bool HasDrift { get; init; } + public string? DriftSummary { get; init; } +} +``` + +**Acceptance Criteria**: +- [ ] POST /api/v1/delta/compute initiates or returns cached delta +- [ ] GET /api/v1/delta/{deltaId} returns delta results +- [ ] GET /api/v1/delta/{deltaId}/trust-indicators returns trust data +- [ ] Idempotent computation (same inputs → same deltaId) +- [ ] 202 Accepted for pending computations + +--- + +### T3: Actionables Engine API + +**Assignee**: Scanner Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: T2 + +**Description**: +API endpoint to generate structured remediation recommendations. 
+ +**Implementation Path**: `Endpoints/ActionablesEndpoints.cs` (new file) + +```csharp +// ActionablesEndpoints.cs +namespace StellaOps.Scanner.WebService.Endpoints; + +public static class ActionablesEndpoints +{ + public static void MapActionablesEndpoints(this IEndpointRouteBuilder routes) + { + var group = routes.MapGroup("/api/v1/actionables") + .WithTags("Actionables"); + + group.MapGet("/delta/{deltaId}", GetDeltaActionables) + .WithName("GetDeltaActionables") + .WithSummary("Get actionable recommendations for a delta") + .Produces(StatusCodes.Status200OK); + } + + private static async Task> GetDeltaActionables( + string deltaId, + IActionablesService actionablesService, + CancellationToken ct) + { + var actionables = await actionablesService.GenerateForDeltaAsync(deltaId, ct); + return TypedResults.Ok(actionables); + } +} + +public record ActionablesResponse +{ + public required string DeltaId { get; init; } + public required IReadOnlyList Actionables { get; init; } + public required DateTime GeneratedAt { get; init; } +} + +public record Actionable +{ + public required string Id { get; init; } + public required string Type { get; init; } // "upgrade", "patch", "vex", "config", "investigate" + public required string Priority { get; init; } // "critical", "high", "medium", "low" + public required string Title { get; init; } + public required string Description { get; init; } + public string? Component { get; init; } + public string? CurrentVersion { get; init; } + public string? TargetVersion { get; init; } + public IReadOnlyList? CveIds { get; init; } + public string? EstimatedEffort { get; init; } + public ActionableEvidence? Evidence { get; init; } +} + +public record ActionableEvidence +{ + public string? WitnessId { get; init; } + public string? VexDocumentId { get; init; } + public string? 
PolicyRuleId { get; init; } +} +``` + +**Service Implementation**: `Services/ActionablesService.cs` (new file) + +```csharp +// ActionablesService.cs +namespace StellaOps.Scanner.WebService.Services; + +public interface IActionablesService +{ + Task GenerateForDeltaAsync(string deltaId, CancellationToken ct); +} + +public class ActionablesService : IActionablesService +{ + private readonly IDeltaService _deltaService; + private readonly IPackageAdvisoryService _advisoryService; + private readonly IVexService _vexService; + + public ActionablesService( + IDeltaService deltaService, + IPackageAdvisoryService advisoryService, + IVexService vexService) + { + _deltaService = deltaService; + _advisoryService = advisoryService; + _vexService = vexService; + } + + public async Task GenerateForDeltaAsync(string deltaId, CancellationToken ct) + { + var delta = await _deltaService.GetByIdAsync(deltaId, ct); + var actionables = new List(); + + foreach (var item in delta.Items ?? Array.Empty()) + { + var action = await GenerateActionableForItem(item, ct); + if (action != null) + { + actionables.Add(action); + } + } + + // Sort by priority + actionables = actionables + .OrderBy(a => GetPriorityOrder(a.Priority)) + .ThenBy(a => a.Title) + .ToList(); + + return new ActionablesResponse + { + DeltaId = deltaId, + Actionables = actionables, + GeneratedAt = DateTime.UtcNow + }; + } + + private async Task GenerateActionableForItem(DeltaItem item, CancellationToken ct) + { + return item.Category switch + { + "vulnerabilities" when item.ChangeType == "added" => + await GenerateVulnActionable(item, ct), + "reachability" when item.ChangeType == "changed" => + await GenerateReachabilityActionable(item, ct), + "components" when item.ChangeType == "added" => + await GenerateComponentActionable(item, ct), + "unknowns" => + GenerateUnknownsActionable(item), + _ => null + }; + } + + private async Task GenerateVulnActionable(DeltaItem item, CancellationToken ct) + { + // Look up fix version + var 
fixVersion = await _advisoryService.GetFixVersionAsync(item.Id, ct); + + return new Actionable + { + Id = $"action-{item.Id}", + Type = fixVersion != null ? "upgrade" : "investigate", + Priority = item.Severity ?? "medium", + Title = fixVersion != null + ? $"Upgrade to fix {item.Title}" + : $"Investigate {item.Title}", + Description = fixVersion != null + ? $"Upgrade component to version {fixVersion} to remediate this vulnerability." + : $"New vulnerability detected. Investigate impact and consider VEX statement if not affected.", + TargetVersion = fixVersion, + CveIds = new[] { item.Id } + }; + } + + private async Task GenerateReachabilityActionable(DeltaItem item, CancellationToken ct) + { + return new Actionable + { + Id = $"action-{item.Id}", + Type = "investigate", + Priority = "high", + Title = $"Review reachability change: {item.Title}", + Description = "Code path reachability changed. Review if vulnerable function is now reachable from entrypoint.", + Evidence = new ActionableEvidence { WitnessId = item.Id } + }; + } + + private async Task GenerateComponentActionable(DeltaItem item, CancellationToken ct) + { + return new Actionable + { + Id = $"action-{item.Id}", + Type = "investigate", + Priority = "low", + Title = $"New component: {item.Title}", + Description = "New dependency added. Verify it meets security requirements." + }; + } + + private Actionable GenerateUnknownsActionable(DeltaItem item) + { + return new Actionable + { + Id = $"action-{item.Id}", + Type = "investigate", + Priority = "medium", + Title = $"Resolve unknown: {item.Title}", + Description = "Missing information detected. Provide SBOM or VEX data to resolve." 
+ }; + } + + private static int GetPriorityOrder(string priority) => priority switch + { + "critical" => 0, + "high" => 1, + "medium" => 2, + "low" => 3, + _ => 4 + }; +} +``` + +**Acceptance Criteria**: +- [ ] GET /api/v1/actionables/delta/{deltaId} returns recommendations +- [ ] Actionables sorted by priority +- [ ] Upgrade recommendations include target version +- [ ] Investigate recommendations include evidence links +- [ ] VEX recommendations for not-affected cases + +--- + +### T4: Evidence/Proof API Extensions + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T2 + +**Description**: +Extend existing evidence API to support delta-specific evidence. + +**Implementation Path**: Extend `Endpoints/EvidenceEndpoints.cs` + +```csharp +// Add to existing EvidenceEndpoints.cs +group.MapGet("/delta/{deltaId}/items/{itemId}", GetDeltaItemEvidence) + .WithName("GetDeltaItemEvidence") + .WithSummary("Get evidence for a specific delta item") + .Produces(StatusCodes.Status200OK); + +group.MapGet("/delta/{deltaId}/witness-paths", GetDeltaWitnessPaths) + .WithName("GetDeltaWitnessPaths") + .WithSummary("Get witness paths for reachability changes in delta") + .Produces(StatusCodes.Status200OK); + +group.MapGet("/delta/{deltaId}/vex-merge/{vulnId}", GetVexMergeExplanation) + .WithName("GetVexMergeExplanation") + .WithSummary("Get VEX merge explanation for a vulnerability") + .Produces(StatusCodes.Status200OK); +``` + +**Response Models**: + +```csharp +public record DeltaItemEvidenceResponse +{ + public required string ItemId { get; init; } + public required string DeltaId { get; init; } + public object? BeforeEvidence { get; init; } + public object? AfterEvidence { get; init; } + public IReadOnlyList? WitnessPaths { get; init; } + public VexMergeExplanationResponse? 
VexMerge { get; init; } +} + +public record WitnessPathsResponse +{ + public required string DeltaId { get; init; } + public required IReadOnlyList Paths { get; init; } +} + +public record WitnessPath +{ + public required string Id { get; init; } + public required string Entrypoint { get; init; } + public required string Sink { get; init; } + public required IReadOnlyList Nodes { get; init; } + public required string Confidence { get; init; } // "confirmed", "likely", "present" + public IReadOnlyList? Gates { get; init; } +} + +public record WitnessNode +{ + public required string Method { get; init; } + public string? File { get; init; } + public int? Line { get; init; } + public bool IsEntrypoint { get; init; } + public bool IsSink { get; init; } +} + +public record VexMergeExplanationResponse +{ + public required string VulnId { get; init; } + public required string FinalStatus { get; init; } + public required IReadOnlyList Sources { get; init; } + public required string MergeStrategy { get; init; } // "priority", "latest", "conservative" + public string? ConflictResolution { get; init; } +} + +public record VexClaimSource +{ + public required string Source { get; init; } // "vendor", "distro", "internal", "community" + public required string Document { get; init; } + public required string Status { get; init; } + public string? 
Justification { get; init; } + public required DateTime Timestamp { get; init; } + public int Priority { get; init; } +} +``` + +**Acceptance Criteria**: +- [ ] GET /api/v1/evidence/delta/{deltaId}/items/{itemId} returns before/after evidence +- [ ] GET /api/v1/evidence/delta/{deltaId}/witness-paths returns call paths +- [ ] GET /api/v1/evidence/delta/{deltaId}/vex-merge/{vulnId} returns merge explanation +- [ ] Witness paths include confidence and gates + +--- + +### T5: OpenAPI Specification Update + +**Assignee**: Scanner Team +**Story Points**: 1 +**Status**: TODO +**Dependencies**: T1, T2, T3, T4 + +**Description**: +Update OpenAPI spec with new delta comparison endpoints. + +**Implementation Path**: `openapi/scanner-api.yaml` + +**Acceptance Criteria**: +- [ ] All new endpoints documented in OpenAPI +- [ ] Request/response schemas defined +- [ ] Examples provided for each endpoint +- [ ] `npm run api:lint` passes + +--- + +### T6: Integration Tests + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1, T2, T3, T4 + +**Description**: +Integration tests for delta comparison API. 
+ +**Implementation Path**: `__Tests/StellaOps.Scanner.WebService.Tests/DeltaApiTests.cs` + +**Acceptance Criteria**: +- [ ] Tests for baseline recommendations API +- [ ] Tests for delta computation API +- [ ] Tests for actionables generation +- [ ] Tests for evidence retrieval +- [ ] Tests for idempotent behavior +- [ ] All tests pass with `dotnet test` + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Scanner Team | Baseline Selection API | +| 2 | T2 | TODO | T1 | Scanner Team | Delta Computation API | +| 3 | T3 | TODO | T2 | Scanner Team | Actionables Engine API | +| 4 | T4 | TODO | T2 | Scanner Team | Evidence/Proof API Extensions | +| 5 | T5 | TODO | T1-T4 | Scanner Team | OpenAPI Specification Update | +| 6 | T6 | TODO | T1-T4 | Scanner Team | Integration Tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created to support Delta Compare View UI (Sprint 4200.0002.0003). Derived from advisory "21-Dec-2025 - Smart Diff - Reproducibility as a Feature.md". 
| Claude | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Idempotent delta computation | Decision | Scanner Team | Cache by (base_hash, head_hash, policy_hash) | +| Baseline selection algorithm | Decision | Scanner Team | Prefer last green, then previous release, then parent commit | +| Actionables priority order | Decision | Scanner Team | critical > high > medium > low | +| VEX merge strategy | Decision | Scanner Team | Priority-based by default (vendor > distro > internal > community) | + +--- + +## Dependencies + +| Dependency | Sprint | Status | Notes | +|------------|--------|--------|-------| +| Smart-Diff Core | 3500 | DONE | Core delta computation engine | +| Delta Compare View UI | 4200.0002.0003 | TODO | Consumer of these APIs | +| VEX Service | Excititor | EXISTS | VEX merge logic | +| Package Advisory Service | Concelier | EXISTS | Fix version lookup | + +--- + +## Success Criteria + +- [ ] All 6 tasks marked DONE +- [ ] All endpoints return expected responses +- [ ] Baseline selection includes rationale +- [ ] Delta computation is idempotent +- [ ] Actionables are sorted by priority +- [ ] Evidence includes witness paths and VEX merge +- [ ] OpenAPI spec valid +- [ ] Integration tests pass +- [ ] `dotnet build` succeeds +- [ ] `dotnet test` succeeds diff --git a/docs/implplan/SPRINT_4300_0001_0001_cli_attestation_verify.md b/docs/implplan/SPRINT_4300_0001_0001_cli_attestation_verify.md new file mode 100644 index 000000000..ba139a02d --- /dev/null +++ b/docs/implplan/SPRINT_4300_0001_0001_cli_attestation_verify.md @@ -0,0 +1,624 @@ +# Sprint 4300.0001.0001 - CLI Attestation Chain Verify Command + +## Topic & Scope + +- Implement `stella verify image --require sbom,vex,decision` command +- Discover attestations via OCI referrers API +- Verify DSSE signatures and chain integrity +- Return signed summary; non-zero exit for CI/CD gates +- Support offline verification mode + +**Working directory:** 
`src/Cli/StellaOps.Cli/Commands/` + +## Dependencies & Concurrency + +- **Upstream (DONE):** + - SPRINT_4100_0003_0002: OCI Referrer Discovery (OciReferrerDiscovery, RvaOciPublisher) + - SPRINT_4100_0003_0001: Risk Verdict Attestation (RvaVerifier) +- **Downstream:** CI/CD integration documentation +- **Safe to parallelize with:** SPRINT_4300_0001_0002, SPRINT_4300_0002_* + +## Documentation Prerequisites + +- `docs/modules/cli/architecture.md` +- `src/Cli/StellaOps.Cli/AGENTS.md` +- SPRINT_4100_0003_0002 (OCI referrer patterns) + +--- + +## Tasks + +### T1: Define VerifyImageCommand + +**Assignee**: CLI Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: — + +**Description**: +Add `stella verify image` subcommand with attestation requirements. + +**Implementation Path**: `Commands/VerifyCommandGroup.cs` (extend) + +**Command Signature**: +``` +stella verify image + --require # sbom,vex,decision (comma-separated) + --trust-policy # Trust policy YAML (signers, issuers) + --output # table, json, sarif + --strict # Fail on any missing attestation + --verbose # Show verification details +``` + +**Implementation**: +```csharp +private static Command BuildVerifyImageCommand( + IServiceProvider services, + Option verboseOption, + CancellationToken cancellationToken) +{ + var referenceArg = new Argument("reference") + { + Description = "Image reference (registry/repo@sha256:digest or registry/repo:tag)" + }; + + var requireOption = new Option("--require", "-r") + { + Description = "Required attestation types: sbom, vex, decision, approval", + AllowMultipleArgumentsPerToken = true + }; + requireOption.SetDefaultValue(new[] { "sbom", "vex", "decision" }); + + var trustPolicyOption = new Option("--trust-policy") + { + Description = "Path to trust policy file (YAML)" + }; + + var outputOption = new Option("--output", "-o") + { + Description = "Output format: table, json, sarif" + }.SetDefaultValue("table").FromAmong("table", "json", "sarif"); + + var strictOption = 
new Option("--strict") + { + Description = "Fail if any required attestation is missing" + }; + + var command = new Command("image", "Verify attestation chain for a container image") + { + referenceArg, + requireOption, + trustPolicyOption, + outputOption, + strictOption, + verboseOption + }; + + command.SetAction(parseResult => + { + var reference = parseResult.GetValue(referenceArg) ?? string.Empty; + var require = parseResult.GetValue(requireOption) ?? Array.Empty(); + var trustPolicy = parseResult.GetValue(trustPolicyOption); + var output = parseResult.GetValue(outputOption) ?? "table"; + var strict = parseResult.GetValue(strictOption); + var verbose = parseResult.GetValue(verboseOption); + + return CommandHandlers.HandleVerifyImageAsync( + services, reference, require, trustPolicy, output, strict, verbose, cancellationToken); + }); + + return command; +} +``` + +**Acceptance Criteria**: +- [ ] `stella verify image` command registered +- [ ] `--require` accepts comma-separated attestation types +- [ ] `--trust-policy` loads trust configuration +- [ ] `--output` supports table, json, sarif formats +- [ ] `--strict` mode fails on missing attestations +- [ ] Help text documents all options + +--- + +### T2: Implement ImageAttestationVerifier Service + +**Assignee**: CLI Team +**Story Points**: 4 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Create service that discovers and verifies attestations for an image. 
+ +**Implementation Path**: `Services/ImageAttestationVerifier.cs` (new) + +**Implementation**: +```csharp +namespace StellaOps.Cli.Services; + +public sealed class ImageAttestationVerifier : IImageAttestationVerifier +{ + private readonly IOciReferrerDiscovery _referrerDiscovery; + private readonly IRvaVerifier _rvaVerifier; + private readonly IDsseVerifier _dsseVerifier; + private readonly ITrustPolicyLoader _trustPolicyLoader; + private readonly ILogger _logger; + + public async Task VerifyAsync( + ImageVerificationRequest request, + CancellationToken ct = default) + { + var result = new ImageVerificationResult + { + ImageReference = request.Reference, + ImageDigest = await ResolveDigestAsync(request.Reference, ct), + VerifiedAt = DateTimeOffset.UtcNow + }; + + // Load trust policy + var trustPolicy = request.TrustPolicyPath is not null + ? await _trustPolicyLoader.LoadAsync(request.TrustPolicyPath, ct) + : TrustPolicy.Default; + + // Discover attestations via OCI referrers + var referrers = await _referrerDiscovery.ListReferrersAsync( + request.Registry, request.Repository, result.ImageDigest, ct); + + if (!referrers.IsSuccess) + { + result.Errors.Add($"Failed to discover referrers: {referrers.Error}"); + return result; + } + + // Group by attestation type + var attestationsByType = referrers.Referrers + .GroupBy(r => MapArtifactTypeToAttestationType(r.ArtifactType)) + .ToDictionary(g => g.Key, g => g.ToList()); + + // Verify each required attestation type + foreach (var requiredType in request.RequiredTypes) + { + var verification = await VerifyAttestationTypeAsync( + requiredType, attestationsByType, trustPolicy, ct); + result.Attestations.Add(verification); + } + + // Compute overall result + result.IsValid = result.Attestations.All(a => a.IsValid || !request.Strict); + result.MissingTypes = request.RequiredTypes + .Except(result.Attestations.Where(a => a.IsValid).Select(a => a.Type)) + .ToList(); + + return result; + } + + private async Task 
VerifyAttestationTypeAsync( + string type, + Dictionary> attestationsByType, + TrustPolicy trustPolicy, + CancellationToken ct) + { + if (!attestationsByType.TryGetValue(type, out var referrers) || referrers.Count == 0) + { + return new AttestationVerification + { + Type = type, + IsValid = false, + Status = AttestationStatus.Missing, + Message = $"No {type} attestation found" + }; + } + + // Verify the most recent attestation + var latest = referrers.OrderByDescending(r => r.Annotations.GetValueOrDefault("created")).First(); + + // Fetch and verify DSSE envelope + var envelope = await FetchEnvelopeAsync(latest.Digest, ct); + var verifyResult = await _dsseVerifier.VerifyAsync(envelope, trustPolicy, ct); + + return new AttestationVerification + { + Type = type, + IsValid = verifyResult.IsValid, + Status = verifyResult.IsValid ? AttestationStatus.Verified : AttestationStatus.Invalid, + Digest = latest.Digest, + SignerIdentity = verifyResult.SignerIdentity, + Message = verifyResult.IsValid ? "Signature valid" : verifyResult.Error, + VerifiedAt = DateTimeOffset.UtcNow + }; + } +} + +public sealed record ImageVerificationRequest +{ + public required string Reference { get; init; } + public required string Registry { get; init; } + public required string Repository { get; init; } + public required IReadOnlyList RequiredTypes { get; init; } + public string? 
TrustPolicyPath { get; init; } + public bool Strict { get; init; } +} + +public sealed record ImageVerificationResult +{ + public required string ImageReference { get; init; } + public required string ImageDigest { get; init; } + public required DateTimeOffset VerifiedAt { get; init; } + public bool IsValid { get; set; } + public List Attestations { get; } = []; + public List MissingTypes { get; set; } = []; + public List Errors { get; } = []; +} + +public sealed record AttestationVerification +{ + public required string Type { get; init; } + public required bool IsValid { get; init; } + public required AttestationStatus Status { get; init; } + public string? Digest { get; init; } + public string? SignerIdentity { get; init; } + public string? Message { get; init; } + public DateTimeOffset? VerifiedAt { get; init; } +} + +public enum AttestationStatus +{ + Verified, + Invalid, + Missing, + Expired, + UntrustedSigner +} +``` + +**Acceptance Criteria**: +- [ ] `ImageAttestationVerifier.cs` created +- [ ] Discovers attestations via OCI referrers +- [ ] Verifies DSSE signatures +- [ ] Validates against trust policy +- [ ] Returns comprehensive verification result +- [ ] Handles missing attestations gracefully + +--- + +### T3: Implement Trust Policy Loader + +**Assignee**: CLI Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: — + +**Description**: +Load and parse trust policy configuration. 
+ +**Implementation Path**: `Services/TrustPolicyLoader.cs` (new) + +**Trust Policy Schema**: +```yaml +# trust-policy.yaml +version: "1" +attestations: + sbom: + required: true + signers: + - identity: "builder@stellaops.example.com" + issuer: "https://accounts.google.com" + vex: + required: true + signers: + - identity: "security@stellaops.example.com" + decision: + required: true + signers: + - identity: "policy-engine@stellaops.example.com" + approval: + required: false + signers: + - identity: "*@stellaops.example.com" + +defaults: + requireRekor: true + maxAge: "168h" # 7 days +``` + +**Acceptance Criteria**: +- [ ] `TrustPolicyLoader.cs` created +- [ ] Parses YAML trust policy +- [ ] Validates policy structure +- [ ] Default policy when none specified +- [ ] Signer identity matching (exact, wildcard) + +--- + +### T4: Implement Command Handler + +**Assignee**: CLI Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1, T2, T3 + +**Description**: +Implement the command handler that orchestrates verification. + +**Implementation Path**: `Commands/CommandHandlers.VerifyImage.cs` (new) + +**Implementation**: +```csharp +public static async Task HandleVerifyImageAsync( + IServiceProvider services, + string reference, + string[] require, + string? 
trustPolicy, + string output, + bool strict, + bool verbose, + CancellationToken ct) +{ + var verifier = services.GetRequiredService<IImageAttestationVerifier>(); + var console = services.GetRequiredService<IConsoleOutput>(); + + // Parse reference + var (registry, repository, digest) = ParseImageReference(reference); + + var request = new ImageVerificationRequest + { + Reference = reference, + Registry = registry, + Repository = repository, + RequiredTypes = require.ToList(), + TrustPolicyPath = trustPolicy, + Strict = strict + }; + + var result = await verifier.VerifyAsync(request, ct); + + // Output results + switch (output) + { + case "json": + console.WriteJson(result); + break; + case "sarif": + console.WriteSarif(ConvertToSarif(result)); + break; + default: + WriteTableOutput(console, result, verbose); + break; + } + + // Return exit code + return result.IsValid ? 0 : 1; +} + +private static void WriteTableOutput(IConsoleOutput console, ImageVerificationResult result, bool verbose) +{ + console.WriteLine($"Image: {result.ImageReference}"); + console.WriteLine($"Digest: {result.ImageDigest}"); + console.WriteLine(); + + var table = new ConsoleTable("Type", "Status", "Signer", "Message"); + foreach (var att in result.Attestations) + { + var status = att.IsValid ? "[green]PASS[/]" : "[red]FAIL[/]"; + table.AddRow(att.Type, status, att.SignerIdentity ?? "-", att.Message ?? "-"); + } + console.WriteTable(table); + + console.WriteLine(); + console.WriteLine(result.IsValid + ? 
"[green]Verification PASSED[/]" + : "[red]Verification FAILED[/]"); + + if (result.MissingTypes.Count > 0) + { + console.WriteLine($"[yellow]Missing: {string.Join(", ", result.MissingTypes)}[/]"); + } +} +``` + +**Acceptance Criteria**: +- [ ] Command handler implemented +- [ ] Parses image reference (registry/repo@digest or :tag) +- [ ] Table output with colorized status +- [ ] JSON output for automation +- [ ] SARIF output for security tools +- [ ] Exit code 0 on pass, 1 on fail + +--- + +### T5: Add Unit Tests + +**Assignee**: CLI Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T4 + +**Description**: +Comprehensive tests for image verification. + +**Implementation Path**: `src/Cli/__Tests/StellaOps.Cli.Tests/Commands/VerifyImageTests.cs` + +**Test Cases**: +```csharp +public class VerifyImageTests +{ + [Fact] + public async Task Verify_AllAttestationsPresent_ReturnsPass() + { + // Arrange + var verifier = CreateVerifierWithMocks( + sbom: CreateValidAttestation(), + vex: CreateValidAttestation(), + decision: CreateValidAttestation()); + + var request = CreateRequest(require: new[] { "sbom", "vex", "decision" }); + + // Act + var result = await verifier.VerifyAsync(request); + + // Assert + result.IsValid.Should().BeTrue(); + result.Attestations.Should().HaveCount(3); + result.Attestations.Should().OnlyContain(a => a.IsValid); + } + + [Fact] + public async Task Verify_MissingAttestation_Strict_ReturnsFail() + { + var verifier = CreateVerifierWithMocks( + sbom: CreateValidAttestation(), + vex: null, // Missing + decision: CreateValidAttestation()); + + var request = CreateRequest(require: new[] { "sbom", "vex", "decision" }, strict: true); + + var result = await verifier.VerifyAsync(request); + + result.IsValid.Should().BeFalse(); + result.MissingTypes.Should().Contain("vex"); + } + + [Fact] + public async Task Verify_InvalidSignature_ReturnsFail() + { + var verifier = CreateVerifierWithMocks( + sbom: CreateInvalidAttestation("Bad signature")); + + 
var request = CreateRequest(require: new[] { "sbom" }); + + var result = await verifier.VerifyAsync(request); + + result.IsValid.Should().BeFalse(); + result.Attestations[0].Status.Should().Be(AttestationStatus.Invalid); + } + + [Fact] + public async Task Verify_UntrustedSigner_ReturnsFail() + { + var verifier = CreateVerifierWithMocks( + sbom: CreateAttestationWithSigner("untrusted@evil.com")); + + var request = CreateRequest( + require: new[] { "sbom" }, + trustPolicy: CreatePolicyAllowing("trusted@example.com")); + + var result = await verifier.VerifyAsync(request); + + result.IsValid.Should().BeFalse(); + result.Attestations[0].Status.Should().Be(AttestationStatus.UntrustedSigner); + } + + [Fact] + public void ParseImageReference_WithDigest_Parses() + { + var (registry, repo, digest) = CommandHandlers.ParseImageReference( + "gcr.io/myproject/myapp@sha256:abc123"); + + registry.Should().Be("gcr.io"); + repo.Should().Be("myproject/myapp"); + digest.Should().Be("sha256:abc123"); + } + + [Fact] + public async Task Handler_ValidResult_ReturnsExitCode0() + { + var services = CreateServicesWithValidVerifier(); + + var exitCode = await CommandHandlers.HandleVerifyImageAsync( + services, "registry/app@sha256:abc", + new[] { "sbom" }, null, "table", false, false, CancellationToken.None); + + exitCode.Should().Be(0); + } + + [Fact] + public async Task Handler_InvalidResult_ReturnsExitCode1() + { + var services = CreateServicesWithFailingVerifier(); + + var exitCode = await CommandHandlers.HandleVerifyImageAsync( + services, "registry/app@sha256:abc", + new[] { "sbom" }, null, "table", true, false, CancellationToken.None); + + exitCode.Should().Be(1); + } +} +``` + +**Acceptance Criteria**: +- [ ] All attestations present test +- [ ] Missing attestation (strict) test +- [ ] Invalid signature test +- [ ] Untrusted signer test +- [ ] Reference parsing tests +- [ ] Exit code tests +- [ ] All 7+ tests pass + +--- + +### T6: Add DI Registration and Integration + +**Assignee**: 
CLI Team +**Story Points**: 1 +**Status**: TODO +**Dependencies**: T2, T3 + +**Description**: +Register services and integrate command. + +**Acceptance Criteria**: +- [ ] `IImageAttestationVerifier` registered in DI +- [ ] `ITrustPolicyLoader` registered in DI +- [ ] Command added to verify group +- [ ] Integration test with mock registry + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | CLI Team | Define VerifyImageCommand | +| 2 | T2 | TODO | T1 | CLI Team | Implement ImageAttestationVerifier | +| 3 | T3 | TODO | — | CLI Team | Implement Trust Policy Loader | +| 4 | T4 | TODO | T1, T2, T3 | CLI Team | Implement Command Handler | +| 5 | T5 | TODO | T4 | CLI Team | Add unit tests | +| 6 | T6 | TODO | T2, T3 | CLI Team | Add DI registration | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from Explainable Triage advisory gap analysis (G1). 
| Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Default required types | Decision | CLI Team | sbom,vex,decision as defaults | +| SARIF output | Decision | CLI Team | Enables integration with security scanners | +| Trust policy format | Decision | CLI Team | YAML for human readability | +| Exit codes | Decision | CLI Team | 0=pass, 1=fail, 2=error | + +| Risk | Mitigation | +|------|------------| +| Registry auth complexity | Reuse existing OCI auth providers | +| Large referrer lists | Pagination and filtering by type | +| Offline mode | Fallback to local evidence directory | + +--- + +## Success Criteria + +- [ ] All 6 tasks marked DONE +- [ ] `stella verify image` command works end-to-end +- [ ] Exit code 1 when attestations missing/invalid +- [ ] Trust policy filtering works +- [ ] 7+ tests passing +- [ ] `dotnet build` succeeds +- [ ] `dotnet test` succeeds diff --git a/docs/implplan/SPRINT_4300_0001_0001_oci_verdict_attestation_push.md b/docs/implplan/SPRINT_4300_0001_0001_oci_verdict_attestation_push.md new file mode 100644 index 000000000..51ab7ef0c --- /dev/null +++ b/docs/implplan/SPRINT_4300_0001_0001_oci_verdict_attestation_push.md @@ -0,0 +1,181 @@ +# SPRINT_4300_0001_0001: OCI Verdict Attestation Referrer Push + +## Sprint Metadata + +| Field | Value | +|-------|-------| +| **Sprint ID** | 4300_0001_0001 | +| **Title** | OCI Verdict Attestation Referrer Push | +| **Priority** | P0 (Critical) | +| **Moat Strength** | 5 (Structural moat) | +| **Working Directory** | `src/Attestor/`, `src/Scanner/`, `src/Zastava/` | +| **Estimated Effort** | 2 weeks | +| **Dependencies** | VerdictReceiptStatement (exists), ProofSpine (exists) | + +--- + +## Objective + +Implement the capability to push signed risk verdicts as OCI referrer artifacts, creating a portable "ship token" that can be attached to container images and verified independently by registries, admission controllers, and audit systems. 
+ +This is the **moat anchor** feature: "We don't output findings; we output an attestable decision that can be replayed." + +--- + +## Background + +The advisory identifies "Signed, replayable risk verdicts" as a **Moat 5** feature. While `VerdictReceiptStatement` and `ProofSpine` infrastructure exist, the verdict is not yet: +1. Pushed as an OCI artifact referrer (per OCI 1.1 spec) +2. Discoverable via `referrers` API +3. Verifiable standalone without StellaOps backend + +Competitors (Syft + Sigstore, cosign) sign SBOMs as attestations, but not **risk decisions end-to-end**. + +--- + +## Deliverables + +### D1: OCI Verdict Artifact Schema +- Define `application/vnd.stellaops.verdict.v1+json` media type +- Create OCI manifest structure for verdict bundle +- Include: verdict statement, proof bundle digest, policy snapshot reference + +### D2: Verdict Pusher Service +- Implement `IVerdictPusher` interface in `StellaOps.Attestor.OCI` +- Support OCI Distribution 1.1 referrers API +- Handle authentication (bearer token, basic auth) +- Retry logic with backoff + +### D3: Scanner Integration +- Hook verdict push into scan completion flow +- Add `--push-verdict` flag to CLI +- Emit telemetry on push success/failure + +### D4: Registry Webhook Observer +- Extend Zastava to observe verdict referrers +- Validate verdict signature on webhook +- Store verdict metadata in findings ledger + +### D5: Verification CLI +- `stella verdict verify <image-ref>` command +- Fetch verdict via referrers API +- Validate signature and replay inputs + +--- + +## Tasks + +### Phase 1: Schema & Models + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| VERDICT-001 | Define OCI verdict media type and manifest schema | TODO | | +| VERDICT-002 | Create `VerdictOciManifest` record in `StellaOps.Attestor.OCI` | TODO | | +| VERDICT-003 | Add verdict artifact type constants | TODO | | +| VERDICT-004 | Write schema validation tests | TODO | | + +### Phase 2: Push Infrastructure + +| ID | 
Task | Status | Assignee | +|----|------|--------|----------| +| VERDICT-005 | Implement `IVerdictPusher` interface | TODO | | +| VERDICT-006 | Create `OciVerdictPusher` with referrers API support | TODO | | +| VERDICT-007 | Add registry authentication handling | TODO | | +| VERDICT-008 | Implement retry with exponential backoff | TODO | | +| VERDICT-009 | Add push telemetry (OTEL spans, metrics) | TODO | | +| VERDICT-010 | Integration tests with local registry (testcontainers) | TODO | | + +### Phase 3: Scanner Integration + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| VERDICT-011 | Add `VerdictPushOptions` to scan configuration | TODO | | +| VERDICT-012 | Hook pusher into `ScanJobProcessor` completion | TODO | | +| VERDICT-013 | Add `--push-verdict` CLI flag | TODO | | +| VERDICT-014 | Update scan status response with verdict digest | TODO | | +| VERDICT-015 | E2E test: scan -> verdict push -> verify | TODO | | + +### Phase 4: Zastava Observer + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| VERDICT-016 | Extend webhook handler for verdict artifacts | TODO | | +| VERDICT-017 | Implement verdict signature validation | TODO | | +| VERDICT-018 | Store verdict metadata in findings ledger | TODO | | +| VERDICT-019 | Add verdict discovery endpoint | TODO | | + +### Phase 5: Verification CLI + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| VERDICT-020 | Implement `stella verdict verify` command | TODO | | +| VERDICT-021 | Fetch verdict via referrers API | TODO | | +| VERDICT-022 | Validate DSSE envelope signature | TODO | | +| VERDICT-023 | Verify input digests against manifest | TODO | | +| VERDICT-024 | Output verification report (JSON/human) | TODO | | + +--- + +## Acceptance Criteria + +1. **AC1**: Verdict can be pushed to any OCI 1.1 compliant registry +2. **AC2**: Verdict is discoverable via `GET /v2//referrers/` +3. **AC3**: `stella verdict verify` succeeds with valid signature +4. 
**AC4**: Verdict includes sbomDigest, feedsDigest, policyDigest for replay +5. **AC5**: Zastava can observe and validate verdict push events + +--- + +## Technical Notes + +### OCI Manifest Structure +```json +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "artifactType": "application/vnd.stellaops.verdict.v1+json", + "config": { + "mediaType": "application/vnd.stellaops.verdict.config.v1+json", + "digest": "sha256:...", + "size": 0 + }, + "layers": [ + { + "mediaType": "application/vnd.stellaops.verdict.v1+json", + "digest": "sha256:...", + "size": 1234 + } + ], + "subject": { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:", + "size": 5678 + }, + "annotations": { + "org.stellaops.verdict.decision": "pass", + "org.stellaops.verdict.timestamp": "2025-12-22T00:00:00Z" + } +} +``` + +### Signing +- Use existing `IProofChainSigner` for DSSE envelope +- Support Sigstore (keyless) and local key signing + +--- + +## Risks & Mitigations + +| Risk | Impact | Mitigation | +|------|--------|------------| +| Registry doesn't support referrers API | Cannot push | Fallback to tag-based approach | +| Large verdict bundles | Slow push | Compress, reference external proofs | +| Key management complexity | Security | Document key rotation procedures | + +--- + +## Documentation Updates + +- [ ] Update `docs/modules/attestor/architecture.md` +- [ ] Add `docs/operations/verdict-attestation-guide.md` +- [ ] Update CLI reference with `verdict` commands diff --git a/docs/implplan/SPRINT_4300_0001_0002_findings_evidence_api.md b/docs/implplan/SPRINT_4300_0001_0002_findings_evidence_api.md new file mode 100644 index 000000000..a6d0d6c9c --- /dev/null +++ b/docs/implplan/SPRINT_4300_0001_0002_findings_evidence_api.md @@ -0,0 +1,511 @@ +# Sprint 4300.0001.0002 - Findings Evidence API Endpoint + +## Topic & Scope + +- Add `GET /api/v1/findings/{findingId}/evidence` endpoint +- Returns consolidated evidence contract 
matching advisory spec +- Uses existing `EvidenceCompositionService` internally +- Add OpenAPI schema documentation + +**Working directory:** `src/Scanner/StellaOps.Scanner.WebService/` + +## Dependencies & Concurrency + +- **Upstream (DONE):** + - EvidenceCompositionService (SPRINT_3800_0003_0001) + - TriageDbContext entities +- **Downstream:** UI evidence drawer integration +- **Safe to parallelize with:** SPRINT_4300_0001_0001, SPRINT_4300_0002_* + +## Documentation Prerequisites + +- `docs/modules/scanner/architecture.md` +- `src/Scanner/StellaOps.Scanner.WebService/AGENTS.md` +- SPRINT_3800_0003_0001 (Evidence API models) + +--- + +## Tasks + +### T1: Define FindingEvidenceResponse Contract + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: — + +**Description**: +Define the response contract matching the advisory specification. + +**Implementation Path**: `Contracts/FindingEvidenceContracts.cs` (new or extend) + +**Contract**: +```csharp +namespace StellaOps.Scanner.WebService.Contracts; + +/// +/// Consolidated evidence response for a finding. +/// Matches the advisory contract for explainable triage UX. +/// +public sealed record FindingEvidenceResponse +{ + /// + /// Unique finding identifier. + /// + [JsonPropertyName("finding_id")] + public required string FindingId { get; init; } + + /// + /// CVE or vulnerability identifier. + /// + [JsonPropertyName("cve")] + public required string Cve { get; init; } + + /// + /// Affected component details. + /// + [JsonPropertyName("component")] + public required ComponentInfo Component { get; init; } + + /// + /// Reachable path from entrypoint to vulnerable code. + /// + [JsonPropertyName("reachable_path")] + public IReadOnlyList ReachablePath { get; init; } = []; + + /// + /// Entrypoint details (HTTP route, CLI command, etc.). + /// + [JsonPropertyName("entrypoint")] + public EntrypointInfo? Entrypoint { get; init; } + + /// + /// VEX exploitability status. 
+ /// + [JsonPropertyName("vex")] + public VexStatusInfo? Vex { get; init; } + + /// + /// When this evidence was last observed/generated. + /// + [JsonPropertyName("last_seen")] + public required DateTimeOffset LastSeen { get; init; } + + /// + /// Content-addressed references to attestations. + /// + [JsonPropertyName("attestation_refs")] + public IReadOnlyList AttestationRefs { get; init; } = []; + + /// + /// Risk score with explanation. + /// + [JsonPropertyName("score")] + public ScoreInfo? Score { get; init; } + + /// + /// Boundary exposure information. + /// + [JsonPropertyName("boundary")] + public BoundaryInfo? Boundary { get; init; } + + /// + /// Evidence freshness and TTL. + /// + [JsonPropertyName("freshness")] + public FreshnessInfo Freshness { get; init; } = new(); +} + +public sealed record ComponentInfo +{ + [JsonPropertyName("name")] + public required string Name { get; init; } + + [JsonPropertyName("version")] + public required string Version { get; init; } + + [JsonPropertyName("purl")] + public string? Purl { get; init; } + + [JsonPropertyName("ecosystem")] + public string? Ecosystem { get; init; } +} + +public sealed record EntrypointInfo +{ + [JsonPropertyName("type")] + public required string Type { get; init; } // http, grpc, cli, cron, queue + + [JsonPropertyName("route")] + public string? Route { get; init; } + + [JsonPropertyName("method")] + public string? Method { get; init; } + + [JsonPropertyName("auth")] + public string? Auth { get; init; } // jwt:scope, mtls, apikey, none +} + +public sealed record VexStatusInfo +{ + [JsonPropertyName("status")] + public required string Status { get; init; } // affected, not_affected, under_investigation, fixed + + [JsonPropertyName("justification")] + public string? Justification { get; init; } + + [JsonPropertyName("timestamp")] + public DateTimeOffset? Timestamp { get; init; } + + [JsonPropertyName("issuer")] + public string? 
Issuer { get; init; } +} + +public sealed record ScoreInfo +{ + [JsonPropertyName("risk_score")] + public required int RiskScore { get; init; } + + [JsonPropertyName("contributions")] + public IReadOnlyList Contributions { get; init; } = []; +} + +public sealed record ScoreContribution +{ + [JsonPropertyName("factor")] + public required string Factor { get; init; } + + [JsonPropertyName("value")] + public required int Value { get; init; } + + [JsonPropertyName("reason")] + public string? Reason { get; init; } +} + +public sealed record BoundaryInfo +{ + [JsonPropertyName("surface")] + public required string Surface { get; init; } + + [JsonPropertyName("exposure")] + public required string Exposure { get; init; } // internet, internal, none + + [JsonPropertyName("auth")] + public AuthInfo? Auth { get; init; } + + [JsonPropertyName("controls")] + public IReadOnlyList Controls { get; init; } = []; +} + +public sealed record AuthInfo +{ + [JsonPropertyName("mechanism")] + public required string Mechanism { get; init; } + + [JsonPropertyName("required_scopes")] + public IReadOnlyList RequiredScopes { get; init; } = []; +} + +public sealed record FreshnessInfo +{ + [JsonPropertyName("is_stale")] + public bool IsStale { get; init; } + + [JsonPropertyName("expires_at")] + public DateTimeOffset? ExpiresAt { get; init; } + + [JsonPropertyName("ttl_remaining_hours")] + public int? TtlRemainingHours { get; init; } +} +``` + +**Acceptance Criteria**: +- [ ] `FindingEvidenceContracts.cs` created +- [ ] All fields from advisory included +- [ ] JSON property names use snake_case +- [ ] XML documentation on all properties +- [ ] Nullable fields where appropriate + +--- + +### T2: Implement FindingsEvidenceController + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Create the REST endpoint for evidence retrieval. 
+ +**Implementation Path**: `Controllers/FindingsEvidenceController.cs` (new) + +**Implementation**: +```csharp +namespace StellaOps.Scanner.WebService.Controllers; + +[ApiController] +[Route("api/v1/findings")] +[Produces("application/json")] +public sealed class FindingsEvidenceController : ControllerBase +{ + private readonly IEvidenceCompositionService _evidenceService; + private readonly ITriageQueryService _triageService; + private readonly ILogger _logger; + + public FindingsEvidenceController( + IEvidenceCompositionService evidenceService, + ITriageQueryService triageService, + ILogger logger) + { + _evidenceService = evidenceService; + _triageService = triageService; + _logger = logger; + } + + /// + /// Get consolidated evidence for a finding. + /// + /// The finding identifier. + /// Include raw source locations (requires elevated permissions). + /// Evidence retrieved successfully. + /// Finding not found. + /// Insufficient permissions for raw source. + [HttpGet("{findingId}/evidence")] + [ProducesResponseType(typeof(FindingEvidenceResponse), StatusCodes.Status200OK)] + [ProducesResponseType(StatusCodes.Status404NotFound)] + [ProducesResponseType(StatusCodes.Status403Forbidden)] + public async Task GetEvidenceAsync( + [FromRoute] string findingId, + [FromQuery] bool includeRaw = false, + CancellationToken ct = default) + { + _logger.LogDebug("Getting evidence for finding {FindingId}", findingId); + + // Check permissions for raw source + if (includeRaw && !User.HasClaim("scope", "evidence:raw")) + { + return Forbid("Requires evidence:raw scope for raw source access"); + } + + // Get finding + var finding = await _triageService.GetFindingAsync(findingId, ct); + if (finding is null) + { + return NotFound(new { error = "Finding not found", findingId }); + } + + // Compose evidence + var evidence = await _evidenceService.ComposeAsync(finding, includeRaw, ct); + + // Map to response + var response = MapToResponse(finding, evidence); + + return Ok(response); 
+ } + + /// + /// Get evidence for multiple findings (batch). + /// + [HttpPost("evidence/batch")] + [ProducesResponseType(typeof(BatchEvidenceResponse), StatusCodes.Status200OK)] + public async Task GetBatchEvidenceAsync( + [FromBody] BatchEvidenceRequest request, + CancellationToken ct = default) + { + if (request.FindingIds.Count > 100) + { + return BadRequest(new { error = "Maximum 100 findings per batch" }); + } + + var results = new List(); + foreach (var findingId in request.FindingIds) + { + var finding = await _triageService.GetFindingAsync(findingId, ct); + if (finding is null) continue; + + var evidence = await _evidenceService.ComposeAsync(finding, false, ct); + results.Add(MapToResponse(finding, evidence)); + } + + return Ok(new BatchEvidenceResponse { Findings = results }); + } + + private static FindingEvidenceResponse MapToResponse( + TriageFinding finding, + ComposedEvidence evidence) + { + return new FindingEvidenceResponse + { + FindingId = finding.Id.ToString(), + Cve = finding.Cve ?? finding.RuleId ?? "unknown", + Component = new ComponentInfo + { + Name = evidence.ComponentName ?? "unknown", + Version = evidence.ComponentVersion ?? "unknown", + Purl = finding.ComponentPurl, + Ecosystem = evidence.Ecosystem + }, + ReachablePath = evidence.ReachablePath ?? [], + Entrypoint = evidence.Entrypoint is not null + ? new EntrypointInfo + { + Type = evidence.Entrypoint.Type, + Route = evidence.Entrypoint.Route, + Method = evidence.Entrypoint.Method, + Auth = evidence.Entrypoint.Auth + } + : null, + Vex = evidence.VexStatus is not null + ? new VexStatusInfo + { + Status = evidence.VexStatus.Status, + Justification = evidence.VexStatus.Justification, + Timestamp = evidence.VexStatus.Timestamp, + Issuer = evidence.VexStatus.Issuer + } + : null, + LastSeen = evidence.LastSeen, + AttestationRefs = evidence.AttestationDigests ?? [], + Score = evidence.Score is not null + ? 
new ScoreInfo + { + RiskScore = evidence.Score.RiskScore, + Contributions = evidence.Score.Contributions + .Select(c => new ScoreContribution + { + Factor = c.Factor, + Value = c.Value, + Reason = c.Reason + }).ToList() + } + : null, + Boundary = evidence.Boundary is not null + ? new BoundaryInfo + { + Surface = evidence.Boundary.Surface, + Exposure = evidence.Boundary.Exposure, + Auth = evidence.Boundary.Auth is not null + ? new AuthInfo + { + Mechanism = evidence.Boundary.Auth.Mechanism, + RequiredScopes = evidence.Boundary.Auth.Scopes ?? [] + } + : null, + Controls = evidence.Boundary.Controls ?? [] + } + : null, + Freshness = new FreshnessInfo + { + IsStale = evidence.IsStale, + ExpiresAt = evidence.ExpiresAt, + TtlRemainingHours = evidence.TtlRemainingHours + } + }; + } +} + +public sealed record BatchEvidenceRequest +{ + [JsonPropertyName("finding_ids")] + public required IReadOnlyList FindingIds { get; init; } +} + +public sealed record BatchEvidenceResponse +{ + [JsonPropertyName("findings")] + public required IReadOnlyList Findings { get; init; } +} +``` + +**Acceptance Criteria**: +- [ ] GET `/api/v1/findings/{findingId}/evidence` works +- [ ] POST `/api/v1/findings/evidence/batch` for batch retrieval +- [ ] `includeRaw` parameter with permission check +- [ ] 404 when finding not found +- [ ] 403 when raw access denied +- [ ] Proper error responses + +--- + +### T3: Add OpenAPI Documentation + +**Assignee**: Scanner Team +**Story Points**: 1 +**Status**: TODO +**Dependencies**: T1, T2 + +**Description**: +Add OpenAPI schema documentation for the endpoint. 
+ +**Implementation Path**: `docs/schemas/findings-evidence-api.openapi.yaml` + +**Acceptance Criteria**: +- [ ] OpenAPI spec added +- [ ] All request/response schemas documented +- [ ] Examples included +- [ ] Error responses documented + +--- + +### T4: Add Unit Tests + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T2 + +**Description**: +Unit tests for the evidence endpoint. + +**Test Cases**: +- [ ] Valid finding returns evidence +- [ ] Unknown finding returns 404 +- [ ] Raw access without permission returns 403 +- [ ] Batch request with mixed results +- [ ] Mapping preserves all fields + +**Acceptance Criteria**: +- [ ] 5+ unit tests passing +- [ ] Controller tested with mocks +- [ ] Response mapping tested + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Scanner Team | Define response contract | +| 2 | T2 | TODO | T1 | Scanner Team | Implement controller | +| 3 | T3 | TODO | T1, T2 | Scanner Team | Add OpenAPI docs | +| 4 | T4 | TODO | T2 | Scanner Team | Add unit tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from Explainable Triage advisory gap analysis (G6). 
| Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Snake_case JSON | Decision | Scanner Team | Matches advisory contract | +| Raw access permission | Decision | Scanner Team | evidence:raw scope required | +| Batch limit | Decision | Scanner Team | 100 findings max per request | + +--- + +## Success Criteria + +- [ ] All 4 tasks marked DONE +- [ ] Endpoint returns evidence matching advisory contract +- [ ] Performance < 300ms per finding +- [ ] 5+ tests passing +- [ ] `dotnet build` succeeds +- [ ] `dotnet test` succeeds diff --git a/docs/implplan/SPRINT_4300_0001_0002_one_command_audit_replay.md b/docs/implplan/SPRINT_4300_0001_0002_one_command_audit_replay.md new file mode 100644 index 000000000..b2feb3754 --- /dev/null +++ b/docs/implplan/SPRINT_4300_0001_0002_one_command_audit_replay.md @@ -0,0 +1,181 @@ +# SPRINT_4300_0001_0002: One-Command Audit Replay CLI + +## Sprint Metadata + +| Field | Value | +|-------|-------| +| **Sprint ID** | 4300_0001_0002 | +| **Title** | One-Command Audit Replay CLI | +| **Priority** | P0 (Critical) | +| **Moat Strength** | 5 (Structural moat) | +| **Working Directory** | `src/Cli/`, `src/__Libraries/StellaOps.Replay.Core/`, `src/AirGap/` | +| **Estimated Effort** | 2 weeks | +| **Dependencies** | ReplayManifest (exists), ReplayVerifier (exists), SPRINT_4300_0001_0001 | + +--- + +## Objective + +Implement a single CLI command that enables auditors to replay and verify risk verdicts from a self-contained bundle, without network connectivity or access to the StellaOps backend. + +**Moat thesis**: "We don't output findings; we output an attestable decision that can be replayed." + +--- + +## Background + +The advisory requires "air-gapped reproducibility" where audits are a "one-command replay." 
Current implementation has: +- `ReplayManifest` with input hashes +- `ReplayVerifier` with depth levels (HashOnly, FullRecompute, PolicyFreeze) +- `ReplayBundleWriter` for bundle creation + +**Gap**: No unified CLI command; manual steps required. + +--- + +## Deliverables + +### D1: Audit Bundle Format +- Define `audit-bundle.tar.gz` structure +- Include: manifest, SBOM snapshot, feed snapshot, policy snapshot, verdict +- Add merkle root for integrity + +### D2: Bundle Export Command +- `stella audit export --scan-id= --output=./audit.tar.gz` +- Package all inputs and verdict into portable bundle +- Sign bundle manifest + +### D3: Bundle Replay Command +- `stella audit replay --bundle=./audit.tar.gz` +- Extract and validate bundle +- Re-execute policy evaluation +- Compare verdict hashes + +### D4: Verification Report +- JSON and human-readable output +- Show: input match, verdict match, drift detection +- Exit code: 0=match, 1=drift, 2=error + +### D5: Air-Gap Integration +- Integrate with `AirGap.Importer` for offline execution +- Support `--offline` mode (no network checks) + +--- + +## Tasks + +### Phase 1: Bundle Format + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| REPLAY-001 | Define audit bundle manifest schema (`audit-manifest.json`) | TODO | | +| REPLAY-002 | Create `AuditBundleWriter` in `StellaOps.Replay.Core` | TODO | | +| REPLAY-003 | Implement merkle root calculation for bundle contents | TODO | | +| REPLAY-004 | Add bundle signature (DSSE envelope) | TODO | | +| REPLAY-005 | Write bundle format specification doc | TODO | | + +### Phase 2: Export Command + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| REPLAY-006 | Add `stella audit export` command structure | TODO | | +| REPLAY-007 | Implement scan snapshot fetcher | TODO | | +| REPLAY-008 | Implement feed snapshot exporter (point-in-time) | TODO | | +| REPLAY-009 | Implement policy snapshot exporter | TODO | | +| REPLAY-010 | Package into tar.gz 
with manifest | TODO | | +| REPLAY-011 | Sign manifest and add to bundle | TODO | | +| REPLAY-012 | Add progress output for large bundles | TODO | | + +### Phase 3: Replay Command + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| REPLAY-013 | Add `stella audit replay` command structure | TODO | | +| REPLAY-014 | Implement bundle extractor with validation | TODO | | +| REPLAY-015 | Create isolated replay context (no external calls) | TODO | | +| REPLAY-016 | Load SBOM, feeds, policy from bundle | TODO | | +| REPLAY-017 | Re-execute `TrustLatticeEngine.Evaluate()` | TODO | | +| REPLAY-018 | Compare computed verdict hash with stored | TODO | | +| REPLAY-019 | Detect and report input drift | TODO | | + +### Phase 4: Verification Report + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| REPLAY-020 | Define `AuditReplayReport` model | TODO | | +| REPLAY-021 | Implement JSON report formatter | TODO | | +| REPLAY-022 | Implement human-readable report formatter | TODO | | +| REPLAY-023 | Add `--format=json|text` flag | TODO | | +| REPLAY-024 | Set exit codes based on verdict match | TODO | | + +### Phase 5: Air-Gap Integration + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| REPLAY-025 | Add `--offline` flag to replay command | TODO | | +| REPLAY-026 | Integrate with `AirGap.Importer` trust store | TODO | | +| REPLAY-027 | Validate time anchor from bundle | TODO | | +| REPLAY-028 | E2E test: export -> transfer -> replay offline | TODO | | + +--- + +## Acceptance Criteria + +1. **AC1**: `stella audit export` produces a self-contained bundle +2. **AC2**: `stella audit replay` succeeds with matching verdict on same inputs +3. **AC3**: Replay fails deterministically if any input is modified +4. **AC4**: Works fully offline with `--offline` flag +5. 
**AC5**: Bundle is verifiable months after creation + +--- + +## Technical Notes + +### Bundle Structure +``` +audit-bundle.tar.gz +├── audit-manifest.json # Bundle metadata + merkle root +├── audit-manifest.sig # DSSE signature of manifest +├── sbom/ +│ └── sbom.spdx.json # SBOM snapshot +├── feeds/ +│ ├── advisories.ndjson # Advisory snapshot +│ └── feeds-digest.sha256 # Feed content hash +├── policy/ +│ ├── policy-bundle.tar # OPA bundle +│ └── policy-digest.sha256 # Policy hash +├── vex/ +│ └── vex-statements.json # VEX claims at time of scan +└── verdict/ + ├── verdict.json # VerdictReceiptStatement + └── proof-bundle.json # Full proof chain +``` + +### Replay Semantics +``` +same_inputs = ( + sha256(sbom) == manifest.sbomDigest && + sha256(feeds) == manifest.feedsDigest && + sha256(policy) == manifest.policyDigest +) +same_verdict = sha256(computed_verdict) == manifest.verdictDigest +replay_passed = same_inputs && same_verdict +``` + +--- + +## Risks & Mitigations + +| Risk | Impact | Mitigation | +|------|--------|------------| +| Bundle size too large | Storage/transfer issues | Support streaming, external references | +| Feed snapshot incomplete | Replay fails | Validate feed coverage before export | +| Clock drift affects time-based rules | Inconsistent replay | Use bundle timestamp as evaluation time | + +--- + +## Documentation Updates + +- [ ] Add `docs/operations/audit-replay-guide.md` +- [ ] Update CLI reference with `audit` commands +- [ ] Add air-gap operation runbook diff --git a/docs/implplan/SPRINT_4300_0002_0001_evidence_privacy_controls.md b/docs/implplan/SPRINT_4300_0002_0001_evidence_privacy_controls.md new file mode 100644 index 000000000..4d77889aa --- /dev/null +++ b/docs/implplan/SPRINT_4300_0002_0001_evidence_privacy_controls.md @@ -0,0 +1,376 @@ +# Sprint 4300.0002.0001 - Evidence Privacy Controls + +## Topic & Scope + +- Add `EvidenceRedactionService` for privacy-aware proof views +- Store file hashes, symbol names, line ranges (no 
raw source by default) +- Gate raw source access behind elevated permissions (Authority scope check) +- Default to redacted proofs + +**Working directory:** `src/Scanner/__Libraries/StellaOps.Scanner.Evidence/` + +## Dependencies & Concurrency + +- **Upstream (DONE):** + - Evidence Bundle models + - Authority scope system +- **Downstream:** Evidence API, UI evidence drawer +- **Safe to parallelize with:** SPRINT_4300_0001_*, SPRINT_4300_0002_0002 + +## Documentation Prerequisites + +- `docs/modules/scanner/architecture.md` +- `docs/modules/authority/architecture.md` + +--- + +## Tasks + +### T1: Define Redaction Levels + +**Assignee**: Scanner Team +**Story Points**: 1 +**Status**: TODO +**Dependencies**: — + +**Description**: +Define the redaction levels for evidence. + +**Implementation Path**: `Privacy/EvidenceRedactionLevel.cs` (new) + +**Implementation**: +```csharp +namespace StellaOps.Scanner.Evidence.Privacy; + +/// +/// Redaction levels for evidence data. +/// +public enum EvidenceRedactionLevel +{ + /// + /// Full evidence including raw source code. + /// Requires elevated permissions. + /// + Full = 0, + + /// + /// Standard redaction: file hashes, symbol names, line ranges. + /// No raw source code. + /// + Standard = 1, + + /// + /// Minimal: only digests and counts. + /// For external sharing. + /// + Minimal = 2 +} + +/// +/// Fields that can be redacted. 
+/// +[Flags] +public enum RedactableFields +{ + None = 0, + SourceCode = 1 << 0, + FilePaths = 1 << 1, + LineNumbers = 1 << 2, + SymbolNames = 1 << 3, + CallArguments = 1 << 4, + EnvironmentVars = 1 << 5, + InternalUrls = 1 << 6, + All = SourceCode | FilePaths | LineNumbers | SymbolNames | CallArguments | EnvironmentVars | InternalUrls +} +``` + +**Acceptance Criteria**: +- [ ] Three redaction levels defined +- [ ] RedactableFields flags enum +- [ ] Documentation on each level + +--- + +### T2: Implement EvidenceRedactionService + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Service to apply redaction rules to evidence. + +**Implementation Path**: `Privacy/EvidenceRedactionService.cs` (new) + +**Implementation**: +```csharp +namespace StellaOps.Scanner.Evidence.Privacy; + +public interface IEvidenceRedactionService +{ + /// + /// Redacts evidence based on the specified level. + /// + EvidenceBundle Redact(EvidenceBundle bundle, EvidenceRedactionLevel level); + + /// + /// Redacts specific fields from evidence. + /// + EvidenceBundle RedactFields(EvidenceBundle bundle, RedactableFields fields); + + /// + /// Determines the appropriate redaction level for a user. + /// + EvidenceRedactionLevel DetermineLevel(ClaimsPrincipal user); +} + +public sealed class EvidenceRedactionService : IEvidenceRedactionService +{ + private readonly ILogger _logger; + + public EvidenceBundle Redact(EvidenceBundle bundle, EvidenceRedactionLevel level) + { + return level switch + { + EvidenceRedactionLevel.Full => bundle, + EvidenceRedactionLevel.Standard => RedactStandard(bundle), + EvidenceRedactionLevel.Minimal => RedactMinimal(bundle), + _ => RedactStandard(bundle) + }; + } + + private EvidenceBundle RedactStandard(EvidenceBundle bundle) + { + return bundle with + { + Reachability = bundle.Reachability is not null + ? RedactReachability(bundle.Reachability) + : null, + CallStack = bundle.CallStack is not null + ? 
RedactCallStack(bundle.CallStack) + : null, + Provenance = bundle.Provenance // Keep as-is (already redacted) + }; + } + + private ReachabilityEvidence RedactReachability(ReachabilityEvidence evidence) + { + return evidence with + { + Paths = evidence.Paths.Select(p => new ReachabilityPath + { + PathId = p.PathId, + Steps = p.Steps.Select(s => new ReachabilityStep + { + Node = RedactSymbol(s.Node), + FileHash = s.FileHash, // Keep hash + Lines = s.Lines, // Keep line range + SourceCode = null // Redact source + }).ToList() + }).ToList(), + GraphDigest = evidence.GraphDigest + }; + } + + private CallStackEvidence RedactCallStack(CallStackEvidence evidence) + { + return evidence with + { + Frames = evidence.Frames.Select(f => new CallFrame + { + Function = RedactSymbol(f.Function), + FileHash = f.FileHash, + Line = f.Line, + Arguments = null, // Redact arguments + Locals = null // Redact locals + }).ToList() + }; + } + + private string RedactSymbol(string symbol) + { + // Keep class and method names, redact arguments + // "MyClass.MyMethod(string arg1, int arg2)" -> "MyClass.MyMethod(...)" + var parenIndex = symbol.IndexOf('('); + if (parenIndex > 0) + { + return symbol[..parenIndex] + "(...)"; + } + return symbol; + } + + private EvidenceBundle RedactMinimal(EvidenceBundle bundle) + { + return bundle with + { + Reachability = bundle.Reachability is not null + ? new ReachabilityEvidence + { + Result = bundle.Reachability.Result, + Confidence = bundle.Reachability.Confidence, + PathCount = bundle.Reachability.Paths.Count, + Paths = [], // No paths + GraphDigest = bundle.Reachability.GraphDigest + } + : null, + CallStack = null, + Provenance = bundle.Provenance is not null + ? 
new ProvenanceEvidence + { + BuildId = bundle.Provenance.BuildId, + BuildDigest = bundle.Provenance.BuildDigest, + Verified = bundle.Provenance.Verified + } + : null + }; + } + + public EvidenceRedactionLevel DetermineLevel(ClaimsPrincipal user) + { + if (user.HasClaim("scope", "evidence:full") || + user.HasClaim("role", "security_admin")) + { + return EvidenceRedactionLevel.Full; + } + + if (user.HasClaim("scope", "evidence:standard") || + user.HasClaim("role", "security_analyst")) + { + return EvidenceRedactionLevel.Standard; + } + + return EvidenceRedactionLevel.Minimal; + } +} +``` + +**Acceptance Criteria**: +- [ ] `EvidenceRedactionService.cs` created +- [ ] Standard redaction removes source code +- [ ] Minimal redaction removes paths and details +- [ ] User-based level determination +- [ ] Symbol redaction preserves method names + +--- + +### T3: Integrate with Evidence Composition + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T2 + +**Description**: +Integrate redaction into evidence composition flow. + +**Implementation Path**: Modify `EvidenceCompositionService.cs` + +**Acceptance Criteria**: +- [ ] Redaction applied before response +- [ ] User context passed through +- [ ] Logging for access levels + +--- + +### T4: Add Unit Tests + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T2 + +**Description**: +Tests for redaction logic. 
+ +**Test Cases**: +```csharp +public class EvidenceRedactionServiceTests +{ + [Fact] + public void Redact_Standard_RemovesSourceCode() + { + var bundle = CreateBundleWithSource(); + var result = _service.Redact(bundle, EvidenceRedactionLevel.Standard); + + result.Reachability!.Paths + .SelectMany(p => p.Steps) + .Should().OnlyContain(s => s.SourceCode is null); + } + + [Fact] + public void Redact_Standard_KeepsFileHashes() + { + var bundle = CreateBundleWithSource(); + var result = _service.Redact(bundle, EvidenceRedactionLevel.Standard); + + result.Reachability!.Paths + .SelectMany(p => p.Steps) + .Should().OnlyContain(s => s.FileHash is not null); + } + + [Fact] + public void Redact_Minimal_RemovesPaths() + { + var bundle = CreateBundleWithPaths(5); + var result = _service.Redact(bundle, EvidenceRedactionLevel.Minimal); + + result.Reachability!.Paths.Should().BeEmpty(); + result.Reachability.PathCount.Should().Be(5); + } + + [Fact] + public void DetermineLevel_SecurityAdmin_ReturnsFull() + { + var user = CreateUserWithRole("security_admin"); + var level = _service.DetermineLevel(user); + + level.Should().Be(EvidenceRedactionLevel.Full); + } + + [Fact] + public void DetermineLevel_NoScopes_ReturnsMinimal() + { + var user = CreateUserWithNoScopes(); + var level = _service.DetermineLevel(user); + + level.Should().Be(EvidenceRedactionLevel.Minimal); + } +} +``` + +**Acceptance Criteria**: +- [ ] Source code removal tested +- [ ] File hash preservation tested +- [ ] Minimal redaction tested +- [ ] User level determination tested +- [ ] 5+ tests passing + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Scanner Team | Define redaction levels | +| 2 | T2 | TODO | T1 | Scanner Team | Implement redaction service | +| 3 | T3 | TODO | T2 | Scanner Team | Integrate with composition | +| 4 | T4 | TODO | T2 | Scanner Team | Add unit tests | + 
+--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from Explainable Triage advisory gap analysis (G2). | Agent | + +--- + +## Success Criteria + +- [ ] All 4 tasks marked DONE +- [ ] Source code never exposed without permission +- [ ] File hashes and line ranges preserved +- [ ] 5+ tests passing +- [ ] `dotnet build` succeeds diff --git a/docs/implplan/SPRINT_4300_0002_0001_unknowns_budget_policy.md b/docs/implplan/SPRINT_4300_0002_0001_unknowns_budget_policy.md new file mode 100644 index 000000000..93bc11d7e --- /dev/null +++ b/docs/implplan/SPRINT_4300_0002_0001_unknowns_budget_policy.md @@ -0,0 +1,167 @@ +# SPRINT_4300_0002_0001: Unknowns Budget Policy Integration + +## Sprint Metadata + +| Field | Value | +|-------|-------| +| **Sprint ID** | 4300_0002_0001 | +| **Title** | Unknowns Budget Policy Integration | +| **Priority** | P1 (High) | +| **Moat Strength** | 4 (Strong moat) | +| **Working Directory** | `src/Policy/`, `src/Signals/`, `src/Scanner/` | +| **Estimated Effort** | 2 weeks | +| **Dependencies** | UncertaintyTier (exists), UnknownStateLedger (exists) | + +--- + +## Objective + +Implement policy-level enforcement of unknown budgets, enabling rules like "fail if unknowns > N in production" or "warn if uncertainty tier is T1 for critical components." + +**Moat thesis**: "We quantify uncertainty and gate on it." + +--- + +## Background + +The advisory identifies "Unknowns as first-class state" as a **Moat 4** feature. Current implementation has: +- `UncertaintyTier` (T1-T4) with entropy classification +- `UnknownStateLedger` tracking marker kinds +- Risk modifiers from uncertainty + +**Gap**: No policy integration to enforce unknown budgets. 
+ +--- + +## Deliverables + +### D1: Unknown Budget Rule DSL +- Define policy rules for unknown thresholds +- Support tier-based, count-based, and entropy-based rules +- Environment scoping (dev/staging/prod) + +### D2: Policy Engine Integration +- Extend `PolicyGateEvaluator` with unknown budget gates +- Add unknown state to evaluation context +- Emit violation on budget exceeded + +### D3: Unknown Budget Configuration +- Admin UI for setting budgets per environment +- API endpoints for budget CRUD +- Default budgets per tier + +### D4: Reporting & Alerts +- Include unknown budget status in scan reports +- Notify on budget threshold crossings +- Dashboard widget for unknown trends + +--- + +## Tasks + +### Phase 1: Policy Rule DSL + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| BUDGET-001 | Define `UnknownBudgetRule` schema | TODO | | +| BUDGET-002 | Add budget rules to policy bundle format | TODO | | +| BUDGET-003 | Create `UnknownBudgetRuleParser` | TODO | | +| BUDGET-004 | Support expressions: `unknowns.count > 10`, `unknowns.tier == T1` | TODO | | +| BUDGET-005 | Add environment scope filter | TODO | | + +### Phase 2: Policy Engine Integration + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| BUDGET-006 | Extend `PolicyEvaluationContext` with unknown state | TODO | | +| BUDGET-007 | Add `UnknownBudgetGate` to `PolicyGateEvaluator` | TODO | | +| BUDGET-008 | Implement tier-based gate: block on T1, warn on T2 | TODO | | +| BUDGET-009 | Implement count-based gate: fail if count > threshold | TODO | | +| BUDGET-010 | Implement entropy-based gate: fail if mean entropy > threshold | TODO | | +| BUDGET-011 | Emit `BudgetExceededViolation` with details | TODO | | +| BUDGET-012 | Unit tests for all gate types | TODO | | + +### Phase 3: Configuration + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| BUDGET-013 | Add `UnknownBudgetOptions` configuration | TODO | | +| BUDGET-014 | Create budget 
management API endpoints | TODO | | +| BUDGET-015 | Implement default budgets (prod: T2 max, staging: T1 warn) | TODO | | +| BUDGET-016 | Add budget configuration to policy YAML | TODO | | + +### Phase 4: Reporting + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| BUDGET-017 | Add unknown budget section to scan report | TODO | | +| BUDGET-018 | Create `UnknownBudgetExceeded` notification event | TODO | | +| BUDGET-019 | Integrate with Notify module for alerts | TODO | | +| BUDGET-020 | Add budget status to policy evaluation response | TODO | | + +--- + +## Acceptance Criteria + +1. **AC1**: Policy can define `unknowns.count <= 5` threshold +2. **AC2**: Policy can define `unknowns.tier != T1` requirement +3. **AC3**: Budget violations appear in scan results +4. **AC4**: Notifications fire on budget exceeded +5. **AC5**: Environment-specific budgets work correctly + +--- + +## Technical Notes + +### Policy Rule Examples +```yaml +unknown_budgets: + - name: "production-strict" + environment: "production" + rules: + - tier_max: T2 # Block if any T1 unknowns + - count_max: 5 # Block if > 5 unknowns total + - entropy_max: 0.4 # Block if mean entropy > 0.4 + action: block + + - name: "staging-warn" + environment: "staging" + rules: + - tier_max: T1 # Warn on T1, allow T2-T4 + - count_max: 20 + action: warn +``` + +### Gate Evaluation +```csharp +public sealed class UnknownBudgetGate : IPolicyGate +{ + public GateResult Evaluate(UnknownBudgetRule rule, UnknownState state) + { + if (rule.TierMax.HasValue && state.MaxTier < rule.TierMax.Value) + return GateResult.Fail($"Tier {state.MaxTier} exceeds budget {rule.TierMax}"); + + if (rule.CountMax.HasValue && state.Count > rule.CountMax.Value) + return GateResult.Fail($"Count {state.Count} exceeds budget {rule.CountMax}"); + + return GateResult.Pass(); + } +} +``` + +--- + +## Risks & Mitigations + +| Risk | Impact | Mitigation | +|------|--------|------------| +| Too strict budgets block all 
deployments | Adoption friction | Provide sensible defaults, gradual rollout | +| Unknown counting varies by scan | Inconsistent gates | Normalize counting methodology | + +--- + +## Documentation Updates + +- [ ] Update `docs/modules/policy/architecture.md` +- [ ] Add `docs/operations/unknown-budgets-guide.md` +- [ ] Update policy DSL reference diff --git a/docs/implplan/SPRINT_4300_0002_0002_evidence_ttl_enforcement.md b/docs/implplan/SPRINT_4300_0002_0002_evidence_ttl_enforcement.md new file mode 100644 index 000000000..846c566f8 --- /dev/null +++ b/docs/implplan/SPRINT_4300_0002_0002_evidence_ttl_enforcement.md @@ -0,0 +1,477 @@ +# Sprint 4300.0002.0002 - Evidence TTL Strategy Enforcement + +## Topic & Scope + +- Implement `EvidenceTtlEnforcer` service +- Define TTL policy per evidence type +- Add staleness checking to policy gate evaluation +- Emit `stale_evidence` warning/block based on configuration + +**Working directory:** `src/Policy/__Libraries/StellaOps.Policy/` + +## Dependencies & Concurrency + +- **Upstream (DONE):** + - Evidence Bundle models + - Policy Engine gates +- **Downstream:** Policy decisions, UI staleness warnings +- **Safe to parallelize with:** SPRINT_4300_0001_*, SPRINT_4300_0002_0001 + +## Documentation Prerequisites + +- `docs/modules/policy/architecture.md` +- Advisory staleness invariant specification + +--- + +## Tasks + +### T1: Define TTL Configuration + +**Assignee**: Policy Team +**Story Points**: 1 +**Status**: TODO +**Dependencies**: — + +**Description**: +Define configurable TTL per evidence type. + +**Implementation Path**: `Freshness/EvidenceTtlOptions.cs` (new) + +**Implementation**: +```csharp +namespace StellaOps.Policy.Freshness; + +/// +/// TTL configuration per evidence type. +/// +public sealed class EvidenceTtlOptions +{ + /// + /// SBOM evidence TTL. Long because digest is immutable. + /// Default: 30 days. + /// + public TimeSpan SbomTtl { get; set; } = TimeSpan.FromDays(30); + + /// + /// Boundary evidence TTL. 
Short because environment changes. + /// Default: 72 hours. + /// + public TimeSpan BoundaryTtl { get; set; } = TimeSpan.FromHours(72); + + /// + /// Reachability evidence TTL. Medium based on code churn. + /// Default: 7 days. + /// + public TimeSpan ReachabilityTtl { get; set; } = TimeSpan.FromDays(7); + + /// + /// VEX evidence TTL. Renew on boundary/reachability change. + /// Default: 14 days. + /// + public TimeSpan VexTtl { get; set; } = TimeSpan.FromDays(14); + + /// + /// Policy decision TTL. + /// Default: 24 hours. + /// + public TimeSpan PolicyDecisionTtl { get; set; } = TimeSpan.FromHours(24); + + /// + /// Human approval TTL. + /// Default: 30 days. + /// + public TimeSpan HumanApprovalTtl { get; set; } = TimeSpan.FromDays(30); + + /// + /// Warning threshold as percentage of TTL remaining. + /// Default: 20% (warn when 80% of TTL elapsed). + /// + public double WarningThresholdPercent { get; set; } = 0.20; + + /// + /// Action when evidence is stale. + /// + public StaleEvidenceAction StaleAction { get; set; } = StaleEvidenceAction.Warn; +} + +/// +/// Action to take when evidence is stale. +/// +public enum StaleEvidenceAction +{ + /// + /// Allow but log warning. + /// + Warn, + + /// + /// Block the decision. + /// + Block, + + /// + /// Degrade confidence score. + /// + DegradeConfidence +} +``` + +**Acceptance Criteria**: +- [ ] TTL options for each evidence type +- [ ] Warning threshold configurable +- [ ] Stale action configurable +- [ ] Sensible defaults + +--- + +### T2: Implement EvidenceTtlEnforcer + +**Assignee**: Policy Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Service to check and enforce TTL policies. + +**Implementation Path**: `Freshness/EvidenceTtlEnforcer.cs` (new) + +**Implementation**: +```csharp +namespace StellaOps.Policy.Freshness; + +public interface IEvidenceTtlEnforcer +{ + /// + /// Checks freshness of all evidence in a bundle. 
+ /// + EvidenceFreshnessResult CheckFreshness(EvidenceBundle bundle, DateTimeOffset asOf); + + /// + /// Gets TTL for a specific evidence type. + /// + TimeSpan GetTtl(EvidenceType type); + + /// + /// Computes expiration time for evidence. + /// + DateTimeOffset ComputeExpiration(EvidenceType type, DateTimeOffset createdAt); +} + +public sealed class EvidenceTtlEnforcer : IEvidenceTtlEnforcer +{ + private readonly EvidenceTtlOptions _options; + private readonly ILogger _logger; + + public EvidenceTtlEnforcer( + IOptions options, + ILogger logger) + { + _options = options.Value; + _logger = logger; + } + + public EvidenceFreshnessResult CheckFreshness(EvidenceBundle bundle, DateTimeOffset asOf) + { + var checks = new List(); + + // Check each evidence type + if (bundle.Reachability is not null) + { + checks.Add(CheckType(EvidenceType.Reachability, bundle.Reachability.ComputedAt, asOf)); + } + + if (bundle.CallStack is not null) + { + checks.Add(CheckType(EvidenceType.CallStack, bundle.CallStack.CapturedAt, asOf)); + } + + if (bundle.VexStatus is not null) + { + checks.Add(CheckType(EvidenceType.Vex, bundle.VexStatus.Timestamp, asOf)); + } + + if (bundle.Provenance is not null) + { + checks.Add(CheckType(EvidenceType.Sbom, bundle.Provenance.BuildTime, asOf)); + } + + // Determine overall status + var anyStale = checks.Any(c => c.Status == FreshnessStatus.Stale); + var anyWarning = checks.Any(c => c.Status == FreshnessStatus.Warning); + + return new EvidenceFreshnessResult + { + OverallStatus = anyStale ? FreshnessStatus.Stale + : anyWarning ? FreshnessStatus.Warning + : FreshnessStatus.Fresh, + Checks = checks, + RecommendedAction = anyStale ? 
_options.StaleAction : StaleEvidenceAction.Warn, + CheckedAt = asOf + }; + } + + private EvidenceFreshnessCheck CheckType( + EvidenceType type, + DateTimeOffset createdAt, + DateTimeOffset asOf) + { + var ttl = GetTtl(type); + var expiresAt = createdAt + ttl; + var remaining = expiresAt - asOf; + var warningThreshold = ttl * _options.WarningThresholdPercent; + + FreshnessStatus status; + if (remaining <= TimeSpan.Zero) + { + status = FreshnessStatus.Stale; + } + else if (remaining <= warningThreshold) + { + status = FreshnessStatus.Warning; + } + else + { + status = FreshnessStatus.Fresh; + } + + return new EvidenceFreshnessCheck + { + Type = type, + CreatedAt = createdAt, + ExpiresAt = expiresAt, + Ttl = ttl, + Remaining = remaining > TimeSpan.Zero ? remaining : TimeSpan.Zero, + Status = status, + Message = status switch + { + FreshnessStatus.Stale => $"{type} evidence expired {-remaining.TotalHours:F0}h ago", + FreshnessStatus.Warning => $"{type} evidence expires in {remaining.TotalHours:F0}h", + _ => $"{type} evidence fresh ({remaining.TotalDays:F0}d remaining)" + } + }; + } + + public TimeSpan GetTtl(EvidenceType type) + { + return type switch + { + EvidenceType.Sbom => _options.SbomTtl, + EvidenceType.Reachability => _options.ReachabilityTtl, + EvidenceType.Boundary => _options.BoundaryTtl, + EvidenceType.Vex => _options.VexTtl, + EvidenceType.PolicyDecision => _options.PolicyDecisionTtl, + EvidenceType.HumanApproval => _options.HumanApprovalTtl, + EvidenceType.CallStack => _options.ReachabilityTtl, + _ => TimeSpan.FromDays(7) + }; + } + + public DateTimeOffset ComputeExpiration(EvidenceType type, DateTimeOffset createdAt) + { + return createdAt + GetTtl(type); + } +} + +public sealed record EvidenceFreshnessResult +{ + public required FreshnessStatus OverallStatus { get; init; } + public required IReadOnlyList Checks { get; init; } + public required StaleEvidenceAction RecommendedAction { get; init; } + public required DateTimeOffset CheckedAt { get; init; } 
+ + public bool IsAcceptable => OverallStatus != FreshnessStatus.Stale; + public bool HasWarnings => OverallStatus == FreshnessStatus.Warning; +} + +public sealed record EvidenceFreshnessCheck +{ + public required EvidenceType Type { get; init; } + public required DateTimeOffset CreatedAt { get; init; } + public required DateTimeOffset ExpiresAt { get; init; } + public required TimeSpan Ttl { get; init; } + public required TimeSpan Remaining { get; init; } + public required FreshnessStatus Status { get; init; } + public required string Message { get; init; } +} + +public enum FreshnessStatus +{ + Fresh, + Warning, + Stale +} + +public enum EvidenceType +{ + Sbom, + Reachability, + Boundary, + Vex, + PolicyDecision, + HumanApproval, + CallStack +} +``` + +**Acceptance Criteria**: +- [ ] `EvidenceTtlEnforcer.cs` created +- [ ] Checks all evidence types +- [ ] Warning when approaching expiration +- [ ] Stale detection when expired +- [ ] Configurable via options + +--- + +### T3: Integrate with Policy Gate + +**Assignee**: Policy Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T2 + +**Description**: +Add freshness check to policy gate evaluation. 
+ +**Implementation Path**: Modify `PolicyGateEvaluator.cs` + +**Integration**: +```csharp +// In PolicyGateEvaluator.EvaluateAsync() +var freshnessResult = _ttlEnforcer.CheckFreshness(evidenceBundle, DateTimeOffset.UtcNow); + +if (freshnessResult.OverallStatus == FreshnessStatus.Stale) +{ + switch (freshnessResult.RecommendedAction) + { + case StaleEvidenceAction.Block: + return PolicyGateDecision.Blocked("Evidence is stale", freshnessResult.Checks); + + case StaleEvidenceAction.DegradeConfidence: + confidence *= 0.5; // Halve confidence for stale evidence + break; + + case StaleEvidenceAction.Warn: + default: + warnings.Add("Evidence is stale - consider refreshing"); + break; + } +} +``` + +**Acceptance Criteria**: +- [ ] Freshness checked during gate evaluation +- [ ] Block action prevents approval +- [ ] Degrade action reduces confidence +- [ ] Warn action adds warning message + +--- + +### T4: Add Unit Tests + +**Assignee**: Policy Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T2 + +**Description**: +Tests for TTL enforcement. 
+ +**Test Cases**: +```csharp +public class EvidenceTtlEnforcerTests +{ + [Fact] + public void CheckFreshness_AllFresh_ReturnsFresh() + { + var bundle = CreateBundle(createdAt: DateTimeOffset.UtcNow.AddHours(-1)); + var result = _enforcer.CheckFreshness(bundle, DateTimeOffset.UtcNow); + + result.OverallStatus.Should().Be(FreshnessStatus.Fresh); + result.IsAcceptable.Should().BeTrue(); + } + + [Fact] + public void CheckFreshness_ReachabilityNearExpiry_ReturnsWarning() + { + var bundle = CreateBundle( + reachabilityCreatedAt: DateTimeOffset.UtcNow.AddDays(-6)); // 7 day TTL + + var result = _enforcer.CheckFreshness(bundle, DateTimeOffset.UtcNow); + + result.OverallStatus.Should().Be(FreshnessStatus.Warning); + result.Checks.First(c => c.Type == EvidenceType.Reachability) + .Status.Should().Be(FreshnessStatus.Warning); + } + + [Fact] + public void CheckFreshness_BoundaryExpired_ReturnsStale() + { + var bundle = CreateBundle( + boundaryCreatedAt: DateTimeOffset.UtcNow.AddDays(-5)); // 72h TTL + + var result = _enforcer.CheckFreshness(bundle, DateTimeOffset.UtcNow); + + result.OverallStatus.Should().Be(FreshnessStatus.Stale); + result.IsAcceptable.Should().BeFalse(); + } + + [Theory] + [InlineData(EvidenceType.Sbom, 30)] + [InlineData(EvidenceType.Boundary, 3)] + [InlineData(EvidenceType.Reachability, 7)] + [InlineData(EvidenceType.Vex, 14)] + public void GetTtl_ReturnsConfiguredValue(EvidenceType type, int expectedDays) + { + var ttl = _enforcer.GetTtl(type); + ttl.TotalDays.Should().BeApproximately(expectedDays, 0.1); + } +} +``` + +**Acceptance Criteria**: +- [ ] Fresh evidence test +- [ ] Warning threshold test +- [ ] Stale evidence test +- [ ] TTL values test +- [ ] 5+ tests passing + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Policy Team | Define TTL configuration | +| 2 | T2 | TODO | T1 | Policy Team | Implement enforcer 
service | +| 3 | T3 | TODO | T2 | Policy Team | Integrate with policy gate | +| 4 | T4 | TODO | T2 | Policy Team | Add unit tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from Explainable Triage advisory gap analysis (G3). | Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Default TTLs | Decision | Policy Team | Based on advisory recommendations | +| Warning at 20% | Decision | Policy Team | Gives ~1 day warning for boundary | +| Default action Warn | Decision | Policy Team | Non-breaking, can escalate to Block | + +--- + +## Success Criteria + +- [ ] All 4 tasks marked DONE +- [ ] Stale evidence detected correctly +- [ ] Policy gate honors TTL settings +- [ ] 5+ tests passing +- [ ] `dotnet build` succeeds diff --git a/docs/implplan/SPRINT_4300_0002_0002_unknowns_attestation_predicates.md b/docs/implplan/SPRINT_4300_0002_0002_unknowns_attestation_predicates.md new file mode 100644 index 000000000..c3b54158e --- /dev/null +++ b/docs/implplan/SPRINT_4300_0002_0002_unknowns_attestation_predicates.md @@ -0,0 +1,104 @@ +# SPRINT_4300_0002_0002: Unknowns Attestation Predicates + +## Sprint Metadata + +| Field | Value | +|-------|-------| +| **Sprint ID** | 4300_0002_0002 | +| **Title** | Unknowns Attestation Predicates | +| **Priority** | P1 (High) | +| **Moat Strength** | 4 (Strong moat) | +| **Working Directory** | `src/Attestor/`, `src/Signals/`, `src/Unknowns/` | +| **Estimated Effort** | 1 week | +| **Dependencies** | SPRINT_4300_0002_0001, UncertaintyTier (exists) | + +--- + +## Objective + +Create in-toto attestation predicates for unknown states, making uncertainty auditable, portable, and verifiable as part of the proof chain. + +**Moat thesis**: "We quantify uncertainty and gate on it." — Extended to: uncertainty is attestable. + +--- + +## Background + +Unknowns need to be: +1. 
Recorded in attestations for audit trails +2. Portable with verdicts for external verification +3. Queryable by admission controllers + +--- + +## Deliverables + +### D1: Unknown State Attestation Predicate +- Define `uncertainty.stella/v1` predicate type +- Include: tier, entropy, marker kinds, evidence + +### D2: Unknown Budget Attestation Predicate +- Define `uncertainty-budget.stella/v1` predicate type +- Include: budget definition, evaluation result, violations + +### D3: Integration with Proof Chain +- Emit unknown attestations as part of `ProofSpineAssembler` +- Link to verdict attestation + +### D4: Verification Support +- Extend `stella verdict verify` to check unknown attestations + +--- + +## Tasks + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| UATT-001 | Define `UncertaintyStatement` in-toto predicate | TODO | | +| UATT-002 | Define `UncertaintyBudgetStatement` predicate | TODO | | +| UATT-003 | Create statement builders in `StellaOps.Attestor.ProofChain` | TODO | | +| UATT-004 | Integrate into `ProofSpineAssembler` | TODO | | +| UATT-005 | Add unknown attestation to verdict bundle | TODO | | +| UATT-006 | Extend verification CLI for unknown predicates | TODO | | +| UATT-007 | Add JSON schema for predicates | TODO | | +| UATT-008 | Write attestation round-trip tests | TODO | | + +--- + +## Acceptance Criteria + +1. **AC1**: Unknown state is captured in attestation +2. **AC2**: Budget evaluation result is attestable +3. **AC3**: Attestations are signed and verifiable +4. 
**AC4**: Proof chain links unknown to verdict + +--- + +## Technical Notes + +### Uncertainty Statement +```json +{ + "_type": "https://in-toto.io/Statement/v1", + "subject": [{"digest": {"sha256": ""}}], + "predicateType": "uncertainty.stella/v1", + "predicate": { + "graphRevisionId": "...", + "aggregateTier": "T2", + "meanEntropy": 0.35, + "unknownCount": 7, + "markers": [ + {"kind": "U1", "count": 3, "entropy": 0.45}, + {"kind": "U2", "count": 4, "entropy": 0.28} + ], + "evaluatedAt": "2025-12-22T00:00:00Z" + } +} +``` + +--- + +## Documentation Updates + +- [ ] Update attestation type catalog +- [ ] Add uncertainty predicate specification diff --git a/docs/implplan/SPRINT_4300_0003_0001_predicate_schemas.md b/docs/implplan/SPRINT_4300_0003_0001_predicate_schemas.md new file mode 100644 index 000000000..f2b461cb8 --- /dev/null +++ b/docs/implplan/SPRINT_4300_0003_0001_predicate_schemas.md @@ -0,0 +1,388 @@ +# Sprint 4300.0003.0001 - Predicate Type JSON Schemas + +## Topic & Scope + +- Create JSON Schema definitions for all stella.ops predicate types +- Add schema validation to attestation creation +- Publish schemas to `docs/schemas/predicates/` + +**Working directory:** `docs/schemas/predicates/`, `src/Attestor/` + +## Dependencies & Concurrency + +- **Upstream (DONE):** + - Existing predicate models in code +- **Downstream:** Schema validation, external tooling +- **Safe to parallelize with:** All SPRINT_4300_* + +## Documentation Prerequisites + +- Existing predicate implementations +- in-toto specification + +--- + +## Tasks + +### T1: Create stella.ops/sbom@v1 Schema + +**Assignee**: Attestor Team +**Story Points**: 1 +**Status**: TODO +**Dependencies**: — + +**Implementation Path**: `docs/schemas/predicates/sbom.v1.schema.json` + +**Schema**: +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://stella.ops/predicates/sbom@v1", + "title": "StellaOps SBOM Attestation Predicate", + "description": "Predicate for SBOM 
attestations linking software bill of materials to artifacts.", + "type": "object", + "required": ["format", "digest", "componentCount"], + "properties": { + "format": { + "type": "string", + "enum": ["cyclonedx-1.6", "spdx-3.0.1", "spdx-2.3"], + "description": "SBOM format specification." + }, + "digest": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$", + "description": "Content-addressed digest of the SBOM document." + }, + "componentCount": { + "type": "integer", + "minimum": 0, + "description": "Number of components in the SBOM." + }, + "uri": { + "type": "string", + "format": "uri", + "description": "URI where the full SBOM can be retrieved." + }, + "tooling": { + "type": "string", + "description": "Tool used to generate the SBOM." + }, + "createdAt": { + "type": "string", + "format": "date-time", + "description": "When the SBOM was generated." + } + }, + "additionalProperties": false +} +``` + +**Acceptance Criteria**: +- [ ] Schema file created +- [ ] Validates against sample data +- [ ] Documents all fields + +--- + +### T2: Create stella.ops/vex@v1 Schema + +**Assignee**: Attestor Team +**Story Points**: 1 +**Status**: TODO +**Dependencies**: — + +**Implementation Path**: `docs/schemas/predicates/vex.v1.schema.json` + +**Schema**: +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://stella.ops/predicates/vex@v1", + "title": "StellaOps VEX Attestation Predicate", + "description": "Predicate for VEX statements embedded in attestations.", + "type": "object", + "required": ["format", "statements"], + "properties": { + "format": { + "type": "string", + "enum": ["openvex", "csaf-vex", "cyclonedx-vex"], + "description": "VEX format specification." + }, + "statements": { + "type": "array", + "items": { + "$ref": "#/$defs/vexStatement" + }, + "minItems": 1, + "description": "VEX statements in this attestation." 
+ }, + "digest": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$", + "description": "Content-addressed digest of the VEX document." + }, + "author": { + "type": "string", + "description": "Author of the VEX statements." + }, + "timestamp": { + "type": "string", + "format": "date-time", + "description": "When the VEX was issued." + } + }, + "$defs": { + "vexStatement": { + "type": "object", + "required": ["vulnerability", "status"], + "properties": { + "vulnerability": { + "type": "string", + "description": "CVE or vulnerability identifier." + }, + "status": { + "type": "string", + "enum": ["affected", "not_affected", "under_investigation", "fixed"], + "description": "VEX status." + }, + "justification": { + "type": "string", + "description": "Justification for not_affected status." + }, + "products": { + "type": "array", + "items": { "type": "string" }, + "description": "Affected products (PURLs)." + } + } + } + }, + "additionalProperties": false +} +``` + +**Acceptance Criteria**: +- [ ] Schema file created +- [ ] VEX statement definition included +- [ ] Validates against sample data + +--- + +### T3: Create stella.ops/reachability@v1 Schema + +**Assignee**: Attestor Team +**Story Points**: 1 +**Status**: TODO +**Dependencies**: — + +**Implementation Path**: `docs/schemas/predicates/reachability.v1.schema.json` + +**Schema**: +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://stella.ops/predicates/reachability@v1", + "title": "StellaOps Reachability Attestation Predicate", + "description": "Predicate for reachability analysis results.", + "type": "object", + "required": ["result", "confidence", "graphDigest"], + "properties": { + "result": { + "type": "string", + "enum": ["reachable", "unreachable", "unknown"], + "description": "Reachability analysis result." + }, + "confidence": { + "type": "number", + "minimum": 0, + "maximum": 1, + "description": "Confidence score (0-1)." 
+ }, + "graphDigest": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$", + "description": "Digest of the call graph used." + }, + "paths": { + "type": "array", + "items": { + "$ref": "#/$defs/reachabilityPath" + }, + "description": "Paths from entrypoints to vulnerable code." + }, + "entrypoints": { + "type": "array", + "items": { "$ref": "#/$defs/entrypoint" }, + "description": "Entrypoints considered." + }, + "computedAt": { + "type": "string", + "format": "date-time" + }, + "expiresAt": { + "type": "string", + "format": "date-time" + } + }, + "$defs": { + "reachabilityPath": { + "type": "object", + "required": ["pathId", "steps"], + "properties": { + "pathId": { "type": "string" }, + "steps": { + "type": "array", + "items": { + "type": "object", + "properties": { + "node": { "type": "string" }, + "fileHash": { "type": "string" }, + "lines": { + "type": "array", + "items": { "type": "integer" }, + "minItems": 2, + "maxItems": 2 + } + } + } + } + } + }, + "entrypoint": { + "type": "object", + "required": ["type"], + "properties": { + "type": { "type": "string" }, + "route": { "type": "string" }, + "auth": { "type": "string" } + } + } + }, + "additionalProperties": false +} +``` + +**Acceptance Criteria**: +- [ ] Schema file created +- [ ] Path and entrypoint definitions +- [ ] Validates against sample data + +--- + +### T4: Create Remaining Predicate Schemas + +**Assignee**: Attestor Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: — + +**Files**: +- `docs/schemas/predicates/boundary.v1.schema.json` +- `docs/schemas/predicates/policy-decision.v1.schema.json` +- `docs/schemas/predicates/human-approval.v1.schema.json` + +**Acceptance Criteria**: +- [ ] All 3 schemas created +- [ ] Match existing model definitions +- [ ] Validate against samples + +--- + +### T5: Add Schema Validation to Attestation Service + +**Assignee**: Attestor Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1-T4 + +**Description**: +Add schema validation 
when creating attestations. + +**Implementation Path**: `src/Attestor/__Libraries/StellaOps.Attestor.Core/Validation/` + +**Implementation**: +```csharp +public interface IPredicateSchemaValidator +{ + ValidationResult Validate(string predicateType, JsonElement predicate); +} + +public sealed class PredicateSchemaValidator : IPredicateSchemaValidator +{ + private readonly IReadOnlyDictionary<string, JsonSchema> _schemas; + + public PredicateSchemaValidator() + { + _schemas = LoadSchemas(); + } + + public ValidationResult Validate(string predicateType, JsonElement predicate) + { + if (!_schemas.TryGetValue(predicateType, out var schema)) + { + return ValidationResult.Skip($"No schema for {predicateType}"); + } + + var results = schema.Validate(predicate); + return results.IsValid + ? ValidationResult.Valid() + : ValidationResult.Invalid(results.Errors); + } +} +``` + +**Acceptance Criteria**: +- [ ] Schema loader implemented +- [ ] Validation during attestation creation +- [ ] Graceful handling of unknown predicates +- [ ] Error messages include path + +--- + +### T6: Add Unit Tests + +**Assignee**: Attestor Team +**Story Points**: 1 +**Status**: TODO +**Dependencies**: T5 + +**Test Cases**: +- [ ] Valid SBOM predicate passes +- [ ] Invalid VEX status fails +- [ ] Missing required field fails +- [ ] Unknown predicate type skips + +**Acceptance Criteria**: +- [ ] 4+ tests passing +- [ ] Coverage for each schema + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Attestor Team | SBOM schema | +| 2 | T2 | TODO | — | Attestor Team | VEX schema | +| 3 | T3 | TODO | — | Attestor Team | Reachability schema | +| 4 | T4 | TODO | — | Attestor Team | Remaining schemas | +| 5 | T5 | TODO | T1-T4 | Attestor Team | Schema validation | +| 6 | T6 | TODO | T5 | Attestor Team | Unit tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | 
+|------------|--------|-------| +| 2025-12-22 | Sprint created from Explainable Triage advisory gap analysis (G4). | Agent | + +--- + +## Success Criteria + +- [ ] All 6 tasks marked DONE +- [ ] 6 predicate schemas created +- [ ] Validation integrated +- [ ] 4+ tests passing +- [ ] `dotnet build` succeeds diff --git a/docs/implplan/SPRINT_4300_0003_0001_sealed_knowledge_snapshot.md b/docs/implplan/SPRINT_4300_0003_0001_sealed_knowledge_snapshot.md new file mode 100644 index 000000000..ecce41d4f --- /dev/null +++ b/docs/implplan/SPRINT_4300_0003_0001_sealed_knowledge_snapshot.md @@ -0,0 +1,165 @@ +# SPRINT_4300_0003_0001: Sealed Knowledge Snapshot Export/Import + +## Sprint Metadata + +| Field | Value | +|-------|-------| +| **Sprint ID** | 4300_0003_0001 | +| **Title** | Sealed Knowledge Snapshot Export/Import | +| **Priority** | P1 (High) | +| **Moat Strength** | 4 (Strong moat) | +| **Working Directory** | `src/AirGap/`, `src/Concelier/`, `src/Excititor/`, `src/Cli/` | +| **Estimated Effort** | 2 weeks | +| **Dependencies** | AirGap.Importer (exists), ReplayManifest (exists) | + +--- + +## Objective + +Implement a "sealed knowledge snapshot" workflow for air-gapped environments, packaging all advisory feeds, VEX statements, and policies into a cryptographically verifiable bundle that can be transferred offline and validated on import. + +**Moat thesis**: Air-gapped "runtime" is common; air-gapped **reproducibility** is not. + +--- + +## Background + +The advisory identifies air-gapped epistemic mode as **Moat 4**. Current implementation has: +- `AirGap.Controller` with state management +- `ReplayVerifier` with depth levels +- `TrustStore` for offline validation + +**Gap**: No unified export/import workflow for knowledge snapshots. 
+ +--- + +## Deliverables + +### D1: Knowledge Snapshot Format +- Define snapshot bundle structure +- Include: advisories, VEX, policies, time anchor, trust roots +- Merkle tree for content integrity + +### D2: Snapshot Export CLI +- `stella airgap export --output=./knowledge-2025-12-22.tar.gz` +- Point-in-time feed extraction +- Sign snapshot with designated key + +### D3: Snapshot Import CLI +- `stella airgap import --bundle=./knowledge-2025-12-22.tar.gz` +- Verify signature and merkle root +- Validate time anchor freshness +- Apply to local database + +### D4: Snapshot Diff +- Compare two snapshots +- Report: new advisories, updated VEX, policy changes + +### D5: Staleness Policy +- Configurable max age for snapshots +- Warn/block on stale knowledge + +--- + +## Tasks + +### Phase 1: Snapshot Format + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| SEAL-001 | Define `KnowledgeSnapshotManifest` schema | TODO | | +| SEAL-002 | Implement merkle tree builder for bundle contents | TODO | | +| SEAL-003 | Create `SnapshotBundleWriter` | TODO | | +| SEAL-004 | Add DSSE signing for manifest | TODO | | + +### Phase 2: Export + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| SEAL-005 | Add `stella airgap export` command | TODO | | +| SEAL-006 | Implement advisory snapshot extractor | TODO | | +| SEAL-007 | Implement VEX snapshot extractor | TODO | | +| SEAL-008 | Implement policy bundle extractor | TODO | | +| SEAL-009 | Add time anchor token generation | TODO | | +| SEAL-010 | Package into signed bundle | TODO | | + +### Phase 3: Import + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| SEAL-011 | Add `stella airgap import` command | TODO | | +| SEAL-012 | Implement signature verification | TODO | | +| SEAL-013 | Implement merkle root validation | TODO | | +| SEAL-014 | Validate time anchor against staleness policy | TODO | | +| SEAL-015 | Apply advisories to Concelier database | TODO | | +| 
SEAL-016 | Apply VEX to Excititor database | TODO | | +| SEAL-017 | Apply policies to Policy registry | TODO | | + +### Phase 4: Diff & Staleness + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| SEAL-018 | Implement `stella airgap diff` command | TODO | | +| SEAL-019 | Add staleness policy configuration | TODO | | +| SEAL-020 | Emit warnings on stale imports | TODO | | + +--- + +## Acceptance Criteria + +1. **AC1**: Export produces self-contained knowledge bundle +2. **AC2**: Import validates signature and merkle root +3. **AC3**: Stale snapshots are rejected (configurable age) +4. **AC4**: Diff shows changes between snapshots +5. **AC5**: Imported knowledge enables offline scans + +--- + +## Technical Notes + +### Bundle Structure +``` +knowledge-2025-12-22.tar.gz +├── manifest.json # Snapshot metadata + merkle root +├── manifest.sig # DSSE signature +├── time-anchor.json # RFC 3161 or Roughtime token +├── advisories/ +│ ├── nvd/ # NVD advisories +│ ├── ghsa/ # GitHub advisories +│ └── ... # Other feeds +├── vex/ +│ ├── cisco/ +│ ├── redhat/ +│ └── ... 
+├── policies/ +│ └── policy-bundle.tar # OPA bundle +└── trust/ + └── trust-roots.pem # Signing key roots +``` + +### Staleness Budget +```yaml +airgap: + staleness: + max_age_hours: 168 # 7 days default + warn_age_hours: 72 # Warn after 3 days + require_time_anchor: true +``` + +--- + +## Risks & Mitigations + +| Risk | Impact | Mitigation | +|------|--------|------------| +| Large bundle size | Transfer challenges | Incremental updates, compression | +| Key compromise | Trust broken | Support key rotation, revocation lists | +| Time anchor unavailable | Cannot validate freshness | Fallback to operator attestation | + +--- + +## Documentation Updates + +- [ ] Add `docs/operations/airgap-knowledge-sync.md` +- [ ] Update air-gap architecture documentation +- [ ] Add staleness policy guide diff --git a/docs/implplan/SPRINT_4300_0003_0002_attestation_metrics.md b/docs/implplan/SPRINT_4300_0003_0002_attestation_metrics.md new file mode 100644 index 000000000..5ff5afd3f --- /dev/null +++ b/docs/implplan/SPRINT_4300_0003_0002_attestation_metrics.md @@ -0,0 +1,341 @@ +# Sprint 4300.0003.0002 - Attestation Completeness Metrics + +## Topic & Scope + +- Add metrics for attestation completeness and timeliness +- Expose via OpenTelemetry/Prometheus +- Add Grafana dashboard template + +**Working directory:** `src/Telemetry/StellaOps.Telemetry.Core/` + +## Dependencies & Concurrency + +- **Upstream (DONE):** + - TTFS Telemetry (TtfsIngestionService) + - OpenTelemetry integration +- **Downstream:** Grafana dashboards, SLO tracking +- **Safe to parallelize with:** All SPRINT_4300_* + +## Documentation Prerequisites + +- `docs/modules/telemetry/architecture.md` +- Advisory metrics requirements + +--- + +## Tasks + +### T1: Define Attestation Metrics + +**Assignee**: Telemetry Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: — + +**Description**: +Define the metrics from the advisory. 
+ +**Implementation Path**: `Metrics/AttestationMetrics.cs` (new) + +**Implementation**: +```csharp +namespace StellaOps.Telemetry.Core.Metrics; + +/// <summary> +/// Metrics for attestation completeness and quality. +/// </summary> +public sealed class AttestationMetrics +{ + private readonly Meter _meter; + + // Counters + private readonly Counter<long> _attestationsCreated; + private readonly Counter<long> _attestationsVerified; + private readonly Counter<long> _attestationsFailed; + + // Gauges + private readonly ObservableGauge<double> _completenessRatio; + private readonly ObservableGauge<double> _averageTtfe; + + // Histograms + private readonly Histogram<double> _ttfeSeconds; + private readonly Histogram<double> _verificationDuration; + + public AttestationMetrics(IMeterFactory meterFactory) + { + _meter = meterFactory.Create("StellaOps.Attestations"); + + _attestationsCreated = _meter.CreateCounter<long>( + "stella_attestations_created_total", + unit: "{attestation}", + description: "Total attestations created"); + + _attestationsVerified = _meter.CreateCounter<long>( + "stella_attestations_verified_total", + unit: "{attestation}", + description: "Total attestations verified successfully"); + + _attestationsFailed = _meter.CreateCounter<long>( + "stella_attestations_failed_total", + unit: "{attestation}", + description: "Total attestation verifications failed"); + + _ttfeSeconds = _meter.CreateHistogram<double>( + "stella_ttfe_seconds", + unit: "s", + description: "Time to first evidence (alert → evidence panel open)"); + + _verificationDuration = _meter.CreateHistogram<double>( + "stella_attestation_verification_duration_seconds", + unit: "s", + description: "Time to verify an attestation"); + } + + /// <summary> + /// Record attestation created. + /// </summary> + public void RecordCreated(string predicateType, string signer) + { + _attestationsCreated.Add(1, + new KeyValuePair<string, object?>("predicate_type", predicateType), + new KeyValuePair<string, object?>("signer", signer)); + } + + /// <summary> + /// Record attestation verified. 
+ /// </summary> + public void RecordVerified(string predicateType, bool success, TimeSpan duration) + { + if (success) + { + _attestationsVerified.Add(1, + new KeyValuePair<string, object?>("predicate_type", predicateType)); + } + else + { + _attestationsFailed.Add(1, + new KeyValuePair<string, object?>("predicate_type", predicateType)); + } + + _verificationDuration.Record(duration.TotalSeconds, + new KeyValuePair<string, object?>("predicate_type", predicateType), + new KeyValuePair<string, object?>("success", success)); + } + + /// <summary> + /// Record time to first evidence. + /// </summary> + public void RecordTtfe(TimeSpan duration, string evidenceType) + { + _ttfeSeconds.Record(duration.TotalSeconds, + new KeyValuePair<string, object?>("evidence_type", evidenceType)); + } +} +``` + +**Acceptance Criteria**: +- [ ] Counter: `stella_attestations_created_total` +- [ ] Counter: `stella_attestations_verified_total` +- [ ] Counter: `stella_attestations_failed_total` +- [ ] Histogram: `stella_ttfe_seconds` +- [ ] Histogram: `stella_attestation_verification_duration_seconds` +- [ ] Labels for predicate_type, signer, evidence_type + +--- + +### T2: Add Completeness Ratio Calculator + +**Assignee**: Telemetry Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Calculate attestation completeness ratio per artifact. + +**Implementation**: +```csharp +public interface IAttestationCompletenessCalculator +{ + /// + /// Calculate completeness ratio for an artifact. + /// Complete = has all required attestation types. 
+ /// + Task<CompletenessResult> CalculateAsync( + string artifactDigest, + IReadOnlyList<string> requiredTypes, + CancellationToken ct = default); +} + +public sealed class AttestationCompletenessCalculator : IAttestationCompletenessCalculator +{ + private readonly IOciReferrerDiscovery _discovery; + private readonly AttestationMetrics _metrics; + + public async Task<CompletenessResult> CalculateAsync( + string artifactDigest, + IReadOnlyList<string> requiredTypes, + CancellationToken ct = default) + { + var referrers = await _discovery.ListReferrersAsync( + /* registry, repo, digest */, ct); + + var foundTypes = referrers.Referrers + .Select(r => MapArtifactType(r.ArtifactType)) + .Distinct() + .ToHashSet(); + + var missingTypes = requiredTypes.Except(foundTypes).ToList(); + var ratio = (double)(requiredTypes.Count - missingTypes.Count) / requiredTypes.Count; + + return new CompletenessResult + { + ArtifactDigest = artifactDigest, + CompletenessRatio = ratio, + FoundTypes = foundTypes.ToList(), + MissingTypes = missingTypes, + IsComplete = missingTypes.Count == 0 + }; + } +} + +public sealed record CompletenessResult +{ + public required string ArtifactDigest { get; init; } + public required double CompletenessRatio { get; init; } + public required IReadOnlyList<string> FoundTypes { get; init; } + public required IReadOnlyList<string> MissingTypes { get; init; } + public required bool IsComplete { get; init; } +} +``` + +**Acceptance Criteria**: +- [ ] Ratio calculation correct +- [ ] Missing types identified +- [ ] Handles partial attestation sets + +--- + +### T3: Add Post-Deploy Reversion Tracking + +**Assignee**: Telemetry Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Track reversions due to missing proof. 
+ +**Implementation**: +```csharp +public sealed class DeploymentMetrics +{ + private readonly Counter<long> _deploymentsTotal; + private readonly Counter<long> _reversionsTotal; + + public DeploymentMetrics(IMeterFactory meterFactory) + { + var meter = meterFactory.Create("StellaOps.Deployments"); + + _deploymentsTotal = meter.CreateCounter<long>( + "stella_deployments_total", + unit: "{deployment}", + description: "Total deployments attempted"); + + _reversionsTotal = meter.CreateCounter<long>( + "stella_post_deploy_reversions_total", + unit: "{reversion}", + description: "Reversions due to missing or invalid proof"); + } + + public void RecordDeployment(string environment, bool hadCompleteProof) + { + _deploymentsTotal.Add(1, + new KeyValuePair<string, object?>("environment", environment), + new KeyValuePair<string, object?>("complete_proof", hadCompleteProof)); + } + + public void RecordReversion(string environment, string reason) + { + _reversionsTotal.Add(1, + new KeyValuePair<string, object?>("environment", environment), + new KeyValuePair<string, object?>("reason", reason)); + } +} +``` + +**Acceptance Criteria**: +- [ ] Deployment counter with proof status +- [ ] Reversion counter with reason +- [ ] Environment label + +--- + +### T4: Create Grafana Dashboard Template + +**Assignee**: Telemetry Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1, T2, T3 + +**Description**: +Create Grafana dashboard for attestation metrics. + +**Implementation Path**: `deploy/grafana/dashboards/attestation-metrics.json` + +**Dashboard Panels**: +1. **Attestation Completeness Gauge** (target: >=95%) +2. **TTFE Distribution** (target: <=30s) +3. **Verification Success Rate** +4. **Post-Deploy Reversions** (trend to zero) +5. **Attestations by Type** (pie chart) +6. 
**Stale Evidence Alerts** (time series) + +**Acceptance Criteria**: +- [ ] Dashboard JSON created +- [ ] All 4 advisory metrics visualized +- [ ] SLO thresholds marked +- [ ] Time range selectors + +--- + +### T5: Add DI Registration + +**Assignee**: Telemetry Team +**Story Points**: 1 +**Status**: TODO +**Dependencies**: T1, T2, T3 + +**Acceptance Criteria**: +- [ ] `AttestationMetrics` registered +- [ ] `DeploymentMetrics` registered +- [ ] `IAttestationCompletenessCalculator` registered + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Telemetry Team | Define metrics | +| 2 | T2 | TODO | T1 | Telemetry Team | Completeness calculator | +| 3 | T3 | TODO | T1 | Telemetry Team | Reversion tracking | +| 4 | T4 | TODO | T1-T3 | Telemetry Team | Grafana dashboard | +| 5 | T5 | TODO | T1-T3 | Telemetry Team | DI registration | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from Explainable Triage advisory gap analysis (G5). 
| Agent | + +--- + +## Success Criteria + +- [ ] All 5 tasks marked DONE +- [ ] Metrics exposed via OpenTelemetry +- [ ] Grafana dashboard functional +- [ ] `dotnet build` succeeds diff --git a/docs/implplan/SPRINT_4300_MOAT_SUMMARY.md b/docs/implplan/SPRINT_4300_MOAT_SUMMARY.md new file mode 100644 index 000000000..f552ebfcc --- /dev/null +++ b/docs/implplan/SPRINT_4300_MOAT_SUMMARY.md @@ -0,0 +1,126 @@ +# SPRINT_4300 MOAT HARDENING: Verdict Attestation & Epistemic Mode + +## Program Overview + +| Field | Value | +|-------|-------| +| **Program ID** | 4300 (Moat Series) | +| **Theme** | Moat Hardening: Signed Verdicts & Epistemic Operations | +| **Priority** | P0-P1 (Critical to High) | +| **Total Effort** | ~9 weeks | +| **Advisory Source** | 19-Dec-2025 - Stella Ops candidate features mapped to moat strength | + +--- + +## Strategic Context + +This sprint program addresses the highest-moat features identified in the competitive analysis advisory. The goal is to harden StellaOps' structural advantages in: + +1. **Signed, replayable risk verdicts (Moat 5)** — The anchor differentiator +2. **Unknowns as first-class state (Moat 4)** — Governance primitive +3. **Air-gapped epistemic mode (Moat 4)** — Reproducibility moat + +--- + +## Sprint Breakdown + +### P0 Sprints (Critical) + +| Sprint ID | Title | Effort | Moat | +|-----------|-------|--------|------| +| 4300_0001_0001 | OCI Verdict Attestation Referrer Push | 2 weeks | 5 | +| 4300_0001_0002 | One-Command Audit Replay CLI | 2 weeks | 5 | + +**Outcome**: Verdicts become portable "ship tokens" that can be pushed to registries and replayed offline. 
+ +### P1 Sprints (High) + +| Sprint ID | Title | Effort | Moat | +|-----------|-------|--------|------| +| 4300_0002_0001 | Unknowns Budget Policy Integration | 2 weeks | 4 | +| 4300_0002_0002 | Unknowns Attestation Predicates | 1 week | 4 | +| 4300_0003_0001 | Sealed Knowledge Snapshot Export/Import | 2 weeks | 4 | + +**Outcome**: Uncertainty becomes actionable through policy gates and attestable for audits. Air-gap customers get sealed knowledge bundles. + +--- + +## Related Sprint Programs + +| Program | Theme | Moat Focus | +|---------|-------|------------| +| **4400** | Delta Verdicts & Reachability Attestations | Smart-Diff, Reachability | +| **4500** | VEX Hub & Trust Scoring | VEX Distribution Network | +| **4600** | SBOM Lineage & BYOS | SBOM Ledger | + +--- + +## Dependency Graph + +``` +SPRINT_4300_0001_0001 (OCI Verdict Push) + │ + ├──► SPRINT_4300_0001_0002 (Audit Replay CLI) + │ + └──► SPRINT_4400_0001_0001 (Signed Delta Verdict) + +SPRINT_4300_0002_0001 (Unknowns Budget) + │ + └──► SPRINT_4300_0002_0002 (Unknowns Attestation) + +SPRINT_4300_0003_0001 (Sealed Snapshot) + │ + └──► [Standalone, enables air-gap scenarios] +``` + +--- + +## Success Metrics + +| Metric | Target | Measurement | +|--------|--------|-------------| +| Verdict push success rate | >99% | OTEL metrics | +| Audit replay pass rate | 100% on same inputs | CI tests | +| Unknown budget violations detected | >0 in test suite | Integration tests | +| Air-gap import success rate | >99% | Manual testing | + +--- + +## Risks & Dependencies + +| Risk | Impact | Mitigation | +|------|--------|------------| +| OCI registry incompatibility | Cannot push verdicts | Fallback to tag-based | +| Bundle size too large | Transfer issues | Streaming, compression | +| Key management complexity | Security | Document rotation procedures | + +--- + +## Timeline Recommendation + +**Phase 1 (Weeks 1-4)**: P0 Sprints +- OCI Verdict Push + Audit Replay + +**Phase 2 (Weeks 5-7)**: P1 Sprints +- Unknowns 
Budget + Attestations + +**Phase 3 (Weeks 8-9)**: P1 Sprints +- Sealed Knowledge Snapshots + +--- + +## Documentation Deliverables + +- [ ] `docs/operations/verdict-attestation-guide.md` +- [ ] `docs/operations/audit-replay-guide.md` +- [ ] `docs/operations/unknown-budgets-guide.md` +- [ ] `docs/operations/airgap-knowledge-sync.md` +- [ ] Update attestation type catalog +- [ ] Update CLI reference + +--- + +**Sprint Series Status:** TODO + +**Created:** 2025-12-22 +**Origin:** Gap analysis of 19-Dec-2025 moat strength advisory diff --git a/docs/implplan/SPRINT_4300_SUMMARY.md b/docs/implplan/SPRINT_4300_SUMMARY.md new file mode 100644 index 000000000..e0bc1036d --- /dev/null +++ b/docs/implplan/SPRINT_4300_SUMMARY.md @@ -0,0 +1,171 @@ +# SPRINT_4300 Summary - Explainable Triage Gaps + +## Overview + +This sprint series closes the remaining gaps between the "Designing Explainable Triage and Proof-Linked Evidence" advisory (18-Dec-2025) and the current implementation. + +**Origin Advisory:** `docs/product-advisories/18-Dec-2025 - Designing Explainable Triage and Proof‑Linked Evidence.md` + +**Gap Analysis:** `docs/implplan/analysis/4300_explainable_triage_gap_analysis.md` + +## Executive Summary + +The advisory defined a comprehensive vision for explainable, evidence-linked triage. **~85% was already implemented** through prior sprints (3800, 3801, 4100, 4200 series). 
This series addresses the remaining **6 gaps**: + +| Gap | Description | Sprint | Priority | Effort | +|-----|-------------|--------|----------|--------| +| G1 | CLI attestation chain verify command | 4300.0001.0001 | HIGH | M | +| G6 | Findings evidence API endpoint | 4300.0001.0002 | MEDIUM | S | +| G2 | Evidence privacy controls | 4300.0002.0001 | MEDIUM | M | +| G3 | Evidence TTL enforcement | 4300.0002.0002 | MEDIUM | S | +| G4 | Predicate JSON schemas | 4300.0003.0001 | LOW | S | +| G5 | Attestation completeness metrics | 4300.0003.0002 | LOW | M | + +**Total Effort:** ~10-14 days across teams + +## Sprint Structure + +``` +SPRINT_4300 (Explainable Triage Gaps) +├── 0001 (CLI & API) +│ ├── 0001 CLI Attestation Verify Command [HIGH] +│ └── 0002 Findings Evidence API [MEDIUM] +├── 0002 (Evidence Management) +│ ├── 0001 Evidence Privacy Controls [MEDIUM] +│ └── 0002 Evidence TTL Enforcement [MEDIUM] +└── 0003 (Quality & Observability) + ├── 0001 Predicate JSON Schemas [LOW] + └── 0002 Attestation Metrics [LOW] +``` + +## Dependencies + +### External Dependencies (Already DONE) + +| Dependency | Sprint | Status | +|------------|--------|--------| +| OCI Referrer Discovery | 4100.0003.0002 | DONE | +| Risk Verdict Attestation | 4100.0003.0001 | DONE | +| Human Approval Attestation | 3801.0001.0004 | DONE | +| Approve Button UI | 4100.0005.0001 | DONE | +| Evidence Composition Service | 3800.0003.0001 | DONE | +| Boundary Extractors | 3800.0002.* | DONE | +| Trust Lattice Engine | (core) | DONE | + +### Internal Dependencies + +``` +4300.0001.0001 ─┬─> (none, can start immediately) +4300.0001.0002 ─┤ +4300.0002.0001 ─┤ +4300.0002.0002 ─┤ +4300.0003.0001 ─┤ +4300.0003.0002 ─┘ +``` + +All sprints can run in parallel. 
+ +## Recommended Execution Order + +**Wave 1 (Week 1):** HIGH priority + foundations +- 4300.0001.0001 - CLI Attestation Verify (CLI Team) +- 4300.0001.0002 - Findings Evidence API (Scanner Team) +- 4300.0002.0002 - Evidence TTL Enforcement (Policy Team) + +**Wave 2 (Week 2):** MEDIUM + LOW priority +- 4300.0002.0001 - Evidence Privacy Controls (Scanner Team) +- 4300.0003.0001 - Predicate Schemas (Attestor Team) +- 4300.0003.0002 - Attestation Metrics (Telemetry Team) + +## Success Criteria (from Advisory) + +| # | Criterion | Coverage | +|---|-----------|----------| +| 1 | Every risk row expands to path, boundary, VEX, last-seen in <300ms | 4200.0001.0001 (planned) + 4300.0001.0002 | +| 2 | "Approve" button disabled until SBOM+VEX+Decision attestations validate | 4100.0005.0001 (DONE) | +| 3 | One-click "Show DSSE chain" renders envelopes with digests and signers | 4200.0001.0001 (planned) | +| 4 | Audit log captures who approved, which digests, evidence hashes | 3801.0001.0004 (DONE) | +| 5 | CLI can verify attestation chain before deploy | **4300.0001.0001** | +| 6 | % attestation completeness >= 95% | **4300.0003.0002** | +| 7 | TTFE (time-to-first-evidence) <= 30s | **4300.0003.0002** | +| 8 | Post-deploy reversions trend to zero | **4300.0003.0002** | + +## Team Assignments + +| Team | Sprints | Total Effort | +|------|---------|--------------| +| CLI Team | 4300.0001.0001 | M (2-3d) | +| Scanner Team | 4300.0001.0002, 4300.0002.0001 | S+M (3-5d) | +| Policy Team | 4300.0002.0002 | S (1-2d) | +| Attestor Team | 4300.0003.0001 | S (1-2d) | +| Telemetry Team | 4300.0003.0002 | M (2-3d) | + +## Deliverables + +### New CLI Commands +- `stella verify image --require sbom,vex,decision` + +### New API Endpoints +- `GET /api/v1/findings/{findingId}/evidence` +- `POST /api/v1/findings/evidence/batch` + +### New Services +- `ImageAttestationVerifier` +- `TrustPolicyLoader` +- `EvidenceRedactionService` +- `EvidenceTtlEnforcer` +- `AttestationCompletenessCalculator` +- 
`PredicateSchemaValidator` + +### New Metrics +- `stella_attestations_created_total` +- `stella_attestations_verified_total` +- `stella_attestations_failed_total` +- `stella_ttfe_seconds` +- `stella_post_deploy_reversions_total` + +### New Schemas +- `docs/schemas/predicates/sbom.v1.schema.json` +- `docs/schemas/predicates/vex.v1.schema.json` +- `docs/schemas/predicates/reachability.v1.schema.json` +- `docs/schemas/predicates/boundary.v1.schema.json` +- `docs/schemas/predicates/policy-decision.v1.schema.json` +- `docs/schemas/predicates/human-approval.v1.schema.json` + +### New Dashboard +- `deploy/grafana/dashboards/attestation-metrics.json` + +## Risk Register + +| Risk | Impact | Mitigation | +|------|--------|------------| +| OCI referrers API not supported by all registries | Fallback tag discovery | Already implemented in 4100.0003.0002 | +| Schema validation performance | Latency on attestation creation | Cache compiled schemas | +| Metric cardinality explosion | Prometheus storage | Limit label values | + +## Completion Checklist + +- [ ] All 6 sprints marked DONE +- [ ] CLI verify command works end-to-end +- [ ] Evidence API returns advisory-compliant contract +- [ ] Privacy redaction enforced by default +- [ ] TTL staleness affects policy decisions +- [ ] All predicate schemas validate correctly +- [ ] Grafana dashboard shows all metrics +- [ ] Integration tests pass +- [ ] Documentation updated + +## Post-Completion + +After all sprints complete: +1. Update `docs/09_API_CLI_REFERENCE.md` with new CLI command +2. Update `docs/modules/scanner/architecture.md` with evidence API +3. Archive this summary to `docs/implplan/archived/` +4. 
Close advisory tracking issue + +--- + +**Sprint Series Status:** TODO (0/6 sprints complete) + +**Created:** 2025-12-22 +**Origin:** Gap analysis of 18-Dec-2025 advisory diff --git a/docs/implplan/SPRINT_4400_0001_0001_signed_delta_verdict.md b/docs/implplan/SPRINT_4400_0001_0001_signed_delta_verdict.md new file mode 100644 index 000000000..260925c87 --- /dev/null +++ b/docs/implplan/SPRINT_4400_0001_0001_signed_delta_verdict.md @@ -0,0 +1,112 @@ +# SPRINT_4400_0001_0001: Signed Delta Verdict Attestation + +## Sprint Metadata + +| Field | Value | +|-------|-------| +| **Sprint ID** | 4400_0001_0001 | +| **Title** | Signed Delta Verdict Attestation | +| **Priority** | P2 (Medium) | +| **Moat Strength** | 4 (Strong moat) | +| **Working Directory** | `src/Scanner/`, `src/Attestor/`, `src/Cli/` | +| **Estimated Effort** | 2 weeks | +| **Dependencies** | MaterialRiskChangeDetector (exists), SPRINT_4300_0001_0001 | + +--- + +## Objective + +Create a signed attestation format for Smart-Diff results, making semantic risk deltas portable, auditable, and verifiable as part of the change control process. + +**Moat thesis**: "We explain what changed in exploitable surface area, not what changed in CVE count." + +--- + +## Background + +Smart-Diff (`MaterialRiskChangeDetector`) exists with R1-R4 rules and priority scoring. **Gap**: Results are not attestable. 
+ +--- + +## Deliverables + +### D1: Delta Verdict Attestation Predicate +- Define `delta-verdict.stella/v1` predicate type +- Include: changes detected, priority score, evidence references + +### D2: Delta Verdict Builder +- Build delta attestation from `MaterialRiskChangeResult` +- Link to before/after proof spines +- Include graph revision IDs + +### D3: OCI Delta Push +- Push delta verdict as OCI referrer +- Support linking to two image manifests (before/after) + +### D4: CLI Integration +- `stella diff --sign --push` flow +- `stella diff verify` command + +--- + +## Tasks + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| DELTA-001 | Define `DeltaVerdictStatement` predicate | TODO | | +| DELTA-002 | Create `DeltaVerdictBuilder` | TODO | | +| DELTA-003 | Implement before/after proof spine linking | TODO | | +| DELTA-004 | Add delta verdict to OCI pusher | TODO | | +| DELTA-005 | Implement `stella diff --sign` | TODO | | +| DELTA-006 | Implement `stella diff verify` | TODO | | +| DELTA-007 | Add SARIF output with attestation reference | TODO | | +| DELTA-008 | Integration tests | TODO | | + +--- + +## Acceptance Criteria + +1. **AC1**: Delta verdict is a signed in-toto statement +2. **AC2**: Delta can be pushed as OCI referrer +3. **AC3**: `stella diff verify` validates signature and content +4. 
**AC4**: Attestation links to both scan verdicts + +--- + +## Technical Notes + +### Delta Verdict Statement +```json +{ + "_type": "https://in-toto.io/Statement/v1", + "subject": [ + {"digest": {"sha256": "<before-image-digest>"}}, + {"digest": {"sha256": "<after-image-digest>"}} + ], + "predicateType": "delta-verdict.stella/v1", + "predicate": { + "beforeRevisionId": "...", + "afterRevisionId": "...", + "hasMaterialChange": true, + "priorityScore": 1750, + "changes": [ + { + "rule": "R1_ReachabilityFlip", + "findingKey": {"vulnId": "CVE-2024-1234", "purl": "..."}, + "direction": "increased", + "reason": "Reachability changed from false to true" + } + ], + "beforeVerdictDigest": "sha256:...", + "afterVerdictDigest": "sha256:...", + "comparedAt": "2025-12-22T00:00:00Z" + } +} +``` + +--- + +## Documentation Updates + +- [ ] Add delta verdict to attestation catalog +- [ ] Update Smart-Diff documentation diff --git a/docs/implplan/SPRINT_4400_0001_0002_reachability_subgraph_attestation.md b/docs/implplan/SPRINT_4400_0001_0002_reachability_subgraph_attestation.md new file mode 100644 index 000000000..d7a18c917 --- /dev/null +++ b/docs/implplan/SPRINT_4400_0001_0002_reachability_subgraph_attestation.md @@ -0,0 +1,119 @@ +# SPRINT_4400_0001_0002: Reachability Subgraph Attestation + +## Sprint Metadata + +| Field | Value | +|-------|-------| +| **Sprint ID** | 4400_0001_0002 | +| **Title** | Reachability Subgraph Attestation | +| **Priority** | P2 (Medium) | +| **Moat Strength** | 4 (Strong moat) | +| **Working Directory** | `src/Signals/`, `src/Attestor/`, `src/Scanner/` | +| **Estimated Effort** | 2 weeks | +| **Dependencies** | ReachabilityWitnessStatement (exists), CallPath (exists) | + +--- + +## Objective + +Package reachability analysis results as a standalone, attestable subgraph artifact that can be stored, transferred, and verified independently of the full scan context. + +**Moat thesis**: "We provide proof of exploitability in *this* artifact, not just a badge."
+ +--- + +## Background + +Current implementation has: +- `ReachabilityWitnessStatement` for single path witness +- `PathWitnessBuilder` for call path construction +- `CallPath` models + +**Gap**: No standalone reachability subgraph as portable artifact. + +--- + +## Deliverables + +### D1: Reachability Subgraph Format +- Define graph serialization format (nodes, edges, metadata) +- Include: entrypoints, symbols, call edges, gates +- Support partial graphs (per-finding) + +### D2: Subgraph Attestation Predicate +- Define `reachability-subgraph.stella/v1` predicate +- Include: graph digest, finding keys covered, analysis metadata + +### D3: Subgraph Builder +- Extract relevant subgraph from full call graph +- Prune to reachable paths only +- Include boundary detection results + +### D4: OCI Subgraph Push +- Push subgraph as OCI artifact +- Link to SBOM and verdict + +### D5: Subgraph Viewer +- CLI command to inspect subgraph +- Visualize call paths to vulnerable symbols + +--- + +## Tasks + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| SUBG-001 | Define `ReachabilitySubgraph` serialization format | TODO | | +| SUBG-002 | Create `ReachabilitySubgraphStatement` predicate | TODO | | +| SUBG-003 | Implement `SubgraphExtractor` from call graph | TODO | | +| SUBG-004 | Add subgraph to attestation pipeline | TODO | | +| SUBG-005 | Implement OCI subgraph push | TODO | | +| SUBG-006 | Create `stella reachability show` command | TODO | | +| SUBG-007 | Add DOT/Mermaid export for visualization | TODO | | +| SUBG-008 | Integration tests with real call graphs | TODO | | + +--- + +## Acceptance Criteria + +1. **AC1**: Subgraph captures all paths to vulnerable symbols +2. **AC2**: Subgraph is a signed attestation +3. **AC3**: Subgraph can be pushed as OCI artifact +4. 
**AC4**: CLI can visualize subgraph + +--- + +## Technical Notes + +### Subgraph Format +```json +{ + "version": "1.0", + "findingKeys": ["CVE-2024-1234@pkg:npm/lodash@4.17.20"], + "nodes": [ + {"id": "n1", "type": "entrypoint", "symbol": "main.handler"}, + {"id": "n2", "type": "call", "symbol": "lodash.merge"}, + {"id": "n3", "type": "vulnerable", "symbol": "lodash._baseAssign"} + ], + "edges": [ + {"from": "n1", "to": "n2", "type": "call"}, + {"from": "n2", "to": "n3", "type": "call"} + ], + "gates": [ + {"nodeId": "n1", "gateType": "http", "boundary": "public"} + ], + "analysisMetadata": { + "analyzer": "node-callgraph-v2", + "confidence": 0.95, + "completeness": "partial" + } +} +``` + +--- + +## Documentation Updates + +- [ ] Add reachability subgraph specification +- [ ] Update attestation type catalog +- [ ] Create reachability proof guide diff --git a/docs/implplan/SPRINT_4400_SUMMARY.md b/docs/implplan/SPRINT_4400_SUMMARY.md new file mode 100644 index 000000000..d0a562ec2 --- /dev/null +++ b/docs/implplan/SPRINT_4400_SUMMARY.md @@ -0,0 +1,50 @@ +# SPRINT_4400 SUMMARY: Delta Verdicts & Reachability Attestations + +## Program Overview + +| Field | Value | +|-------|-------| +| **Program ID** | 4400 | +| **Theme** | Attestable Change Control: Delta Verdicts & Reachability Proofs | +| **Priority** | P2 (Medium) | +| **Total Effort** | ~4 weeks | +| **Advisory Source** | 19-Dec-2025 - Stella Ops candidate features mapped to moat strength | + +--- + +## Strategic Context + +This program extends the attestation infrastructure to cover: +1. **Smart-Diff semantic delta** — Changes in exploitable surface as signed artifacts +2. 
**Reachability proofs** — Call-path subgraphs as portable evidence + +--- + +## Sprint Breakdown + +| Sprint ID | Title | Effort | Moat | +|-----------|-------|--------|------| +| 4400_0001_0001 | Signed Delta Verdict Attestation | 2 weeks | 4 | +| 4400_0001_0002 | Reachability Subgraph Attestation | 2 weeks | 4 | + +--- + +## Dependencies + +- **Requires**: SPRINT_4300_0001_0001 (OCI Verdict Push) +- **Requires**: MaterialRiskChangeDetector (exists) +- **Requires**: PathWitnessBuilder (exists) + +--- + +## Outcomes + +1. Delta verdicts become attestable change-control artifacts +2. Reachability analysis produces portable proof subgraphs +3. Both can be pushed to OCI registries as referrers + +--- + +**Sprint Series Status:** TODO + +**Created:** 2025-12-22 diff --git a/docs/implplan/SPRINT_4500_0001_0001_vex_hub_aggregation.md b/docs/implplan/SPRINT_4500_0001_0001_vex_hub_aggregation.md new file mode 100644 index 000000000..96804e90d --- /dev/null +++ b/docs/implplan/SPRINT_4500_0001_0001_vex_hub_aggregation.md @@ -0,0 +1,183 @@ +# SPRINT_4500_0001_0001: VEX Hub Aggregation Service + +## Sprint Metadata + +| Field | Value | +|-------|-------| +| **Sprint ID** | 4500_0001_0001 | +| **Title** | VEX Hub Aggregation Service | +| **Priority** | P1 (High) | +| **Moat Strength** | 3-4 (Moderate-Strong moat) | +| **Working Directory** | `src/Excititor/`, `src/VexLens/`, new `src/VexHub/` | +| **Estimated Effort** | 4 weeks | +| **Dependencies** | VexLens (exists), Excititor connectors (exist) | + +--- + +## Objective + +Build a VEX Hub aggregation layer that collects, validates, normalizes, and serves VEX statements at scale, positioning StellaOps as a trusted source for VEX distribution. + +**Competitive context**: Aqua's VEX Hub is explicitly called out in the advisory. Differentiation requires verification + trust scoring + tight coupling to deterministic decisioning. + +--- + +## Background + +The advisory notes VEX distribution network as **Moat 3-4**. 
Current implementation: +- Excititor ingests from 7+ VEX sources +- VexLens provides consensus engine +- VexConsensusEngine supports multiple modes + +**Gap**: No aggregation layer, no distribution API, no ecosystem play. + +--- + +## Deliverables + +### D1: VexHub Module +- New `src/VexHub/` module +- Aggregation scheduler +- Storage layer for normalized VEX + +### D2: VEX Ingestion Pipeline +- Scheduled polling of upstream sources +- Normalization to canonical VEX format +- Deduplication and conflict detection + +### D3: VEX Validation Pipeline +- Signature verification for signed VEX +- Schema validation +- Provenance tracking + +### D4: Distribution API +- REST API for VEX discovery +- Query by: CVE, package (PURL), source +- Pagination and filtering +- Subscription/webhook for updates + +### D5: Trivy/Grype Compatibility +- Export in OpenVEX format +- Compatible with Trivy `--vex-url` flag +- Index manifest for tool consumption + +--- + +## Tasks + +### Phase 1: Module Setup + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| HUB-001 | Create `StellaOps.VexHub` module structure | TODO | | +| HUB-002 | Define VexHub domain models | TODO | | +| HUB-003 | Create PostgreSQL schema for VEX aggregation | TODO | | +| HUB-004 | Set up web service skeleton | TODO | | + +### Phase 2: Ingestion Pipeline + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| HUB-005 | Create `VexIngestionScheduler` | TODO | | +| HUB-006 | Implement source polling orchestration | TODO | | +| HUB-007 | Create `VexNormalizationPipeline` | TODO | | +| HUB-008 | Implement deduplication logic | TODO | | +| HUB-009 | Detect and flag conflicting statements | TODO | | +| HUB-010 | Store normalized VEX with provenance | TODO | | + +### Phase 3: Validation Pipeline + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| HUB-011 | Implement signature verification for signed VEX | TODO | | +| HUB-012 | Add schema validation (OpenVEX, 
CycloneDX, CSAF) | TODO | | +| HUB-013 | Track and store provenance metadata | TODO | | +| HUB-014 | Flag unverified/untrusted statements | TODO | | + +### Phase 4: Distribution API + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| HUB-015 | Implement `GET /api/v1/vex/cve/{cve-id}` | TODO | | +| HUB-016 | Implement `GET /api/v1/vex/package/{purl}` | TODO | | +| HUB-017 | Implement `GET /api/v1/vex/source/{source-id}` | TODO | | +| HUB-018 | Add pagination and filtering | TODO | | +| HUB-019 | Implement subscription/webhook for updates | TODO | | +| HUB-020 | Add rate limiting and authentication | TODO | | + +### Phase 5: Tool Compatibility + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| HUB-021 | Implement OpenVEX bulk export | TODO | | +| HUB-022 | Create index manifest (vex-index.json) | TODO | | +| HUB-023 | Test with Trivy `--vex-url` | TODO | | +| HUB-024 | Test with Grype VEX support | TODO | | +| HUB-025 | Document integration instructions | TODO | | + +--- + +## Acceptance Criteria + +1. **AC1**: VEX Hub ingests from all configured sources on schedule +2. **AC2**: API returns VEX statements by CVE and PURL +3. **AC3**: Signed VEX statements are verified and flagged +4. **AC4**: Trivy can consume VEX from hub URL +5. 
**AC5**: Conflicts are detected and surfaced + +--- + +## Technical Notes + +### API Examples +```http +GET /api/v1/vex/cve/CVE-2024-1234 +Accept: application/vnd.openvex+json + +Response: +{ + "@context": "https://openvex.dev/ns", + "statements": [ + { + "vulnerability": "CVE-2024-1234", + "products": ["pkg:npm/express@4.17.1"], + "status": "not_affected", + "justification": "vulnerable_code_not_present", + "source": {"id": "redhat-csaf", "trustScore": 0.95} + } + ] +} +``` + +### Index Manifest +```json +{ + "version": "1.0", + "lastUpdated": "2025-12-22T00:00:00Z", + "sources": ["redhat-csaf", "cisco-csaf", "ubuntu-csaf"], + "totalStatements": 45678, + "endpoints": { + "byCve": "/api/v1/vex/cve/{cve}", + "byPackage": "/api/v1/vex/package/{purl}", + "bulk": "/api/v1/vex/export" + } +} +``` + +--- + +## Risks & Mitigations + +| Risk | Impact | Mitigation | +|------|--------|------------| +| Upstream source instability | Missing VEX | Multiple sources, caching | +| Conflicting VEX from sources | Confusion | Surface conflicts, trust scoring | +| Scale challenges | Performance | Caching, CDN, pagination | + +--- + +## Documentation Updates + +- [ ] Create `docs/modules/vexhub/architecture.md` +- [ ] Add VexHub API reference +- [ ] Create integration guide for Trivy/Grype diff --git a/docs/implplan/SPRINT_4500_0001_0002_vex_trust_scoring.md b/docs/implplan/SPRINT_4500_0001_0002_vex_trust_scoring.md new file mode 100644 index 000000000..f65a7639f --- /dev/null +++ b/docs/implplan/SPRINT_4500_0001_0002_vex_trust_scoring.md @@ -0,0 +1,180 @@ +# SPRINT_4500_0001_0002: VEX Trust Scoring Framework + +## Sprint Metadata + +| Field | Value | +|-------|-------| +| **Sprint ID** | 4500_0001_0002 | +| **Title** | VEX Trust Scoring Framework | +| **Priority** | P1 (High) | +| **Moat Strength** | 3-4 (Moderate-Strong moat) | +| **Working Directory** | `src/VexLens/`, `src/VexHub/`, `src/Policy/` | +| **Estimated Effort** | 2 weeks | +| **Dependencies** | SPRINT_4500_0001_0001, 
TrustWeightEngine (exists) | + +--- + +## Objective + +Develop a comprehensive trust scoring framework for VEX sources that goes beyond simple weighting, incorporating verification status, historical accuracy, and timeliness. + +**Differentiation**: Competitors treat VEX as suppression. StellaOps treats VEX as a logical claim system with trust semantics. + +--- + +## Background + +Current `TrustWeightEngine` provides basic issuer weighting. The advisory calls for: +- "Verification + trust scoring of VEX sources" +- "Trust frameworks" for network effects + +--- + +## Deliverables + +### D1: Trust Scoring Model +- Multi-dimensional trust score: authority, accuracy, timeliness, coverage +- Composite score calculation +- Historical accuracy tracking + +### D2: Source Verification +- Signature verification status +- Provenance chain validation +- Issuer identity verification + +### D3: Trust Decay +- Time-based trust decay for stale statements +- Recency bonus for fresh assessments +- Revocation/update handling + +### D4: Trust Policy Integration +- Policy rules based on trust scores +- Minimum trust thresholds +- Source allowlists/blocklists + +### D5: Trust Dashboard +- Source trust scorecards +- Historical accuracy metrics +- Conflict resolution audit + +--- + +## Tasks + +### Phase 1: Trust Model + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| TRUST-001 | Define `VexSourceTrustScore` model | TODO | | +| TRUST-002 | Implement authority score (issuer reputation) | TODO | | +| TRUST-003 | Implement accuracy score (historical correctness) | TODO | | +| TRUST-004 | Implement timeliness score (response speed) | TODO | | +| TRUST-005 | Implement coverage score (completeness) | TODO | | +| TRUST-006 | Create composite score calculator | TODO | | + +### Phase 2: Verification + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| TRUST-007 | Add signature verification to trust pipeline | TODO | | +| TRUST-008 | Implement 
provenance chain validator | TODO | | +| TRUST-009 | Create issuer identity registry | TODO | | +| TRUST-010 | Score boost for verified statements | TODO | | + +### Phase 3: Decay & Freshness + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| TRUST-011 | Implement time-based trust decay | TODO | | +| TRUST-012 | Add recency bonus calculation | TODO | | +| TRUST-013 | Handle statement revocation | TODO | | +| TRUST-014 | Track statement update history | TODO | | + +### Phase 4: Policy Integration + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| TRUST-015 | Add trust threshold to policy rules | TODO | | +| TRUST-016 | Implement source allowlist/blocklist | TODO | | +| TRUST-017 | Create `TrustInsufficientViolation` | TODO | | +| TRUST-018 | Add trust context to consensus engine | TODO | | + +### Phase 5: Dashboard & Reporting + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| TRUST-019 | Create source trust scorecard API | TODO | | +| TRUST-020 | Add historical accuracy metrics | TODO | | +| TRUST-021 | Implement conflict resolution audit log | TODO | | +| TRUST-022 | Add trust trends visualization data | TODO | | + +--- + +## Acceptance Criteria + +1. **AC1**: Each VEX source has a computed trust score +2. **AC2**: Verified statements receive score boost +3. **AC3**: Stale statements decay appropriately +4. **AC4**: Policy can enforce minimum trust thresholds +5. 
**AC5**: Trust scorecard available via API + +--- + +## Technical Notes + +### Trust Score Model +```csharp +public sealed record VexSourceTrustScore +{ + public required string SourceId { get; init; } + + // Component scores (0.0 - 1.0) + public required double AuthorityScore { get; init; } // Issuer reputation + public required double AccuracyScore { get; init; } // Historical correctness + public required double TimelinessScore { get; init; } // Response speed + public required double CoverageScore { get; init; } // Completeness + public required double VerificationScore { get; init; } // Signature/provenance + + // Composite score with weights + public double CompositeScore => + AuthorityScore * 0.25 + + AccuracyScore * 0.30 + + TimelinessScore * 0.15 + + CoverageScore * 0.10 + + VerificationScore * 0.20; + + public required DateTimeOffset ComputedAt { get; init; } +} +``` + +### Decay Formula +``` +effective_score = base_score * decay_factor +decay_factor = max(0.5, 1.0 - (age_days / max_age_days) * 0.5) +``` + +### Policy Rule Example +```yaml +vex_trust_rules: + - name: "require-high-trust" + minimum_composite_score: 0.7 + require_verification: true + action: block_if_below +``` + +--- + +## Risks & Mitigations + +| Risk | Impact | Mitigation | +|------|--------|------------| +| Inaccurate accuracy scores | Gaming, distrust | Manual calibration, transparency | +| New sources have no history | Cold start problem | Default scores, grace period | + +--- + +## Documentation Updates + +- [ ] Add `docs/modules/vexlens/trust-scoring.md` +- [ ] Update policy DSL for trust rules +- [ ] Create trust tuning guide diff --git a/docs/implplan/SPRINT_4500_SUMMARY.md b/docs/implplan/SPRINT_4500_SUMMARY.md new file mode 100644 index 000000000..a049b4eeb --- /dev/null +++ b/docs/implplan/SPRINT_4500_SUMMARY.md @@ -0,0 +1,67 @@ +# SPRINT_4500 SUMMARY: VEX Hub & Trust Scoring + +## Program Overview + +| Field | Value | +|-------|-------| +| **Program ID** | 4500 | +| **Theme** 
| VEX Distribution Network: Aggregation, Trust, and Ecosystem | +| **Priority** | P1 (High) | +| **Total Effort** | ~6 weeks | +| **Advisory Source** | 19-Dec-2025 - Stella Ops candidate features mapped to moat strength | + +--- + +## Strategic Context + +The advisory explicitly calls out Aqua's VEX Hub as competitive. This program establishes StellaOps as a trusted VEX distribution layer with: +1. **VEX Hub** — Aggregation, validation, and serving at scale +2. **Trust Scoring** — Multi-dimensional trust assessment of VEX sources + +--- + +## Sprint Breakdown + +| Sprint ID | Title | Effort | Moat | +|-----------|-------|--------|------| +| 4500_0001_0001 | VEX Hub Aggregation Service | 4 weeks | 3-4 | +| 4500_0001_0002 | VEX Trust Scoring Framework | 2 weeks | 3-4 | + +--- + +## New Module + +This program introduces a new module: `src/VexHub/` + +--- + +## Dependencies + +- **Requires**: VexLens (exists) +- **Requires**: Excititor connectors (exist) +- **Requires**: TrustWeightEngine (exists) + +--- + +## Outcomes + +1. VEX Hub aggregates statements from all configured sources +2. API enables query by CVE, PURL, source +3. Trivy/Grype can consume VEX from hub URL +4. 
Trust scores inform consensus decisions + +--- + +## Competitive Positioning + +| Competitor | VEX Capability | StellaOps Differentiation | +|------------|----------------|---------------------------| +| Aqua VEX Hub | Centralized repository | +Trust scoring, +Verification, +Decisioning coupling | +| Trivy | VEX consumption | +Aggregation source, +Consensus engine | +| Anchore | VEX annotation | +Multi-source, +Lattice logic | + +--- + +**Sprint Series Status:** TODO + +**Created:** 2025-12-22 diff --git a/docs/implplan/SPRINT_4600_0001_0001_sbom_lineage_ledger.md b/docs/implplan/SPRINT_4600_0001_0001_sbom_lineage_ledger.md new file mode 100644 index 000000000..10ddeb748 --- /dev/null +++ b/docs/implplan/SPRINT_4600_0001_0001_sbom_lineage_ledger.md @@ -0,0 +1,171 @@ +# SPRINT_4600_0001_0001: SBOM Lineage Ledger + +## Sprint Metadata + +| Field | Value | +|-------|-------| +| **Sprint ID** | 4600_0001_0001 | +| **Title** | SBOM Lineage Ledger | +| **Priority** | P2 (Medium) | +| **Moat Strength** | 3 (Moderate moat) | +| **Working Directory** | `src/SbomService/`, `src/Graph/` | +| **Estimated Effort** | 3 weeks | +| **Dependencies** | SbomService (exists), Graph module (exists) | + +--- + +## Objective + +Build a versioned SBOM ledger that tracks historical changes, enables diff queries, and maintains lineage relationships between SBOM versions for the same artifact. + +**Moat strategy**: Make the ledger valuable via **semantic diff, evidence joins, and provenance** rather than just storage. + +--- + +## Background + +Current `SbomService` has: +- Basic version events (registered, updated) +- CatalogRecord storage +- Graph indexing + +**Gap**: No historical tracking, no lineage semantics, no temporal queries. 
+ +--- + +## Deliverables + +### D1: SBOM Version Chain +- Link SBOM versions by artifact identity +- Track version sequence with timestamps +- Support branching (multiple sources for same artifact) + +### D2: Historical Query API +- Query SBOM at point-in-time +- Get version history for artifact +- Diff between two versions + +### D3: Lineage Graph +- Build/source relationship tracking +- Parent/child SBOM relationships +- Aggregation relationships + +### D4: Change Detection +- Detect component additions/removals +- Detect version changes +- Detect license changes + +### D5: Retention Policy +- Configurable retention periods +- Archive/prune old versions +- Audit log preservation + +--- + +## Tasks + +### Phase 1: Version Chain + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| LEDGER-001 | Design version chain schema | TODO | | +| LEDGER-002 | Implement `SbomVersionChain` entity | TODO | | +| LEDGER-003 | Create version sequencing logic | TODO | | +| LEDGER-004 | Handle branching from multiple sources | TODO | | +| LEDGER-005 | Add version chain queries | TODO | | + +### Phase 2: Historical Queries + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| LEDGER-006 | Implement point-in-time SBOM retrieval | TODO | | +| LEDGER-007 | Create version history endpoint | TODO | | +| LEDGER-008 | Implement SBOM diff API | TODO | | +| LEDGER-009 | Add temporal range queries | TODO | | + +### Phase 3: Lineage Graph + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| LEDGER-010 | Define lineage relationship types | TODO | | +| LEDGER-011 | Implement parent/child tracking | TODO | | +| LEDGER-012 | Add build relationship links | TODO | | +| LEDGER-013 | Create lineage query API | TODO | | + +### Phase 4: Change Detection + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| LEDGER-014 | Implement component diff algorithm | TODO | | +| LEDGER-015 | Detect version changes | TODO | | +| 
LEDGER-016 | Detect license changes | TODO | | +| LEDGER-017 | Generate change summary | TODO | | + +### Phase 5: Retention + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| LEDGER-018 | Add retention policy configuration | TODO | | +| LEDGER-019 | Implement archive job | TODO | | +| LEDGER-020 | Preserve audit log entries | TODO | | + +--- + +## Acceptance Criteria + +1. **AC1**: SBOM versions are chained by artifact +2. **AC2**: Can query SBOM at any historical point +3. **AC3**: Diff shows component changes between versions +4. **AC4**: Lineage relationships are queryable +5. **AC5**: Retention policy enforced + +--- + +## Technical Notes + +### Version Chain Model +```csharp +public sealed record SbomVersionChain +{ + public required Guid ChainId { get; init; } + public required string ArtifactIdentity { get; init; } // PURL or image ref + public required IReadOnlyList<SbomVersionEntry> Versions { get; init; } +} + +public sealed record SbomVersionEntry +{ + public required Guid VersionId { get; init; } + public required int SequenceNumber { get; init; } + public required string ContentDigest { get; init; } + public required DateTimeOffset CreatedAt { get; init; } + public required string Source { get; init; } // scanner, import, etc. + public Guid? 
ParentVersionId { get; init; } // For lineage +} +``` + +### Diff Response +```json +{ + "beforeVersion": "v1.2.3", + "afterVersion": "v1.2.4", + "changes": { + "added": [{"purl": "pkg:npm/new-dep@1.0.0", "license": "MIT"}], + "removed": [{"purl": "pkg:npm/old-dep@0.9.0"}], + "upgraded": [{"purl": "pkg:npm/lodash", "from": "4.17.20", "to": "4.17.21"}], + "licenseChanged": [] + }, + "summary": { + "addedCount": 1, + "removedCount": 1, + "upgradedCount": 1 + } +} +``` + +--- + +## Documentation Updates + +- [ ] Update `docs/modules/sbomservice/architecture.md` +- [ ] Add SBOM lineage guide +- [ ] Document retention policies diff --git a/docs/implplan/SPRINT_4600_0001_0002_byos_ingestion.md b/docs/implplan/SPRINT_4600_0001_0002_byos_ingestion.md new file mode 100644 index 000000000..62ae86e27 --- /dev/null +++ b/docs/implplan/SPRINT_4600_0001_0002_byos_ingestion.md @@ -0,0 +1,136 @@ +# SPRINT_4600_0001_0002: BYOS Ingestion Workflow + +## Sprint Metadata + +| Field | Value | +|-------|-------| +| **Sprint ID** | 4600_0001_0002 | +| **Title** | BYOS (Bring Your Own SBOM) Ingestion Workflow | +| **Priority** | P2 (Medium) | +| **Moat Strength** | 3 (Moderate moat) | +| **Working Directory** | `src/SbomService/`, `src/Scanner/`, `src/Cli/` | +| **Estimated Effort** | 2 weeks | +| **Dependencies** | SPRINT_4600_0001_0001, SbomService (exists) | + +--- + +## Objective + +Enable customers to bring their own SBOMs (from Syft, SPDX tools, CycloneDX generators, etc.) and have them processed through StellaOps vulnerability correlation, VEX decisioning, and policy evaluation. + +**Strategy**: SBOM generation is table stakes. Value comes from what you do with SBOMs. + +--- + +## Background + +Competitors like Anchore explicitly position "Bring Your Own SBOM" as a feature. StellaOps should: +1. Accept external SBOMs +2. Validate and normalize them +3. Run full analysis pipeline +4. 
Produce verdicts + +--- + +## Deliverables + +### D1: SBOM Upload API +- REST endpoint for SBOM submission +- Support: SPDX 2.3, SPDX 3.0, CycloneDX 1.4-1.6 +- Validation and normalization + +### D2: SBOM Validation Pipeline +- Schema validation +- Completeness checks +- Quality scoring + +### D3: CLI Upload Command +- `stella sbom upload --file=sbom.json --artifact=<image-ref>` +- Progress and validation feedback + +### D4: Analysis Triggering +- Trigger vulnerability correlation on upload +- Trigger VEX application +- Trigger policy evaluation + +### D5: Provenance Tracking +- Record SBOM source (tool, version) +- Track upload metadata +- Link to external CI/CD context + +--- + +## Tasks + +| ID | Task | Status | Assignee | +|----|------|--------|----------| +| BYOS-001 | Create SBOM upload API endpoint | TODO | | +| BYOS-002 | Implement format detection (SPDX/CycloneDX) | TODO | | +| BYOS-003 | Add schema validation per format | TODO | | +| BYOS-004 | Implement normalization to internal model | TODO | | +| BYOS-005 | Create quality scoring algorithm | TODO | | +| BYOS-006 | Trigger analysis pipeline on upload | TODO | | +| BYOS-007 | Add `stella sbom upload` CLI | TODO | | +| BYOS-008 | Track SBOM provenance metadata | TODO | | +| BYOS-009 | Link to artifact identity | TODO | | +| BYOS-010 | Integration tests with Syft/CycloneDX outputs | TODO | | + +--- + +## Acceptance Criteria + +1. **AC1**: Can upload SPDX 2.3 and 3.0 SBOMs +2. **AC2**: Can upload CycloneDX 1.4-1.6 SBOMs +3. **AC3**: Invalid SBOMs are rejected with clear errors +4. **AC4**: Uploaded SBOM triggers full analysis +5. **AC5**: Provenance is tracked and queryable + +--- + +## Technical Notes + +### Upload API +```http +POST /api/v1/sbom/upload +Content-Type: application/json + +{ + "artifactRef": "my-app:v1.2.3", + "sbom": { ... 
}, // Or base64 encoded + "format": "cyclonedx", // Auto-detected if omitted + "source": { + "tool": "syft", + "version": "1.0.0", + "ciContext": { + "buildId": "123", + "repository": "github.com/org/repo" + } + } +} + +Response: +{ + "sbomId": "uuid", + "validationResult": { + "valid": true, + "qualityScore": 0.85, + "warnings": ["Missing supplier information for 3 components"] + }, + "analysisJobId": "uuid" +} +``` + +### Quality Score Factors +- Component completeness (PURL, version, license) +- Relationship coverage +- Hash/checksum presence +- Supplier information +- External reference quality + +--- + +## Documentation Updates + +- [ ] Add BYOS integration guide +- [ ] Document supported formats +- [ ] Create troubleshooting guide for validation errors diff --git a/docs/implplan/SPRINT_4600_SUMMARY.md b/docs/implplan/SPRINT_4600_SUMMARY.md new file mode 100644 index 000000000..be02cda31 --- /dev/null +++ b/docs/implplan/SPRINT_4600_SUMMARY.md @@ -0,0 +1,57 @@ +# SPRINT_4600 SUMMARY: SBOM Lineage & BYOS Ingestion + +## Program Overview + +| Field | Value | +|-------|-------| +| **Program ID** | 4600 | +| **Theme** | SBOM Operations: Historical Tracking, Lineage, and Ingestion | +| **Priority** | P2 (Medium) | +| **Total Effort** | ~5 weeks | +| **Advisory Source** | 19-Dec-2025 - Stella Ops candidate features mapped to moat strength | + +--- + +## Strategic Context + +SBOM storage is becoming table stakes. Differentiation comes from: +1. **Lineage ledger** — Historical tracking with semantic diff +2. 
**BYOS ingestion** — Accept external SBOMs into the analysis pipeline + +--- + +## Sprint Breakdown + +| Sprint ID | Title | Effort | Moat | +|-----------|-------|--------|------| +| 4600_0001_0001 | SBOM Lineage Ledger | 3 weeks | 3 | +| 4600_0001_0002 | BYOS Ingestion Workflow | 2 weeks | 3 | + +--- + +## Dependencies + +- **Requires**: SbomService (exists) +- **Requires**: Graph module (exists) +- **Requires**: SPRINT_4600_0001_0001 for BYOS + +--- + +## Outcomes + +1. SBOM versions are chained by artifact identity +2. Historical queries and diffs are available +3. External SBOMs can be uploaded and analyzed +4. Lineage relationships are queryable + +--- + +## Moat Strategy + +> "Make the ledger valuable via **semantic diff, evidence joins, and provenance** rather than storage." + +--- + +**Sprint Series Status:** TODO + +**Created:** 2025-12-22 diff --git a/docs/implplan/SPRINT_6000_0002_0003_version_comparator_integration.md b/docs/implplan/SPRINT_6000_0002_0003_version_comparator_integration.md new file mode 100644 index 000000000..779ed558a --- /dev/null +++ b/docs/implplan/SPRINT_6000_0002_0003_version_comparator_integration.md @@ -0,0 +1,256 @@ +# Sprint 6000.0002.0003 · Version Comparator Integration + +## Topic & Scope + +- Extract existing version comparators from Concelier to shared library. +- Add proof-line generation for UX explainability. +- Reference shared library from BinaryIndex.FixIndex. 
+- **Working directory:** `src/__Libraries/StellaOps.VersionComparison/` + +## Advisory Reference + +- **Source:** `docs/product-advisories/archived/22-Dec-2025 - Getting Distro Backport Logic Right.md` +- **Related Sprints:** + - SPRINT_2000_0003_0001 (Alpine connector adds `ApkVersionComparer`) + - SPRINT_4000_0002_0001 (UI consumes proof lines) + +## Dependencies & Concurrency + +- **Upstream**: None (refactoring existing code) +- **Downstream**: SPRINT_6000_0002_0002 (Fix Index Builder), SPRINT_4000_0002_0001 (Backport UX) +- **Safe to parallelize with**: SPRINT_2000_0003_0001 + +## Documentation Prerequisites + +- `src/Concelier/__Libraries/StellaOps.Concelier.Merge/Comparers/Nevra.cs` +- `src/Concelier/__Libraries/StellaOps.Concelier.Merge/Comparers/DebianEvr.cs` +- `docs/product-advisories/archived/22-Dec-2025 - Getting Distro Backport Logic Right.md` + +--- + +## Tasks + +### T1: Create StellaOps.VersionComparison Project + +**Assignee**: Platform Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: — + +**Description**: +Create the shared library project for version comparison. 
+ +**Implementation Path**: `src/__Libraries/StellaOps.VersionComparison/` + +**Project Structure**: +``` +StellaOps.VersionComparison/ +├── StellaOps.VersionComparison.csproj +├── IVersionComparator.cs +├── VersionComparisonResult.cs +├── Comparers/ +│ ├── RpmVersionComparer.cs +│ ├── DebianVersionComparer.cs +│ └── ApkVersionComparer.cs +├── Models/ +│ ├── RpmVersion.cs +│ ├── DebianVersion.cs +│ └── ApkVersion.cs +└── Extensions/ + └── ServiceCollectionExtensions.cs +``` + +**Acceptance Criteria**: +- [ ] Project created with .NET 10 target +- [ ] No external dependencies except System.Collections.Immutable +- [ ] XML documentation enabled + +--- + +### T2: Create IVersionComparator Interface with Proof Support + +**Assignee**: Platform Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Define the interface for version comparison with proof-line generation. + +**Implementation Path**: `src/__Libraries/StellaOps.VersionComparison/IVersionComparator.cs` + +**Acceptance Criteria**: +- [ ] Interface supports both simple Compare and CompareWithProof +- [ ] VersionComparisonResult includes proof lines +- [ ] ComparatorType enum for identification + +--- + +### T3: Extract and Enhance RpmVersionComparer + +**Assignee**: Platform Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: T1, T2 + +**Description**: +Extract RPM version comparison logic from Concelier and add proof-line generation. 
+ +**Implementation Path**: `src/__Libraries/StellaOps.VersionComparison/Comparers/RpmVersionComparer.cs` + +**Acceptance Criteria**: +- [ ] Full rpmvercmp semantics preserved +- [ ] Proof lines generated for each comparison step +- [ ] RpmVersion model for parsed versions +- [ ] Epoch, version, release handled correctly +- [ ] Tilde pre-release handling with proofs + +--- + +### T4: Extract and Enhance DebianVersionComparer + +**Assignee**: Platform Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: T1, T2 + +**Description**: +Extract Debian version comparison logic from Concelier and add proof-line generation. + +**Implementation Path**: `src/__Libraries/StellaOps.VersionComparison/Comparers/DebianVersionComparer.cs` + +**Acceptance Criteria**: +- [ ] Full dpkg semantics preserved +- [ ] Proof lines generated for each comparison step +- [ ] DebianVersion model for parsed versions +- [ ] Epoch, upstream, revision handled correctly +- [ ] Tilde pre-release handling with proofs + +--- + +### T5: Update Concelier to Reference Shared Library + +**Assignee**: Concelier Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T3, T4 + +**Description**: +Update Concelier.Merge to reference the shared library and deprecate local comparers. + +**Implementation Path**: `src/Concelier/__Libraries/StellaOps.Concelier.Merge/` + +**Changes**: +1. Add project reference to StellaOps.VersionComparison +2. Mark existing comparers as obsolete with pointer to shared library +3. Create thin wrappers for backward compatibility +4. 
Update tests to use shared library + +**Acceptance Criteria**: +- [ ] Project reference added +- [ ] Existing code paths still work (backward compatible) +- [ ] Obsolete attributes on old comparers +- [ ] All tests pass + +--- + +### T6: Add Reference from BinaryIndex.FixIndex + +**Assignee**: BinaryIndex Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T3, T4 + +**Description**: +Reference the shared version comparison library from BinaryIndex.FixIndex. + +**Implementation Path**: `src/BinaryIndex/__Libraries/StellaOps.BinaryIndex.FixIndex/` + +**Acceptance Criteria**: +- [ ] Project reference added +- [ ] FixIndex uses shared comparers +- [ ] Proof lines available for evidence recording + +--- + +### T7: Unit Tests for Proof-Line Generation + +**Assignee**: Platform Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T3, T4 + +**Description**: +Create comprehensive tests for proof-line generation. + +**Implementation Path**: `src/__Libraries/__Tests/StellaOps.VersionComparison.Tests/` + +**Test Cases**: +- [ ] RPM epoch comparison proofs +- [ ] RPM tilde pre-release proofs +- [ ] RPM release qualifier proofs +- [ ] Debian epoch comparison proofs +- [ ] Debian revision comparison proofs +- [ ] Debian tilde pre-release proofs + +**Acceptance Criteria**: +- [ ] All proof-line formats validated +- [ ] Human-readable output verified +- [ ] Edge cases covered + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Platform Team | Create StellaOps.VersionComparison Project | +| 2 | T2 | TODO | T1 | Platform Team | Create IVersionComparator Interface | +| 3 | T3 | TODO | T1, T2 | Platform Team | Extract and Enhance RpmVersionComparer | +| 4 | T4 | TODO | T1, T2 | Platform Team | Extract and Enhance DebianVersionComparer | +| 5 | T5 | TODO | T3, T4 | Concelier Team | Update Concelier to Reference Shared Library | +| 6 
| T6 | TODO | T3, T4 | BinaryIndex Team | Add Reference from BinaryIndex.FixIndex | +| 7 | T7 | TODO | T3, T4 | Platform Team | Unit Tests for Proof-Line Generation | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created. Scope changed from "implement comparators" to "extract existing + add proof generation" based on advisory gap analysis. | Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Extract vs wrap | Decision | Platform Team | Extract to shared lib, mark old as obsolete, thin wrappers for compat | +| Proof line format | Decision | Platform Team | Human-readable English, suitable for UI display | +| Backward compatibility | Decision | Platform Team | Concelier existing code paths must continue working | + +--- + +## Success Criteria + +- [ ] All 7 tasks marked DONE +- [ ] Shared library created and referenced +- [ ] Proof-line generation working for RPM and Debian +- [ ] Concelier backward compatible +- [ ] BinaryIndex.FixIndex using shared library +- [ ] `dotnet build` succeeds +- [ ] `dotnet test` succeeds with 100% pass rate + +--- + +## References + +- Advisory: `docs/product-advisories/archived/22-Dec-2025 - Getting Distro Backport Logic Right.md` +- Existing comparers: `src/Concelier/__Libraries/StellaOps.Concelier.Merge/Comparers/` +- SPRINT_6000_SUMMARY.md (notes on this sprint) + +--- + +*Document Version: 1.0.0* +*Created: 2025-12-22* diff --git a/docs/implplan/SPRINT_6000_SUMMARY.md b/docs/implplan/SPRINT_6000_SUMMARY.md index 5e7768cf2..8a4a80427 100644 --- a/docs/implplan/SPRINT_6000_SUMMARY.md +++ b/docs/implplan/SPRINT_6000_SUMMARY.md @@ -33,11 +33,26 @@ The 6000 series implements the **BinaryIndex** module - a vulnerable binaries da |--------|-------|-------------| | 6000.0002.0001 | Fix Evidence Parser | Changelog and patch header parsing | | 6000.0002.0002 | Fix Index Builder | Merge evidence into fix index | 
-| 6000.0002.0003 | Version Comparators | Distro-specific version comparison | +| 6000.0002.0003 | Version Comparator Integration | **Reference existing Concelier comparators** (see note below) | | 6000.0002.0004 | RPM Corpus Connector | RHEL/Fedora package ingestion | **Acceptance:** For a CVE that upstream marks vulnerable, correctly identify distro backport as fixed. +> **Note (2025-12-22):** Sprint 6000.0002.0003 originally planned to implement distro-specific version comparators. However, production-ready comparators already exist in Concelier: +> - `src/Concelier/__Libraries/StellaOps.Concelier.Merge/Comparers/Nevra.cs` (RPM) +> - `src/Concelier/__Libraries/StellaOps.Concelier.Merge/Comparers/DebianEvr.cs` (Debian/Ubuntu) +> - `src/Concelier/__Libraries/StellaOps.Concelier.Merge/Comparers/ApkVersion.cs` (Alpine, via SPRINT_2000_0003_0001) +> +> This sprint should instead: +> 1. Create a shared `StellaOps.VersionComparison` library extracting existing comparators +> 2. Reference this library from BinaryIndex.FixIndex +> 3. 
Add proof-line generation per SPRINT_4000_0002_0001 +> +> See also: +> - SPRINT_2000_0003_0001 (Alpine connector/comparator) +> - SPRINT_2000_0003_0002 (Comprehensive version tests) +> - SPRINT_4000_0002_0001 (Backport UX explainability) + --- ### MVP 3: Binary Fingerprint Factory (Sprint 6000.0003) diff --git a/docs/implplan/SPRINT_7000_0001_0001_competitive_benchmarking.md b/docs/implplan/SPRINT_7000_0001_0001_competitive_benchmarking.md new file mode 100644 index 000000000..2a4ddd8e0 --- /dev/null +++ b/docs/implplan/SPRINT_7000_0001_0001_competitive_benchmarking.md @@ -0,0 +1,265 @@ +# SPRINT_7000_0001_0001 - Competitive Benchmarking Infrastructure + +## Sprint Metadata + +| Field | Value | +|-------|-------| +| **Sprint ID** | 7000.0001.0001 | +| **Topic** | Competitive Benchmarking Infrastructure | +| **Duration** | 2 weeks | +| **Priority** | HIGH | +| **Status** | TODO | +| **Owner** | QA + Scanner Team | +| **Working Directory** | `src/Scanner/__Libraries/StellaOps.Scanner.Benchmark/` | + +--- + +## Objective + +Establish infrastructure to validate and demonstrate Stella Ops' competitive advantages against Trivy, Grype, Syft, and other container scanners through verifiable benchmarks with ground-truth corpus. 
+ +--- + +## Prerequisites + +- [ ] Scanner module functional with SBOM generation +- [ ] Access to competitor CLI tools (Trivy, Grype, Syft) +- [ ] Docker environment for corpus image builds + +--- + +## Delivery Tracker + +| ID | Task | Status | Assignee | Notes | +|----|------|--------|----------|-------| +| 7000.0001.01 | Create reference corpus with ground-truth annotations (50+ images) | TODO | | | +| 7000.0001.02 | Build comparison harness: Trivy, Grype, Syft SBOM ingestion | TODO | | | +| 7000.0001.03 | Implement precision/recall/F1 metric calculator | TODO | | | +| 7000.0001.04 | Add findings diff analyzer (TP/FP/TN/FN classification) | TODO | | | +| 7000.0001.05 | Create claims index with evidence links | TODO | | | +| 7000.0001.06 | CI workflow: `benchmark-vs-competitors.yml` | TODO | | | +| 7000.0001.07 | Marketing battlecard generator from benchmark results | TODO | | | + +--- + +## Task Details + +### 7000.0001.01: Reference Corpus with Ground-Truth + +**Description**: Create a curated corpus of container images with manually verified vulnerability ground truth. 
+ +**Deliverables**: +- `bench/competitors/corpus/` directory structure +- 50+ images covering: + - Alpine, Debian, Ubuntu, RHEL base images + - Node.js, Python, Java, .NET application images + - Known CVE scenarios with verified exploitability + - False positive scenarios (backported fixes, unreachable code) +- Ground-truth manifest: `corpus-manifest.json` + ```json + { + "images": [ + { + "digest": "sha256:...", + "truePositives": ["CVE-2024-1234", "CVE-2024-5678"], + "falsePositives": ["CVE-2024-9999"], + "notes": "CVE-2024-9999 is backported in debian:bookworm" + } + ] + } + ``` + +**Acceptance Criteria**: +- [ ] 50+ images with ground-truth annotations +- [ ] Mix of base OS and application images +- [ ] Known FP scenarios documented +- [ ] Corpus reproducible from manifest + +--- + +### 7000.0001.02: Comparison Harness + +**Description**: Build harness to run competitor tools and normalize their output for comparison. + +**Deliverables**: +- `StellaOps.Scanner.Benchmark.Harness` namespace +- Adapters for: + - Trivy JSON output + - Grype JSON output + - Syft SBOM (CycloneDX/SPDX) +- Normalized finding model: `NormalizedFinding` +- Docker-based runner for competitor tools + +**Key Types**: +```csharp +public interface ICompetitorAdapter +{ + string ToolName { get; } + Task> ScanAsync(string imageRef, CancellationToken ct); +} + +public record NormalizedFinding( + string CveId, + string PackageName, + string PackageVersion, + string Severity, + string Source +); +``` + +**Acceptance Criteria**: +- [ ] Trivy adapter parses JSON output +- [ ] Grype adapter parses JSON output +- [ ] Syft SBOM ingestion works +- [ ] Normalized output is deterministic + +--- + +### 7000.0001.03: Precision/Recall/F1 Calculator + +**Description**: Implement metrics calculator comparing tool output against ground truth. 
+ +**Deliverables**: +- `StellaOps.Scanner.Benchmark.Metrics` namespace +- `BenchmarkMetrics` record: + ```csharp + public record BenchmarkMetrics( + int TruePositives, + int FalsePositives, + int TrueNegatives, + int FalseNegatives, + double Precision, + double Recall, + double F1Score + ); + ``` +- Per-tool and aggregate metrics +- Breakdown by severity, ecosystem, CVE age + +**Acceptance Criteria**: +- [ ] Metrics match manual verification +- [ ] Deterministic output +- [ ] CSV/JSON export + +--- + +### 7000.0001.04: Findings Diff Analyzer + +**Description**: Classify findings as TP/FP/TN/FN with detailed reasoning. + +**Deliverables**: +- `FindingClassification` enum: `TruePositive`, `FalsePositive`, `TrueNegative`, `FalseNegative` +- Classification report with reasoning +- Drill-down by: + - Package ecosystem + - CVE severity + - Tool + - Reason (backport, version mismatch, unreachable) + +**Acceptance Criteria**: +- [ ] Classification logic documented +- [ ] Edge cases handled (version ranges, backports) +- [ ] Report includes reasoning + +--- + +### 7000.0001.05: Claims Index + +**Description**: Create verifiable claims index linking marketing claims to benchmark evidence. + +**Deliverables**: +- `docs/claims-index.md` with structure: + ```markdown + | Claim ID | Claim | Evidence | Verification | + |----------|-------|----------|--------------| + | REACH-001 | "Stella Ops detects 15% more reachable vulns than Trivy" | bench/results/2025-12-22.json | `stella bench verify REACH-001` | + ``` +- `ClaimsIndex` model in code +- Automated claim verification + +**Acceptance Criteria**: +- [ ] 10+ initial claims documented +- [ ] Each claim links to evidence +- [ ] Verification command works + +--- + +### 7000.0001.06: CI Workflow + +**Description**: Gitea Actions workflow for automated competitor benchmarking. 
+ +**Deliverables**: +- `.gitea/workflows/benchmark-vs-competitors.yml` +- Triggers: weekly, manual, on benchmark code changes +- Outputs: + - Metrics JSON artifact + - Markdown summary + - Claims index update + +**Acceptance Criteria**: +- [ ] Workflow runs successfully +- [ ] Artifacts published +- [ ] No secrets exposed + +--- + +### 7000.0001.07: Marketing Battlecard Generator + +**Description**: Generate marketing-ready battlecard from benchmark results. + +**Deliverables**: +- Markdown battlecard template +- Auto-populated metrics +- Comparison tables +- Key differentiators section + +**Acceptance Criteria**: +- [ ] Battlecard generated from latest results +- [ ] Suitable for sales/marketing use +- [ ] Claims linked to evidence + +--- + +## Testing Requirements + +| Test Type | Location | Coverage | +|-----------|----------|----------| +| Unit tests | `StellaOps.Scanner.Benchmark.Tests` | Adapters, metrics calculator | +| Integration tests | `StellaOps.Scanner.Benchmark.Integration.Tests` | Full benchmark run | +| Golden fixtures | `bench/competitors/fixtures/` | Deterministic output verification | + +--- + +## Documentation Updates + +| Document | Update Required | +|----------|-----------------| +| `docs/claims-index.md` | CREATE - Claims with evidence links | +| `docs/modules/benchmark/architecture.md` | CREATE - Module dossier | +| `docs/testing/benchmark-guide.md` | CREATE - How to run benchmarks | + +--- + +## Decisions & Risks + +| ID | Decision/Risk | Status | Resolution | +|----|---------------|--------|------------| +| D1 | Which competitor tool versions to pin? | OPEN | | +| D2 | Corpus storage: Git LFS vs external? 
| OPEN | | +| R1 | Competitor tool output format changes | OPEN | Version pinning + adapter versioning | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from advisory gap analysis | Agent | + +--- + +## Required Reading + +- `docs/07_HIGH_LEVEL_ARCHITECTURE.md` +- `docs/modules/scanner/architecture.md` +- `docs/product-advisories/archived/*/19-Dec-2025 - Benchmarking Container Scanners Against Stella Ops.md` diff --git a/docs/implplan/SPRINT_7000_0001_0002_sbom_lineage.md b/docs/implplan/SPRINT_7000_0001_0002_sbom_lineage.md new file mode 100644 index 000000000..3bda1224f --- /dev/null +++ b/docs/implplan/SPRINT_7000_0001_0002_sbom_lineage.md @@ -0,0 +1,281 @@ +# SPRINT_7000_0001_0002 - SBOM Lineage & Repository Semantics + +## Sprint Metadata + +| Field | Value | +|-------|-------| +| **Sprint ID** | 7000.0001.0002 | +| **Topic** | SBOM Lineage & Repository Semantics | +| **Duration** | 2 weeks | +| **Priority** | HIGH | +| **Status** | TODO | +| **Owner** | Scanner Team | +| **Working Directory** | `src/Scanner/__Libraries/StellaOps.Scanner.Emit/` | + +--- + +## Objective + +Transform SBOM from static document artifact into a stateful ledger with lineage tracking, versioning, semantic diffing, and rebuild reproducibility proofs. This addresses the advisory gap: "SBOM must become a stateful ledger, not a document." 
+ +--- + +## Prerequisites + +- [ ] Sprint 7000.0001.0001 (Benchmarking) complete or in progress +- [ ] `StellaOps.Scanner.Emit` CycloneDX/SPDX generation functional +- [ ] Database schema for scanner module accessible + +--- + +## Delivery Tracker + +| ID | Task | Status | Assignee | Notes | +|----|------|--------|----------|-------| +| 7000.0002.01 | Design SBOM lineage model (parent refs, diff pointers) | TODO | | | +| 7000.0002.02 | Add `sbom_lineage` table to scanner schema | TODO | | | +| 7000.0002.03 | Implement SBOM versioning with content-addressable storage | TODO | | | +| 7000.0002.04 | Build SBOM semantic diff engine (component-level deltas) | TODO | | | +| 7000.0002.05 | Add rebuild reproducibility proof manifest | TODO | | | +| 7000.0002.06 | API: `GET /sboms/{id}/lineage`, `GET /sboms/diff` | TODO | | | +| 7000.0002.07 | Tests: lineage traversal, diff determinism | TODO | | | + +--- + +## Task Details + +### 7000.0002.01: SBOM Lineage Model Design + +**Description**: Design the data model for tracking SBOM evolution across image versions. + +**Deliverables**: +- `SbomLineage` domain model: + ```csharp + public record SbomLineage( + SbomId Id, + SbomId? ParentId, + string ImageDigest, + string ContentHash, // SHA-256 of canonical SBOM + DateTimeOffset CreatedAt, + ImmutableArray Ancestors, + SbomDiffPointer? DiffFromParent + ); + + public record SbomDiffPointer( + int ComponentsAdded, + int ComponentsRemoved, + int ComponentsModified, + string DiffHash // Hash of diff document + ); + ``` +- Lineage DAG specification +- Content-addressable ID scheme + +**Acceptance Criteria**: +- [ ] Model supports DAG (merge scenarios) +- [ ] Content hash is deterministic +- [ ] Diff pointer enables lazy loading + +--- + +### 7000.0002.02: Database Schema + +**Description**: Add PostgreSQL schema for SBOM lineage tracking. 
+ +**Deliverables**: +- Migration: `scanner.sbom_lineage` table + ```sql + CREATE TABLE scanner.sbom_lineage ( + id UUID PRIMARY KEY, + parent_id UUID REFERENCES scanner.sbom_lineage(id), + image_digest TEXT NOT NULL, + content_hash TEXT NOT NULL UNIQUE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + diff_components_added INT, + diff_components_removed INT, + diff_components_modified INT, + diff_hash TEXT + ); + + CREATE INDEX idx_sbom_lineage_image ON scanner.sbom_lineage(image_digest); + CREATE INDEX idx_sbom_lineage_parent ON scanner.sbom_lineage(parent_id); + ``` +- Index for lineage traversal +- Constraints for referential integrity + +**Acceptance Criteria**: +- [ ] Migration applies cleanly +- [ ] Indexes support efficient traversal +- [ ] FK constraints enforced + +--- + +### 7000.0002.03: Content-Addressable Storage + +**Description**: Implement content-addressable storage for SBOMs with deduplication. + +**Deliverables**: +- `ISbomStore` interface: + ```csharp + public interface ISbomStore + { + Task StoreAsync(Sbom sbom, SbomId? parentId, CancellationToken ct); + Task GetByHashAsync(string contentHash, CancellationToken ct); + Task GetByIdAsync(SbomId id, CancellationToken ct); + Task> GetLineageAsync(SbomId id, CancellationToken ct); + } + ``` +- Canonical serialization for consistent hashing +- Deduplication on content hash + +**Acceptance Criteria**: +- [ ] Identical SBOMs produce identical hashes +- [ ] Deduplication works +- [ ] Lineage query efficient (< 100ms for 100 ancestors) + +--- + +### 7000.0002.04: Semantic Diff Engine + +**Description**: Build component-level diff engine that understands SBOM semantics. + +**Deliverables**: +- `SbomDiff` model: + ```csharp + public record SbomDiff( + SbomId FromId, + SbomId ToId, + ImmutableArray Deltas, + DiffSummary Summary + ); + + public record ComponentDelta( + ComponentDeltaType Type, // Added, Removed, VersionChanged, LicenseChanged + ComponentRef? Before, + ComponentRef? 
After, + ImmutableArray ChangedFields + ); + + public enum ComponentDeltaType { Added, Removed, VersionChanged, LicenseChanged, DependencyChanged } + ``` +- Diff algorithm preserving component identity across versions +- Deterministic diff output (sorted, stable) + +**Acceptance Criteria**: +- [ ] Detects version upgrades/downgrades +- [ ] Detects dependency changes +- [ ] Output is deterministic +- [ ] Handles component renames (via PURL matching) + +--- + +### 7000.0002.05: Rebuild Reproducibility Proof + +**Description**: Generate proof manifest that enables reproducible SBOM generation. + +**Deliverables**: +- `RebuildProof` model: + ```csharp + public record RebuildProof( + SbomId SbomId, + string ImageDigest, + string StellaOpsVersion, + ImmutableArray FeedSnapshots, + ImmutableArray AnalyzerVersions, + string PolicyHash, + DateTimeOffset GeneratedAt + ); + + public record FeedSnapshot( + string FeedId, + string SnapshotHash, + DateTimeOffset AsOf + ); + ``` +- Proof attestation (DSSE-signed) +- Replay verification command + +**Acceptance Criteria**: +- [ ] Proof captures all inputs +- [ ] DSSE-signed +- [ ] Replay produces identical SBOM + +--- + +### 7000.0002.06: Lineage API + +**Description**: HTTP API for querying SBOM lineage and diffs. + +**Deliverables**: +- `GET /api/v1/sboms/{id}/lineage` - Returns lineage DAG +- `GET /api/v1/sboms/diff?from={id}&to={id}` - Returns semantic diff +- `POST /api/v1/sboms/{id}/verify-rebuild` - Verifies rebuild reproducibility +- OpenAPI spec updates + +**Acceptance Criteria**: +- [ ] Lineage returns full ancestor chain +- [ ] Diff is deterministic +- [ ] Verify-rebuild confirms reproducibility + +--- + +### 7000.0002.07: Tests + +**Description**: Comprehensive tests for lineage and diff functionality. 
+ +**Deliverables**: +- Unit tests: `SbomLineageTests`, `SbomDiffEngineTests` +- Integration tests: `SbomLineageApiTests` +- Golden fixtures: deterministic diff output +- Property-based tests: diff(A, B) + diff(B, C) = diff(A, C) + +**Acceptance Criteria**: +- [ ] 85%+ code coverage +- [ ] Golden fixtures pass +- [ ] Property tests pass + +--- + +## Testing Requirements + +| Test Type | Location | Coverage | +|-----------|----------|----------| +| Unit tests | `StellaOps.Scanner.Emit.Tests/Lineage/` | Models, diff engine | +| Integration tests | `StellaOps.Scanner.WebService.Tests/Lineage/` | API endpoints | +| Golden fixtures | `src/Scanner/__Tests/Fixtures/Lineage/` | Deterministic output | + +--- + +## Documentation Updates + +| Document | Update Required | +|----------|-----------------| +| `docs/api/sbom-lineage-api.md` | CREATE - Lineage API reference | +| `docs/db/schemas/scanner_schema_specification.md` | UPDATE - Add sbom_lineage table | +| `docs/modules/scanner/architecture.md` | UPDATE - Lineage section | + +--- + +## Decisions & Risks + +| ID | Decision/Risk | Status | Resolution | +|----|---------------|--------|------------| +| D1 | How to handle SBOM format changes across versions? | OPEN | | +| D2 | Max lineage depth to store? 
| OPEN | Propose: 1000 | +| R1 | Storage growth with lineage tracking | OPEN | Content deduplication mitigates | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from advisory gap analysis | Agent | + +--- + +## Required Reading + +- `docs/modules/scanner/architecture.md` +- `src/Scanner/__Libraries/StellaOps.Scanner.Emit/AGENTS.md` +- CycloneDX specification (lineage support) diff --git a/docs/implplan/SPRINT_7000_0001_0003_explainability.md b/docs/implplan/SPRINT_7000_0001_0003_explainability.md new file mode 100644 index 000000000..d7800cf9d --- /dev/null +++ b/docs/implplan/SPRINT_7000_0001_0003_explainability.md @@ -0,0 +1,325 @@ +# SPRINT_7000_0001_0003 - Explainability with Assumptions & Falsifiability + +## Sprint Metadata + +| Field | Value | +|-------|-------| +| **Sprint ID** | 7000.0001.0003 | +| **Topic** | Explainability with Assumptions & Falsifiability | +| **Duration** | 2 weeks | +| **Priority** | HIGH | +| **Status** | TODO | +| **Owner** | Scanner Team + Policy Team | +| **Working Directory** | `src/Scanner/__Libraries/StellaOps.Scanner.Explainability/`, `src/Policy/__Libraries/StellaOps.Policy.Explainability/` | + +--- + +## Objective + +Implement auditor-grade explainability that answers four non-negotiable questions for every finding: +1. What exact evidence triggered this finding? +2. What code or binary path makes it reachable? +3. What assumptions are being made? +4. **What would falsify this conclusion?** + +This addresses the advisory gap: "No existing scanner answers #4." 
+ +--- + +## Prerequisites + +- [ ] Sprint 3500 (Score Proofs) complete +- [ ] `StellaOps.Scanner.EntryTrace.Risk` module available +- [ ] DSSE predicate schemas accessible + +--- + +## Delivery Tracker + +| ID | Task | Status | Assignee | Notes | +|----|------|--------|----------|-------| +| 7000.0003.01 | Design assumption-set model (compiler flags, runtime config, feature gates) | TODO | | | +| 7000.0003.02 | Implement `AssumptionSet` record in findings | TODO | | | +| 7000.0003.03 | Design falsifiability criteria model | TODO | | | +| 7000.0003.04 | Add "what would disprove this?" to `RiskExplainer` output | TODO | | | +| 7000.0003.05 | Implement evidence-density confidence scorer | TODO | | | +| 7000.0003.06 | Add assumption-set to DSSE predicate schema | TODO | | | +| 7000.0003.07 | UI: Explainability widget with assumption drill-down | TODO | | | + +--- + +## Task Details + +### 7000.0003.01: Assumption-Set Model Design + +**Description**: Design the data model for tracking assumptions made during analysis. + +**Deliverables**: +- `Assumption` domain model: + ```csharp + public record Assumption( + AssumptionCategory Category, + string Key, + string AssumedValue, + string? 
ObservedValue, + AssumptionSource Source, + ConfidenceLevel Confidence + ); + + public enum AssumptionCategory + { + CompilerFlag, // -fstack-protector, -D_FORTIFY_SOURCE + RuntimeConfig, // Environment variables, config files + FeatureGate, // Feature flags, build variants + LoaderBehavior, // LD_PRELOAD, RPATH, symbol versioning + NetworkExposure, // Port bindings, firewall rules + ProcessPrivilege // Capabilities, seccomp, AppArmor + } + + public enum AssumptionSource { Static, Dynamic, Inferred, Default } + ``` +- `AssumptionSet` aggregate: + ```csharp + public record AssumptionSet( + ImmutableArray Assumptions, + int TotalCount, + int VerifiedCount, + int InferredCount, + double AssumptionRisk // Higher = more unverified assumptions + ); + ``` + +**Acceptance Criteria**: +- [ ] All assumption categories covered +- [ ] Confidence levels defined +- [ ] Risk score derivable from assumptions + +--- + +### 7000.0003.02: AssumptionSet in Findings + +**Description**: Integrate assumption tracking into finding records. + +**Deliverables**: +- Update `VulnerabilityFinding` to include `AssumptionSet` +- Assumption collector during scan: + ```csharp + public interface IAssumptionCollector + { + void RecordAssumption(Assumption assumption); + AssumptionSet Build(); + } + ``` +- Wire into Scanner Worker pipeline + +**Acceptance Criteria**: +- [ ] Every finding has AssumptionSet +- [ ] Assumptions collected during analysis +- [ ] Deterministic ordering + +--- + +### 7000.0003.03: Falsifiability Criteria Model + +**Description**: Design model for expressing what would disprove a finding. + +**Deliverables**: +- `FalsifiabilityCriteria` model: + ```csharp + public record FalsifiabilityCriteria( + ImmutableArray Conditions, + string HumanReadable + ); + + public record FalsificationCondition( + FalsificationCategory Category, + string Description, + string? VerificationCommand, // CLI command to verify + string? 
VerificationQuery // API query to verify + ); + + public enum FalsificationCategory + { + CodeRemoved, // "Vulnerable function call removed" + PackageUpgraded, // "Package upgraded past fix version" + ConfigDisabled, // "Vulnerable feature disabled via config" + PathUnreachable, // "Call path no longer reachable from entrypoint" + RuntimeGuarded, // "Runtime check prevents exploitation" + SymbolUnresolved // "Vulnerable symbol not linked" + } + ``` +- Falsifiability generator per finding type + +**Acceptance Criteria**: +- [ ] Every finding has falsifiability criteria +- [ ] Human-readable description +- [ ] Verification command where applicable + +--- + +### 7000.0003.04: RiskExplainer Enhancement + +**Description**: Extend `RiskExplainer` to output falsifiability and assumptions. + +**Deliverables**: +- Update `RiskReport` to include: + ```csharp + public record RiskReport( + RiskAssessment Assessment, + string Explanation, + ImmutableArray Recommendations, + AssumptionSet Assumptions, // NEW + FalsifiabilityCriteria Falsifiability // NEW + ); + ``` +- Natural language generation for: + - "This finding assumes..." + - "To disprove this finding, verify that..." + +**Acceptance Criteria**: +- [ ] Explanation includes assumptions +- [ ] Explanation includes falsifiability +- [ ] Language is auditor-appropriate + +--- + +### 7000.0003.05: Evidence-Density Confidence Scorer + +**Description**: Implement confidence scoring based on evidence density, not CVSS. 
+ +**Deliverables**: +- `EvidenceDensityScorer`: + ```csharp + public interface IEvidenceDensityScorer + { + ConfidenceScore Score(EvidenceBundle evidence, AssumptionSet assumptions); + } + + public record ConfidenceScore( + double Value, // 0.0 - 1.0 + ConfidenceTier Tier, // Confirmed, High, Medium, Low, Speculative + ImmutableArray Factors // What contributed to score + ); + + public enum ConfidenceTier { Confirmed, High, Medium, Low, Speculative } + ``` +- Scoring factors: + - Evidence count + - Evidence diversity (static + dynamic + runtime) + - Assumption penalty (more unverified = lower confidence) + - Corroboration bonus (multiple sources agree) + +**Acceptance Criteria**: +- [ ] Confidence derived from evidence, not CVSS +- [ ] Deterministic scoring +- [ ] Factors explainable + +--- + +### 7000.0003.06: DSSE Predicate Schema Update + +**Description**: Add assumption-set and falsifiability to DSSE predicate. + +**Deliverables**: +- Schema: `stellaops.dev/predicates/finding@v2` + ```json + { + "$schema": "...", + "type": "object", + "properties": { + "finding": { "$ref": "#/definitions/Finding" }, + "assumptions": { + "type": "array", + "items": { "$ref": "#/definitions/Assumption" } + }, + "falsifiability": { + "type": "object", + "properties": { + "conditions": { "type": "array" }, + "humanReadable": { "type": "string" } + } + }, + "evidenceConfidence": { + "type": "object", + "properties": { + "value": { "type": "number" }, + "tier": { "type": "string" }, + "factors": { "type": "array" } + } + } + } + } + ``` +- Migration path from v1 predicates + +**Acceptance Criteria**: +- [ ] Schema validates +- [ ] Backward compatible +- [ ] Registered in predicate registry + +--- + +### 7000.0003.07: UI Explainability Widget + +**Description**: Angular component for assumption and falsifiability drill-down. 
+
+**Deliverables**:
+- `<explainability-panel>` Angular component
+- Tabs: Evidence | Assumptions | "How to Disprove"
+- Assumption table with confidence indicators
+- Falsifiability checklist with verification commands
+- Copy-to-clipboard for verification commands
+
+**Acceptance Criteria**:
+- [ ] Renders for all finding types
+- [ ] Assumptions sortable/filterable
+- [ ] Verification commands copyable
+- [ ] Accessible (WCAG 2.1 AA)
+
+---
+
+## Testing Requirements
+
+| Test Type | Location | Coverage |
+|-----------|----------|----------|
+| Unit tests | `StellaOps.Scanner.Explainability.Tests/` | Models, scorers |
+| Integration tests | `StellaOps.Scanner.WebService.Tests/Explainability/` | API endpoints |
+| UI tests | `src/Web/StellaOps.Web/tests/explainability/` | Component tests |
+| Golden fixtures | `src/Scanner/__Tests/Fixtures/Explainability/` | Deterministic output |
+
+---
+
+## Documentation Updates
+
+| Document | Update Required |
+|----------|-----------------|
+| `docs/explainability/assumption-model.md` | CREATE - Assumption-set design |
+| `docs/explainability/falsifiability.md` | CREATE - Falsifiability guide |
+| `docs/schemas/finding-predicate-v2.md` | CREATE - Schema documentation |
+| `docs/api/scanner-findings-api.md` | UPDATE - Explainability fields |
+
+---
+
+## Decisions & Risks
+
+| ID | Decision/Risk | Status | Resolution |
+|----|---------------|--------|------------|
+| D1 | How to handle assumptions for legacy findings? | OPEN | Propose: empty set with "legacy" flag |
+| D2 | Falsifiability verification commands: shell or API? 
| OPEN | Propose: both where applicable | +| R1 | Performance impact of assumption collection | OPEN | Profile and optimize | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from advisory gap analysis | Agent | + +--- + +## Required Reading + +- `src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace/Risk/AGENTS.md` +- `docs/modules/scanner/architecture.md` +- `docs/product-advisories/archived/*/19-Dec-2025 - Benchmarking Container Scanners Against Stella Ops.md` (Section 4: Explainability) diff --git a/docs/implplan/SPRINT_7000_0001_0004_three_layer_reachability.md b/docs/implplan/SPRINT_7000_0001_0004_three_layer_reachability.md new file mode 100644 index 000000000..bb25ba9b5 --- /dev/null +++ b/docs/implplan/SPRINT_7000_0001_0004_three_layer_reachability.md @@ -0,0 +1,367 @@ +# SPRINT_7000_0001_0004 - Three-Layer Reachability Integration + +## Sprint Metadata + +| Field | Value | +|-------|-------| +| **Sprint ID** | 7000.0001.0004 | +| **Topic** | Three-Layer Reachability Integration | +| **Duration** | 2 weeks | +| **Priority** | MEDIUM | +| **Status** | TODO | +| **Owner** | Scanner Team | +| **Working Directory** | `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/` | + +--- + +## Objective + +Integrate reachability analysis into a formal three-layer model where exploitability is proven only when ALL THREE layers align: +1. **Layer 1: Static Call Graph** - Vulnerable function reachable from entrypoint +2. **Layer 2: Binary Resolution** - Dynamic loader actually links the symbol +3. **Layer 3: Runtime Gating** - No feature flag/config/environment blocks execution + +This makes false positives "structurally impossible, not heuristically reduced." 
+ +--- + +## Prerequisites + +- [ ] Sprint 7000.0001.0002 (SBOM Lineage) complete or in progress +- [ ] Sprint 7000.0001.0003 (Explainability) complete or in progress +- [ ] `StellaOps.Scanner.EntryTrace` functional (semantic, binary, speculative) +- [ ] `StellaOps.Scanner.CallGraph` extractors functional + +--- + +## Delivery Tracker + +| ID | Task | Status | Assignee | Notes | +|----|------|--------|----------|-------| +| 7000.0004.01 | Formalize 3-layer model: `ReachabilityStack` | TODO | | | +| 7000.0004.02 | Layer 1: Wire existing static call-graph extractors | TODO | | | +| 7000.0004.03 | Layer 2: ELF/PE loader rule resolution | TODO | | | +| 7000.0004.04 | Layer 3: Feature flag / config gating detection | TODO | | | +| 7000.0004.05 | Composite evaluator: all-three-align = exploitable | TODO | | | +| 7000.0004.06 | Tests: 3-layer corpus with known reachability | TODO | | | +| 7000.0004.07 | API: `GET /reachability/{id}/stack` with layer breakdown | TODO | | | + +--- + +## Task Details + +### 7000.0004.01: Formalize ReachabilityStack Model + +**Description**: Design the composite model representing three-layer reachability. + +**Deliverables**: +- `ReachabilityStack` model: + ```csharp + public record ReachabilityStack( + ReachabilityLayer1 StaticCallGraph, + ReachabilityLayer2 BinaryResolution, + ReachabilityLayer3 RuntimeGating, + ReachabilityVerdict Verdict + ); + + public record ReachabilityLayer1( + bool IsReachable, + ImmutableArray Paths, + ImmutableArray Entrypoints, + ConfidenceLevel Confidence + ); + + public record ReachabilityLayer2( + bool IsResolved, + SymbolResolution? Resolution, + LoaderRule? 
AppliedRule, + ConfidenceLevel Confidence + ); + + public record ReachabilityLayer3( + bool IsGated, + ImmutableArray Conditions, + GatingOutcome Outcome, + ConfidenceLevel Confidence + ); + + public enum ReachabilityVerdict + { + Exploitable, // All 3 layers confirm + LikelyExploitable, // L1+L2 confirm, L3 unknown + PossiblyExploitable, // L1 confirms, L2+L3 unknown + Unreachable, // Any layer definitively blocks + Unknown // Insufficient data + } + ``` + +**Acceptance Criteria**: +- [ ] All three layers represented +- [ ] Verdict derivation logic defined +- [ ] Confidence propagation documented + +--- + +### 7000.0004.02: Layer 1 - Static Call Graph Integration + +**Description**: Wire existing call-graph extractors into Layer 1. + +**Deliverables**: +- `ILayer1Analyzer` interface: + ```csharp + public interface ILayer1Analyzer + { + Task AnalyzeAsync( + VulnerableSymbol symbol, + CallGraph graph, + ImmutableArray entrypoints, + CancellationToken ct + ); + } + ``` +- Integration with: + - `DotNetCallGraphExtractor` + - `NodeCallGraphExtractor` + - `JavaCallGraphExtractor` +- Path witness generation + +**Acceptance Criteria**: +- [ ] All existing extractors integrated +- [ ] Paths include method signatures +- [ ] Entrypoints correctly identified + +--- + +### 7000.0004.03: Layer 2 - Binary Loader Resolution + +**Description**: Implement dynamic loader rule resolution for ELF and PE binaries. + +**Deliverables**: +- `ILayer2Analyzer` interface: + ```csharp + public interface ILayer2Analyzer + { + Task AnalyzeAsync( + VulnerableSymbol symbol, + BinaryArtifact binary, + LoaderContext context, + CancellationToken ct + ); + } + + public record LoaderContext( + ImmutableArray LdLibraryPath, + ImmutableArray Rpath, + ImmutableArray RunPath, + bool HasLdPreload, + SymbolVersioning? Versioning + ); + ``` +- ELF resolution: + - NEEDED entries + - RPATH/RUNPATH handling + - Symbol versioning (GLIBC_2.17, etc.) 
+ - LD_PRELOAD detection +- PE resolution: + - Import table parsing + - Delay-load DLLs + - SxS manifests + +**Acceptance Criteria**: +- [ ] ELF loader rules implemented +- [ ] PE loader rules implemented +- [ ] Symbol versioning handled +- [ ] LD_PRELOAD/DLL injection detected + +--- + +### 7000.0004.04: Layer 3 - Runtime Gating Detection + +**Description**: Detect feature flags, configuration, and environment conditions that gate execution. + +**Deliverables**: +- `ILayer3Analyzer` interface: + ```csharp + public interface ILayer3Analyzer + { + Task AnalyzeAsync( + CallPath path, + RuntimeContext context, + CancellationToken ct + ); + } + + public record GatingCondition( + GatingType Type, + string Description, + string? ConfigKey, + string? EnvVar, + bool IsBlocking + ); + + public enum GatingType + { + FeatureFlag, // if (FeatureFlags.UseNewAuth) ... + EnvironmentVariable, // if (Environment.GetEnvironmentVariable("X") != null) ... + ConfigurationValue, // if (config["feature:enabled"] == "true") ... + CompileTimeConditional, // #if DEBUG + PlatformCheck, // if (RuntimeInformation.IsOSPlatform(...)) + CapabilityCheck // if (hasCapability(CAP_NET_ADMIN)) ... + } + ``` +- Integration with: + - `ShellSymbolicExecutor` (speculative execution) + - Static analysis for feature flag patterns + - Config file parsing + +**Acceptance Criteria**: +- [ ] Common feature flag patterns detected +- [ ] Environment variable checks detected +- [ ] Platform checks detected +- [ ] Gating blocks marked as blocking/non-blocking + +--- + +### 7000.0004.05: Composite Evaluator + +**Description**: Combine all three layers into final verdict. 
+ +**Deliverables**: +- `ReachabilityStackEvaluator`: + ```csharp + public class ReachabilityStackEvaluator + { + public ReachabilityStack Evaluate( + ReachabilityLayer1 layer1, + ReachabilityLayer2 layer2, + ReachabilityLayer3 layer3 + ) + { + var verdict = DeriveVerdict(layer1, layer2, layer3); + return new ReachabilityStack(layer1, layer2, layer3, verdict); + } + + private ReachabilityVerdict DeriveVerdict(...) + { + // All three confirm reachable = Exploitable + // Any one definitively blocks = Unreachable + // Partial confirmation = Likely/Possibly + // Insufficient data = Unknown + } + } + ``` +- Verdict derivation truth table +- Confidence aggregation + +**Acceptance Criteria**: +- [ ] Verdict logic documented as truth table +- [ ] Confidence properly aggregated +- [ ] Edge cases handled (unknown layers) + +--- + +### 7000.0004.06: 3-Layer Test Corpus + +**Description**: Create test corpus with known reachability across all three layers. + +**Deliverables**: +- `bench/reachability-3layer/` corpus: + - `exploitable/` - All 3 layers confirm + - `unreachable-l1/` - Static graph blocks + - `unreachable-l2/` - Loader blocks (symbol not linked) + - `unreachable-l3/` - Feature flag blocks + - `partial/` - Mixed confidence +- Ground-truth manifest +- Determinism verification + +**Acceptance Criteria**: +- [ ] 20+ test cases per category +- [ ] Ground truth verified manually +- [ ] Deterministic analysis results + +--- + +### 7000.0004.07: Reachability Stack API + +**Description**: HTTP API for querying three-layer reachability. 
+ +**Deliverables**: +- `GET /api/v1/reachability/{findingId}/stack` - Full 3-layer breakdown +- `GET /api/v1/reachability/{findingId}/stack/layer/{1|2|3}` - Single layer detail +- Response includes: + ```json + { + "verdict": "Exploitable", + "layer1": { + "isReachable": true, + "paths": [...], + "confidence": "High" + }, + "layer2": { + "isResolved": true, + "resolution": { "symbol": "EVP_DecryptUpdate", "library": "libcrypto.so.1.1" }, + "confidence": "Confirmed" + }, + "layer3": { + "isGated": false, + "conditions": [], + "confidence": "Medium" + } + } + ``` + +**Acceptance Criteria**: +- [ ] API returns all three layers +- [ ] Drill-down available +- [ ] OpenAPI spec updated + +--- + +## Testing Requirements + +| Test Type | Location | Coverage | +|-----------|----------|----------| +| Unit tests | `StellaOps.Scanner.Reachability.Tests/Stack/` | Models, evaluator | +| Integration tests | `StellaOps.Scanner.WebService.Tests/Reachability/` | API endpoints | +| Corpus tests | `StellaOps.Scanner.Reachability.CorpusTests/` | 3-layer corpus | +| Golden fixtures | `src/Scanner/__Tests/Fixtures/Reachability3Layer/` | Deterministic output | + +--- + +## Documentation Updates + +| Document | Update Required | +|----------|-----------------| +| `docs/reachability/three-layer-model.md` | CREATE - 3-layer architecture | +| `docs/reachability/verdict-truth-table.md` | CREATE - Verdict derivation | +| `docs/api/reachability-stack-api.md` | CREATE - API reference | +| `docs/modules/scanner/architecture.md` | UPDATE - Reachability section | + +--- + +## Decisions & Risks + +| ID | Decision/Risk | Status | Resolution | +|----|---------------|--------|------------| +| D1 | How to handle missing Layer 2/3 data? | OPEN | Propose: degrade to "Possibly" verdict | +| D2 | Layer 3 analysis scope (all configs or allowlist)? 
| OPEN | Propose: common patterns first |
+| R1 | Performance impact of full 3-layer analysis | OPEN | Profile, cache layer results |
+| R2 | False negatives from incomplete L3 detection | OPEN | Document known limitations |
+
+---
+
+## Execution Log
+
+| Date (UTC) | Update | Owner |
+|------------|--------|-------|
+| 2025-12-22 | Sprint created from advisory gap analysis | Agent |
+
+---
+
+## Required Reading
+
+- `src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace/AGENTS.md`
+- `src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace/Binary/`
+- `src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace/Speculative/`
+- `docs/reachability/function-level-evidence.md`
+- `docs/product-advisories/archived/*/19-Dec-2025 - Benchmarking Container Scanners Against Stella Ops.md` (Section 6: Call-Stack Reachability)
diff --git a/docs/implplan/SPRINT_7000_0002_0001_unified_confidence_model.md b/docs/implplan/SPRINT_7000_0002_0001_unified_confidence_model.md
new file mode 100644
index 000000000..d3a49d136
--- /dev/null
+++ b/docs/implplan/SPRINT_7000_0002_0001_unified_confidence_model.md
@@ -0,0 +1,865 @@
+# Sprint 7000.0002.0001 · Unified Confidence Score Model
+
+## Topic & Scope
+
+- Define unified confidence score aggregating all evidence types
+- Implement explainable confidence breakdown per input factor
+- Establish bounded computation rules with documentation
+
+**Working directory:** `src/Policy/__Libraries/StellaOps.Policy.Confidence/`
+
+## Dependencies & Concurrency
+
+- **Upstream**: SPRINT_4100_0003_0001 (Risk Verdict Attestation), SPRINT_4100_0002_0001 (Knowledge Snapshot)
+- **Downstream**: SPRINT_7000_0002_0002 (Vulnerability-First UX API)
+- **Safe to parallelize with**: SPRINT_7000_0004_0001 (Progressive Fidelity)
+
+## Documentation Prerequisites
+
+- `docs/product-advisories/21-Dec-2025 - Designing Explainable Triage Workflows.md`
+- `src/Policy/__Libraries/StellaOps.Policy/Scoring/ScoreExplanation.cs`
+- 
`src/Policy/StellaOps.Policy.Engine/Vex/VexDecisionModels.cs` + +--- + +## Problem Statement + +The advisory requires: "Confidence score (bounded; explainable inputs)" for each verdict. Currently, confidence exists in VEX (0.0-1.0) but is not unified across all evidence types (reachability, runtime, provenance, policy). Users cannot understand why a verdict has a particular confidence level. + +--- + +## Tasks + +### T1: Define ConfidenceScore Model + +**Assignee**: Policy Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: — + +**Description**: +Create a unified confidence score model that aggregates multiple input factors. + +**Implementation Path**: `Models/ConfidenceScore.cs` (new file) + +**Model Definition**: +```csharp +namespace StellaOps.Policy.Confidence.Models; + +/// +/// Unified confidence score aggregating all evidence types. +/// Bounded between 0.0 (no confidence) and 1.0 (full confidence). +/// +public sealed record ConfidenceScore +{ + /// + /// Final aggregated confidence (0.0 - 1.0). + /// + public required decimal Value { get; init; } + + /// + /// Confidence tier for quick categorization. + /// + public ConfidenceTier Tier => Value switch + { + >= 0.9m => ConfidenceTier.VeryHigh, + >= 0.7m => ConfidenceTier.High, + >= 0.5m => ConfidenceTier.Medium, + >= 0.3m => ConfidenceTier.Low, + _ => ConfidenceTier.VeryLow + }; + + /// + /// Breakdown of contributing factors. + /// + public required IReadOnlyList Factors { get; init; } + + /// + /// Human-readable explanation of the score. + /// + public required string Explanation { get; init; } + + /// + /// What would improve this confidence score. + /// + public IReadOnlyList Improvements { get; init; } = []; +} + +/// +/// A single factor contributing to confidence. +/// +public sealed record ConfidenceFactor +{ + /// + /// Factor type (reachability, runtime, vex, provenance, policy). 
+ /// + public required ConfidenceFactorType Type { get; init; } + + /// + /// Weight of this factor in aggregation (0.0 - 1.0). + /// + public required decimal Weight { get; init; } + + /// + /// Raw value before weighting (0.0 - 1.0). + /// + public required decimal RawValue { get; init; } + + /// + /// Weighted contribution to final score. + /// + public decimal Contribution => Weight * RawValue; + + /// + /// Human-readable reason for this value. + /// + public required string Reason { get; init; } + + /// + /// Evidence digests supporting this factor. + /// + public IReadOnlyList EvidenceDigests { get; init; } = []; +} + +public enum ConfidenceFactorType +{ + /// Call graph reachability analysis. + Reachability, + + /// Runtime corroboration (eBPF, dyld, ETW). + Runtime, + + /// VEX statement from vendor/distro. + Vex, + + /// Build provenance and SBOM quality. + Provenance, + + /// Policy rule match strength. + Policy, + + /// Advisory freshness and source quality. + Advisory +} + +public enum ConfidenceTier +{ + VeryLow, + Low, + Medium, + High, + VeryHigh +} + +/// +/// Actionable improvement to increase confidence. +/// +public sealed record ConfidenceImprovement( + ConfidenceFactorType Factor, + string Action, + decimal PotentialGain); +``` + +**Acceptance Criteria**: +- [ ] `ConfidenceScore.cs` created with all models +- [ ] Bounded 0.0-1.0 with tier categorization +- [ ] Factor breakdown with weights and raw values +- [ ] Improvement suggestions included +- [ ] XML documentation complete + +--- + +### T2: Define Weight Configuration + +**Assignee**: Policy Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Create configurable weight schema for confidence factors. + +**Implementation Path**: `Configuration/ConfidenceWeightOptions.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Policy.Confidence.Configuration; + +/// +/// Configuration for confidence factor weights. 
+/// +public sealed class ConfidenceWeightOptions +{ + public const string SectionName = "ConfidenceWeights"; + + /// + /// Weight for reachability factor (default: 0.30). + /// + public decimal Reachability { get; set; } = 0.30m; + + /// + /// Weight for runtime corroboration (default: 0.20). + /// + public decimal Runtime { get; set; } = 0.20m; + + /// + /// Weight for VEX statements (default: 0.25). + /// + public decimal Vex { get; set; } = 0.25m; + + /// + /// Weight for provenance quality (default: 0.15). + /// + public decimal Provenance { get; set; } = 0.15m; + + /// + /// Weight for policy match (default: 0.10). + /// + public decimal Policy { get; set; } = 0.10m; + + /// + /// Minimum confidence for not_affected verdict. + /// + public decimal MinimumForNotAffected { get; set; } = 0.70m; + + /// + /// Validates weights sum to 1.0. + /// + public bool Validate() + { + var sum = Reachability + Runtime + Vex + Provenance + Policy; + return Math.Abs(sum - 1.0m) < 0.001m; + } +} +``` + +**Sample YAML**: +```yaml +# etc/policy.confidence.yaml +confidenceWeights: + reachability: 0.30 + runtime: 0.20 + vex: 0.25 + provenance: 0.15 + policy: 0.10 + minimumForNotAffected: 0.70 +``` + +**Acceptance Criteria**: +- [ ] `ConfidenceWeightOptions.cs` created +- [ ] Weights sum validation +- [ ] Sample YAML configuration +- [ ] Minimum threshold for not_affected + +--- + +### T3: Create ConfidenceCalculator Service + +**Assignee**: Policy Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: T1, T2 + +**Description**: +Implement service that calculates unified confidence from all evidence sources. 
+ +**Implementation Path**: `Services/ConfidenceCalculator.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Policy.Confidence.Services; + +public interface IConfidenceCalculator +{ + ConfidenceScore Calculate(ConfidenceInput input); +} + +public sealed class ConfidenceCalculator : IConfidenceCalculator +{ + private readonly IOptionsMonitor _options; + + public ConfidenceCalculator(IOptionsMonitor options) + { + _options = options; + } + + public ConfidenceScore Calculate(ConfidenceInput input) + { + var weights = _options.CurrentValue; + var factors = new List(); + + // Calculate reachability factor + var reachabilityFactor = CalculateReachabilityFactor(input.Reachability, weights.Reachability); + factors.Add(reachabilityFactor); + + // Calculate runtime factor + var runtimeFactor = CalculateRuntimeFactor(input.Runtime, weights.Runtime); + factors.Add(runtimeFactor); + + // Calculate VEX factor + var vexFactor = CalculateVexFactor(input.Vex, weights.Vex); + factors.Add(vexFactor); + + // Calculate provenance factor + var provenanceFactor = CalculateProvenanceFactor(input.Provenance, weights.Provenance); + factors.Add(provenanceFactor); + + // Calculate policy factor + var policyFactor = CalculatePolicyFactor(input.Policy, weights.Policy); + factors.Add(policyFactor); + + // Aggregate + var totalValue = factors.Sum(f => f.Contribution); + var clampedValue = Math.Clamp(totalValue, 0m, 1m); + + // Generate explanation + var explanation = GenerateExplanation(factors, clampedValue); + + // Generate improvements + var improvements = GenerateImprovements(factors, weights); + + return new ConfidenceScore + { + Value = clampedValue, + Factors = factors, + Explanation = explanation, + Improvements = improvements + }; + } + + private ConfidenceFactor CalculateReachabilityFactor( + ReachabilityEvidence? 
evidence, decimal weight) + { + if (evidence is null) + { + return new ConfidenceFactor + { + Type = ConfidenceFactorType.Reachability, + Weight = weight, + RawValue = 0.5m, // Unknown = 50% + Reason = "No reachability analysis performed", + EvidenceDigests = [] + }; + } + + var rawValue = evidence.State switch + { + ReachabilityState.ConfirmedUnreachable => 1.0m, + ReachabilityState.StaticUnreachable => 0.85m, + ReachabilityState.Unknown => 0.5m, + ReachabilityState.StaticReachable => 0.3m, + ReachabilityState.ConfirmedReachable => 0.1m, + _ => 0.5m + }; + + // Adjust by confidence of the analysis itself + rawValue *= evidence.AnalysisConfidence; + + return new ConfidenceFactor + { + Type = ConfidenceFactorType.Reachability, + Weight = weight, + RawValue = rawValue, + Reason = $"Reachability: {evidence.State} (analysis confidence: {evidence.AnalysisConfidence:P0})", + EvidenceDigests = evidence.GraphDigests.ToList() + }; + } + + private ConfidenceFactor CalculateRuntimeFactor( + RuntimeEvidence? evidence, decimal weight) + { + if (evidence is null || !evidence.HasObservations) + { + return new ConfidenceFactor + { + Type = ConfidenceFactorType.Runtime, + Weight = weight, + RawValue = 0.5m, + Reason = "No runtime observations available", + EvidenceDigests = [] + }; + } + + var rawValue = evidence.Posture switch + { + RuntimePosture.Supports => 0.9m, + RuntimePosture.Contradicts => 0.2m, + RuntimePosture.Unknown => 0.5m, + _ => 0.5m + }; + + // Adjust by observation count and recency + var recencyBonus = evidence.ObservedWithinHours(24) ? 0.1m : 0m; + rawValue = Math.Min(1.0m, rawValue + recencyBonus); + + return new ConfidenceFactor + { + Type = ConfidenceFactorType.Runtime, + Weight = weight, + RawValue = rawValue, + Reason = $"Runtime {evidence.Posture.ToString().ToLowerInvariant()}: {evidence.ObservationCount} observations", + EvidenceDigests = evidence.SessionDigests.ToList() + }; + } + + private ConfidenceFactor CalculateVexFactor( + VexEvidence? 
evidence, decimal weight) + { + if (evidence is null || evidence.Statements.Count == 0) + { + return new ConfidenceFactor + { + Type = ConfidenceFactorType.Vex, + Weight = weight, + RawValue = 0.5m, + Reason = "No VEX statements available", + EvidenceDigests = [] + }; + } + + // Use the best VEX statement (by trust and recency) + var best = evidence.Statements + .OrderByDescending(s => s.TrustScore) + .ThenByDescending(s => s.Timestamp) + .First(); + + var rawValue = best.Status switch + { + VexStatus.NotAffected => best.TrustScore, + VexStatus.Fixed => best.TrustScore * 0.9m, + VexStatus.UnderInvestigation => 0.4m, + VexStatus.Affected => 0.1m, + _ => 0.5m + }; + + return new ConfidenceFactor + { + Type = ConfidenceFactorType.Vex, + Weight = weight, + RawValue = rawValue, + Reason = $"VEX {best.Status} from {best.Issuer} (trust: {best.TrustScore:P0})", + EvidenceDigests = [best.StatementDigest] + }; + } + + private ConfidenceFactor CalculateProvenanceFactor( + ProvenanceEvidence? evidence, decimal weight) + { + if (evidence is null) + { + return new ConfidenceFactor + { + Type = ConfidenceFactorType.Provenance, + Weight = weight, + RawValue = 0.3m, + Reason = "No provenance information", + EvidenceDigests = [] + }; + } + + var rawValue = evidence.Level switch + { + ProvenanceLevel.SlsaLevel3 => 1.0m, + ProvenanceLevel.SlsaLevel2 => 0.85m, + ProvenanceLevel.SlsaLevel1 => 0.7m, + ProvenanceLevel.Signed => 0.6m, + ProvenanceLevel.Unsigned => 0.3m, + _ => 0.3m + }; + + // SBOM completeness bonus + if (evidence.SbomCompleteness >= 0.9m) + rawValue = Math.Min(1.0m, rawValue + 0.1m); + + return new ConfidenceFactor + { + Type = ConfidenceFactorType.Provenance, + Weight = weight, + RawValue = rawValue, + Reason = $"Provenance: {evidence.Level}, SBOM completeness: {evidence.SbomCompleteness:P0}", + EvidenceDigests = evidence.AttestationDigests.ToList() + }; + } + + private ConfidenceFactor CalculatePolicyFactor( + PolicyEvidence? 
evidence, decimal weight) + { + if (evidence is null) + { + return new ConfidenceFactor + { + Type = ConfidenceFactorType.Policy, + Weight = weight, + RawValue = 0.5m, + Reason = "No policy evaluation", + EvidenceDigests = [] + }; + } + + // Policy confidence based on rule match quality + var rawValue = evidence.MatchStrength; + + return new ConfidenceFactor + { + Type = ConfidenceFactorType.Policy, + Weight = weight, + RawValue = rawValue, + Reason = $"Policy rule '{evidence.RuleName}' matched (strength: {evidence.MatchStrength:P0})", + EvidenceDigests = [evidence.EvaluationDigest] + }; + } + + private static string GenerateExplanation( + IReadOnlyList factors, decimal totalValue) + { + var tier = totalValue switch + { + >= 0.9m => "very high", + >= 0.7m => "high", + >= 0.5m => "medium", + >= 0.3m => "low", + _ => "very low" + }; + + var topFactors = factors + .OrderByDescending(f => f.Contribution) + .Take(2) + .Select(f => f.Type.ToString().ToLowerInvariant()); + + return $"Confidence is {tier} ({totalValue:P0}), primarily driven by {string.Join(" and ", topFactors)}."; + } + + private static IReadOnlyList GenerateImprovements( + IReadOnlyList factors, + ConfidenceWeightOptions weights) + { + var improvements = new List(); + + foreach (var factor in factors.Where(f => f.RawValue < 0.7m)) + { + var (action, potentialGain) = factor.Type switch + { + ConfidenceFactorType.Reachability => + ("Run deeper reachability analysis", factor.Weight * 0.3m), + ConfidenceFactorType.Runtime => + ("Deploy runtime sensor and collect observations", factor.Weight * 0.4m), + ConfidenceFactorType.Vex => + ("Obtain VEX statement from vendor", factor.Weight * 0.4m), + ConfidenceFactorType.Provenance => + ("Add SLSA provenance attestation", factor.Weight * 0.3m), + ConfidenceFactorType.Policy => + ("Review and refine policy rules", factor.Weight * 0.2m), + _ => ("Gather additional evidence", 0.1m) + }; + + improvements.Add(new ConfidenceImprovement(factor.Type, action, potentialGain)); 
+ } + + return improvements.OrderByDescending(i => i.PotentialGain).Take(3).ToList(); + } +} + +/// +/// Input container for confidence calculation. +/// +public sealed record ConfidenceInput +{ + public ReachabilityEvidence? Reachability { get; init; } + public RuntimeEvidence? Runtime { get; init; } + public VexEvidence? Vex { get; init; } + public ProvenanceEvidence? Provenance { get; init; } + public PolicyEvidence? Policy { get; init; } +} +``` + +**Acceptance Criteria**: +- [ ] `ConfidenceCalculator.cs` created +- [ ] Calculates all 5 factor types +- [ ] Weights applied correctly +- [ ] Explanation generated automatically +- [ ] Improvements suggested based on low factors + +--- + +### T4: Create Evidence Input Models + +**Assignee**: Policy Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Create input models for each evidence type used in confidence calculation. + +**Implementation Path**: `Models/ConfidenceEvidence.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Policy.Confidence.Models; + +public sealed record ReachabilityEvidence +{ + public required ReachabilityState State { get; init; } + public required decimal AnalysisConfidence { get; init; } + public IReadOnlyList GraphDigests { get; init; } = []; +} + +public enum ReachabilityState +{ + Unknown, + StaticReachable, + StaticUnreachable, + ConfirmedReachable, + ConfirmedUnreachable +} + +public sealed record RuntimeEvidence +{ + public required RuntimePosture Posture { get; init; } + public required int ObservationCount { get; init; } + public required DateTimeOffset LastObserved { get; init; } + public IReadOnlyList SessionDigests { get; init; } = []; + public bool HasObservations => ObservationCount > 0; + public bool ObservedWithinHours(int hours) => + LastObserved > DateTimeOffset.UtcNow.AddHours(-hours); +} + +public enum RuntimePosture +{ + Unknown, + Supports, + Contradicts +} + +public sealed record VexEvidence +{ + public required 
IReadOnlyList Statements { get; init; } +} + +public sealed record VexStatement +{ + public required VexStatus Status { get; init; } + public required string Issuer { get; init; } + public required decimal TrustScore { get; init; } + public required DateTimeOffset Timestamp { get; init; } + public required string StatementDigest { get; init; } +} + +public enum VexStatus +{ + Affected, + NotAffected, + Fixed, + UnderInvestigation +} + +public sealed record ProvenanceEvidence +{ + public required ProvenanceLevel Level { get; init; } + public required decimal SbomCompleteness { get; init; } + public IReadOnlyList AttestationDigests { get; init; } = []; +} + +public enum ProvenanceLevel +{ + Unsigned, + Signed, + SlsaLevel1, + SlsaLevel2, + SlsaLevel3 +} + +public sealed record PolicyEvidence +{ + public required string RuleName { get; init; } + public required decimal MatchStrength { get; init; } + public required string EvaluationDigest { get; init; } +} +``` + +**Acceptance Criteria**: +- [ ] All evidence input models defined +- [ ] Enums for state/status values +- [ ] Helper methods (ObservedWithinHours, HasObservations) +- [ ] Digest tracking for audit + +--- + +### T5: Integrate with PolicyEvaluator + +**Assignee**: Policy Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T3 + +**Description**: +Integrate confidence calculation into policy evaluation pipeline. + +**Implementation Path**: Modify `src/Policy/StellaOps.Policy.Engine/Services/PolicyEvaluator.cs` + +**Integration**: +```csharp +// Add to PolicyEvaluationResult +public sealed record PolicyEvaluationResult +{ + // ... existing fields ... + + /// + /// Unified confidence score for this verdict. + /// + public ConfidenceScore? 
Confidence { get; init; } +} + +// In PolicyEvaluator.EvaluateAsync +var confidenceInput = BuildConfidenceInput(context, result); +var confidence = _confidenceCalculator.Calculate(confidenceInput); + +return result with { Confidence = confidence }; +``` + +**Acceptance Criteria**: +- [ ] `PolicyEvaluationResult` includes `Confidence` +- [ ] Confidence calculated during evaluation +- [ ] All evidence sources mapped to input + +--- + +### T6: Add Tests + +**Assignee**: Policy Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1-T5 + +**Description**: +Comprehensive tests for confidence calculation. + +**Implementation Path**: `src/Policy/__Tests/StellaOps.Policy.Confidence.Tests/` + +**Test Cases**: +```csharp +public class ConfidenceCalculatorTests +{ + [Fact] + public void Calculate_AllHighFactors_ReturnsVeryHighConfidence() + { + var input = CreateInput( + reachability: ReachabilityState.ConfirmedUnreachable, + runtime: RuntimePosture.Supports, + vex: VexStatus.NotAffected, + provenance: ProvenanceLevel.SlsaLevel3); + + var result = _calculator.Calculate(input); + + result.Tier.Should().Be(ConfidenceTier.VeryHigh); + result.Value.Should().BeGreaterOrEqualTo(0.9m); + } + + [Fact] + public void Calculate_AllLowFactors_ReturnsLowConfidence() + { + var input = CreateInput( + reachability: ReachabilityState.ConfirmedReachable, + runtime: RuntimePosture.Contradicts, + vex: VexStatus.Affected); + + var result = _calculator.Calculate(input); + + result.Tier.Should().Be(ConfidenceTier.Low); + } + + [Fact] + public void Calculate_MissingEvidence_UsesFallbackValues() + { + var input = new ConfidenceInput(); // All null + + var result = _calculator.Calculate(input); + + result.Value.Should().BeApproximately(0.5m, 0.05m); + result.Factors.Should().AllSatisfy(f => f.Reason.Should().Contain("No")); + } + + [Fact] + public void Calculate_GeneratesImprovements_ForLowFactors() + { + var input = CreateInput(reachability: ReachabilityState.Unknown); + + var result = 
_calculator.Calculate(input); + + result.Improvements.Should().Contain(i => + i.Factor == ConfidenceFactorType.Reachability); + } + + [Fact] + public void Calculate_WeightsSumToOne() + { + var options = new ConfidenceWeightOptions(); + + options.Validate().Should().BeTrue(); + } + + [Fact] + public void Calculate_FactorContributions_SumToValue() + { + var input = CreateFullInput(); + + var result = _calculator.Calculate(input); + + var sumOfContributions = result.Factors.Sum(f => f.Contribution); + result.Value.Should().BeApproximately(sumOfContributions, 0.001m); + } +} +``` + +**Acceptance Criteria**: +- [ ] Test for high confidence scenario +- [ ] Test for low confidence scenario +- [ ] Test for missing evidence fallback +- [ ] Test for improvement generation +- [ ] Test for weight validation +- [ ] All tests pass + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Policy Team | Define ConfidenceScore model | +| 2 | T2 | TODO | T1 | Policy Team | Define weight configuration | +| 3 | T3 | TODO | T1, T2 | Policy Team | Create ConfidenceCalculator service | +| 4 | T4 | TODO | T1 | Policy Team | Create evidence input models | +| 5 | T5 | TODO | T3 | Policy Team | Integrate with PolicyEvaluator | +| 6 | T6 | TODO | T1-T5 | Policy Team | Add tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from Explainable Triage Workflows advisory gap analysis. 
| Claude | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Five factor types | Decision | Policy Team | Reachability, Runtime, VEX, Provenance, Policy | +| Default weights | Decision | Policy Team | 0.30/0.20/0.25/0.15/0.10 = 1.0 | +| Missing evidence = 0.5 | Decision | Policy Team | Unknown treated as medium confidence | +| Tier thresholds | Decision | Policy Team | VeryHigh ≥0.9, High ≥0.7, Medium ≥0.5, Low ≥0.3 | + +--- + +## Success Criteria + +- [ ] All 6 tasks marked DONE +- [ ] Confidence score bounded 0.0-1.0 +- [ ] Factor breakdown available for each score +- [ ] Improvements generated for low factors +- [ ] Integration with PolicyEvaluator complete +- [ ] 6+ tests passing +- [ ] `dotnet build` succeeds +- [ ] `dotnet test` succeeds diff --git a/docs/implplan/SPRINT_7000_0002_0002_vulnerability_first_ux_api.md b/docs/implplan/SPRINT_7000_0002_0002_vulnerability_first_ux_api.md new file mode 100644 index 000000000..4e2f61baf --- /dev/null +++ b/docs/implplan/SPRINT_7000_0002_0002_vulnerability_first_ux_api.md @@ -0,0 +1,844 @@ +# Sprint 7000.0002.0002 · Vulnerability-First UX API Contracts + +## Topic & Scope + +- Define API contracts for vulnerability-first finding views +- Implement verdict chip, confidence, and one-liner summary +- Create proof badge computation logic +- Enable click-through to detailed evidence + +**Working directory:** `src/Findings/StellaOps.Findings.WebService/` + +## Dependencies & Concurrency + +- **Upstream**: SPRINT_7000_0002_0001 (Unified Confidence Model) +- **Downstream**: SPRINT_7000_0003_0001 (Evidence Graph), SPRINT_7000_0003_0002 (Reachability Map), SPRINT_7000_0003_0003 (Runtime Timeline) +- **Safe to parallelize with**: None (depends on confidence model) + +## Documentation Prerequisites + +- `docs/product-advisories/21-Dec-2025 - Designing Explainable Triage Workflows.md` +- SPRINT_7000_0002_0001 completion +- 
`src/Findings/StellaOps.Findings.Ledger/Domain/DecisionModels.cs` + +--- + +## Problem Statement + +The advisory requires: "Finding row shows: Verdict chip + confidence + 'why' one-liner + proof badges (Reachability / Runtime / Policy / Provenance)." + +Currently, the backend has all necessary data but no unified API contracts for vulnerability-first presentation. Users must aggregate data from multiple endpoints. + +--- + +## Tasks + +### T1: Define FindingSummary Contract + +**Assignee**: Findings Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: — + +**Description**: +Create the unified finding summary response contract. + +**Implementation Path**: `Contracts/FindingSummaryContracts.cs` (new file) + +**Contract Definition**: +```csharp +namespace StellaOps.Findings.WebService.Contracts; + +/// +/// Compact finding summary for list views. +/// +public sealed record FindingSummaryResponse +{ + /// + /// Unique finding identifier. + /// + public required Guid FindingId { get; init; } + + /// + /// Vulnerability ID (CVE-XXXX-XXXXX). + /// + public required string VulnerabilityId { get; init; } + + /// + /// Affected component PURL. + /// + public required string ComponentPurl { get; init; } + + /// + /// Affected component version. + /// + public required string Version { get; init; } + + /// + /// Verdict chip for display. + /// + public required VerdictChip Verdict { get; init; } + + /// + /// Unified confidence score. + /// + public required ConfidenceChip Confidence { get; init; } + + /// + /// One-liner explanation of the verdict. + /// + public required string WhyOneLiner { get; init; } + + /// + /// Proof badges showing evidence status. + /// + public required ProofBadges Badges { get; init; } + + /// + /// CVSS score if available. + /// + public decimal? CvssScore { get; init; } + + /// + /// Severity label (Critical, High, Medium, Low). + /// + public string? Severity { get; init; } + + /// + /// Whether this finding is in CISA KEV. 
+ /// + public bool IsKev { get; init; } + + /// + /// EPSS score if available. + /// + public decimal? EpssScore { get; init; } + + /// + /// Last updated timestamp. + /// + public DateTimeOffset UpdatedAt { get; init; } +} + +/// +/// Verdict chip for UI display. +/// +public sealed record VerdictChip +{ + /// + /// Verdict status: affected, not_affected, mitigated, needs_review. + /// + public required string Status { get; init; } + + /// + /// Display label for the chip. + /// + public required string Label { get; init; } + + /// + /// Color indicator: red, green, yellow, gray. + /// + public required string Color { get; init; } + + /// + /// Icon name for the chip. + /// + public required string Icon { get; init; } +} + +/// +/// Confidence chip for UI display. +/// +public sealed record ConfidenceChip +{ + /// + /// Numeric value (0-100 for percentage display). + /// + public required int Percentage { get; init; } + + /// + /// Tier label: Very High, High, Medium, Low, Very Low. + /// + public required string Tier { get; init; } + + /// + /// Color indicator based on tier. + /// + public required string Color { get; init; } + + /// + /// Tooltip with factor breakdown. + /// + public required string Tooltip { get; init; } +} + +/// +/// Proof badges showing evidence availability and status. +/// +public sealed record ProofBadges +{ + /// + /// Reachability proof badge. + /// + public required ProofBadge Reachability { get; init; } + + /// + /// Runtime corroboration badge. + /// + public required ProofBadge Runtime { get; init; } + + /// + /// Policy evaluation badge. + /// + public required ProofBadge Policy { get; init; } + + /// + /// Provenance/SBOM badge. + /// + public required ProofBadge Provenance { get; init; } +} + +/// +/// Individual proof badge. +/// +public sealed record ProofBadge +{ + /// + /// Badge status: available, missing, partial, error. + /// + public required string Status { get; init; } + + /// + /// Whether this proof is available. 
+ /// + public bool IsAvailable => Status == "available"; + + /// + /// Short label for the badge. + /// + public required string Label { get; init; } + + /// + /// Tooltip with details. + /// + public required string Tooltip { get; init; } + + /// + /// Link to detailed view (if available). + /// + public string? DetailUrl { get; init; } + + /// + /// Evidence digest (if available). + /// + public string? EvidenceDigest { get; init; } +} + +/// +/// Paginated list of finding summaries. +/// +public sealed record FindingSummaryListResponse +{ + public required IReadOnlyList Items { get; init; } + public required int TotalCount { get; init; } + public string? NextCursor { get; init; } +} +``` + +**Acceptance Criteria**: +- [ ] `FindingSummaryResponse` with all fields +- [ ] `VerdictChip` with status, label, color, icon +- [ ] `ConfidenceChip` with percentage, tier, color +- [ ] `ProofBadges` with four badge types +- [ ] `ProofBadge` with status and detail URL + +--- + +### T2: Create FindingSummaryBuilder + +**Assignee**: Findings Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Implement service to build finding summaries from domain models. 
+ +**Implementation Path**: `Services/FindingSummaryBuilder.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Findings.WebService.Services; + +public interface IFindingSummaryBuilder +{ + FindingSummaryResponse Build(Finding finding, EvidenceContext evidence); +} + +public sealed class FindingSummaryBuilder : IFindingSummaryBuilder +{ + public FindingSummaryResponse Build(Finding finding, EvidenceContext evidence) + { + var verdict = BuildVerdictChip(finding); + var confidence = BuildConfidenceChip(evidence.Confidence); + var badges = BuildProofBadges(finding, evidence); + var oneLiner = GenerateOneLiner(finding, verdict, evidence); + + return new FindingSummaryResponse + { + FindingId = finding.Id, + VulnerabilityId = finding.VulnerabilityId, + ComponentPurl = finding.Purl, + Version = finding.Version, + Verdict = verdict, + Confidence = confidence, + WhyOneLiner = oneLiner, + Badges = badges, + CvssScore = finding.CvssScore, + Severity = finding.Severity, + IsKev = finding.IsKev, + EpssScore = finding.EpssScore, + UpdatedAt = finding.UpdatedAt + }; + } + + private static VerdictChip BuildVerdictChip(Finding finding) + { + return finding.Status switch + { + FindingStatus.Affected => new VerdictChip + { + Status = "affected", + Label = "Affected", + Color = "red", + Icon = "alert-circle" + }, + FindingStatus.NotAffected => new VerdictChip + { + Status = "not_affected", + Label = "Not Affected", + Color = "green", + Icon = "check-circle" + }, + FindingStatus.Mitigated => new VerdictChip + { + Status = "mitigated", + Label = "Mitigated", + Color = "blue", + Icon = "shield-check" + }, + FindingStatus.NeedsReview => new VerdictChip + { + Status = "needs_review", + Label = "Needs Review", + Color = "yellow", + Icon = "help-circle" + }, + _ => new VerdictChip + { + Status = "unknown", + Label = "Unknown", + Color = "gray", + Icon = "question-circle" + } + }; + } + + private static ConfidenceChip BuildConfidenceChip(ConfidenceScore? 
confidence) + { + if (confidence is null) + { + return new ConfidenceChip + { + Percentage = 50, + Tier = "Unknown", + Color = "gray", + Tooltip = "Confidence not calculated" + }; + } + + var percentage = (int)(confidence.Value * 100); + var color = confidence.Tier switch + { + ConfidenceTier.VeryHigh => "green", + ConfidenceTier.High => "blue", + ConfidenceTier.Medium => "yellow", + ConfidenceTier.Low => "orange", + ConfidenceTier.VeryLow => "red", + _ => "gray" + }; + + var topFactors = confidence.Factors + .OrderByDescending(f => f.Contribution) + .Take(2) + .Select(f => $"{f.Type}: {f.RawValue:P0}"); + + return new ConfidenceChip + { + Percentage = percentage, + Tier = confidence.Tier.ToString(), + Color = color, + Tooltip = $"Driven by {string.Join(", ", topFactors)}" + }; + } + + private static ProofBadges BuildProofBadges(Finding finding, EvidenceContext evidence) + { + return new ProofBadges + { + Reachability = BuildReachabilityBadge(evidence), + Runtime = BuildRuntimeBadge(evidence), + Policy = BuildPolicyBadge(evidence), + Provenance = BuildProvenanceBadge(evidence) + }; + } + + private static ProofBadge BuildReachabilityBadge(EvidenceContext evidence) + { + if (evidence.Reachability is null) + { + return new ProofBadge + { + Status = "missing", + Label = "Reach", + Tooltip = "No reachability analysis" + }; + } + + return new ProofBadge + { + Status = "available", + Label = "Reach", + Tooltip = $"Reachability: {evidence.Reachability.State}", + DetailUrl = $"/api/v1/findings/{evidence.FindingId}/reachability-map", + EvidenceDigest = evidence.Reachability.GraphDigests.FirstOrDefault() + }; + } + + private static ProofBadge BuildRuntimeBadge(EvidenceContext evidence) + { + if (evidence.Runtime is null || !evidence.Runtime.HasObservations) + { + return new ProofBadge + { + Status = "missing", + Label = "Runtime", + Tooltip = "No runtime observations" + }; + } + + return new ProofBadge + { + Status = "available", + Label = "Runtime", + Tooltip = $"Runtime: 
{evidence.Runtime.ObservationCount} observations", + DetailUrl = $"/api/v1/findings/{evidence.FindingId}/runtime-timeline", + EvidenceDigest = evidence.Runtime.SessionDigests.FirstOrDefault() + }; + } + + private static ProofBadge BuildPolicyBadge(EvidenceContext evidence) + { + if (evidence.Policy is null) + { + return new ProofBadge + { + Status = "missing", + Label = "Policy", + Tooltip = "No policy evaluation" + }; + } + + return new ProofBadge + { + Status = "available", + Label = "Policy", + Tooltip = $"Policy rule: {evidence.Policy.RuleName}", + DetailUrl = $"/api/v1/findings/{evidence.FindingId}/policy-trace", + EvidenceDigest = evidence.Policy.EvaluationDigest + }; + } + + private static ProofBadge BuildProvenanceBadge(EvidenceContext evidence) + { + if (evidence.Provenance is null) + { + return new ProofBadge + { + Status = "missing", + Label = "Prov", + Tooltip = "No provenance information" + }; + } + + return new ProofBadge + { + Status = "available", + Label = "Prov", + Tooltip = $"Provenance: {evidence.Provenance.Level}", + DetailUrl = $"/api/v1/findings/{evidence.FindingId}/provenance", + EvidenceDigest = evidence.Provenance.AttestationDigests.FirstOrDefault() + }; + } + + private static string GenerateOneLiner( + Finding finding, + VerdictChip verdict, + EvidenceContext evidence) + { + if (verdict.Status == "not_affected" && evidence.Reachability is not null) + { + return $"Not affected: code path to {finding.VulnerabilityId} is not reachable."; + } + + if (verdict.Status == "affected" && finding.IsKev) + { + return $"Affected: {finding.VulnerabilityId} is actively exploited (KEV)."; + } + + if (verdict.Status == "affected") + { + return $"Affected: {finding.VulnerabilityId} impacts {finding.Purl}."; + } + + if (verdict.Status == "mitigated") + { + return $"Mitigated: compensating controls address {finding.VulnerabilityId}."; + } + + return $"Review required: {finding.VulnerabilityId} needs assessment."; + } +} +``` + +**Acceptance Criteria**: +- [ 
] `FindingSummaryBuilder` implements `IFindingSummaryBuilder` +- [ ] Verdict chip mapping complete +- [ ] Confidence chip with color and tooltip +- [ ] All four proof badges built +- [ ] One-liner generation with context + +--- + +### T3: Create API Endpoints + +**Assignee**: Findings Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T2 + +**Description**: +Create REST API endpoints for finding summaries. + +**Implementation Path**: `Endpoints/FindingSummaryEndpoints.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Findings.WebService.Endpoints; + +public static class FindingSummaryEndpoints +{ + public static void MapFindingSummaryEndpoints(this WebApplication app) + { + var group = app.MapGroup("/api/v1/findings") + .WithTags("Finding Summaries") + .RequireAuthorization(); + + // GET /api/v1/findings?artifact={digest}&limit={n}&cursor={c} + // NOTE: the defaulted parameter must come last — C# requires optional + // parameters to trail all required ones, so `limit = 50` cannot precede + // `cursor`/`service`/`ct`. + group.MapGet("/", async ( + [FromQuery] string? artifact, + [FromQuery] string? vulnerability, + [FromQuery] string? status, + [FromQuery] string? severity, + [FromQuery] string? cursor, + IFindingSummaryService service, + CancellationToken ct, + [FromQuery] int limit = 50) => + { + var query = new FindingSummaryQuery + { + ArtifactDigest = artifact, + VulnerabilityId = vulnerability, + Status = status, + Severity = severity, + Limit = Math.Clamp(limit, 1, 100), + Cursor = cursor + }; + + var result = await service.QueryAsync(query, ct); + return Results.Ok(result); + }) + .WithName("ListFindingSummaries") + .WithDescription("List finding summaries with verdict chips and proof badges"); + + // GET /api/v1/findings/{findingId}/summary + group.MapGet("/{findingId:guid}/summary", async ( + Guid findingId, + IFindingSummaryService service, + CancellationToken ct) => + { + var result = await service.GetSummaryAsync(findingId, ct); + return result is not null + ? 
Results.Ok(result) + : Results.NotFound(); + }) + .WithName("GetFindingSummary") + .WithDescription("Get detailed finding summary with all badges and evidence links"); + + // GET /api/v1/findings/{findingId}/evidence-graph + group.MapGet("/{findingId:guid}/evidence-graph", async ( + Guid findingId, + IEvidenceGraphService service, + CancellationToken ct) => + { + var result = await service.GetGraphAsync(findingId, ct); + return result is not null + ? Results.Ok(result) + : Results.NotFound(); + }) + .WithName("GetFindingEvidenceGraph") + .WithDescription("Get evidence graph for click-through visualization"); + } +} +``` + +**Acceptance Criteria**: +- [ ] List endpoint with filtering +- [ ] Single summary endpoint +- [ ] Evidence graph endpoint stub +- [ ] Pagination support +- [ ] OpenAPI documentation + +--- + +### T4: Implement FindingSummaryService + +**Assignee**: Findings Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T2, T3 + +**Description**: +Implement service that aggregates data for finding summaries. 
+ +**Implementation Path**: `Services/FindingSummaryService.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Findings.WebService.Services; + +public interface IFindingSummaryService +{ + Task QueryAsync(FindingSummaryQuery query, CancellationToken ct); + Task GetSummaryAsync(Guid findingId, CancellationToken ct); +} + +public sealed class FindingSummaryService : IFindingSummaryService +{ + private readonly IFindingRepository _findingRepo; + private readonly IEvidenceRepository _evidenceRepo; + private readonly IConfidenceCalculator _confidenceCalculator; + private readonly IFindingSummaryBuilder _builder; + + public FindingSummaryService( + IFindingRepository findingRepo, + IEvidenceRepository evidenceRepo, + IConfidenceCalculator confidenceCalculator, + IFindingSummaryBuilder builder) + { + _findingRepo = findingRepo; + _evidenceRepo = evidenceRepo; + _confidenceCalculator = confidenceCalculator; + _builder = builder; + } + + public async Task QueryAsync( + FindingSummaryQuery query, + CancellationToken ct) + { + var findings = await _findingRepo.QueryAsync(query, ct); + var findingIds = findings.Select(f => f.Id).ToList(); + + // Batch load evidence + var evidenceMap = await _evidenceRepo.GetBatchAsync(findingIds, ct); + + var summaries = new List(); + foreach (var finding in findings) + { + var evidence = evidenceMap.GetValueOrDefault(finding.Id) + ?? 
new EvidenceContext { FindingId = finding.Id }; + + // Calculate confidence + var confidenceInput = MapToConfidenceInput(evidence); + evidence.Confidence = _confidenceCalculator.Calculate(confidenceInput); + + summaries.Add(_builder.Build(finding, evidence)); + } + + return new FindingSummaryListResponse + { + Items = summaries, + TotalCount = findings.TotalCount, + NextCursor = findings.NextCursor + }; + } + + public async Task GetSummaryAsync( + Guid findingId, + CancellationToken ct) + { + var finding = await _findingRepo.GetByIdAsync(findingId, ct); + if (finding is null) return null; + + var evidence = await _evidenceRepo.GetAsync(findingId, ct) + ?? new EvidenceContext { FindingId = findingId }; + + var confidenceInput = MapToConfidenceInput(evidence); + evidence.Confidence = _confidenceCalculator.Calculate(confidenceInput); + + return _builder.Build(finding, evidence); + } + + private static ConfidenceInput MapToConfidenceInput(EvidenceContext evidence) + { + return new ConfidenceInput + { + Reachability = evidence.Reachability, + Runtime = evidence.Runtime, + Vex = evidence.Vex, + Provenance = evidence.Provenance, + Policy = evidence.Policy + }; + } +} + +public sealed record FindingSummaryQuery +{ + public string? ArtifactDigest { get; init; } + public string? VulnerabilityId { get; init; } + public string? Status { get; init; } + public string? Severity { get; init; } + public int Limit { get; init; } = 50; + public string? Cursor { get; init; } +} +``` + +**Acceptance Criteria**: +- [ ] Query with filtering and pagination +- [ ] Batch evidence loading for performance +- [ ] Confidence calculation integrated +- [ ] Single finding lookup with full context + +--- + +### T5: Add Tests + +**Assignee**: Findings Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1-T4 + +**Description**: +Tests for finding summary functionality. 
+ +**Test Cases**: +```csharp +public class FindingSummaryBuilderTests +{ + [Fact] + public void Build_AffectedFinding_ReturnsRedVerdictChip() + { + var finding = CreateFinding(FindingStatus.Affected); + var evidence = CreateEvidence(); + + var result = _builder.Build(finding, evidence); + + result.Verdict.Status.Should().Be("affected"); + result.Verdict.Color.Should().Be("red"); + } + + [Fact] + public void Build_WithReachabilityEvidence_ReturnsAvailableBadge() + { + var finding = CreateFinding(); + var evidence = CreateEvidence(hasReachability: true); + + var result = _builder.Build(finding, evidence); + + result.Badges.Reachability.Status.Should().Be("available"); + result.Badges.Reachability.DetailUrl.Should().NotBeNullOrEmpty(); + } + + [Fact] + public void Build_WithHighConfidence_ReturnsGreenConfidenceChip() + { + var finding = CreateFinding(); + var evidence = CreateEvidence(confidenceValue: 0.9m); + + var result = _builder.Build(finding, evidence); + + result.Confidence.Tier.Should().Be("VeryHigh"); + result.Confidence.Color.Should().Be("green"); + } + + [Fact] + public void Build_KevFinding_GeneratesKevOneLiner() + { + var finding = CreateFinding(isKev: true); + var evidence = CreateEvidence(); + + var result = _builder.Build(finding, evidence); + + result.WhyOneLiner.Should().Contain("actively exploited"); + } +} +``` + +**Acceptance Criteria**: +- [ ] Verdict chip tests +- [ ] Confidence chip tests +- [ ] Proof badge tests +- [ ] One-liner generation tests +- [ ] All tests pass + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Findings Team | Define FindingSummary contract | +| 2 | T2 | TODO | T1 | Findings Team | Create FindingSummaryBuilder | +| 3 | T3 | TODO | T2 | Findings Team | Create API endpoints | +| 4 | T4 | TODO | T2, T3 | Findings Team | Implement FindingSummaryService | +| 5 | T5 | TODO | T1-T4 | Findings 
Team | Add tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from Explainable Triage Workflows advisory gap analysis. | Claude | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Four proof badges | Decision | Findings Team | Reachability, Runtime, Policy, Provenance | +| Color scheme | Decision | Findings Team | Red=affected, Green=not_affected, Yellow=review, Blue=mitigated | +| One-liner logic | Decision | Findings Team | Context-aware based on status and evidence | + +--- + +## Success Criteria + +- [ ] All 5 tasks marked DONE +- [ ] API returns complete finding summaries +- [ ] Verdict chips with correct colors +- [ ] Proof badges with detail URLs +- [ ] Confidence integrated +- [ ] Pagination working +- [ ] All tests pass diff --git a/docs/implplan/SPRINT_7000_0003_0001_evidence_graph_api.md b/docs/implplan/SPRINT_7000_0003_0001_evidence_graph_api.md new file mode 100644 index 000000000..dce51d660 --- /dev/null +++ b/docs/implplan/SPRINT_7000_0003_0001_evidence_graph_api.md @@ -0,0 +1,550 @@ +# Sprint 7000.0003.0001 · Evidence Graph Visualization API + +## Topic & Scope + +- Create API for evidence graph visualization +- Model evidence nodes, edges, and derivation relationships +- Include signature status per evidence node +- Enable audit-ready evidence exploration + +**Working directory:** `src/Findings/StellaOps.Findings.WebService/` + +## Dependencies & Concurrency + +- **Upstream**: SPRINT_7000_0002_0002 (Vulnerability-First UX API) +- **Downstream**: None +- **Safe to parallelize with**: SPRINT_7000_0003_0002, SPRINT_7000_0003_0003 + +## Documentation Prerequisites + +- `docs/product-advisories/21-Dec-2025 - Designing Explainable Triage Workflows.md` +- `src/Attestor/__Libraries/StellaOps.Attestor.ProofChain/` + +--- + +## Tasks + +### T1: Define EvidenceGraph Model + +**Assignee**: Findings Team +**Story Points**: 2 
+**Status**: TODO +**Dependencies**: — + +**Description**: +Create the evidence graph response model. + +**Implementation Path**: `Contracts/EvidenceGraphContracts.cs` (new file) + +**Contract Definition**: +```csharp +namespace StellaOps.Findings.WebService.Contracts; + +/// +/// Evidence graph for a finding showing all contributing evidence. +/// +public sealed record EvidenceGraphResponse +{ + /// + /// Finding this graph is for. + /// + public required Guid FindingId { get; init; } + + /// + /// Vulnerability ID. + /// + public required string VulnerabilityId { get; init; } + + /// + /// All evidence nodes. + /// + public required IReadOnlyList Nodes { get; init; } + + /// + /// Edges representing derivation relationships. + /// + public required IReadOnlyList Edges { get; init; } + + /// + /// Root node (verdict). + /// + public required string RootNodeId { get; init; } + + /// + /// Graph generation timestamp. + /// + public required DateTimeOffset GeneratedAt { get; init; } +} + +/// +/// A node in the evidence graph. +/// +public sealed record EvidenceNode +{ + /// + /// Node identifier (content-addressed). + /// + public required string Id { get; init; } + + /// + /// Node type. + /// + public required EvidenceNodeType Type { get; init; } + + /// + /// Human-readable label. + /// + public required string Label { get; init; } + + /// + /// Content digest (sha256:...). + /// + public required string Digest { get; init; } + + /// + /// Issuer of this evidence. + /// + public string? Issuer { get; init; } + + /// + /// Timestamp when created. + /// + public required DateTimeOffset Timestamp { get; init; } + + /// + /// Signature status. + /// + public required SignatureStatus Signature { get; init; } + + /// + /// Additional metadata. + /// + public IReadOnlyDictionary Metadata { get; init; } + = new Dictionary(); + + /// + /// URL to fetch raw content. + /// + public string? ContentUrl { get; init; } +} + +public enum EvidenceNodeType +{ + /// Final verdict. 
+ Verdict, + + /// Policy evaluation trace. + PolicyTrace, + + /// VEX statement. + VexStatement, + + /// Reachability analysis. + Reachability, + + /// Runtime observation. + RuntimeObservation, + + /// SBOM component. + SbomComponent, + + /// Advisory source. + Advisory, + + /// Build provenance. + Provenance, + + /// Attestation envelope. + Attestation +} + +/// +/// Signature verification status. +/// +public sealed record SignatureStatus +{ + /// + /// Whether signed. + /// + public required bool IsSigned { get; init; } + + /// + /// Whether signature is valid. + /// + public bool? IsValid { get; init; } + + /// + /// Signer identity (if known). + /// + public string? SignerIdentity { get; init; } + + /// + /// Signing timestamp. + /// + public DateTimeOffset? SignedAt { get; init; } + + /// + /// Key ID used for signing. + /// + public string? KeyId { get; init; } + + /// + /// Rekor log index (if published). + /// + public long? RekorLogIndex { get; init; } +} + +/// +/// Edge representing derivation relationship. +/// +public sealed record EvidenceEdge +{ + /// + /// Source node ID. + /// + public required string From { get; init; } + + /// + /// Target node ID. + /// + public required string To { get; init; } + + /// + /// Relationship type. + /// + public required EvidenceRelation Relation { get; init; } + + /// + /// Human-readable label. + /// + public string? Label { get; init; } +} + +public enum EvidenceRelation +{ + /// Derived from (input to output). + DerivedFrom, + + /// Verified by (attestation verifies content). + VerifiedBy, + + /// Supersedes (newer replaces older). + Supersedes, + + /// References (general reference). + References, + + /// Corroborates (supports claim). 
+ Corroborates +} +``` + +**Acceptance Criteria**: +- [ ] EvidenceGraphResponse with nodes and edges +- [ ] EvidenceNode with type, digest, signature +- [ ] SignatureStatus with Rekor integration +- [ ] EvidenceEdge with relation type + +--- + +### T2: Create EvidenceGraphBuilder + +**Assignee**: Findings Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1 + +**Description**: +Build evidence graphs from finding evidence. + +**Implementation Path**: `Services/EvidenceGraphBuilder.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Findings.WebService.Services; + +public interface IEvidenceGraphBuilder +{ + Task BuildAsync(Guid findingId, CancellationToken ct); +} + +public sealed class EvidenceGraphBuilder : IEvidenceGraphBuilder +{ + private readonly IEvidenceRepository _evidenceRepo; + private readonly IAttestationVerifier _attestationVerifier; + + public async Task BuildAsync( + Guid findingId, + CancellationToken ct) + { + var evidence = await _evidenceRepo.GetFullEvidenceAsync(findingId, ct); + var nodes = new List(); + var edges = new List(); + + // Build verdict node (root) + var verdictNode = BuildVerdictNode(evidence.Verdict); + nodes.Add(verdictNode); + + // Build policy trace node + if (evidence.PolicyTrace is not null) + { + var policyNode = await BuildPolicyNodeAsync(evidence.PolicyTrace, ct); + nodes.Add(policyNode); + edges.Add(new EvidenceEdge + { + From = policyNode.Id, + To = verdictNode.Id, + Relation = EvidenceRelation.DerivedFrom, + Label = "policy evaluation" + }); + } + + // Build VEX nodes + foreach (var vex in evidence.VexStatements) + { + var vexNode = await BuildVexNodeAsync(vex, ct); + nodes.Add(vexNode); + edges.Add(new EvidenceEdge + { + From = vexNode.Id, + To = verdictNode.Id, + Relation = EvidenceRelation.DerivedFrom, + Label = vex.Status.ToString().ToLowerInvariant() + }); + } + + // Build reachability node + if (evidence.Reachability is not null) + { + var reachNode = await 
BuildReachabilityNodeAsync(evidence.Reachability, ct); + nodes.Add(reachNode); + edges.Add(new EvidenceEdge + { + From = reachNode.Id, + To = verdictNode.Id, + Relation = EvidenceRelation.Corroborates, + Label = "reachability analysis" + }); + } + + // Build runtime nodes + foreach (var runtime in evidence.RuntimeObservations) + { + var runtimeNode = await BuildRuntimeNodeAsync(runtime, ct); + nodes.Add(runtimeNode); + edges.Add(new EvidenceEdge + { + From = runtimeNode.Id, + To = verdictNode.Id, + Relation = EvidenceRelation.Corroborates, + Label = "runtime observation" + }); + } + + // Build SBOM node + if (evidence.SbomComponent is not null) + { + var sbomNode = BuildSbomNode(evidence.SbomComponent); + nodes.Add(sbomNode); + edges.Add(new EvidenceEdge + { + From = sbomNode.Id, + To = verdictNode.Id, + Relation = EvidenceRelation.References, + Label = "component" + }); + } + + // Build provenance node + if (evidence.Provenance is not null) + { + var provNode = await BuildProvenanceNodeAsync(evidence.Provenance, ct); + nodes.Add(provNode); + edges.Add(new EvidenceEdge + { + From = provNode.Id, + To = verdictNode.Id, + Relation = EvidenceRelation.VerifiedBy, + Label = "provenance" + }); + } + + return new EvidenceGraphResponse + { + FindingId = findingId, + VulnerabilityId = evidence.VulnerabilityId, + Nodes = nodes, + Edges = edges, + RootNodeId = verdictNode.Id, + GeneratedAt = DateTimeOffset.UtcNow + }; + } + + private async Task VerifySignatureAsync( + string? 
attestationDigest, + CancellationToken ct) + { + if (attestationDigest is null) + { + return new SignatureStatus { IsSigned = false }; + } + + var result = await _attestationVerifier.VerifyAsync(attestationDigest, ct); + return new SignatureStatus + { + IsSigned = true, + IsValid = result.IsValid, + SignerIdentity = result.SignerIdentity, + SignedAt = result.SignedAt, + KeyId = result.KeyId, + RekorLogIndex = result.RekorLogIndex + }; + } +} +``` + +**Acceptance Criteria**: +- [ ] Builds complete evidence graph +- [ ] Includes all evidence types +- [ ] Signature verification for each node +- [ ] Proper edge relationships + +--- + +### T3: Create API Endpoint + +**Assignee**: Findings Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T2 + +**Description**: +Create the evidence graph API endpoint. + +**Implementation Path**: `Endpoints/EvidenceGraphEndpoints.cs` (new file) + +```csharp +namespace StellaOps.Findings.WebService.Endpoints; + +public static class EvidenceGraphEndpoints +{ + public static void MapEvidenceGraphEndpoints(this WebApplication app) + { + var group = app.MapGroup("/api/v1/findings") + .WithTags("Evidence Graph") + .RequireAuthorization(); + + // GET /api/v1/findings/{findingId}/evidence-graph + group.MapGet("/{findingId:guid}/evidence-graph", async ( + Guid findingId, + [FromQuery] bool includeContent = false, + IEvidenceGraphBuilder builder, + CancellationToken ct) => + { + var graph = await builder.BuildAsync(findingId, ct); + return graph is not null + ? 
Results.Ok(graph) + : Results.NotFound(); + }) + .WithName("GetEvidenceGraph") + .WithDescription("Get evidence graph for finding visualization") + .Produces(200) + .Produces(404); + + // GET /api/v1/findings/{findingId}/evidence/{nodeId} + group.MapGet("/{findingId:guid}/evidence/{nodeId}", async ( + Guid findingId, + string nodeId, + IEvidenceContentService contentService, + CancellationToken ct) => + { + var content = await contentService.GetContentAsync(findingId, nodeId, ct); + return content is not null + ? Results.Ok(content) + : Results.NotFound(); + }) + .WithName("GetEvidenceNodeContent") + .WithDescription("Get raw content for an evidence node"); + } +} +``` + +**Acceptance Criteria**: +- [ ] GET /evidence-graph endpoint +- [ ] GET /evidence/{nodeId} for content +- [ ] OpenAPI documentation +- [ ] 404 handling + +--- + +### T4: Add Tests + +**Assignee**: Findings Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1-T3 + +**Test Cases**: +```csharp +public class EvidenceGraphBuilderTests +{ + [Fact] + public async Task BuildAsync_WithAllEvidence_ReturnsCompleteGraph() + { + var evidence = CreateFullEvidence(); + _evidenceRepo.Setup(r => r.GetFullEvidenceAsync(It.IsAny(), It.IsAny())) + .ReturnsAsync(evidence); + + var result = await _builder.BuildAsync(Guid.NewGuid(), CancellationToken.None); + + result.Nodes.Should().HaveCountGreaterThan(1); + result.Edges.Should().NotBeEmpty(); + result.RootNodeId.Should().NotBeNullOrEmpty(); + } + + [Fact] + public async Task BuildAsync_SignedAttestation_IncludesSignatureStatus() + { + var evidence = CreateEvidenceWithSignedAttestation(); + + var result = await _builder.BuildAsync(Guid.NewGuid(), CancellationToken.None); + + var signedNode = result.Nodes.First(n => n.Signature.IsSigned); + signedNode.Signature.IsValid.Should().BeTrue(); + } +} +``` + +**Acceptance Criteria**: +- [ ] Graph building tests +- [ ] Signature verification tests +- [ ] Edge relationship tests + +--- + +## Delivery Tracker + +| # 
| Task ID | Status | Dependency | Owners | Task Definition |
+|---|---------|--------|------------|--------|-----------------|
+| 1 | T1 | TODO | — | Findings Team | Define EvidenceGraph model |
+| 2 | T2 | TODO | T1 | Findings Team | Create EvidenceGraphBuilder |
+| 3 | T3 | TODO | T2 | Findings Team | Create API endpoint |
+| 4 | T4 | TODO | T1-T3 | Findings Team | Add tests |
+
+---
+
+## Execution Log
+
+| Date (UTC) | Update | Owner |
+|------------|--------|-------|
+| 2025-12-22 | Sprint created from Explainable Triage Workflows advisory gap analysis. | Claude |
+
+---
+
+## Success Criteria
+
+- [ ] All 4 tasks marked DONE
+- [ ] Evidence graph includes all node types
+- [ ] Signature status verified and displayed
+- [ ] API returns valid graph structure
+- [ ] All tests pass
diff --git a/docs/implplan/SPRINT_7000_0003_0002_reachability_minimap_api.md b/docs/implplan/SPRINT_7000_0003_0002_reachability_minimap_api.md
new file mode 100644
index 000000000..491c61510
--- /dev/null
+++ b/docs/implplan/SPRINT_7000_0003_0002_reachability_minimap_api.md
@@ -0,0 +1,602 @@
+# Sprint 7000.0003.0002 · Reachability Mini-Map API
+
+## Topic & Scope
+
+- Create API for condensed reachability subgraph visualization
+- Extract entrypoints → affected component → sinks paths
+- Provide visual-friendly serialization for UI rendering
+
+**Working directory:** `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/`
+
+## Dependencies & Concurrency
+
+- **Upstream**: SPRINT_7000_0001_0002 (Vulnerability-First UX API)
+- **Downstream**: None
+- **Safe to parallelize with**: SPRINT_7000_0002_0001, SPRINT_7000_0002_0003
+
+## Documentation Prerequisites
+
+- `docs/product-advisories/21-Dec-2025 - Designing Explainable Triage Workflows.md`
+- `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/RichGraph.cs`
+- `src/Scanner/__Libraries/StellaOps.Scanner.Reachability/Witnesses/PathWitness.cs`
+
+---
+
+## Tasks
+
+### T1: Define ReachabilityMiniMap Model
+
+**Assignee**: Scanner 
Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: — + +**Implementation Path**: `MiniMap/ReachabilityMiniMap.cs` (new file) + +**Contract Definition**: +```csharp +namespace StellaOps.Scanner.Reachability.MiniMap; + +/// +/// Condensed reachability visualization for a finding. +/// Shows paths from entrypoints to vulnerable component to sinks. +/// +public sealed record ReachabilityMiniMap +{ + /// + /// Finding this map is for. + /// + public required Guid FindingId { get; init; } + + /// + /// Vulnerability ID. + /// + public required string VulnerabilityId { get; init; } + + /// + /// The vulnerable component. + /// + public required MiniMapNode VulnerableComponent { get; init; } + + /// + /// Entry points that reach the vulnerable component. + /// + public required IReadOnlyList Entrypoints { get; init; } + + /// + /// Paths from entrypoints to vulnerable component. + /// + public required IReadOnlyList Paths { get; init; } + + /// + /// Overall reachability state. + /// + public required ReachabilityState State { get; init; } + + /// + /// Confidence of the analysis. + /// + public required decimal Confidence { get; init; } + + /// + /// Full graph digest for verification. + /// + public required string GraphDigest { get; init; } + + /// + /// When analysis was performed. + /// + public required DateTimeOffset AnalyzedAt { get; init; } +} + +/// +/// A node in the mini-map. +/// +public sealed record MiniMapNode +{ + /// + /// Node identifier. + /// + public required string Id { get; init; } + + /// + /// Display label. + /// + public required string Label { get; init; } + + /// + /// Node type. + /// + public required MiniMapNodeType Type { get; init; } + + /// + /// Package URL (if applicable). + /// + public string? Purl { get; init; } + + /// + /// Source file location. + /// + public string? SourceFile { get; init; } + + /// + /// Line number in source. + /// + public int? 
LineNumber { get; init; } +} + +public enum MiniMapNodeType +{ + Entrypoint, + Function, + Class, + Module, + VulnerableComponent, + Sink +} + +/// +/// An entry point in the mini-map. +/// +public sealed record MiniMapEntrypoint +{ + /// + /// Entry point node. + /// + public required MiniMapNode Node { get; init; } + + /// + /// Entry point kind. + /// + public required EntrypointKind Kind { get; init; } + + /// + /// Number of paths from this entrypoint. + /// + public required int PathCount { get; init; } + + /// + /// Shortest path length to vulnerable component. + /// + public required int ShortestPathLength { get; init; } +} + +public enum EntrypointKind +{ + HttpEndpoint, + GrpcMethod, + MessageHandler, + CliCommand, + MainFunction, + PublicApi, + EventHandler, + Other +} + +/// +/// A path from entrypoint to vulnerable component. +/// +public sealed record MiniMapPath +{ + /// + /// Path identifier. + /// + public required string PathId { get; init; } + + /// + /// Starting entrypoint ID. + /// + public required string EntrypointId { get; init; } + + /// + /// Ordered steps in the path. + /// + public required IReadOnlyList Steps { get; init; } + + /// + /// Path length. + /// + public int Length => Steps.Count; + + /// + /// Whether path has runtime corroboration. + /// + public bool HasRuntimeEvidence { get; init; } + + /// + /// Confidence for this specific path. + /// + public decimal PathConfidence { get; init; } +} + +/// +/// A step in a path. +/// +public sealed record MiniMapPathStep +{ + /// + /// Step index (0-based). + /// + public required int Index { get; init; } + + /// + /// Node at this step. + /// + public required MiniMapNode Node { get; init; } + + /// + /// Call type to next step. + /// + public string? 
CallType { get; init; } +} + +public enum ReachabilityState +{ + Unknown, + StaticReachable, + StaticUnreachable, + ConfirmedReachable, + ConfirmedUnreachable +} +``` + +**Acceptance Criteria**: +- [ ] ReachabilityMiniMap model complete +- [ ] MiniMapNode with type and location +- [ ] MiniMapEntrypoint with kind +- [ ] MiniMapPath with steps +- [ ] XML documentation + +--- + +### T2: Create MiniMapExtractor + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1 + +**Implementation Path**: `MiniMap/MiniMapExtractor.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Scanner.Reachability.MiniMap; + +public interface IMiniMapExtractor +{ + ReachabilityMiniMap Extract(RichGraph graph, string vulnerableComponent, int maxPaths = 10); +} + +public sealed class MiniMapExtractor : IMiniMapExtractor +{ + public ReachabilityMiniMap Extract( + RichGraph graph, + string vulnerableComponent, + int maxPaths = 10) + { + // Find vulnerable component node + var vulnNode = graph.Nodes.FirstOrDefault(n => + n.Purl == vulnerableComponent || + n.SymbolId?.Contains(vulnerableComponent) == true); + + if (vulnNode is null) + { + return CreateNotFoundMap(vulnerableComponent); + } + + // Find all entrypoints + var entrypoints = graph.Nodes + .Where(n => IsEntrypoint(n)) + .ToList(); + + // BFS from each entrypoint to vulnerable component + var paths = new List(); + var entrypointInfos = new List(); + + foreach (var ep in entrypoints) + { + var epPaths = FindPaths(graph, ep, vulnNode, maxDepth: 20); + + if (epPaths.Count > 0) + { + entrypointInfos.Add(new MiniMapEntrypoint + { + Node = ToMiniMapNode(ep), + Kind = ClassifyEntrypoint(ep), + PathCount = epPaths.Count, + ShortestPathLength = epPaths.Min(p => p.Length) + }); + + paths.AddRange(epPaths.Take(maxPaths / entrypoints.Count + 1)); + } + } + + // Determine state + var state = paths.Count > 0 + ? (paths.Any(p => p.HasRuntimeEvidence) + ? 
ReachabilityState.ConfirmedReachable + : ReachabilityState.StaticReachable) + : ReachabilityState.StaticUnreachable; + + // Calculate confidence + var confidence = CalculateConfidence(paths, entrypointInfos, graph); + + return new ReachabilityMiniMap + { + FindingId = Guid.Empty, // Set by caller + VulnerabilityId = string.Empty, // Set by caller + VulnerableComponent = ToMiniMapNode(vulnNode), + Entrypoints = entrypointInfos.OrderBy(e => e.ShortestPathLength).ToList(), + Paths = paths.OrderBy(p => p.Length).Take(maxPaths).ToList(), + State = state, + Confidence = confidence, + GraphDigest = graph.Digest, + AnalyzedAt = DateTimeOffset.UtcNow + }; + } + + private static bool IsEntrypoint(RichGraphNode node) + { + return node.Kind is "entrypoint" or "export" or "main" or "handler"; + } + + private static EntrypointKind ClassifyEntrypoint(RichGraphNode node) + { + if (node.Attributes.TryGetValue("http_method", out _)) + return EntrypointKind.HttpEndpoint; + if (node.Attributes.TryGetValue("grpc_service", out _)) + return EntrypointKind.GrpcMethod; + if (node.Kind == "main") + return EntrypointKind.MainFunction; + if (node.Kind == "handler") + return EntrypointKind.EventHandler; + if (node.Attributes.TryGetValue("cli_command", out _)) + return EntrypointKind.CliCommand; + + return EntrypointKind.PublicApi; + } + + private List FindPaths( + RichGraph graph, + RichGraphNode start, + RichGraphNode end, + int maxDepth) + { + var paths = new List(); + var queue = new Queue<(RichGraphNode node, List path)>(); + queue.Enqueue((start, [start])); + + while (queue.Count > 0 && paths.Count < 100) + { + var (current, path) = queue.Dequeue(); + + if (path.Count > maxDepth) continue; + + if (current.Id == end.Id) + { + paths.Add(BuildPath(path, graph)); + continue; + } + + var edges = graph.Edges.Where(e => e.From == current.Id); + foreach (var edge in edges) + { + var nextNode = graph.Nodes.FirstOrDefault(n => n.Id == edge.To); + if (nextNode is not null && !path.Any(n => n.Id == 
nextNode.Id)) + { + queue.Enqueue((nextNode, [.. path, nextNode])); + } + } + } + + return paths; + } + + private static MiniMapPath BuildPath(List nodes, RichGraph graph) + { + var steps = nodes.Select((n, i) => + { + var edge = i < nodes.Count - 1 + ? graph.Edges.FirstOrDefault(e => e.From == n.Id && e.To == nodes[i + 1].Id) + : null; + + return new MiniMapPathStep + { + Index = i, + Node = ToMiniMapNode(n), + CallType = edge?.Kind + }; + }).ToList(); + + var hasRuntime = graph.Edges + .Where(e => nodes.Any(n => n.Id == e.From)) + .Any(e => e.Evidence?.Contains("runtime") == true); + + return new MiniMapPath + { + PathId = $"path:{ComputePathHash(nodes)}", + EntrypointId = nodes.First().Id, + Steps = steps, + HasRuntimeEvidence = hasRuntime, + PathConfidence = hasRuntime ? 0.95m : 0.75m + }; + } + + private static MiniMapNode ToMiniMapNode(RichGraphNode node) + { + return new MiniMapNode + { + Id = node.Id, + Label = node.Display ?? node.SymbolId ?? node.Id, + Type = node.Kind switch + { + "entrypoint" or "export" or "main" => MiniMapNodeType.Entrypoint, + "function" or "method" => MiniMapNodeType.Function, + "class" => MiniMapNodeType.Class, + "module" or "package" => MiniMapNodeType.Module, + "sink" => MiniMapNodeType.Sink, + _ => MiniMapNodeType.Function + }, + Purl = node.Purl, + SourceFile = node.Attributes.GetValueOrDefault("source_file"), + LineNumber = node.Attributes.TryGetValue("line", out var line) ? 
int.Parse(line) : null + }; + } + + private static decimal CalculateConfidence( + List paths, + List entrypoints, + RichGraph graph) + { + if (paths.Count == 0) return 0.9m; // High confidence in unreachability + + var runtimePaths = paths.Count(p => p.HasRuntimeEvidence); + var runtimeRatio = (decimal)runtimePaths / paths.Count; + + return 0.6m + (0.3m * runtimeRatio); + } + + private static string ComputePathHash(List nodes) + { + var ids = string.Join("|", nodes.Select(n => n.Id)); + return Convert.ToHexString(SHA256.HashData(Encoding.UTF8.GetBytes(ids)))[..16].ToLowerInvariant(); + } +} +``` + +**Acceptance Criteria**: +- [ ] Extracts paths from RichGraph +- [ ] Classifies entrypoints correctly +- [ ] BFS path finding with depth limit +- [ ] Confidence calculation +- [ ] Runtime evidence detection + +--- + +### T3: Create API Endpoint + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T2 + +**Implementation Path**: `src/Findings/StellaOps.Findings.WebService/Endpoints/ReachabilityMapEndpoints.cs` + +```csharp +namespace StellaOps.Findings.WebService.Endpoints; + +public static class ReachabilityMapEndpoints +{ + public static void MapReachabilityMapEndpoints(this WebApplication app) + { + var group = app.MapGroup("/api/v1/findings") + .WithTags("Reachability") + .RequireAuthorization(); + + // GET /api/v1/findings/{findingId}/reachability-map + group.MapGet("/{findingId:guid}/reachability-map", async ( + Guid findingId, + [FromQuery] int maxPaths = 10, + IReachabilityMapService service, + CancellationToken ct) => + { + var map = await service.GetMiniMapAsync(findingId, maxPaths, ct); + return map is not null + ? 
Results.Ok(map) + : Results.NotFound(); + }) + .WithName("GetReachabilityMiniMap") + .WithDescription("Get condensed reachability visualization") + .Produces(200) + .Produces(404); + } +} +``` + +**Acceptance Criteria**: +- [ ] GET endpoint implemented +- [ ] maxPaths query parameter +- [ ] OpenAPI documentation + +--- + +### T4: Add Tests + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1-T3 + +**Test Cases**: +```csharp +public class MiniMapExtractorTests +{ + [Fact] + public void Extract_ReachableComponent_ReturnsPaths() + { + var graph = CreateGraphWithPaths(); + + var result = _extractor.Extract(graph, "pkg:npm/vulnerable@1.0.0"); + + result.State.Should().Be(ReachabilityState.StaticReachable); + result.Paths.Should().NotBeEmpty(); + result.Entrypoints.Should().NotBeEmpty(); + } + + [Fact] + public void Extract_UnreachableComponent_ReturnsEmptyPaths() + { + var graph = CreateGraphWithoutPaths(); + + var result = _extractor.Extract(graph, "pkg:npm/isolated@1.0.0"); + + result.State.Should().Be(ReachabilityState.StaticUnreachable); + result.Paths.Should().BeEmpty(); + } + + [Fact] + public void Extract_WithRuntimeEvidence_ReturnsConfirmedReachable() + { + var graph = CreateGraphWithRuntimeEvidence(); + + var result = _extractor.Extract(graph, "pkg:npm/vulnerable@1.0.0"); + + result.State.Should().Be(ReachabilityState.ConfirmedReachable); + result.Paths.Should().Contain(p => p.HasRuntimeEvidence); + } +} +``` + +**Acceptance Criteria**: +- [ ] Reachable component tests +- [ ] Unreachable component tests +- [ ] Runtime evidence tests + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Scanner Team | Define ReachabilityMiniMap model | +| 2 | T2 | TODO | T1 | Scanner Team | Create MiniMapExtractor | +| 3 | T3 | TODO | T2 | Scanner Team | Create API endpoint | +| 4 | T4 | TODO | T1-T3 | Scanner 
Team | Add tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from Explainable Triage Workflows advisory gap analysis. | Claude | + +--- + +## Success Criteria + +- [ ] All 4 tasks marked DONE +- [ ] Mini-map shows entrypoints to vulnerable component +- [ ] Paths with runtime evidence highlighted +- [ ] Confidence reflects analysis quality +- [ ] All tests pass diff --git a/docs/implplan/SPRINT_7000_0003_0003_runtime_timeline_api.md b/docs/implplan/SPRINT_7000_0003_0003_runtime_timeline_api.md new file mode 100644 index 000000000..6f6f73f42 --- /dev/null +++ b/docs/implplan/SPRINT_7000_0003_0003_runtime_timeline_api.md @@ -0,0 +1,624 @@ +# Sprint 7000.0002.0003 · Runtime Timeline API + +## Topic & Scope + +- Create API for runtime corroboration timeline visualization +- Show time-windowed load events, syscalls, network exposure +- Map observations to supports/contradicts/unknown posture + +**Working directory:** `src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Native/` + +## Dependencies & Concurrency + +- **Upstream**: SPRINT_7000_0001_0002 (Vulnerability-First UX API) +- **Downstream**: None +- **Safe to parallelize with**: SPRINT_7000_0002_0001, SPRINT_7000_0002_0002 + +## Documentation Prerequisites + +- `docs/product-advisories/21-Dec-2025 - Designing Explainable Triage Workflows.md` +- `src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Native/RuntimeCapture/` + +--- + +## Tasks + +### T1: Define RuntimeTimeline Model + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: — + +**Implementation Path**: `RuntimeCapture/Timeline/RuntimeTimeline.cs` (new file) + +**Contract Definition**: +```csharp +namespace StellaOps.Scanner.Analyzers.Native.RuntimeCapture.Timeline; + +/// +/// Runtime observation timeline for a finding. +/// +public sealed record RuntimeTimeline +{ + /// + /// Finding this timeline is for. 
+ /// + public required Guid FindingId { get; init; } + + /// + /// Vulnerable component being tracked. + /// + public required string ComponentPurl { get; init; } + + /// + /// Time window start. + /// + public required DateTimeOffset WindowStart { get; init; } + + /// + /// Time window end. + /// + public required DateTimeOffset WindowEnd { get; init; } + + /// + /// Overall posture based on observations. + /// + public required RuntimePosture Posture { get; init; } + + /// + /// Posture explanation. + /// + public required string PostureExplanation { get; init; } + + /// + /// Time buckets with observation summaries. + /// + public required IReadOnlyList Buckets { get; init; } + + /// + /// Significant events in the timeline. + /// + public required IReadOnlyList Events { get; init; } + + /// + /// Total observation count. + /// + public int TotalObservations => Buckets.Sum(b => b.ObservationCount); + + /// + /// Capture session digests. + /// + public required IReadOnlyList SessionDigests { get; init; } +} + +public enum RuntimePosture +{ + /// No runtime data available. + Unknown, + + /// Runtime evidence supports the verdict. + Supports, + + /// Runtime evidence contradicts the verdict. + Contradicts, + + /// Runtime evidence is inconclusive. + Inconclusive +} + +/// +/// A time bucket in the timeline. +/// +public sealed record TimelineBucket +{ + /// + /// Bucket start time. + /// + public required DateTimeOffset Start { get; init; } + + /// + /// Bucket end time. + /// + public required DateTimeOffset End { get; init; } + + /// + /// Number of observations in this bucket. + /// + public required int ObservationCount { get; init; } + + /// + /// Observation types in this bucket. + /// + public required IReadOnlyList ByType { get; init; } + + /// + /// Whether component was loaded in this bucket. + /// + public required bool ComponentLoaded { get; init; } + + /// + /// Whether vulnerable code was executed. + /// + public bool? 
VulnerableCodeExecuted { get; init; } +} + +/// +/// Summary of observations by type. +/// +public sealed record ObservationTypeSummary +{ + public required ObservationType Type { get; init; } + public required int Count { get; init; } +} + +public enum ObservationType +{ + LibraryLoad, + Syscall, + NetworkConnection, + FileAccess, + ProcessSpawn, + SymbolResolution +} + +/// +/// A significant event in the timeline. +/// +public sealed record TimelineEvent +{ + /// + /// Event timestamp. + /// + public required DateTimeOffset Timestamp { get; init; } + + /// + /// Event type. + /// + public required TimelineEventType Type { get; init; } + + /// + /// Event description. + /// + public required string Description { get; init; } + + /// + /// Significance level. + /// + public required EventSignificance Significance { get; init; } + + /// + /// Related evidence digest. + /// + public string? EvidenceDigest { get; init; } + + /// + /// Additional details. + /// + public IReadOnlyDictionary Details { get; init; } + = new Dictionary(); +} + +public enum TimelineEventType +{ + ComponentLoaded, + ComponentUnloaded, + VulnerableFunctionCalled, + NetworkExposure, + SyscallBlocked, + ProcessForked, + CaptureStarted, + CaptureStopped +} + +public enum EventSignificance +{ + Low, + Medium, + High, + Critical +} +``` + +**Acceptance Criteria**: +- [ ] RuntimeTimeline with window and posture +- [ ] TimelineBucket with observation summary +- [ ] TimelineEvent for significant events +- [ ] Posture enum with explanations + +--- + +### T2: Create TimelineBuilder + +**Assignee**: Scanner Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1 + +**Implementation Path**: `RuntimeCapture/Timeline/TimelineBuilder.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Scanner.Analyzers.Native.RuntimeCapture.Timeline; + +public interface ITimelineBuilder +{ + RuntimeTimeline Build( + RuntimeEvidence evidence, + string componentPurl, + TimelineOptions options); +} 
+ +public sealed class TimelineBuilder : ITimelineBuilder +{ + public RuntimeTimeline Build( + RuntimeEvidence evidence, + string componentPurl, + TimelineOptions options) + { + var windowStart = options.WindowStart ?? evidence.FirstObservation; + var windowEnd = options.WindowEnd ?? evidence.LastObservation; + + // Build time buckets + var buckets = BuildBuckets(evidence, componentPurl, windowStart, windowEnd, options.BucketSize); + + // Extract significant events + var events = ExtractEvents(evidence, componentPurl); + + // Determine posture + var (posture, explanation) = DeterminePosture(buckets, events, componentPurl); + + return new RuntimeTimeline + { + FindingId = Guid.Empty, // Set by caller + ComponentPurl = componentPurl, + WindowStart = windowStart, + WindowEnd = windowEnd, + Posture = posture, + PostureExplanation = explanation, + Buckets = buckets, + Events = events.OrderBy(e => e.Timestamp).ToList(), + SessionDigests = evidence.SessionDigests.ToList() + }; + } + + private List BuildBuckets( + RuntimeEvidence evidence, + string componentPurl, + DateTimeOffset start, + DateTimeOffset end, + TimeSpan bucketSize) + { + var buckets = new List(); + var current = start; + + while (current < end) + { + var bucketEnd = current + bucketSize; + if (bucketEnd > end) bucketEnd = end; + + var observations = evidence.Observations + .Where(o => o.Timestamp >= current && o.Timestamp < bucketEnd) + .ToList(); + + var byType = observations + .GroupBy(o => ClassifyObservation(o)) + .Select(g => new ObservationTypeSummary + { + Type = g.Key, + Count = g.Count() + }) + .ToList(); + + var componentLoaded = observations.Any(o => + o.Type == "library_load" && + o.Path?.Contains(ExtractComponentName(componentPurl)) == true); + + buckets.Add(new TimelineBucket + { + Start = current, + End = bucketEnd, + ObservationCount = observations.Count, + ByType = byType, + ComponentLoaded = componentLoaded, + VulnerableCodeExecuted = componentLoaded ? 
DetectVulnerableExecution(observations) : null + }); + + current = bucketEnd; + } + + return buckets; + } + + private List ExtractEvents(RuntimeEvidence evidence, string componentPurl) + { + var events = new List(); + var componentName = ExtractComponentName(componentPurl); + + foreach (var obs in evidence.Observations) + { + if (obs.Type == "library_load" && obs.Path?.Contains(componentName) == true) + { + events.Add(new TimelineEvent + { + Timestamp = obs.Timestamp, + Type = TimelineEventType.ComponentLoaded, + Description = $"Component {componentName} loaded", + Significance = EventSignificance.High, + EvidenceDigest = obs.Digest, + Details = new Dictionary + { + ["path"] = obs.Path ?? "", + ["process_id"] = obs.ProcessId.ToString() + } + }); + } + + if (obs.Type == "network" && obs.Port is > 0 and < 1024) + { + events.Add(new TimelineEvent + { + Timestamp = obs.Timestamp, + Type = TimelineEventType.NetworkExposure, + Description = $"Network exposure on port {obs.Port}", + Significance = EventSignificance.Critical, + EvidenceDigest = obs.Digest + }); + } + } + + // Add capture session events + foreach (var session in evidence.Sessions) + { + events.Add(new TimelineEvent + { + Timestamp = session.StartTime, + Type = TimelineEventType.CaptureStarted, + Description = $"Capture session started ({session.Platform})", + Significance = EventSignificance.Low + }); + + if (session.EndTime.HasValue) + { + events.Add(new TimelineEvent + { + Timestamp = session.EndTime.Value, + Type = TimelineEventType.CaptureStopped, + Description = "Capture session stopped", + Significance = EventSignificance.Low + }); + } + } + + return events; + } + + private static (RuntimePosture posture, string explanation) DeterminePosture( + List buckets, + List events, + string componentPurl) + { + if (buckets.Count == 0 || buckets.All(b => b.ObservationCount == 0)) + { + return (RuntimePosture.Unknown, "No runtime observations collected"); + } + + var componentLoadedCount = buckets.Count(b => 
b.ComponentLoaded); + var totalBuckets = buckets.Count; + + if (componentLoadedCount == 0) + { + return (RuntimePosture.Supports, + $"Component {ExtractComponentName(componentPurl)} was not loaded during observation window"); + } + + var hasNetworkExposure = events.Any(e => e.Type == TimelineEventType.NetworkExposure); + var hasVulnerableExecution = buckets.Any(b => b.VulnerableCodeExecuted == true); + + if (hasVulnerableExecution || hasNetworkExposure) + { + return (RuntimePosture.Contradicts, + "Runtime evidence shows component is actively used and exposed"); + } + + if (componentLoadedCount < totalBuckets / 2) + { + return (RuntimePosture.Inconclusive, + $"Component loaded in {componentLoadedCount}/{totalBuckets} time periods"); + } + + return (RuntimePosture.Supports, + "Component loaded but no evidence of vulnerable code execution"); + } + + private static ObservationType ClassifyObservation(RuntimeObservation obs) + { + return obs.Type switch + { + "library_load" or "dlopen" => ObservationType.LibraryLoad, + "syscall" => ObservationType.Syscall, + "network" or "connect" => ObservationType.NetworkConnection, + "file" or "open" => ObservationType.FileAccess, + "fork" or "exec" => ObservationType.ProcessSpawn, + "symbol" => ObservationType.SymbolResolution, + _ => ObservationType.LibraryLoad + }; + } + + private static string ExtractComponentName(string purl) + { + // Extract name from PURL like pkg:npm/lodash@4.17.21 + var parts = purl.Split('/'); + var namePart = parts.LastOrDefault() ?? purl; + return namePart.Split('@').FirstOrDefault() ?? namePart; + } + + private static bool? DetectVulnerableExecution(List observations) + { + // Check if any observation indicates vulnerable code path execution + return observations.Any(o => + o.Type == "symbol" || + o.Attributes?.ContainsKey("vulnerable_function") == true); + } +} + +public sealed record TimelineOptions +{ + public DateTimeOffset? WindowStart { get; init; } + public DateTimeOffset? 
WindowEnd { get; init; } + public TimeSpan BucketSize { get; init; } = TimeSpan.FromHours(1); +} +``` + +**Acceptance Criteria**: +- [ ] Builds timeline from runtime evidence +- [ ] Groups into time buckets +- [ ] Extracts significant events +- [ ] Determines posture with explanation + +--- + +### T3: Create API Endpoint + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T2 + +**Implementation Path**: `src/Findings/StellaOps.Findings.WebService/Endpoints/RuntimeTimelineEndpoints.cs` + +```csharp +namespace StellaOps.Findings.WebService.Endpoints; + +public static class RuntimeTimelineEndpoints +{ + public static void MapRuntimeTimelineEndpoints(this WebApplication app) + { + var group = app.MapGroup("/api/v1/findings") + .WithTags("Runtime") + .RequireAuthorization(); + + // GET /api/v1/findings/{findingId}/runtime-timeline + group.MapGet("/{findingId:guid}/runtime-timeline", async ( + Guid findingId, + [FromQuery] DateTimeOffset? from, + [FromQuery] DateTimeOffset? to, + [FromQuery] int bucketHours = 1, + IRuntimeTimelineService service, + CancellationToken ct) => + { + var options = new TimelineOptions + { + WindowStart = from, + WindowEnd = to, + BucketSize = TimeSpan.FromHours(Math.Clamp(bucketHours, 1, 24)) + }; + + var timeline = await service.GetTimelineAsync(findingId, options, ct); + return timeline is not null + ? 
Results.Ok(timeline) + : Results.NotFound(); + }) + .WithName("GetRuntimeTimeline") + .WithDescription("Get runtime corroboration timeline") + .Produces(200) + .Produces(404); + } +} +``` + +**Acceptance Criteria**: +- [ ] GET endpoint with time window params +- [ ] Bucket size configuration +- [ ] OpenAPI documentation + +--- + +### T4: Add Tests + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1-T3 + +**Test Cases**: +```csharp +public class TimelineBuilderTests +{ + [Fact] + public void Build_WithNoObservations_ReturnsUnknownPosture() + { + var evidence = CreateEmptyEvidence(); + + var result = _builder.Build(evidence, "pkg:npm/test@1.0.0", new TimelineOptions()); + + result.Posture.Should().Be(RuntimePosture.Unknown); + } + + [Fact] + public void Build_ComponentNotLoaded_ReturnsSupportsPosture() + { + var evidence = CreateEvidenceWithoutComponent(); + + var result = _builder.Build(evidence, "pkg:npm/vulnerable@1.0.0", new TimelineOptions()); + + result.Posture.Should().Be(RuntimePosture.Supports); + result.PostureExplanation.Should().Contain("not loaded"); + } + + [Fact] + public void Build_WithNetworkExposure_ReturnsContradictsPosture() + { + var evidence = CreateEvidenceWithNetworkExposure(); + + var result = _builder.Build(evidence, "pkg:npm/vulnerable@1.0.0", new TimelineOptions()); + + result.Posture.Should().Be(RuntimePosture.Contradicts); + } + + [Fact] + public void Build_CreatesCorrectBuckets() + { + var evidence = CreateEvidenceOver24Hours(); + var options = new TimelineOptions { BucketSize = TimeSpan.FromHours(6) }; + + var result = _builder.Build(evidence, "pkg:npm/test@1.0.0", options); + + result.Buckets.Should().HaveCount(4); + } +} +``` + +**Acceptance Criteria**: +- [ ] Posture determination tests +- [ ] Bucket building tests +- [ ] Event extraction tests + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | 
+|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Scanner Team | Define RuntimeTimeline model | +| 2 | T2 | TODO | T1 | Scanner Team | Create TimelineBuilder | +| 3 | T3 | TODO | T2 | Scanner Team | Create API endpoint | +| 4 | T4 | TODO | T1-T3 | Scanner Team | Add tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from Explainable Triage Workflows advisory gap analysis. | Claude | + +--- + +## Success Criteria + +- [ ] All 4 tasks marked DONE +- [ ] Timeline shows time-windowed observations +- [ ] Posture correctly determined +- [ ] Events extracted with significance +- [ ] All tests pass diff --git a/docs/implplan/SPRINT_7000_0004_0001_progressive_fidelity.md b/docs/implplan/SPRINT_7000_0004_0001_progressive_fidelity.md new file mode 100644 index 000000000..580c208a5 --- /dev/null +++ b/docs/implplan/SPRINT_7000_0004_0001_progressive_fidelity.md @@ -0,0 +1,654 @@ +# Sprint 7000.0004.0001 · Progressive Fidelity Mode + +## Topic & Scope + +- Implement tiered analysis fidelity (Quick, Standard, Deep) +- Enable fast heuristic triage with option for deeper proof +- Reflect fidelity level in verdict confidence +- Support "request deeper analysis" workflow + +**Working directory:** `src/Scanner/__Libraries/StellaOps.Scanner.Orchestration/` + +## Dependencies & Concurrency + +- **Upstream**: None (independent) +- **Downstream**: SPRINT_7000_0001_0001 (Confidence reflects fidelity) +- **Safe to parallelize with**: SPRINT_7000_0004_0002 + +## Documentation Prerequisites + +- `docs/product-advisories/21-Dec-2025 - Designing Explainable Triage Workflows.md` +- `src/Scanner/StellaOps.Scanner.WebService/` + +--- + +## Problem Statement + +The advisory requires: "Progressive fidelity: fast heuristic → deeper proof when requested; verdict must reflect confidence accordingly." + +Currently, reachability analysis is all-or-nothing.
Users cannot quickly triage thousands of findings and then selectively request deeper analysis for high-priority items. + +--- + +## Tasks + +### T1: Define FidelityLevel Enum and Configuration + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: — + +**Implementation Path**: `Fidelity/FidelityLevel.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Scanner.Orchestration.Fidelity; + +/// +/// Analysis fidelity level controlling depth vs speed tradeoff. +/// +public enum FidelityLevel +{ + /// + /// Fast heuristic analysis. Uses package-level matching only. + /// ~10x faster than Standard. Lower confidence. + /// + Quick, + + /// + /// Standard analysis. Includes call graph for top languages. + /// Balanced speed and accuracy. + /// + Standard, + + /// + /// Deep analysis. Full call graph, runtime correlation, binary mapping. + /// Highest confidence but slowest. + /// + Deep +} + +/// +/// Configuration for each fidelity level. +/// +public sealed record FidelityConfiguration +{ + public required FidelityLevel Level { get; init; } + + /// + /// Whether to perform call graph extraction. + /// + public bool EnableCallGraph { get; init; } + + /// + /// Whether to correlate with runtime evidence. + /// + public bool EnableRuntimeCorrelation { get; init; } + + /// + /// Whether to perform binary mapping. + /// + public bool EnableBinaryMapping { get; init; } + + /// + /// Maximum call graph depth. + /// + public int MaxCallGraphDepth { get; init; } + + /// + /// Timeout for analysis. + /// + public TimeSpan Timeout { get; init; } + + /// + /// Base confidence for this fidelity level. + /// + public decimal BaseConfidence { get; init; } + + /// + /// Languages to analyze (null = all). + /// + public IReadOnlyList? 
TargetLanguages { get; init; } + + public static FidelityConfiguration Quick => new() + { + Level = FidelityLevel.Quick, + EnableCallGraph = false, + EnableRuntimeCorrelation = false, + EnableBinaryMapping = false, + MaxCallGraphDepth = 0, + Timeout = TimeSpan.FromSeconds(30), + BaseConfidence = 0.5m, + TargetLanguages = null + }; + + public static FidelityConfiguration Standard => new() + { + Level = FidelityLevel.Standard, + EnableCallGraph = true, + EnableRuntimeCorrelation = false, + EnableBinaryMapping = false, + MaxCallGraphDepth = 10, + Timeout = TimeSpan.FromMinutes(5), + BaseConfidence = 0.75m, + TargetLanguages = ["java", "dotnet", "python", "go", "node"] + }; + + public static FidelityConfiguration Deep => new() + { + Level = FidelityLevel.Deep, + EnableCallGraph = true, + EnableRuntimeCorrelation = true, + EnableBinaryMapping = true, + MaxCallGraphDepth = 50, + Timeout = TimeSpan.FromMinutes(30), + BaseConfidence = 0.9m, + TargetLanguages = null + }; + + public static FidelityConfiguration FromLevel(FidelityLevel level) => level switch + { + FidelityLevel.Quick => Quick, + FidelityLevel.Standard => Standard, + FidelityLevel.Deep => Deep, + _ => Standard + }; +} +``` + +**Acceptance Criteria**: +- [ ] FidelityLevel enum defined +- [ ] FidelityConfiguration for each level +- [ ] Configurable timeouts and depths +- [ ] Base confidence per level + +--- + +### T2: Create FidelityAwareAnalyzer + +**Assignee**: Scanner Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: T1 + +**Implementation Path**: `Fidelity/FidelityAwareAnalyzer.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Scanner.Orchestration.Fidelity; + +public interface IFidelityAwareAnalyzer +{ + Task AnalyzeAsync( + AnalysisRequest request, + FidelityLevel level, + CancellationToken ct); + + Task UpgradeFidelityAsync( + Guid findingId, + FidelityLevel targetLevel, + CancellationToken ct); +} + +public sealed class FidelityAwareAnalyzer : IFidelityAwareAnalyzer +{ + 
private readonly ICallGraphExtractor _callGraphExtractor; + private readonly IRuntimeCorrelator _runtimeCorrelator; + private readonly IBinaryMapper _binaryMapper; + private readonly IPackageMatcher _packageMatcher; + private readonly ILogger _logger; + + public async Task AnalyzeAsync( + AnalysisRequest request, + FidelityLevel level, + CancellationToken ct) + { + var config = FidelityConfiguration.FromLevel(level); + var stopwatch = Stopwatch.StartNew(); + + using var cts = CancellationTokenSource.CreateLinkedTokenSource(ct); + cts.CancelAfter(config.Timeout); + + try + { + // Level 1: Package matching (always done) + var packageResult = await _packageMatcher.MatchAsync(request, cts.Token); + + if (level == FidelityLevel.Quick) + { + return BuildResult(packageResult, config, stopwatch.Elapsed); + } + + // Level 2: Call graph analysis (Standard and Deep) + CallGraphResult? callGraphResult = null; + if (config.EnableCallGraph) + { + var languages = config.TargetLanguages ?? request.DetectedLanguages; + callGraphResult = await _callGraphExtractor.ExtractAsync( + request, + languages, + config.MaxCallGraphDepth, + cts.Token); + } + + if (level == FidelityLevel.Standard) + { + return BuildResult(packageResult, callGraphResult, config, stopwatch.Elapsed); + } + + // Level 3: Binary mapping and runtime (Deep only) + BinaryMappingResult? binaryResult = null; + RuntimeCorrelationResult? 
runtimeResult = null; + + if (config.EnableBinaryMapping) + { + binaryResult = await _binaryMapper.MapAsync(request, cts.Token); + } + + if (config.EnableRuntimeCorrelation) + { + runtimeResult = await _runtimeCorrelator.CorrelateAsync(request, cts.Token); + } + + return BuildResult( + packageResult, + callGraphResult, + binaryResult, + runtimeResult, + config, + stopwatch.Elapsed); + } + catch (OperationCanceledException) when (cts.IsCancellationRequested && !ct.IsCancellationRequested) + { + _logger.LogWarning( + "Analysis timeout at fidelity {Level} after {Elapsed}", + level, stopwatch.Elapsed); + + return BuildTimeoutResult(level, config, stopwatch.Elapsed); + } + } + + public async Task UpgradeFidelityAsync( + Guid findingId, + FidelityLevel targetLevel, + CancellationToken ct) + { + // Load existing analysis + var existing = await LoadExistingAnalysisAsync(findingId, ct); + if (existing is null) + { + return FidelityUpgradeResult.NotFound(findingId); + } + + if (existing.FidelityLevel >= targetLevel) + { + return FidelityUpgradeResult.AlreadyAtLevel(existing); + } + + // Perform incremental upgrade + var request = existing.ToAnalysisRequest(); + var result = await AnalyzeAsync(request, targetLevel, ct); + + // Merge with existing + var merged = MergeResults(existing, result); + + // Persist upgraded result + await PersistResultAsync(merged, ct); + + return new FidelityUpgradeResult + { + Success = true, + FindingId = findingId, + PreviousLevel = existing.FidelityLevel, + NewLevel = targetLevel, + ConfidenceImprovement = merged.Confidence - existing.Confidence, + NewResult = merged + }; + } + + private FidelityAnalysisResult BuildResult( + PackageMatchResult packageResult, + FidelityConfiguration config, + TimeSpan elapsed) + { + var confidence = config.BaseConfidence; + + // Adjust confidence based on match quality + if (packageResult.HasExactMatch) + confidence += 0.1m; + + return new FidelityAnalysisResult + { + FidelityLevel = config.Level, + Confidence = 
Math.Min(confidence, 1.0m), + IsReachable = null, // Unknown at Quick level + PackageMatches = packageResult.Matches, + CallGraph = null, + BinaryMapping = null, + RuntimeCorrelation = null, + AnalysisTime = elapsed, + CanUpgrade = true, + UpgradeRecommendation = "Upgrade to Standard for call graph analysis" + }; + } + + private FidelityAnalysisResult BuildResult( + PackageMatchResult packageResult, + CallGraphResult? callGraphResult, + FidelityConfiguration config, + TimeSpan elapsed) + { + var confidence = config.BaseConfidence; + + // Adjust based on call graph completeness + if (callGraphResult?.IsComplete == true) + confidence += 0.15m; + + var isReachable = callGraphResult?.HasPathToVulnerable; + + return new FidelityAnalysisResult + { + FidelityLevel = config.Level, + Confidence = Math.Min(confidence, 1.0m), + IsReachable = isReachable, + PackageMatches = packageResult.Matches, + CallGraph = callGraphResult, + BinaryMapping = null, + RuntimeCorrelation = null, + AnalysisTime = elapsed, + CanUpgrade = true, + UpgradeRecommendation = isReachable == true + ? "Upgrade to Deep for runtime verification" + : "Upgrade to Deep for binary mapping confirmation" + }; + } + + private FidelityAnalysisResult BuildResult( + PackageMatchResult packageResult, + CallGraphResult? callGraphResult, + BinaryMappingResult? binaryResult, + RuntimeCorrelationResult? 
runtimeResult, + FidelityConfiguration config, + TimeSpan elapsed) + { + var confidence = config.BaseConfidence; + + // Adjust based on runtime corroboration + if (runtimeResult?.HasCorroboration == true) + confidence = 0.95m; + else if (binaryResult?.HasMapping == true) + confidence += 0.05m; + + var isReachable = DetermineReachability( + callGraphResult, + binaryResult, + runtimeResult); + + return new FidelityAnalysisResult + { + FidelityLevel = config.Level, + Confidence = Math.Min(confidence, 1.0m), + IsReachable = isReachable, + PackageMatches = packageResult.Matches, + CallGraph = callGraphResult, + BinaryMapping = binaryResult, + RuntimeCorrelation = runtimeResult, + AnalysisTime = elapsed, + CanUpgrade = false, + UpgradeRecommendation = null + }; + } + + private static bool? DetermineReachability( + CallGraphResult? callGraph, + BinaryMappingResult? binary, + RuntimeCorrelationResult? runtime) + { + // Runtime is authoritative + if (runtime?.WasExecuted == true) + return true; + if (runtime?.WasExecuted == false && runtime.ObservationCount > 100) + return false; + + // Fall back to call graph + if (callGraph?.HasPathToVulnerable == true) + return true; + if (callGraph?.HasPathToVulnerable == false && callGraph.IsComplete) + return false; + + return null; // Unknown + } + + private FidelityAnalysisResult BuildTimeoutResult( + FidelityLevel attemptedLevel, + FidelityConfiguration config, + TimeSpan elapsed) + { + return new FidelityAnalysisResult + { + FidelityLevel = attemptedLevel, + Confidence = 0.3m, + IsReachable = null, + PackageMatches = [], + AnalysisTime = elapsed, + TimedOut = true, + CanUpgrade = false, + UpgradeRecommendation = "Analysis timed out. Try with smaller scope." + }; + } +} + +public sealed record FidelityAnalysisResult +{ + public required FidelityLevel FidelityLevel { get; init; } + public required decimal Confidence { get; init; } + public bool? 
IsReachable { get; init; } + public required IReadOnlyList PackageMatches { get; init; } + public CallGraphResult? CallGraph { get; init; } + public BinaryMappingResult? BinaryMapping { get; init; } + public RuntimeCorrelationResult? RuntimeCorrelation { get; init; } + public required TimeSpan AnalysisTime { get; init; } + public bool TimedOut { get; init; } + public required bool CanUpgrade { get; init; } + public string? UpgradeRecommendation { get; init; } +} + +public sealed record FidelityUpgradeResult +{ + public required bool Success { get; init; } + public Guid FindingId { get; init; } + public FidelityLevel? PreviousLevel { get; init; } + public FidelityLevel? NewLevel { get; init; } + public decimal ConfidenceImprovement { get; init; } + public FidelityAnalysisResult? NewResult { get; init; } + public string? Error { get; init; } + + public static FidelityUpgradeResult NotFound(Guid id) => new() + { + Success = false, + FindingId = id, + Error = "Finding not found" + }; + + public static FidelityUpgradeResult AlreadyAtLevel(FidelityAnalysisResult existing) => new() + { + Success = true, + PreviousLevel = existing.FidelityLevel, + NewLevel = existing.FidelityLevel, + ConfidenceImprovement = 0, + NewResult = existing + }; +} +``` + +**Acceptance Criteria**: +- [ ] Implements Quick/Standard/Deep analysis +- [ ] Respects timeouts per level +- [ ] Supports fidelity upgrade +- [ ] Confidence reflects fidelity level + +--- + +### T3: Create API Endpoints + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T2 + +**Implementation Path**: `src/Scanner/StellaOps.Scanner.WebService/Endpoints/FidelityEndpoints.cs` + +```csharp +namespace StellaOps.Scanner.WebService.Endpoints; + +public static class FidelityEndpoints +{ + public static void MapFidelityEndpoints(this WebApplication app) + { + var group = app.MapGroup("/api/v1/scan") + .WithTags("Fidelity") + .RequireAuthorization(); + + // POST /api/v1/scan/analyze?fidelity={level} + 
group.MapPost("/analyze", async ( + [FromBody] AnalysisRequest request, + IFidelityAwareAnalyzer analyzer, + CancellationToken ct, + [FromQuery] FidelityLevel fidelity = FidelityLevel.Standard) => + { + var result = await analyzer.AnalyzeAsync(request, fidelity, ct); + return Results.Ok(result); + }) + .WithName("AnalyzeWithFidelity") + .WithDescription("Analyze with specified fidelity level"); + + // POST /api/v1/scan/findings/{findingId}/upgrade + // NOTE: defaulted parameters must come after required ones (CS1737). + group.MapPost("/findings/{findingId:guid}/upgrade", async ( + Guid findingId, + IFidelityAwareAnalyzer analyzer, + CancellationToken ct, + [FromQuery] FidelityLevel target = FidelityLevel.Deep) => + { + var result = await analyzer.UpgradeFidelityAsync(findingId, target, ct); + return result.Success + ? Results.Ok(result) + : Results.BadRequest(result); + }) + .WithName("UpgradeFidelity") + .WithDescription("Upgrade analysis fidelity for a finding"); + } +} +``` + +**Acceptance Criteria**: +- [ ] Analyze endpoint with fidelity param +- [ ] Upgrade endpoint for findings +- [ ] OpenAPI documentation + +--- + +### T4: Add Tests + +**Assignee**: Scanner Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1-T3 + +**Test Cases**: +```csharp +public class FidelityAwareAnalyzerTests +{ + [Fact] + public async Task AnalyzeAsync_QuickLevel_SkipsCallGraph() + { + var request = CreateAnalysisRequest(); + + var result = await _analyzer.AnalyzeAsync(request, FidelityLevel.Quick, CancellationToken.None); + + result.FidelityLevel.Should().Be(FidelityLevel.Quick); + result.CallGraph.Should().BeNull(); + result.Confidence.Should().BeLessThan(0.7m); + } + + [Fact] + public async Task AnalyzeAsync_StandardLevel_IncludesCallGraph() + { + var request = CreateAnalysisRequest(); + + var result = await _analyzer.AnalyzeAsync(request, FidelityLevel.Standard, CancellationToken.None); + + result.FidelityLevel.Should().Be(FidelityLevel.Standard); + result.CallGraph.Should().NotBeNull(); + } + + [Fact] + public async Task
AnalyzeAsync_DeepLevel_IncludesRuntime() + { + var request = CreateAnalysisRequest(); + + var result = await _analyzer.AnalyzeAsync(request, FidelityLevel.Deep, CancellationToken.None); + + result.FidelityLevel.Should().Be(FidelityLevel.Deep); + result.RuntimeCorrelation.Should().NotBeNull(); + result.CanUpgrade.Should().BeFalse(); + } + + [Fact] + public async Task UpgradeFidelityAsync_FromQuickToStandard_ImprovesConfidence() + { + var findingId = await CreateFindingAtQuickLevel(); + + var result = await _analyzer.UpgradeFidelityAsync(findingId, FidelityLevel.Standard, CancellationToken.None); + + result.Success.Should().BeTrue(); + result.ConfidenceImprovement.Should().BePositive(); + } +} +``` + +**Acceptance Criteria**: +- [ ] Level-specific tests +- [ ] Upgrade tests +- [ ] Timeout tests + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Scanner Team | Define FidelityLevel and configuration | +| 2 | T2 | TODO | T1 | Scanner Team | Create FidelityAwareAnalyzer | +| 3 | T3 | TODO | T2 | Scanner Team | Create API endpoints | +| 4 | T4 | TODO | T1-T3 | Scanner Team | Add tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from Explainable Triage Workflows advisory gap analysis. 
| Claude | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Three fidelity levels | Decision | Scanner Team | Quick, Standard, Deep | +| Quick timeout | Decision | Scanner Team | 30 seconds | +| Standard languages | Decision | Scanner Team | Java, .NET, Python, Go, Node | +| Deep includes runtime | Decision | Scanner Team | Only Deep level correlates runtime | + +--- + +## Success Criteria + +- [ ] All 4 tasks marked DONE +- [ ] Quick analysis completes in <30s +- [ ] Standard analysis includes call graph +- [ ] Deep analysis includes runtime +- [ ] Upgrade path works correctly +- [ ] All tests pass diff --git a/docs/implplan/SPRINT_7000_0004_0002_evidence_size_budgets.md b/docs/implplan/SPRINT_7000_0004_0002_evidence_size_budgets.md new file mode 100644 index 000000000..a389bba04 --- /dev/null +++ b/docs/implplan/SPRINT_7000_0004_0002_evidence_size_budgets.md @@ -0,0 +1,606 @@ +# Sprint 7000.0004.0002 · Evidence Size Budgets + +## Topic & Scope + +- Implement per-scan evidence size caps +- Define retention tier policies (hot/warm/cold/archive) +- Enforce budgets during evidence generation +- Ensure audit pack completeness with tier-aware pruning + +**Working directory:** `src/__Libraries/StellaOps.Evidence/` + +## Dependencies & Concurrency + +- **Upstream**: None (independent) +- **Downstream**: SPRINT_5100_0006_0001 (Audit Pack Export) +- **Safe to parallelize with**: SPRINT_7000_0004_0001 + +## Documentation Prerequisites + +- `docs/product-advisories/21-Dec-2025 - Designing Explainable Triage Workflows.md` +- `docs/24_OFFLINE_KIT.md` + +--- + +## Tasks + +### T1: Define EvidenceBudget Model + +**Assignee**: Platform Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: — + +**Implementation Path**: `Budgets/EvidenceBudget.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Evidence.Budgets; + +/// +/// Budget configuration for evidence storage.
+/// +public sealed record EvidenceBudget +{ + /// + /// Maximum total evidence size per scan (bytes). + /// + public required long MaxScanSizeBytes { get; init; } + + /// + /// Maximum size per evidence type (bytes). + /// + public IReadOnlyDictionary MaxPerType { get; init; } + = new Dictionary(); + + /// + /// Retention policy by tier. + /// + public required IReadOnlyDictionary RetentionPolicies { get; init; } + + /// + /// Action when budget is exceeded. + /// + public BudgetExceededAction ExceededAction { get; init; } = BudgetExceededAction.Warn; + + /// + /// Evidence types to always preserve (never prune). + /// + public IReadOnlySet AlwaysPreserve { get; init; } + = new HashSet { EvidenceType.Verdict, EvidenceType.Attestation }; + + public static EvidenceBudget Default => new() + { + MaxScanSizeBytes = 100 * 1024 * 1024, // 100 MB + MaxPerType = new Dictionary + { + [EvidenceType.CallGraph] = 50 * 1024 * 1024, + [EvidenceType.RuntimeCapture] = 20 * 1024 * 1024, + [EvidenceType.Sbom] = 10 * 1024 * 1024, + [EvidenceType.PolicyTrace] = 5 * 1024 * 1024 + }, + RetentionPolicies = new Dictionary + { + [RetentionTier.Hot] = new RetentionPolicy { Duration = TimeSpan.FromDays(7) }, + [RetentionTier.Warm] = new RetentionPolicy { Duration = TimeSpan.FromDays(30) }, + [RetentionTier.Cold] = new RetentionPolicy { Duration = TimeSpan.FromDays(90) }, + [RetentionTier.Archive] = new RetentionPolicy { Duration = TimeSpan.FromDays(365) } + } + }; +} + +public enum EvidenceType +{ + Verdict, + PolicyTrace, + CallGraph, + RuntimeCapture, + Sbom, + Vex, + Attestation, + PathWitness, + Advisory +} + +public enum RetentionTier +{ + /// Immediately accessible, highest cost. + Hot, + + /// Quick retrieval, moderate cost. + Warm, + + /// Delayed retrieval, lower cost. + Cold, + + /// Long-term storage, lowest cost. + Archive +} + +public sealed record RetentionPolicy +{ + /// + /// How long evidence stays in this tier. 
+ /// + public required TimeSpan Duration { get; init; } + + /// + /// Compression algorithm for this tier. + /// + public CompressionLevel Compression { get; init; } = CompressionLevel.None; + + /// + /// Whether to deduplicate within this tier. + /// + public bool Deduplicate { get; init; } = true; +} + +public enum CompressionLevel +{ + None, + Fast, + Optimal, + Maximum +} + +public enum BudgetExceededAction +{ + /// Log warning but continue. + Warn, + + /// Block the operation. + Block, + + /// Automatically prune lowest priority evidence. + AutoPrune +} +``` + +**Acceptance Criteria**: +- [ ] EvidenceBudget with size limits +- [ ] RetentionTier enum with policies +- [ ] Default budget configuration +- [ ] AlwaysPreserve set for critical evidence + +--- + +### T2: Create EvidenceBudgetService + +**Assignee**: Platform Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1 + +**Implementation Path**: `Budgets/EvidenceBudgetService.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Evidence.Budgets; + +public interface IEvidenceBudgetService +{ + BudgetCheckResult CheckBudget(Guid scanId, EvidenceItem item); + BudgetStatus GetBudgetStatus(Guid scanId); + Task PruneToFitAsync(Guid scanId, long targetBytes, CancellationToken ct); +} + +public sealed class EvidenceBudgetService : IEvidenceBudgetService +{ + private readonly IEvidenceRepository _repository; + private readonly IOptionsMonitor _options; + private readonly ILogger _logger; + + public BudgetCheckResult CheckBudget(Guid scanId, EvidenceItem item) + { + var budget = _options.CurrentValue; + var currentUsage = GetCurrentUsage(scanId); + + var issues = new List(); + + // Check total budget + var projectedTotal = currentUsage.TotalBytes + item.SizeBytes; + if (projectedTotal > budget.MaxScanSizeBytes) + { + issues.Add($"Would exceed total budget: {projectedTotal:N0} > {budget.MaxScanSizeBytes:N0} bytes"); + } + + // Check per-type budget + if 
(budget.MaxPerType.TryGetValue(item.Type, out var typeLimit)) + { + var typeUsage = currentUsage.ByType.GetValueOrDefault(item.Type, 0); + var projectedType = typeUsage + item.SizeBytes; + if (projectedType > typeLimit) + { + issues.Add($"Would exceed {item.Type} budget: {projectedType:N0} > {typeLimit:N0} bytes"); + } + } + + if (issues.Count == 0) + { + return BudgetCheckResult.WithinBudget(); + } + + return new BudgetCheckResult + { + IsWithinBudget = false, + Issues = issues, + RecommendedAction = budget.ExceededAction, + CanAutoPrune = budget.ExceededAction == BudgetExceededAction.AutoPrune, + BytesToFree = projectedTotal - budget.MaxScanSizeBytes + }; + } + + public BudgetStatus GetBudgetStatus(Guid scanId) + { + var budget = _options.CurrentValue; + var usage = GetCurrentUsage(scanId); + + return new BudgetStatus + { + ScanId = scanId, + TotalBudgetBytes = budget.MaxScanSizeBytes, + UsedBytes = usage.TotalBytes, + RemainingBytes = Math.Max(0, budget.MaxScanSizeBytes - usage.TotalBytes), + UtilizationPercent = (decimal)usage.TotalBytes / budget.MaxScanSizeBytes * 100, + ByType = usage.ByType.ToDictionary( + kvp => kvp.Key, + kvp => new TypeBudgetStatus + { + Type = kvp.Key, + UsedBytes = kvp.Value, + LimitBytes = budget.MaxPerType.GetValueOrDefault(kvp.Key), + UtilizationPercent = budget.MaxPerType.TryGetValue(kvp.Key, out var limit) + ? 
(decimal)kvp.Value / limit * 100 + : 0 + }) + }; + } + + public async Task PruneToFitAsync( + Guid scanId, + long targetBytes, + CancellationToken ct) + { + var budget = _options.CurrentValue; + var usage = GetCurrentUsage(scanId); + + if (usage.TotalBytes <= targetBytes) + { + return PruneResult.NoPruningNeeded(); + } + + var bytesToPrune = usage.TotalBytes - targetBytes; + var pruned = new List(); + + // Get all evidence items, sorted by pruning priority + var items = await _repository.GetByScanIdAsync(scanId, ct); + var candidates = items + .Where(i => !budget.AlwaysPreserve.Contains(i.Type)) + .OrderBy(i => GetPrunePriority(i)) + .ToList(); + + long prunedBytes = 0; + foreach (var item in candidates) + { + if (prunedBytes >= bytesToPrune) + break; + + // Move to archive tier or delete + await _repository.MoveToTierAsync(item.Id, RetentionTier.Archive, ct); + pruned.Add(new PrunedItem(item.Id, item.Type, item.SizeBytes)); + prunedBytes += item.SizeBytes; + } + + _logger.LogInformation( + "Pruned {Count} items ({Bytes:N0} bytes) for scan {ScanId}", + pruned.Count, prunedBytes, scanId); + + return new PruneResult + { + Success = prunedBytes >= bytesToPrune, + BytesPruned = prunedBytes, + ItemsPruned = pruned, + BytesRemaining = usage.TotalBytes - prunedBytes + }; + } + + private static int GetPrunePriority(EvidenceItem item) + { + // Lower = prune first + return item.Type switch + { + EvidenceType.RuntimeCapture => 1, + EvidenceType.CallGraph => 2, + EvidenceType.Advisory => 3, + EvidenceType.PathWitness => 4, + EvidenceType.PolicyTrace => 5, + EvidenceType.Sbom => 6, + EvidenceType.Vex => 7, + EvidenceType.Attestation => 8, + EvidenceType.Verdict => 9, // Never prune + _ => 5 + }; + } + + private UsageStats GetCurrentUsage(Guid scanId) + { + // Implementation to calculate current usage + return new UsageStats(); + } +} + +public sealed record BudgetCheckResult +{ + public required bool IsWithinBudget { get; init; } + public IReadOnlyList Issues { get; init; } = 
[]; + public BudgetExceededAction RecommendedAction { get; init; } + public bool CanAutoPrune { get; init; } + public long BytesToFree { get; init; } + + public static BudgetCheckResult WithinBudget() => new() { IsWithinBudget = true }; +} + +public sealed record BudgetStatus +{ + public required Guid ScanId { get; init; } + public required long TotalBudgetBytes { get; init; } + public required long UsedBytes { get; init; } + public required long RemainingBytes { get; init; } + public required decimal UtilizationPercent { get; init; } + public required IReadOnlyDictionary ByType { get; init; } +} + +public sealed record TypeBudgetStatus +{ + public required EvidenceType Type { get; init; } + public required long UsedBytes { get; init; } + public long? LimitBytes { get; init; } + public decimal UtilizationPercent { get; init; } +} + +public sealed record PruneResult +{ + public required bool Success { get; init; } + public long BytesPruned { get; init; } + public IReadOnlyList ItemsPruned { get; init; } = []; + public long BytesRemaining { get; init; } + + public static PruneResult NoPruningNeeded() => new() { Success = true }; +} + +public sealed record PrunedItem(Guid ItemId, EvidenceType Type, long SizeBytes); + +public sealed record UsageStats +{ + public long TotalBytes { get; init; } + public IReadOnlyDictionary ByType { get; init; } = new Dictionary(); +} +``` + +**Acceptance Criteria**: +- [ ] Budget checking before storage +- [ ] Budget status reporting +- [ ] Auto-pruning with priority +- [ ] AlwaysPreserve respected + +--- + +### T3: Create RetentionTierManager + +**Assignee**: Platform Team +**Story Points**: 3 +**Status**: TODO +**Dependencies**: T1 + +**Implementation Path**: `Retention/RetentionTierManager.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Evidence.Retention; + +public interface IRetentionTierManager +{ + Task RunMigrationAsync(CancellationToken ct); + RetentionTier GetCurrentTier(EvidenceItem item); + Task 
EnsureAuditCompleteAsync(Guid scanId, CancellationToken ct); +} + +public sealed class RetentionTierManager : IRetentionTierManager +{ + private readonly IEvidenceRepository _repository; + private readonly IArchiveStorage _archiveStorage; + private readonly IOptionsMonitor _options; + + public async Task RunMigrationAsync(CancellationToken ct) + { + var budget = _options.CurrentValue; + var now = DateTimeOffset.UtcNow; + var migrated = new List(); + + // Hot → Warm + var hotExpiry = now - budget.RetentionPolicies[RetentionTier.Hot].Duration; + var toWarm = await _repository.GetOlderThanAsync(RetentionTier.Hot, hotExpiry, ct); + foreach (var item in toWarm) + { + await MigrateAsync(item, RetentionTier.Warm, ct); + migrated.Add(new MigratedItem(item.Id, RetentionTier.Hot, RetentionTier.Warm)); + } + + // Warm → Cold + var warmExpiry = now - budget.RetentionPolicies[RetentionTier.Warm].Duration; + var toCold = await _repository.GetOlderThanAsync(RetentionTier.Warm, warmExpiry, ct); + foreach (var item in toCold) + { + await MigrateAsync(item, RetentionTier.Cold, ct); + migrated.Add(new MigratedItem(item.Id, RetentionTier.Warm, RetentionTier.Cold)); + } + + // Cold → Archive + var coldExpiry = now - budget.RetentionPolicies[RetentionTier.Cold].Duration; + var toArchive = await _repository.GetOlderThanAsync(RetentionTier.Cold, coldExpiry, ct); + foreach (var item in toArchive) + { + await MigrateAsync(item, RetentionTier.Archive, ct); + migrated.Add(new MigratedItem(item.Id, RetentionTier.Cold, RetentionTier.Archive)); + } + + return new TierMigrationResult + { + MigratedCount = migrated.Count, + Items = migrated + }; + } + + public RetentionTier GetCurrentTier(EvidenceItem item) + { + var budget = _options.CurrentValue; + var age = DateTimeOffset.UtcNow - item.CreatedAt; + + if (age < budget.RetentionPolicies[RetentionTier.Hot].Duration) + return RetentionTier.Hot; + if (age < budget.RetentionPolicies[RetentionTier.Warm].Duration) + return RetentionTier.Warm; + if (age 
< budget.RetentionPolicies[RetentionTier.Cold].Duration) + return RetentionTier.Cold; + + return RetentionTier.Archive; + } + + public async Task EnsureAuditCompleteAsync(Guid scanId, CancellationToken ct) + { + var budget = _options.CurrentValue; + + // Ensure all AlwaysPreserve types are in Hot tier for audit export + foreach (var type in budget.AlwaysPreserve) + { + var items = await _repository.GetByScanIdAndTypeAsync(scanId, type, ct); + foreach (var item in items.Where(i => i.Tier != RetentionTier.Hot)) + { + await RestoreToHotAsync(item, ct); + } + } + } + + private async Task MigrateAsync(EvidenceItem item, RetentionTier targetTier, CancellationToken ct) + { + var policy = _options.CurrentValue.RetentionPolicies[targetTier]; + + if (policy.Compression != CompressionLevel.None) + { + // Compress before migration + var compressed = await CompressAsync(item, policy.Compression, ct); + await _repository.UpdateContentAsync(item.Id, compressed, ct); + } + + await _repository.MoveToTierAsync(item.Id, targetTier, ct); + } + + private async Task RestoreToHotAsync(EvidenceItem item, CancellationToken ct) + { + if (item.Tier == RetentionTier.Archive) + { + // Retrieve from archive storage + var content = await _archiveStorage.RetrieveAsync(item.ArchiveKey!, ct); + await _repository.UpdateContentAsync(item.Id, content, ct); + } + + await _repository.MoveToTierAsync(item.Id, RetentionTier.Hot, ct); + } +} + +public sealed record TierMigrationResult +{ + public required int MigratedCount { get; init; } + public IReadOnlyList Items { get; init; } = []; +} + +public sealed record MigratedItem(Guid ItemId, RetentionTier FromTier, RetentionTier ToTier); +``` + +**Acceptance Criteria**: +- [ ] Tier migration based on age +- [ ] Compression on tier change +- [ ] Audit completeness restoration +- [ ] Archive storage integration + +--- + +### T4: Add Tests + +**Assignee**: Platform Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1-T3 + +**Test Cases**: +```csharp 
+public class EvidenceBudgetServiceTests +{ + [Fact] + public void CheckBudget_WithinLimit_ReturnsSuccess() + { + var item = CreateItem(sizeBytes: 1024); + + var result = _service.CheckBudget(Guid.NewGuid(), item); + + result.IsWithinBudget.Should().BeTrue(); + } + + [Fact] + public void CheckBudget_ExceedsTotal_ReturnsViolation() + { + var scanId = SetupScanAtBudgetLimit(); + var item = CreateItem(sizeBytes: 1024 * 1024); + + var result = _service.CheckBudget(scanId, item); + + result.IsWithinBudget.Should().BeFalse(); + result.Issues.Should().Contain(i => i.Contains("total budget")); + } + + [Fact] + public async Task PruneToFitAsync_PreservesAlwaysPreserveTypes() + { + var scanId = SetupScanOverBudget(); + + var result = await _service.PruneToFitAsync(scanId, 50 * 1024 * 1024, CancellationToken.None); + + result.ItemsPruned.Should().NotContain(i => i.Type == EvidenceType.Verdict); + result.ItemsPruned.Should().NotContain(i => i.Type == EvidenceType.Attestation); + } +} +``` + +**Acceptance Criteria**: +- [ ] Budget check tests +- [ ] Pruning priority tests +- [ ] AlwaysPreserve tests + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Platform Team | Define EvidenceBudget model | +| 2 | T2 | TODO | T1 | Platform Team | Create EvidenceBudgetService | +| 3 | T3 | TODO | T1 | Platform Team | Create RetentionTierManager | +| 4 | T4 | TODO | T1-T3 | Platform Team | Add tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from Explainable Triage Workflows advisory gap analysis. 
| Claude | + +--- + +## Success Criteria + +- [ ] All 4 tasks marked DONE +- [ ] Budget enforcement prevents oversized scans +- [ ] Retention tiers migrate automatically +- [ ] Audit packs remain complete +- [ ] All tests pass diff --git a/docs/implplan/SPRINT_7000_0005_0001_quality_kpis_tracking.md b/docs/implplan/SPRINT_7000_0005_0001_quality_kpis_tracking.md new file mode 100644 index 000000000..2728adfdb --- /dev/null +++ b/docs/implplan/SPRINT_7000_0005_0001_quality_kpis_tracking.md @@ -0,0 +1,681 @@ +# Sprint 7000.0005.0001 · Quality KPIs Tracking + +## Topic & Scope + +- Implement KPI tracking infrastructure for explainable triage +- Track: % non-UNKNOWN reachability, runtime corroboration, explainability completeness, replay success +- Create dashboard API endpoints +- Enable weekly KPI reporting + +**Working directory:** `src/__Libraries/StellaOps.Metrics/` + +## Dependencies & Concurrency + +- **Upstream**: All SPRINT_7000 sprints (uses their outputs) +- **Downstream**: None +- **Safe to parallelize with**: None (depends on other features) + +## Documentation Prerequisites + +- `docs/product-advisories/21-Dec-2025 - Designing Explainable Triage Workflows.md` + +--- + +## Problem Statement + +The advisory defines quality KPIs: +- % findings with non-UNKNOWN reachability +- % findings with runtime corroboration available +- False-positive reduction vs baseline +- "Explainability completeness": % verdicts with reason steps + at least one proof pointer +- Replay success rate: % attestations replaying deterministically + +Currently, no infrastructure exists to track these metrics. + +--- + +## Tasks + +### T1: Define KPI Models + +**Assignee**: Platform Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: — + +**Implementation Path**: `Kpi/KpiModels.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Metrics.Kpi; + +/// +/// Quality KPIs for explainable triage.
+/// +public sealed record TriageQualityKpis +{ + /// + /// Reporting period start. + /// + public required DateTimeOffset PeriodStart { get; init; } + + /// + /// Reporting period end. + /// + public required DateTimeOffset PeriodEnd { get; init; } + + /// + /// Tenant ID (null for global). + /// + public string? TenantId { get; init; } + + /// + /// Reachability KPIs. + /// + public required ReachabilityKpis Reachability { get; init; } + + /// + /// Runtime KPIs. + /// + public required RuntimeKpis Runtime { get; init; } + + /// + /// Explainability KPIs. + /// + public required ExplainabilityKpis Explainability { get; init; } + + /// + /// Replay/Determinism KPIs. + /// + public required ReplayKpis Replay { get; init; } + + /// + /// Unknown budget KPIs. + /// + public required UnknownBudgetKpis Unknowns { get; init; } + + /// + /// Operational KPIs. + /// + public required OperationalKpis Operational { get; init; } +} + +public sealed record ReachabilityKpis +{ + /// + /// Total findings analyzed. + /// + public required int TotalFindings { get; init; } + + /// + /// Findings with non-UNKNOWN reachability. + /// + public required int WithKnownReachability { get; init; } + + /// + /// Percentage with known reachability. + /// + public decimal PercentKnown => TotalFindings > 0 + ? (decimal)WithKnownReachability / TotalFindings * 100 + : 0; + + /// + /// Breakdown by reachability state. + /// + public required IReadOnlyDictionary ByState { get; init; } + + /// + /// Findings confirmed unreachable. + /// + public int ConfirmedUnreachable => + ByState.GetValueOrDefault("ConfirmedUnreachable", 0); + + /// + /// Noise reduction (unreachable / total). + /// + public decimal NoiseReductionPercent => TotalFindings > 0 + ? (decimal)ConfirmedUnreachable / TotalFindings * 100 + : 0; +} + +public sealed record RuntimeKpis +{ + /// + /// Total findings in environments with sensors. 
+ /// + public required int TotalWithSensorDeployed { get; init; } + + /// + /// Findings with runtime observations. + /// + public required int WithRuntimeCorroboration { get; init; } + + /// + /// Coverage percentage. + /// + public decimal CoveragePercent => TotalWithSensorDeployed > 0 + ? (decimal)WithRuntimeCorroboration / TotalWithSensorDeployed * 100 + : 0; + + /// + /// Breakdown by posture. + /// + public required IReadOnlyDictionary ByPosture { get; init; } +} + +public sealed record ExplainabilityKpis +{ + /// + /// Total verdicts generated. + /// + public required int TotalVerdicts { get; init; } + + /// + /// Verdicts with reason steps. + /// + public required int WithReasonSteps { get; init; } + + /// + /// Verdicts with at least one proof pointer. + /// + public required int WithProofPointer { get; init; } + + /// + /// Verdicts that are "complete" (both reason steps AND proof pointer). + /// + public required int FullyExplainable { get; init; } + + /// + /// Explainability completeness percentage. + /// + public decimal CompletenessPercent => TotalVerdicts > 0 + ? (decimal)FullyExplainable / TotalVerdicts * 100 + : 0; +} + +public sealed record ReplayKpis +{ + /// + /// Total replay attempts. + /// + public required int TotalAttempts { get; init; } + + /// + /// Successful replays (identical verdict). + /// + public required int Successful { get; init; } + + /// + /// Replay success rate. + /// + public decimal SuccessRate => TotalAttempts > 0 + ? (decimal)Successful / TotalAttempts * 100 + : 0; + + /// + /// Common failure reasons. + /// + public required IReadOnlyDictionary FailureReasons { get; init; } +} + +public sealed record UnknownBudgetKpis +{ + /// + /// Total environments tracked. + /// + public required int TotalEnvironments { get; init; } + + /// + /// Budget breaches by environment. + /// + public required IReadOnlyDictionary BreachesByEnvironment { get; init; } + + /// + /// Total overrides/exceptions granted. 
+ /// + public required int OverridesGranted { get; init; } + + /// + /// Average override age (days). + /// + public decimal AvgOverrideAgeDays { get; init; } +} + +public sealed record OperationalKpis +{ + /// + /// Median time to first verdict (seconds). + /// + public required double MedianTimeToVerdictSeconds { get; init; } + + /// + /// Cache hit rate for graphs/proofs. + /// + public required decimal CacheHitRate { get; init; } + + /// + /// Average evidence size per scan (bytes). + /// + public required long AvgEvidenceSizeBytes { get; init; } + + /// + /// 95th percentile verdict time (seconds). + /// + public required double P95VerdictTimeSeconds { get; init; } +} +``` + +**Acceptance Criteria**: +- [ ] All KPI categories defined +- [ ] Percentage calculations +- [ ] Breakdown dictionaries +- [ ] Period tracking + +--- + +### T2: Create KpiCollector Service + +**Assignee**: Platform Team +**Story Points**: 5 +**Status**: TODO +**Dependencies**: T1 + +**Implementation Path**: `Kpi/KpiCollector.cs` (new file) + +**Implementation**: +```csharp +namespace StellaOps.Metrics.Kpi; + +public interface IKpiCollector +{ + Task CollectAsync( + DateTimeOffset start, + DateTimeOffset end, + string? tenantId = null, + CancellationToken ct = default); + + Task RecordReachabilityResultAsync(Guid findingId, string state, CancellationToken ct); + Task RecordRuntimeObservationAsync(Guid findingId, string posture, CancellationToken ct); + Task RecordVerdictAsync(Guid verdictId, bool hasReasonSteps, bool hasProofPointer, CancellationToken ct); + Task RecordReplayAttemptAsync(Guid attestationId, bool success, string? 
failureReason, CancellationToken ct); +} + +public sealed class KpiCollector : IKpiCollector +{ + private readonly IKpiRepository _repository; + private readonly IFindingRepository _findingRepo; + private readonly IVerdictRepository _verdictRepo; + private readonly IReplayRepository _replayRepo; + private readonly ILogger _logger; + + public async Task CollectAsync( + DateTimeOffset start, + DateTimeOffset end, + string? tenantId = null, + CancellationToken ct = default) + { + var reachability = await CollectReachabilityKpisAsync(start, end, tenantId, ct); + var runtime = await CollectRuntimeKpisAsync(start, end, tenantId, ct); + var explainability = await CollectExplainabilityKpisAsync(start, end, tenantId, ct); + var replay = await CollectReplayKpisAsync(start, end, tenantId, ct); + var unknowns = await CollectUnknownBudgetKpisAsync(start, end, tenantId, ct); + var operational = await CollectOperationalKpisAsync(start, end, tenantId, ct); + + return new TriageQualityKpis + { + PeriodStart = start, + PeriodEnd = end, + TenantId = tenantId, + Reachability = reachability, + Runtime = runtime, + Explainability = explainability, + Replay = replay, + Unknowns = unknowns, + Operational = operational + }; + } + + private async Task CollectReachabilityKpisAsync( + DateTimeOffset start, + DateTimeOffset end, + string? tenantId, + CancellationToken ct) + { + var findings = await _findingRepo.GetInPeriodAsync(start, end, tenantId, ct); + + var byState = findings + .GroupBy(f => f.ReachabilityState ?? "Unknown") + .ToDictionary(g => g.Key, g => g.Count()); + + var withKnown = findings.Count(f => + f.ReachabilityState is not null and not "Unknown"); + + return new ReachabilityKpis + { + TotalFindings = findings.Count, + WithKnownReachability = withKnown, + ByState = byState + }; + } + + private async Task CollectRuntimeKpisAsync( + DateTimeOffset start, + DateTimeOffset end, + string? 
tenantId, + CancellationToken ct) + { + var findings = await _findingRepo.GetWithSensorDeployedAsync(start, end, tenantId, ct); + + var withRuntime = findings.Count(f => f.HasRuntimeEvidence); + + var byPosture = findings + .Where(f => f.RuntimePosture is not null) + .GroupBy(f => f.RuntimePosture!) + .ToDictionary(g => g.Key, g => g.Count()); + + return new RuntimeKpis + { + TotalWithSensorDeployed = findings.Count, + WithRuntimeCorroboration = withRuntime, + ByPosture = byPosture + }; + } + + private async Task CollectExplainabilityKpisAsync( + DateTimeOffset start, + DateTimeOffset end, + string? tenantId, + CancellationToken ct) + { + var verdicts = await _verdictRepo.GetInPeriodAsync(start, end, tenantId, ct); + + var withReasonSteps = verdicts.Count(v => v.ReasonSteps?.Count > 0); + var withProofPointer = verdicts.Count(v => v.ProofPointers?.Count > 0); + var fullyExplainable = verdicts.Count(v => + v.ReasonSteps?.Count > 0 && v.ProofPointers?.Count > 0); + + return new ExplainabilityKpis + { + TotalVerdicts = verdicts.Count, + WithReasonSteps = withReasonSteps, + WithProofPointer = withProofPointer, + FullyExplainable = fullyExplainable + }; + } + + private async Task CollectReplayKpisAsync( + DateTimeOffset start, + DateTimeOffset end, + string? tenantId, + CancellationToken ct) + { + var replays = await _replayRepo.GetInPeriodAsync(start, end, tenantId, ct); + + var successful = replays.Count(r => r.Success); + + var failureReasons = replays + .Where(r => !r.Success && r.FailureReason is not null) + .GroupBy(r => r.FailureReason!) + .ToDictionary(g => g.Key, g => g.Count()); + + return new ReplayKpis + { + TotalAttempts = replays.Count, + Successful = successful, + FailureReasons = failureReasons + }; + } + + private async Task CollectUnknownBudgetKpisAsync( + DateTimeOffset start, + DateTimeOffset end, + string? 
tenantId, + CancellationToken ct) + { + var breaches = await _repository.GetBudgetBreachesAsync(start, end, tenantId, ct); + var overrides = await _repository.GetOverridesAsync(start, end, tenantId, ct); + + return new UnknownBudgetKpis + { + TotalEnvironments = breaches.Keys.Count, + BreachesByEnvironment = breaches, + OverridesGranted = overrides.Count, + AvgOverrideAgeDays = overrides.Any() + ? (decimal)overrides.Average(o => (DateTimeOffset.UtcNow - o.GrantedAt).TotalDays) + : 0 + }; + } + + private async Task CollectOperationalKpisAsync( + DateTimeOffset start, + DateTimeOffset end, + string? tenantId, + CancellationToken ct) + { + var metrics = await _repository.GetOperationalMetricsAsync(start, end, tenantId, ct); + + return new OperationalKpis + { + MedianTimeToVerdictSeconds = metrics.MedianVerdictTime.TotalSeconds, + CacheHitRate = metrics.CacheHitRate, + AvgEvidenceSizeBytes = metrics.AvgEvidenceSize, + P95VerdictTimeSeconds = metrics.P95VerdictTime.TotalSeconds + }; + } + + // Recording methods for real-time tracking + public Task RecordReachabilityResultAsync(Guid findingId, string state, CancellationToken ct) => + _repository.IncrementCounterAsync("reachability", state, ct); + + public Task RecordRuntimeObservationAsync(Guid findingId, string posture, CancellationToken ct) => + _repository.IncrementCounterAsync("runtime", posture, ct); + + public Task RecordVerdictAsync(Guid verdictId, bool hasReasonSteps, bool hasProofPointer, CancellationToken ct) + { + var label = (hasReasonSteps, hasProofPointer) switch + { + (true, true) => "fully_explainable", + (true, false) => "reasons_only", + (false, true) => "proofs_only", + (false, false) => "unexplained" + }; + return _repository.IncrementCounterAsync("explainability", label, ct); + } + + public Task RecordReplayAttemptAsync(Guid attestationId, bool success, string? failureReason, CancellationToken ct) + { + var label = success ? "success" : (failureReason ?? 
"unknown_failure"); + return _repository.IncrementCounterAsync("replay", label, ct); + } +} +``` + +**Acceptance Criteria**: +- [ ] Collects all KPI categories +- [ ] Supports period and tenant filtering +- [ ] Real-time recording methods +- [ ] Handles missing data gracefully + +--- + +### T3: Create API Endpoints + +**Assignee**: Platform Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T2 + +**Implementation Path**: `src/Platform/StellaOps.Platform.WebService/Endpoints/KpiEndpoints.cs` + +```csharp +namespace StellaOps.Platform.WebService.Endpoints; + +public static class KpiEndpoints +{ + public static void MapKpiEndpoints(this WebApplication app) + { + var group = app.MapGroup("/api/v1/metrics/kpis") + .WithTags("Quality KPIs") + .RequireAuthorization("metrics:read"); + + // GET /api/v1/metrics/kpis + group.MapGet("/", async ( + [FromQuery] DateTimeOffset? from, + [FromQuery] DateTimeOffset? to, + [FromQuery] string? tenant, + IKpiCollector collector, + CancellationToken ct) => + { + var start = from ?? DateTimeOffset.UtcNow.AddDays(-7); + var end = to ?? DateTimeOffset.UtcNow; + + var kpis = await collector.CollectAsync(start, end, tenant, ct); + return Results.Ok(kpis); + }) + .WithName("GetQualityKpis") + .WithDescription("Get quality KPIs for explainable triage"); + + // GET /api/v1/metrics/kpis/reachability + group.MapGet("/reachability", async ( + [FromQuery] DateTimeOffset? from, + [FromQuery] DateTimeOffset? to, + [FromQuery] string? tenant, + IKpiCollector collector, + CancellationToken ct) => + { + var kpis = await collector.CollectAsync( + from ?? DateTimeOffset.UtcNow.AddDays(-7), + to ?? DateTimeOffset.UtcNow, + tenant, + ct); + return Results.Ok(kpis.Reachability); + }) + .WithName("GetReachabilityKpis"); + + // GET /api/v1/metrics/kpis/explainability + group.MapGet("/explainability", async ( + [FromQuery] DateTimeOffset? from, + [FromQuery] DateTimeOffset? to, + [FromQuery] string? 
tenant, + IKpiCollector collector, + CancellationToken ct) => + { + var kpis = await collector.CollectAsync( + from ?? DateTimeOffset.UtcNow.AddDays(-7), + to ?? DateTimeOffset.UtcNow, + tenant, + ct); + return Results.Ok(kpis.Explainability); + }) + .WithName("GetExplainabilityKpis"); + + // GET /api/v1/metrics/kpis/trend + group.MapGet("/trend", async ( + [FromQuery] int days = 30, + [FromQuery] string? tenant, + IKpiTrendService trendService, + CancellationToken ct) => + { + var trend = await trendService.GetTrendAsync(days, tenant, ct); + return Results.Ok(trend); + }) + .WithName("GetKpiTrend") + .WithDescription("Get KPI trend over time"); + } +} +``` + +**Acceptance Criteria**: +- [ ] Main KPI endpoint +- [ ] Category-specific endpoints +- [ ] Trend endpoint +- [ ] Period filtering + +--- + +### T4: Add Tests + +**Assignee**: Platform Team +**Story Points**: 2 +**Status**: TODO +**Dependencies**: T1-T3 + +**Test Cases**: +```csharp +public class KpiCollectorTests +{ + [Fact] + public async Task CollectAsync_ReturnsAllCategories() + { + var result = await _collector.CollectAsync( + DateTimeOffset.UtcNow.AddDays(-7), + DateTimeOffset.UtcNow, + ct: CancellationToken.None); + + result.Reachability.Should().NotBeNull(); + result.Runtime.Should().NotBeNull(); + result.Explainability.Should().NotBeNull(); + result.Replay.Should().NotBeNull(); + } + + [Fact] + public async Task CollectAsync_CalculatesPercentagesCorrectly() + { + SetupTestData(totalFindings: 100, withKnownReachability: 75); + + var result = await _collector.CollectAsync( + DateTimeOffset.UtcNow.AddDays(-7), + DateTimeOffset.UtcNow, + ct: CancellationToken.None); + + result.Reachability.PercentKnown.Should().Be(75m); + } + + [Fact] + public async Task RecordVerdictAsync_IncrementsCorrectCounter() + { + await _collector.RecordVerdictAsync( + Guid.NewGuid(), + hasReasonSteps: true, + hasProofPointer: true, + CancellationToken.None); + + _repository.Verify(r => r.IncrementCounterAsync( + 
"explainability", "fully_explainable", It.IsAny())); + } +} +``` + +**Acceptance Criteria**: +- [ ] Collection tests +- [ ] Calculation tests +- [ ] Recording tests + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Platform Team | Define KPI models | +| 2 | T2 | TODO | T1 | Platform Team | Create KpiCollector service | +| 3 | T3 | TODO | T2 | Platform Team | Create API endpoints | +| 4 | T4 | TODO | T1-T3 | Platform Team | Add tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint created from Explainable Triage Workflows advisory gap analysis. | Claude | + +--- + +## Success Criteria + +- [ ] All 4 tasks marked DONE +- [ ] All KPI categories tracked +- [ ] Dashboard API functional +- [ ] Historical trend available +- [ ] All tests pass diff --git a/docs/implplan/SPRINT_7000_SUMMARY.md b/docs/implplan/SPRINT_7000_SUMMARY.md new file mode 100644 index 000000000..391f36eb0 --- /dev/null +++ b/docs/implplan/SPRINT_7000_SUMMARY.md @@ -0,0 +1,414 @@ +# Sprint Epic 7000 - Competitive Moat & Explainable Triage + +## Overview + +Epic 7000 encompasses two major capability sets: + +1. **Competitive Benchmarking** (batch 0001): Verifiable competitive differentiation through benchmarking infrastructure, SBOM lineage semantics, auditor-grade explainability, and integrated three-layer reachability analysis. *Source: 19-Dec-2025 advisory* + +2. **Explainable Triage Workflows** (batches 0002-0005): Policy-backed, reachability-informed, runtime-corroborated verdicts with full explainability and auditability. 
*Source: 21-Dec-2025 advisory* + +**IMPLID**: 7000 (Competitive Moat & Explainable Triage) +**Total Sprints**: 12 +**Total Tasks**: 68 +**Source Advisories**: +- `docs/product-advisories/archived/19-Dec-2025 - Benchmarking Container Scanners Against Stella Ops.md` +- `docs/product-advisories/archived/21-Dec-2025 - Designing Explainable Triage Workflows.md` + +--- + +## Gap Analysis Summary + +| Gap | Severity | Sprint | Status | +|-----|----------|--------|--------| +| No competitive benchmarking infrastructure | HIGH | 7000.0001.0001 | TODO | +| SBOM as static document, no lineage/versioning | HIGH | 7000.0001.0002 | TODO | +| No assumption-set or falsifiability tracking | HIGH | 7000.0001.0003 | TODO | +| 3-layer reachability not integrated | MEDIUM | 7000.0001.0004 | TODO | + +--- + +## Epic Structure + +### Phase 1: Benchmarking Foundation + +| Sprint | Name | Tasks | Priority | Duration | +|--------|------|-------|----------|----------| +| 7000.0001.0001 | [Competitive Benchmarking Infrastructure](SPRINT_7000_0001_0001_competitive_benchmarking.md) | 7 | HIGH | 2 weeks | + +**Key Deliverables**: +- Reference corpus with ground-truth annotations +- Comparison harness for Trivy, Grype, Syft +- Precision/recall/F1 metrics +- Claims index with verifiable evidence +- Marketing battlecard generator + +--- + +### Phase 2: SBOM Evolution + +| Sprint | Name | Tasks | Priority | Duration | +|--------|------|-------|----------|----------| +| 7000.0001.0002 | [SBOM Lineage & Repository Semantics](SPRINT_7000_0001_0002_sbom_lineage.md) | 7 | HIGH | 2 weeks | + +**Key Deliverables**: +- SBOM lineage DAG with content-addressable storage +- Semantic diff engine (component-level deltas) +- Rebuild reproducibility proof manifest +- Lineage traversal API + +--- + +### Phase 3: Explainability Enhancement + +| Sprint | Name | Tasks | Priority | Duration | +|--------|------|-------|----------|----------| +| 7000.0001.0003 | [Explainability with Assumptions & 
Falsifiability](SPRINT_7000_0001_0003_explainability.md) | 7 | HIGH | 2 weeks | + +**Key Deliverables**: +- Assumption-set model (compiler flags, runtime config, feature gates) +- Falsifiability criteria ("what would disprove this?") +- Evidence-density confidence scorer +- Updated DSSE predicate schema + +--- + +### Phase 4: Reachability Integration + +| Sprint | Name | Tasks | Priority | Duration | +|--------|------|-------|----------|----------| +| 7000.0001.0004 | [Three-Layer Reachability Integration](SPRINT_7000_0001_0004_three_layer_reachability.md) | 7 | MEDIUM | 2 weeks | + +**Key Deliverables**: +- `ReachabilityStack` composite model +- Layer 2: Binary loader resolution (ELF/PE) +- Layer 3: Feature flag / config gating +- "All-three-align" exploitability proof + +--- + +## Batch 2: Explainable Triage Foundation + +### Phase 5: Confidence & UX + +| Sprint | Name | Tasks | Priority | +|--------|------|-------|----------| +| 7000.0002.0001 | [Unified Confidence Model](SPRINT_7000_0002_0001_unified_confidence_model.md) | 5 | HIGH | +| 7000.0002.0002 | [Vulnerability-First UX API](SPRINT_7000_0002_0002_vulnerability_first_ux_api.md) | 5 | HIGH | + +**Key Deliverables**: +- `ConfidenceScore` with 5-factor breakdown (Reachability, Runtime, VEX, Provenance, Policy) +- `FindingSummaryResponse` with verdict chip, confidence chip, one-liner +- `ProofBadges` for visual evidence indicators +- Findings list and detail API endpoints + +--- + +### Phase 6: Visualization APIs + +| Sprint | Name | Tasks | Priority | +|--------|------|-------|----------| +| 7000.0003.0001 | [Evidence Graph API](SPRINT_7000_0003_0001_evidence_graph_api.md) | 4 | MEDIUM | +| 7000.0003.0002 | [Reachability Mini-Map API](SPRINT_7000_0003_0002_reachability_minimap_api.md) | 4 | MEDIUM | +| 7000.0003.0003 | [Runtime Timeline API](SPRINT_7000_0003_0003_runtime_timeline_api.md) | 4 | MEDIUM | + +**Key Deliverables**: +- Evidence graph with nodes, edges, signature status +- Reachability mini-map 
with condensed call paths +- Runtime timeline with time-windowed observations and posture + +--- + +### Phase 7: Fidelity & Budgets + +| Sprint | Name | Tasks | Priority | +|--------|------|-------|----------| +| 7000.0004.0001 | [Progressive Fidelity Mode](SPRINT_7000_0004_0001_progressive_fidelity.md) | 5 | HIGH | +| 7000.0004.0002 | [Evidence Size Budgets](SPRINT_7000_0004_0002_evidence_size_budgets.md) | 4 | MEDIUM | + +**Key Deliverables**: +- `FidelityLevel` enum with Quick/Standard/Deep modes +- Fidelity-aware analyzer orchestration with timeouts +- `EvidenceBudget` with per-scan caps +- Retention tier management (Hot/Warm/Cold/Archive) + +--- + +### Phase 8: Metrics & Observability + +| Sprint | Name | Tasks | Priority | +|--------|------|-------|----------| +| 7000.0005.0001 | [Quality KPIs Tracking](SPRINT_7000_0005_0001_quality_kpis_tracking.md) | 5 | MEDIUM | + +**Key Deliverables**: +- `TriageQualityKpis` model +- KPI collection and snapshotting +- Dashboard API endpoint + +--- + +## Dependency Graph + +```mermaid +graph TD + subgraph Batch1["Batch 1: Competitive Moat"] + S7001[7000.0001.0001
Benchmarking] + S7002[7000.0001.0002
SBOM Lineage] + S7003[7000.0001.0003
Explainability] + S7004[7000.0001.0004
3-Layer Reach] + + S7001 --> S7002 + S7002 --> S7004 + S7003 --> S7004 + end + + subgraph Batch2["Batch 2: Explainable Triage"] + S7021[7000.0002.0001
Confidence Model] + S7022[7000.0002.0002
UX API] + S7031[7000.0003.0001
Evidence Graph] + S7032[7000.0003.0002
Mini-Map] + S7033[7000.0003.0003
Timeline] + S7041[7000.0004.0001
Fidelity] + S7042[7000.0004.0002
Budgets] + S7051[7000.0005.0001
KPIs] + + S7021 --> S7022 + S7022 --> S7031 + S7022 --> S7032 + S7022 --> S7033 + S7021 --> S7051 + end + + subgraph External["Related Sprints"] + S4200[4200.0001.0002
VEX Lattice] + S4500[4500.0002.0001
VEX Conflict Studio] + S3500[3500 Series
Score Proofs - DONE] + S4100[4100.0003.0001
Risk Verdict] + end + + S7001 --> S4500 + S3500 --> S7003 + S7021 --> S4100 +``` + +--- + +## Integration Points + +### Scanner Module +- `StellaOps.Scanner.Benchmark` - New library for competitor comparison +- `StellaOps.Scanner.Emit` - Enhanced with lineage tracking +- `StellaOps.Scanner.Reachability` - 3-layer stack integration + +### Policy Module +- `StellaOps.Policy.Explainability` - Assumption-set and falsifiability models + +### Attestor Module +- Updated predicate schemas for explainability fields + +--- + +## Success Criteria + +### Batch 1: Competitive Moat + +#### Sprint 7000.0001.0001 (Benchmarking) +- [ ] 50+ image corpus with ground-truth annotations +- [ ] Automated comparison against Trivy, Grype, Syft +- [ ] Precision/recall metrics published +- [ ] Claims index with evidence links + +#### Sprint 7000.0001.0002 (SBOM Lineage) +- [ ] SBOM versioning with content-addressable storage +- [ ] Semantic diff between SBOM versions +- [ ] Lineage API operational +- [ ] Deterministic diff output + +#### Sprint 7000.0001.0003 (Explainability) +- [ ] Assumption-set tracked for all findings +- [ ] Falsifiability criteria in explainer output +- [ ] Evidence-density confidence scores +- [ ] UI widget for assumption drill-down + +#### Sprint 7000.0001.0004 (3-Layer Reachability) +- [ ] All 3 layers integrated in reachability analysis +- [ ] Binary loader resolution for ELF/PE +- [ ] Feature flag gating detection +- [ ] "Structurally proven" exploitability tier + +### Batch 2: Explainable Triage + +#### Sprint 7000.0002.0001 (Unified Confidence Model) +- [ ] ConfidenceScore model with 5-factor breakdown +- [ ] ConfidenceCalculator service +- [ ] Factor explanations with evidence links +- [ ] Bounded 0.0-1.0 scores + +#### Sprint 7000.0002.0002 (Vulnerability-First UX API) +- [ ] FindingSummaryResponse with verdict/confidence chips +- [ ] ProofBadges for visual indicators +- [ ] Findings list and detail endpoints +- [ ] Drill-down into evidence graph + +#### Sprint 
7000.0003.0001 (Evidence Graph API) +- [ ] EvidenceGraphResponse with nodes and edges +- [ ] Signature status per evidence node +- [ ] Click-through to raw evidence +- [ ] OpenAPI documentation + +#### Sprint 7000.0003.0002 (Reachability Mini-Map API) +- [ ] Condensed call paths +- [ ] Entrypoint to vulnerable component visualization +- [ ] Depth-limited graph extraction +- [ ] Path highlighting + +#### Sprint 7000.0003.0003 (Runtime Timeline API) +- [ ] Time-windowed observation buckets +- [ ] Posture determination (Supports/Contradicts/Unknown) +- [ ] Significant event extraction +- [ ] Session correlation + +#### Sprint 7000.0004.0001 (Progressive Fidelity) +- [ ] FidelityLevel enum (Quick/Standard/Deep) +- [ ] Fidelity-aware analyzer orchestration +- [ ] Configurable timeouts per level +- [ ] Fidelity upgrade endpoint + +#### Sprint 7000.0004.0002 (Evidence Size Budgets) +- [ ] Per-scan evidence caps +- [ ] Retention tier management +- [ ] Size tracking and pruning +- [ ] Budget configuration API + +#### Sprint 7000.0005.0001 (Quality KPIs) +- [ ] % non-UNKNOWN reachability >80% +- [ ] % runtime corroboration >50% +- [ ] Explainability completeness >95% +- [ ] Dashboard endpoint operational + +--- + +## Module Structure + +### Batch 1: Competitive Moat + +``` +src/Scanner/ +├── __Libraries/ +│ ├── StellaOps.Scanner.Benchmark/ # NEW: Competitor comparison +│ │ ├── Corpus/ # Ground-truth corpus +│ │ ├── Harness/ # Comparison harness +│ │ ├── Metrics/ # Precision/recall +│ │ └── Claims/ # Claims index +│ ├── StellaOps.Scanner.Emit/ # ENHANCED +│ │ └── Lineage/ # SBOM lineage tracking +│ ├── StellaOps.Scanner.Explainability/ # NEW: Assumption/falsifiability +│ └── StellaOps.Scanner.Reachability/ # ENHANCED +│ └── Stack/ # 3-layer integration + +src/Policy/ +├── __Libraries/ +│ └── StellaOps.Policy.Explainability/ # NEW: Assumption models +``` + +### Batch 2: Explainable Triage + +``` +src/ +├── Policy/ +│ └── __Libraries/ +│ └── StellaOps.Policy.Confidence/ # NEW: 
Confidence model +│ ├── Models/ +│ │ ├── ConfidenceScore.cs +│ │ └── ConfidenceFactor.cs +│ └── Services/ +│ └── ConfidenceCalculator.cs +├── Scanner/ +│ └── __Libraries/ +│ └── StellaOps.Scanner.Orchestration/ # NEW: Fidelity orchestration +│ └── Fidelity/ +│ ├── FidelityLevel.cs +│ └── FidelityAwareAnalyzer.cs +├── Findings/ +│ └── StellaOps.Findings.WebService/ # EXTEND: UX APIs +│ ├── Contracts/ +│ │ ├── FindingSummaryResponse.cs +│ │ ├── EvidenceGraphResponse.cs +│ │ ├── ReachabilityMiniMap.cs +│ │ └── RuntimeTimeline.cs +│ └── Endpoints/ +│ ├── FindingsEndpoints.cs +│ ├── EvidenceGraphEndpoints.cs +│ ├── ReachabilityMapEndpoints.cs +│ └── RuntimeTimelineEndpoints.cs +├── Evidence/ # NEW: Evidence management +│ └── StellaOps.Evidence/ +│ ├── Budgets/ +│ └── Retention/ +└── Metrics/ # NEW: KPI tracking + └── StellaOps.Metrics/ + └── Kpi/ + ├── TriageQualityKpis.cs + └── KpiCollector.cs +``` + +--- + +## Documentation Created + +### Batch 1: Competitive Moat + +| Document | Location | Purpose | +|----------|----------|---------| +| Sprint Summary | `docs/implplan/SPRINT_7000_SUMMARY.md` | This file | +| Benchmarking Sprint | `docs/implplan/SPRINT_7000_0001_0001_competitive_benchmarking.md` | Sprint details | +| SBOM Lineage Sprint | `docs/implplan/SPRINT_7000_0001_0002_sbom_lineage.md` | Sprint details | +| Explainability Sprint | `docs/implplan/SPRINT_7000_0001_0003_explainability.md` | Sprint details | +| 3-Layer Reachability Sprint | `docs/implplan/SPRINT_7000_0001_0004_three_layer_reachability.md` | Sprint details | +| Claims Index | `docs/claims-index.md` | Verifiable competitive claims | +| Benchmark Architecture | `docs/modules/benchmark/architecture.md` | Module dossier | + +### Batch 2: Explainable Triage + +| Document | Location | Purpose | +|----------|----------|---------| +| Implementation Plan | `docs/modules/platform/explainable-triage-implementation-plan.md` | High-level plan | +| Unified Confidence Model | 
`docs/implplan/SPRINT_7000_0002_0001_unified_confidence_model.md` | Sprint details | +| Vulnerability-First UX API | `docs/implplan/SPRINT_7000_0002_0002_vulnerability_first_ux_api.md` | Sprint details | +| Evidence Graph API | `docs/implplan/SPRINT_7000_0003_0001_evidence_graph_api.md` | Sprint details | +| Reachability Mini-Map API | `docs/implplan/SPRINT_7000_0003_0002_reachability_minimap_api.md` | Sprint details | +| Runtime Timeline API | `docs/implplan/SPRINT_7000_0003_0003_runtime_timeline_api.md` | Sprint details | +| Progressive Fidelity Mode | `docs/implplan/SPRINT_7000_0004_0001_progressive_fidelity.md` | Sprint details | +| Evidence Size Budgets | `docs/implplan/SPRINT_7000_0004_0002_evidence_size_budgets.md` | Sprint details | +| Quality KPIs Tracking | `docs/implplan/SPRINT_7000_0005_0001_quality_kpis_tracking.md` | Sprint details | + +--- + +## Related Work + +### Completed (Leverage) +- **Sprint 3500**: Score Proofs, Unknowns Registry, Reachability foundations +- **Sprint 3600**: CycloneDX 1.7, SPDX 3.0.1 generation +- **EntryTrace**: Semantic, temporal, mesh, binary intelligence + +### In Progress (Coordinate) +- **Sprint 4100**: Unknowns decay, knowledge snapshots +- **Sprint 4200**: Triage API, policy lattice +- **Sprint 5100**: Comprehensive testing strategy +- **Sprint 6000**: BinaryIndex module + +### Planned (Accelerate) +- **Sprint 4500.0002.0001**: VEX Conflict Studio + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Batch 1 (Competitive Moat) created from 19-Dec-2025 advisory. 4 sprints defined. | Agent | +| 2025-12-22 | Batch 2 (Explainable Triage) added from 21-Dec-2025 advisory. 8 sprints defined (73 story points). 
| Claude | + +--- + +**Epic Status**: PLANNING (0/12 sprints complete) diff --git a/docs/implplan/SPRINT_7100_0001_0001_trust_vector_foundation.md b/docs/implplan/SPRINT_7100_0001_0001_trust_vector_foundation.md new file mode 100644 index 000000000..e44acf111 --- /dev/null +++ b/docs/implplan/SPRINT_7100_0001_0001_trust_vector_foundation.md @@ -0,0 +1,356 @@ +# Sprint 7100.0001.0001 — Trust Vector Foundation + +## Topic & Scope +- Implement the foundational 3-component trust vector model (Provenance, Coverage, Replayability) for VEX sources. +- Create claim scoring with strength multipliers and freshness decay. +- Extend VexProvider to support trust vector configuration. +- **Working directory:** `src/Excititor/__Libraries/StellaOps.Excititor.Core/TrustVector/` + +## Dependencies & Concurrency +- **Upstream**: None (foundational sprint) +- **Downstream**: Sprint 7100.0001.0002 (Verdict Manifest) depends on this +- **Safe to parallelize with**: Unrelated epics + +## Documentation Prerequisites +- `docs/product-advisories/archived/22-Dec-2026 - Building a Trust Lattice for VEX Sources.md` +- `docs/modules/excititor/architecture.md` +- `docs/modules/excititor/scoring.md` + +--- + +## Tasks + +### T1: TrustVector Record + +**Assignee**: Excititor Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Create the core TrustVector record with P/C/R components and configurable weights. + +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/TrustVector/TrustVector.cs` + +**Acceptance Criteria**: +- [ ] `TrustVector` record with Provenance, Coverage, Replayability scores +- [ ] `TrustWeights` record with wP, wC, wR (defaults: 0.45, 0.35, 0.20) +- [ ] `BaseTrust` computed property: `wP*P + wC*C + wR*R` +- [ ] Validation: all scores in [0..1] range +- [ ] Immutable, deterministic equality + +**Domain Model Spec**: +```csharp +/// +/// 3-component trust vector for VEX sources. 
+/// +public sealed record TrustVector +{ + /// Provenance score: cryptographic & process integrity [0..1]. + public required double Provenance { get; init; } + + /// Coverage score: how well the statement scope maps to the asset [0..1]. + public required double Coverage { get; init; } + + /// Replayability score: can we deterministically re-derive the claim? [0..1]. + public required double Replayability { get; init; } + + /// Compute base trust using provided weights. + public double ComputeBaseTrust(TrustWeights weights) + => weights.WP * Provenance + weights.WC * Coverage + weights.WR * Replayability; +} + +/// +/// Configurable weights for trust vector components. +/// +public sealed record TrustWeights +{ + public double WP { get; init; } = 0.45; + public double WC { get; init; } = 0.35; + public double WR { get; init; } = 0.20; + + public static TrustWeights Default => new(); +} +``` + +--- + +### T2: Provenance Scoring Rules + +**Assignee**: Excititor Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Implement provenance score calculation based on cryptographic and process integrity. 
+ +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/TrustVector/ProvenanceScorer.cs` + +**Acceptance Criteria**: +- [ ] Score 1.00: DSSE-signed, timestamped, Rekor/Git anchored, key in allow-list +- [ ] Score 0.75: DSSE-signed + public key known, no transparency log +- [ ] Score 0.40: Unsigned but authenticated, immutable artifact repo +- [ ] Score 0.10: Opaque/CSV/email/manual import +- [ ] `IProvenanceScorer` interface for extensibility +- [ ] Unit tests for each scoring tier + +**Scoring Table**: +```csharp +public static class ProvenanceScores +{ + public const double FullyAttested = 1.00; // DSSE + Rekor + key allow-list + public const double SignedNoLog = 0.75; // DSSE + known key, no log + public const double AuthenticatedUnsigned = 0.40; // Immutable repo, no sig + public const double ManualImport = 0.10; // Opaque/CSV/email +} +``` + +--- + +### T3: Coverage Scoring Rules + +**Assignee**: Excititor Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Implement coverage score calculation based on scope matching precision. + +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/TrustVector/CoverageScorer.cs` + +**Acceptance Criteria**: +- [ ] Score 1.00: Exact package + version/build digest + feature/flag context matched +- [ ] Score 0.75: Exact pkg + version range matched; partial feature context +- [ ] Score 0.50: Product-level only; maps via CPE/PURL family +- [ ] Score 0.25: Family-level heuristics; no version proof +- [ ] `ICoverageScorer` interface for extensibility +- [ ] Unit tests for each scoring tier + +--- + +### T4: Replayability Scoring Rules + +**Assignee**: Excititor Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Implement replayability score calculation based on input pinning. 
+ +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/TrustVector/ReplayabilityScorer.cs` + +**Acceptance Criteria**: +- [ ] Score 1.00: All inputs pinned (feeds, SBOM hash, ruleset hash, lattice version); replays byte-identical +- [ ] Score 0.60: Inputs mostly pinned; non-deterministic ordering tolerated but stable outcome +- [ ] Score 0.20: Ephemeral APIs; no snapshot +- [ ] `IReplayabilityScorer` interface for extensibility +- [ ] Unit tests for each scoring tier + +--- + +### T5: ClaimStrength Enum + +**Assignee**: Excititor Team +**Story Points**: 2 +**Status**: TODO + +**Description**: +Create claim strength enum with evidence-based multipliers. + +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/TrustVector/ClaimStrength.cs` + +**Acceptance Criteria**: +- [ ] `ClaimStrength` enum with numeric multiplier values +- [ ] `ExploitabilityWithReachability` = 1.00 (analysis + reachability proof subgraph) +- [ ] `ConfigWithEvidence` = 0.80 (config/feature-flag with evidence) +- [ ] `VendorBlanket` = 0.60 (vendor blanket statement) +- [ ] `UnderInvestigation` = 0.40 (investigation in progress) +- [ ] Extension method `ToMultiplier()` for calculations + +**Domain Model Spec**: +```csharp +public enum ClaimStrength +{ + /// Exploitability analysis with reachability proof subgraph. + ExploitabilityWithReachability = 100, + + /// Config/feature-flag reason with evidence. + ConfigWithEvidence = 80, + + /// Vendor blanket statement. + VendorBlanket = 60, + + /// Under investigation. + UnderInvestigation = 40 +} + +public static class ClaimStrengthExtensions +{ + public static double ToMultiplier(this ClaimStrength strength) + => (int)strength / 100.0; +} +``` + +--- + +### T6: FreshnessCalculator + +**Assignee**: Excititor Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Implement freshness decay calculation with configurable half-life. 
+ +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/TrustVector/FreshnessCalculator.cs` + +**Acceptance Criteria**: +- [ ] Exponential decay formula: `F = exp(-ln(2) * age_days / half_life)` +- [ ] Configurable half-life (default 90 days) +- [ ] Configurable floor (default 0.35, minimum freshness) +- [ ] `Compute(DateTimeOffset issuedAt, DateTimeOffset cutoff)` method +- [ ] Pure function, deterministic output +- [ ] Unit tests for decay curve, boundary conditions + +**Implementation Spec**: +```csharp +public sealed class FreshnessCalculator +{ + public double HalfLifeDays { get; init; } = 90.0; + public double Floor { get; init; } = 0.35; + + public double Compute(DateTimeOffset issuedAt, DateTimeOffset cutoff) + { + var ageDays = (cutoff - issuedAt).TotalDays; + if (ageDays < 0) return 1.0; // Future date, full freshness + var decay = Math.Exp(-Math.Log(2) * ageDays / HalfLifeDays); + return Math.Max(decay, Floor); + } +} +``` + +--- + +### T7: ClaimScoreCalculator + +**Assignee**: Excititor Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Implement the complete claim score calculation: `ClaimScore = BaseTrust(S) * M * F`. 
+ +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/TrustVector/ClaimScoreCalculator.cs` + +**Acceptance Criteria**: +- [ ] `IClaimScoreCalculator` interface +- [ ] `ClaimScoreCalculator` implementation +- [ ] `Compute(TrustVector, TrustWeights, ClaimStrength, DateTimeOffset issuedAt, DateTimeOffset cutoff)` method +- [ ] Returns `ClaimScoreResult` with score + breakdown (baseTrust, strength, freshness) +- [ ] Pure function, deterministic output +- [ ] Unit tests for various input combinations + +**Domain Model Spec**: +```csharp +public sealed record ClaimScoreResult +{ + public required double Score { get; init; } + public required double BaseTrust { get; init; } + public required double StrengthMultiplier { get; init; } + public required double FreshnessMultiplier { get; init; } + public required TrustVector Vector { get; init; } + public required TrustWeights Weights { get; init; } +} + +public interface IClaimScoreCalculator +{ + ClaimScoreResult Compute( + TrustVector vector, + TrustWeights weights, + ClaimStrength strength, + DateTimeOffset issuedAt, + DateTimeOffset cutoff); +} +``` + +--- + +### T8: Extend VexProvider with TrustVector + +**Assignee**: Excititor Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Extend the existing VexProvider model to support TrustVector configuration. + +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/VexProvider.cs` + +**Acceptance Criteria**: +- [ ] Add `TrustVector? Vector` property to `VexProviderTrust` +- [ ] Backward compatibility: if Vector is null, fall back to legacy Weight +- [ ] Add `TrustWeights? 
Weights` property for per-provider weight overrides +- [ ] Migration path from legacy Weight to TrustVector documented +- [ ] Unit tests for backward compatibility + +--- + +### T9: Unit Tests — Determinism Validation + +**Assignee**: Excititor Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Comprehensive unit tests ensuring deterministic scoring across all components. + +**Implementation Path**: `src/Excititor/__Tests/StellaOps.Excititor.Core.Tests/TrustVector/` + +**Acceptance Criteria**: +- [ ] TrustVector construction and validation tests +- [ ] ProvenanceScorer tests for all tiers +- [ ] CoverageScorer tests for all tiers +- [ ] ReplayabilityScorer tests for all tiers +- [ ] FreshnessCalculator decay curve tests +- [ ] ClaimScoreCalculator integration tests +- [ ] Determinism tests: same inputs → identical outputs (1000 iterations) +- [ ] Boundary condition tests (edge values, nulls, extremes) +- [ ] Test coverage ≥90% + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Excititor Team | TrustVector Record | +| 2 | T2 | TODO | T1 | Excititor Team | Provenance Scoring Rules | +| 3 | T3 | TODO | T1 | Excititor Team | Coverage Scoring Rules | +| 4 | T4 | TODO | T1 | Excititor Team | Replayability Scoring Rules | +| 5 | T5 | TODO | — | Excititor Team | ClaimStrength Enum | +| 6 | T6 | TODO | — | Excititor Team | FreshnessCalculator | +| 7 | T7 | TODO | T1-T6 | Excititor Team | ClaimScoreCalculator | +| 8 | T8 | TODO | T1 | Excititor Team | Extend VexProvider | +| 9 | T9 | TODO | T1-T8 | Excititor Team | Unit Tests — Determinism | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint file created from advisory processing. 
| Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Weight defaults | Decision | Excititor Team | Using wP=0.45, wC=0.35, wR=0.20 per advisory | +| Freshness floor | Decision | Excititor Team | 0.35 floor prevents complete decay to zero | +| Backward compatibility | Risk | Excititor Team | Legacy single-weight mode must work during transition | +| Scorer extensibility | Decision | Excititor Team | Interface-based design allows custom scoring rules | + +--- + +**Sprint Status**: TODO (0/9 tasks complete) diff --git a/docs/implplan/SPRINT_7100_0001_0002_verdict_manifest_replay.md b/docs/implplan/SPRINT_7100_0001_0002_verdict_manifest_replay.md new file mode 100644 index 000000000..194ea936d --- /dev/null +++ b/docs/implplan/SPRINT_7100_0001_0002_verdict_manifest_replay.md @@ -0,0 +1,462 @@ +# Sprint 7100.0001.0002 — Verdict Manifest & Deterministic Replay + +## Topic & Scope +- Implement DSSE-signed verdict manifests for replayable VEX decisions. +- Create PostgreSQL storage and indexing for verdict manifests. +- Build replay verification endpoint. 
+- **Working directory:** `src/Authority/__Libraries/StellaOps.Authority.Core/Verdicts/` + +## Dependencies & Concurrency +- **Upstream**: Sprint 7100.0001.0001 (Trust Vector Foundation) +- **Downstream**: Sprint 7100.0002.0002 (Calibration), Sprint 7100.0003.0001 (UI) +- **Safe to parallelize with**: Sprint 7100.0002.0001 (Policy Gates) + +## Documentation Prerequisites +- `docs/product-advisories/archived/22-Dec-2026 - Building a Trust Lattice for VEX Sources.md` +- `docs/modules/authority/architecture.md` +- `docs/modules/excititor/architecture.md` +- `src/Attestor/__Libraries/StellaOps.Attestor.Dsse/` (DSSE implementation) + +--- + +## Tasks + +### T1: VerdictManifest Domain Model + +**Assignee**: Authority Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Create the VerdictManifest model that captures all inputs and outputs for deterministic replay. + +**Implementation Path**: `src/Authority/__Libraries/StellaOps.Authority.Core/Verdicts/VerdictManifest.cs` + +**Acceptance Criteria**: +- [ ] `VerdictManifest` record with all required fields +- [ ] Input pinning: SBOM digests, vuln feed snapshot IDs, VEX document digests +- [ ] Output fields: status, confidence, explanation, evidence refs +- [ ] Policy context: policy_hash, lattice_version +- [ ] Immutable, deterministic serialization +- [ ] JSON Schema validation + +**Domain Model Spec**: +```csharp +public sealed record VerdictManifest +{ + // Identity + public required string ManifestId { get; init; } + public required string Tenant { get; init; } + + // Scope + public required string AssetDigest { get; init; } + public required string VulnerabilityId { get; init; } + + // Inputs (pinned for replay) + public required VerdictInputs Inputs { get; init; } + + // Verdict + public required VerdictResult Result { get; init; } + + // Policy context + public required string PolicyHash { get; init; } + public required string LatticeVersion { get; init; } + + // Metadata + public required DateTimeOffset 
EvaluatedAt { get; init; } + public required string ManifestDigest { get; init; } +} + +public sealed record VerdictInputs +{ + public required ImmutableArray<string> SbomDigests { get; init; } + public required ImmutableArray<string> VulnFeedSnapshotIds { get; init; } + public required ImmutableArray<string> VexDocumentDigests { get; init; } + public required ImmutableArray<string> ReachabilityGraphIds { get; init; } + public required DateTimeOffset ClockCutoff { get; init; } +} + +public sealed record VerdictResult +{ + public required VexStatus Status { get; init; } + public required double Confidence { get; init; } + public required ImmutableArray<VerdictExplanation> Explanations { get; init; } + public required ImmutableArray<string> EvidenceRefs { get; init; } +} + +public sealed record VerdictExplanation +{ + public required string SourceId { get; init; } + public required string Reason { get; init; } + public required double ProvenanceScore { get; init; } + public required double CoverageScore { get; init; } + public required double ReplayabilityScore { get; init; } + public required double StrengthMultiplier { get; init; } + public required double FreshnessMultiplier { get; init; } + public required double ClaimScore { get; init; } +} +``` + +--- + +### T2: VerdictManifestBuilder + +**Assignee**: Authority Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Create builder for deterministic assembly of verdict manifests with stable ordering. 
+ +**Implementation Path**: `src/Authority/__Libraries/StellaOps.Authority.Core/Verdicts/VerdictManifestBuilder.cs` + +**Acceptance Criteria**: +- [ ] Fluent builder API for manifest construction +- [ ] Stable sorting of all collections (by issuer_did, statement_digest) +- [ ] Canonical JSON serialization (sorted keys, UTC timestamps) +- [ ] Automatic manifest digest computation (sha256) +- [ ] Validation before build (required fields, valid ranges) +- [ ] Pure function, deterministic output + +**Implementation Spec**: +```csharp +public sealed class VerdictManifestBuilder +{ + public VerdictManifestBuilder WithTenant(string tenant); + public VerdictManifestBuilder WithAsset(string assetDigest, string vulnId); + public VerdictManifestBuilder WithInputs(VerdictInputs inputs); + public VerdictManifestBuilder WithResult(VerdictResult result); + public VerdictManifestBuilder WithPolicy(string policyHash, string latticeVersion); + public VerdictManifestBuilder WithClock(DateTimeOffset evaluatedAt); + + public VerdictManifest Build(); +} +``` + +--- + +### T3: DSSE Signing for Verdict Manifests + +**Assignee**: Authority Team + Signer Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Implement DSSE envelope signing for verdict manifests using existing Signer infrastructure. 
+ +**Implementation Path**: `src/Authority/__Libraries/StellaOps.Authority.Core/Verdicts/VerdictManifestSigner.cs` + +**Acceptance Criteria**: +- [ ] `IVerdictManifestSigner` interface +- [ ] Integration with `StellaOps.Signer` module +- [ ] Predicate type: `https://stella-ops.org/attestations/vex-verdict/1` +- [ ] Support for multiple signature schemes (DSSE, Sigstore) +- [ ] Optional Rekor transparency logging +- [ ] Signature verification method + +**Predicate Schema**: +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "type": "object", + "required": ["manifestId", "assetDigest", "vulnerabilityId", "status", "confidence", "policyHash", "latticeVersion"], + "properties": { + "manifestId": { "type": "string" }, + "assetDigest": { "type": "string" }, + "vulnerabilityId": { "type": "string" }, + "status": { "type": "string", "enum": ["affected", "not_affected", "fixed", "under_investigation"] }, + "confidence": { "type": "number", "minimum": 0, "maximum": 1 }, + "policyHash": { "type": "string" }, + "latticeVersion": { "type": "string" }, + "evaluatedAt": { "type": "string", "format": "date-time" } + } +} +``` + +--- + +### T4: PostgreSQL Schema for Verdict Manifests + +**Assignee**: Authority Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Create database migration for verdict manifest storage. 
 + +**Implementation Path**: `src/Authority/__Libraries/StellaOps.Authority.Storage.Postgres/Migrations/` + +**Acceptance Criteria**: +- [ ] `authority.verdict_manifests` table +- [ ] Indexes on: (asset_digest, vulnerability_id), (policy_hash, lattice_version), (evaluated_at) +- [ ] Compound index for replay queries +- [ ] BRIN index on evaluated_at for time-based queries +- [ ] Signature storage in JSONB column + +**Schema Spec**: +```sql +CREATE TABLE authority.verdict_manifests ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + manifest_id TEXT NOT NULL UNIQUE, + tenant TEXT NOT NULL, + + -- Scope + asset_digest TEXT NOT NULL, + vulnerability_id TEXT NOT NULL, + + -- Inputs (JSONB for flexibility) + inputs_json JSONB NOT NULL, + + -- Result + status TEXT NOT NULL CHECK (status IN ('affected', 'not_affected', 'fixed', 'under_investigation')), + confidence DOUBLE PRECISION NOT NULL CHECK (confidence >= 0 AND confidence <= 1), + result_json JSONB NOT NULL, + + -- Policy context + policy_hash TEXT NOT NULL, + lattice_version TEXT NOT NULL, + + -- Metadata + evaluated_at TIMESTAMPTZ NOT NULL, + manifest_digest TEXT NOT NULL, + + -- Signature + signature_json JSONB, + rekor_log_id TEXT, + + -- Timestamps + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Primary lookup: asset + CVE +CREATE INDEX idx_verdict_asset_vuln ON authority.verdict_manifests(tenant, asset_digest, vulnerability_id); + +-- Replay queries: same policy + lattice +CREATE INDEX idx_verdict_policy ON authority.verdict_manifests(tenant, policy_hash, lattice_version); + +-- Time-based queries +CREATE INDEX idx_verdict_time ON authority.verdict_manifests USING BRIN (evaluated_at); + +-- Composite for deterministic replay +CREATE UNIQUE INDEX idx_verdict_replay ON authority.verdict_manifests( + tenant, asset_digest, vulnerability_id, policy_hash, lattice_version +); +``` + +--- + +### T5: IVerdictManifestStore Interface + +**Assignee**: Authority Team +**Story Points**: 3 +**Status**: TODO + 
+**Description**: +Create repository interface for verdict manifest persistence. + +**Implementation Path**: `src/Authority/__Libraries/StellaOps.Authority.Core/Verdicts/IVerdictManifestStore.cs` + +**Acceptance Criteria**: +- [ ] `IVerdictManifestStore` interface +- [ ] Methods: Store, GetById, GetByScope, GetByPolicy, GetLatest +- [ ] Support for signed manifest retrieval +- [ ] Pagination for list queries +- [ ] Tenant isolation + +**Interface Spec**: +```csharp +public interface IVerdictManifestStore +{ + Task StoreAsync( + VerdictManifest manifest, + byte[]? signature = null, + string? rekorLogId = null, + CancellationToken ct = default); + + Task<VerdictManifest?> GetByIdAsync( + string tenant, + string manifestId, + CancellationToken ct = default); + + Task<VerdictManifest?> GetByScopeAsync( + string tenant, + string assetDigest, + string vulnerabilityId, + string? policyHash = null, + string? latticeVersion = null, + CancellationToken ct = default); + + Task<IReadOnlyList<VerdictManifest>> ListByPolicyAsync( + string tenant, + string policyHash, + string latticeVersion, + int limit = 100, + string? pageToken = null, + CancellationToken ct = default); +} +``` + +--- + +### T6: PostgreSQL Store Implementation + +**Assignee**: Authority Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Implement PostgreSQL repository for verdict manifests. + +**Implementation Path**: `src/Authority/__Libraries/StellaOps.Authority.Storage.Postgres/VerdictManifestStore.cs` + +**Acceptance Criteria**: +- [ ] `PostgresVerdictManifestStore` implementation +- [ ] Uses Npgsql with Dapper +- [ ] Canonical JSON serialization for JSONB columns +- [ ] Efficient scope queries +- [ ] Deterministic ordering for pagination + +--- + +### T7: Replay Verification Service + +**Assignee**: Authority Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Create service that verifies verdict manifests can be replayed to produce identical results. 
 + +**Implementation Path**: `src/Authority/__Libraries/StellaOps.Authority.Core/Verdicts/VerdictReplayVerifier.cs` + +**Acceptance Criteria**: +- [ ] `IVerdictReplayVerifier` interface +- [ ] Retrieves pinned inputs from manifest +- [ ] Re-executes trust lattice evaluation +- [ ] Compares result with stored verdict +- [ ] Returns detailed diff on mismatch +- [ ] Verifies signature if present + +**Interface Spec**: +```csharp +public sealed record ReplayVerificationResult +{ + public required bool Success { get; init; } + public required VerdictManifest OriginalManifest { get; init; } + public VerdictManifest? ReplayedManifest { get; init; } + public ImmutableArray<string>? Differences { get; init; } + public bool SignatureValid { get; init; } + public string? Error { get; init; } +} + +public interface IVerdictReplayVerifier +{ + Task<ReplayVerificationResult> VerifyAsync( + string manifestId, + CancellationToken ct = default); +} +``` + +--- + +### T8: Replay Verification API Endpoint + +**Assignee**: Authority Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Create API endpoint for replay verification. 
+ +**Implementation Path**: `src/Authority/StellaOps.Authority.WebService/Controllers/VerdictController.cs` + +**Acceptance Criteria**: +- [ ] `POST /api/v1/authority/verdicts/{manifestId}/replay` endpoint +- [ ] Scope: `verdict.read` +- [ ] Returns `ReplayVerificationResult` +- [ ] Rate limiting: 10 req/min per tenant +- [ ] OpenAPI documentation + +**API Spec**: +```yaml +/api/v1/authority/verdicts/{manifestId}/replay: + post: + operationId: replayVerdict + summary: Verify verdict can be replayed + parameters: + - name: manifestId + in: path + required: true + schema: + type: string + responses: + 200: + description: Replay verification result + content: + application/json: + schema: + $ref: '#/components/schemas/ReplayVerificationResult' + 404: + description: Manifest not found +``` + +--- + +### T9: Integration Tests + +**Assignee**: Authority Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Integration tests for verdict manifest pipeline. + +**Implementation Path**: `src/Authority/__Tests/StellaOps.Authority.Core.Tests/Verdicts/` + +**Acceptance Criteria**: +- [ ] Manifest construction tests +- [ ] DSSE signing and verification tests +- [ ] PostgreSQL store CRUD tests +- [ ] Replay verification tests (success and failure cases) +- [ ] Determinism tests: same inputs → identical manifests (1000 iterations) +- [ ] Concurrent access tests +- [ ] Test coverage ≥85% + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Authority Team | VerdictManifest Domain Model | +| 2 | T2 | TODO | T1 | Authority Team | VerdictManifestBuilder | +| 3 | T3 | TODO | T1 | Authority + Signer | DSSE Signing | +| 4 | T4 | TODO | T1 | Authority Team | PostgreSQL Schema | +| 5 | T5 | TODO | T1 | Authority Team | Store Interface | +| 6 | T6 | TODO | T4, T5 | Authority Team | PostgreSQL Implementation | +| 7 | T7 | TODO | T1, T6 | Authority 
Team | Replay Verification Service | +| 8 | T8 | TODO | T7 | Authority Team | Replay API Endpoint | +| 9 | T9 | TODO | T1-T8 | Authority Team | Integration Tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint file created from advisory processing. | Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Predicate type | Decision | Authority Team | Using `https://stella-ops.org/attestations/vex-verdict/1` | +| Composite unique index | Decision | Authority Team | Ensures one verdict per (asset, CVE, policy, lattice) | +| Rekor optional | Decision | Authority Team | Transparency logging is opt-in per policy | +| Replay performance | Risk | Authority Team | Full replay may be expensive; consider caching | + +--- + +**Sprint Status**: TODO (0/9 tasks complete) diff --git a/docs/implplan/SPRINT_7100_0002_0001_policy_gates_merge.md b/docs/implplan/SPRINT_7100_0002_0001_policy_gates_merge.md new file mode 100644 index 000000000..fe4918030 --- /dev/null +++ b/docs/implplan/SPRINT_7100_0002_0001_policy_gates_merge.md @@ -0,0 +1,422 @@ +# Sprint 7100.0002.0001 — Policy Gates & Lattice Merge + +## Topic & Scope +- Extend TrustLatticeEngine with ClaimScore-based merge algorithm. +- Implement policy gates for explainable decision control. +- Add conflict penalty mechanism for contradictory claims. 
+- **Working directory:** `src/Policy/__Libraries/StellaOps.Policy/TrustLattice/` and `src/Policy/__Libraries/StellaOps.Policy/Gates/` + +## Dependencies & Concurrency +- **Upstream**: Sprint 7100.0001.0001 (Trust Vector Foundation) +- **Downstream**: Sprint 7100.0002.0002 (Calibration), Sprint 7100.0003.0001 (UI) +- **Safe to parallelize with**: Sprint 7100.0001.0002 (Verdict Manifest) + +## Documentation Prerequisites +- `docs/product-advisories/archived/22-Dec-2026 - Building a Trust Lattice for VEX Sources.md` +- `docs/modules/policy/architecture.md` +- `src/Policy/__Libraries/StellaOps.Policy/TrustLattice/TrustLatticeEngine.cs` + +--- + +## Tasks + +### T1: ClaimScoreMerger + +**Assignee**: Policy Team +**Story Points**: 8 +**Status**: TODO + +**Description**: +Implement the core merge algorithm that selects verdicts based on ClaimScore with conflict handling. + +**Implementation Path**: `src/Policy/__Libraries/StellaOps.Policy/TrustLattice/ClaimScoreMerger.cs` + +**Acceptance Criteria**: +- [ ] `IClaimScoreMerger` interface +- [ ] Partial order on claims by (scope specificity, ClaimScore) +- [ ] Conflict detection: contradictory statuses trigger conflict mode +- [ ] Conflict penalty: 0.25 down-weight on older/weaker claims +- [ ] Winner selection: `argmax(ClaimScore)` after adjustments +- [ ] Audit trail generation with all considered claims +- [ ] Deterministic: stable ordering for tie-breaking + +**Algorithm Spec**: +```csharp +public sealed record MergeResult +{ + public required VexStatus Status { get; init; } + public required double Confidence { get; init; } + public required bool HasConflicts { get; init; } + public required ImmutableArray AllClaims { get; init; } + public required ScoredClaim WinningClaim { get; init; } + public required ImmutableArray Conflicts { get; init; } +} + +public sealed record ScoredClaim +{ + public required string SourceId { get; init; } + public required VexStatus Status { get; init; } + public required double 
OriginalScore { get; init; }
+    public required double AdjustedScore { get; init; }
+    public required int ScopeSpecificity { get; init; }
+    public required bool Accepted { get; init; }
+    public required string Reason { get; init; }
+}
+
+public interface IClaimScoreMerger
+{
+    MergeResult Merge(
+        IEnumerable<(VexClaim Claim, ClaimScoreResult Score)> scoredClaims,
+        MergePolicy policy,
+        CancellationToken ct = default);
+}
+```
+
+---
+
+### T2: Conflict Penalty Implementation
+
+**Assignee**: Policy Team
+**Story Points**: 3
+**Status**: TODO
+
+**Description**:
+Implement conflict penalty mechanism for contradictory VEX claims.
+
+**Implementation Path**: `src/Policy/__Libraries/StellaOps.Policy/TrustLattice/ConflictPenalizer.cs`
+
+**Acceptance Criteria**:
+- [ ] Detect contradictory claims (different statuses for same CVE+asset)
+- [ ] Apply configurable penalty (default delta=0.25)
+- [ ] Penalty applied to older/weaker claims, not the strongest
+- [ ] Preserve original scores for audit trail
+- [ ] Trigger replay proof requirement when conflicts exist
+
+**Implementation Spec**:
+```csharp
+public sealed class ConflictPenalizer
+{
+    public double ConflictPenalty { get; init; } = 0.25;
+
+    public IReadOnlyList<ScoredClaim> ApplyPenalties(
+        IReadOnlyList<ScoredClaim> claims)
+    {
+        var statuses = claims.Select(c => c.Status).Distinct().ToList();
+        if (statuses.Count <= 1)
+            return claims; // No conflict
+
+        // Find strongest claim
+        var strongest = claims.OrderByDescending(c => c.OriginalScore).First();
+
+        // Penalize all claims that disagree with strongest
+        return claims.Select(c =>
+        {
+            if (c.Status == strongest.Status)
+                return c;
+
+            return c with
+            {
+                AdjustedScore = c.OriginalScore * (1 - ConflictPenalty),
+                Reason = $"Conflict penalty applied (disagrees with {strongest.SourceId})"
+            };
+        }).ToList();
+    }
+}
+```
+
+---
+
+### T3: MinimumConfidenceGate
+
+**Assignee**: Policy Team
+**Story Points**: 3
+**Status**: TODO
+
+**Description**:
+Implement policy gate that requires 
minimum confidence by environment.
+
+**Implementation Path**: `src/Policy/__Libraries/StellaOps.Policy/Gates/MinimumConfidenceGate.cs`
+
+**Acceptance Criteria**:
+- [ ] `IPolicyGate` interface implementation
+- [ ] Configurable minimum confidence per environment (e.g., prod ≥ 0.75)
+- [ ] Fail verdict if confidence below threshold for "not_affected"
+- [ ] Allow "affected" status regardless of confidence (conservative)
+- [ ] Return clear gate failure reason
+
+**Configuration Spec**:
+```yaml
+gates:
+  minimumConfidence:
+    enabled: true
+    thresholds:
+      production: 0.75
+      staging: 0.60
+      development: 0.40
+    applyToStatuses:
+      - not_affected
+      - fixed
+```
+
+---
+
+### T4: UnknownsBudgetGate
+
+**Assignee**: Policy Team
+**Story Points**: 5
+**Status**: TODO
+
+**Description**:
+Implement policy gate that fails if unknowns exceed budget.
+
+**Implementation Path**: `src/Policy/__Libraries/StellaOps.Policy/Gates/UnknownsBudgetGate.cs`
+
+**Acceptance Criteria**:
+- [ ] Configurable max unknown count (e.g., N=5)
+- [ ] Configurable cumulative uncertainty threshold (e.g., T=2.0)
+- [ ] Fail if #unknown deps > N
+- [ ] Fail if Σ(1-ClaimScore) over unknowns > T
+- [ ] Integration with Unknowns Registry from Sprint 3500
+
+**Configuration Spec**:
+```yaml
+gates:
+  unknownsBudget:
+    enabled: true
+    maxUnknownCount: 5
+    maxCumulativeUncertainty: 2.0
+    escalateOnFail: true
+```
+
+---
+
+### T5: SourceQuotaGate
+
+**Assignee**: Policy Team
+**Story Points**: 5
+**Status**: TODO
+
+**Description**:
+Implement policy gate that caps influence from any single vendor. 
+ +**Implementation Path**: `src/Policy/__Libraries/StellaOps.Policy/Gates/SourceQuotaGate.cs` + +**Acceptance Criteria**: +- [ ] Configurable max influence per source (default 60%) +- [ ] Fail if single source dominates verdict without corroboration +- [ ] Corroboration threshold: second source within delta=0.1 +- [ ] Apply to verdicts where source influence exceeds quota +- [ ] Return details of which sources exceeded quota + +**Configuration Spec**: +```yaml +gates: + sourceQuota: + enabled: true + maxInfluencePercent: 60 + corroborationDelta: 0.10 + requireCorroborationFor: + - not_affected + - fixed +``` + +--- + +### T6: ReachabilityRequirementGate + +**Assignee**: Policy Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Implement policy gate that requires reachability proof for critical vulnerabilities. + +**Implementation Path**: `src/Policy/__Libraries/StellaOps.Policy/Gates/ReachabilityRequirementGate.cs` + +**Acceptance Criteria**: +- [ ] Require reachability proof for "not_affected" on critical CVEs +- [ ] Integration with reachability graph from Scanner module +- [ ] Configurable severity threshold (default: CRITICAL) +- [ ] Configurable bypass for specific reason codes +- [ ] Fail with clear reason if reachability proof missing + +**Configuration Spec**: +```yaml +gates: + reachabilityRequirement: + enabled: true + severityThreshold: CRITICAL + requiredForStatuses: + - not_affected + bypassReasons: + - component_not_present + - vulnerable_configuration_unused +``` + +--- + +### T7: Policy Gate Registry + +**Assignee**: Policy Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Create registry for managing and executing policy gates. 
+
+**Implementation Path**: `src/Policy/__Libraries/StellaOps.Policy/Gates/PolicyGateRegistry.cs`
+
+**Acceptance Criteria**:
+- [ ] `IPolicyGateRegistry` interface
+- [ ] Register gates by name
+- [ ] Execute gates in configured order
+- [ ] Short-circuit on first failure (configurable)
+- [ ] Aggregate all gate results for audit
+- [ ] DI integration for gate injection
+
+**Interface Spec**:
+```csharp
+public sealed record GateResult
+{
+    public required string GateName { get; init; }
+    public required bool Passed { get; init; }
+    public required string? Reason { get; init; }
+    public required ImmutableDictionary<string, string> Details { get; init; }
+}
+
+public sealed record GateEvaluationResult
+{
+    public required bool AllPassed { get; init; }
+    public required ImmutableArray<GateResult> Results { get; init; }
+    public GateResult? FirstFailure => Results.FirstOrDefault(r => !r.Passed);
+}
+
+public interface IPolicyGateRegistry
+{
+    void Register<TGate>(string name) where TGate : IPolicyGate;
+    Task<GateEvaluationResult> EvaluateAsync(
+        MergeResult mergeResult,
+        PolicyGateContext context,
+        CancellationToken ct = default);
+}
+```
+
+---
+
+### T8: Policy Configuration Schema
+
+**Assignee**: Policy Team
+**Story Points**: 3
+**Status**: TODO
+
+**Description**:
+Create configuration schema for policy gates and merge settings. 
+ +**Implementation Path**: `etc/policy-gates.yaml.sample` + +**Acceptance Criteria**: +- [ ] YAML schema for all gates +- [ ] JSON Schema validation +- [ ] Sample configuration file +- [ ] Documentation in `docs/modules/policy/` +- [ ] Environment variable overrides + +**Sample Configuration**: +```yaml +# etc/policy-gates.yaml.sample +version: "1.0" +trustLattice: + weights: + provenance: 0.45 + coverage: 0.35 + replayability: 0.20 + freshness: + halfLifeDays: 90 + floor: 0.35 + conflictPenalty: 0.25 + +gates: + minimumConfidence: + enabled: true + thresholds: + production: 0.75 + staging: 0.60 + development: 0.40 + + unknownsBudget: + enabled: true + maxUnknownCount: 5 + maxCumulativeUncertainty: 2.0 + + sourceQuota: + enabled: true + maxInfluencePercent: 60 + corroborationDelta: 0.10 + + reachabilityRequirement: + enabled: true + severityThreshold: CRITICAL +``` + +--- + +### T9: Unit Tests + +**Assignee**: Policy Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Comprehensive unit tests for merge algorithm and all gates. 
+ +**Implementation Path**: `src/Policy/__Tests/StellaOps.Policy.Tests/TrustLattice/` + +**Acceptance Criteria**: +- [ ] ClaimScoreMerger tests for all scenarios +- [ ] Conflict penalty tests +- [ ] MinimumConfidenceGate edge cases +- [ ] UnknownsBudgetGate threshold tests +- [ ] SourceQuotaGate corroboration tests +- [ ] ReachabilityRequirementGate integration tests +- [ ] Gate registry ordering tests +- [ ] Determinism tests (1000 iterations) +- [ ] Test coverage ≥90% + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Policy Team | ClaimScoreMerger | +| 2 | T2 | TODO | T1 | Policy Team | Conflict Penalty | +| 3 | T3 | TODO | T1 | Policy Team | MinimumConfidenceGate | +| 4 | T4 | TODO | T1 | Policy Team | UnknownsBudgetGate | +| 5 | T5 | TODO | T1 | Policy Team | SourceQuotaGate | +| 6 | T6 | TODO | T1 | Policy Team | ReachabilityRequirementGate | +| 7 | T7 | TODO | T3-T6 | Policy Team | Gate Registry | +| 8 | T8 | TODO | T3-T6 | Policy Team | Configuration Schema | +| 9 | T9 | TODO | T1-T8 | Policy Team | Unit Tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint file created from advisory processing. 
| Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Gate execution order | Decision | Policy Team | MinConfidence → Unknowns → SourceQuota → Reachability | +| Short-circuit behavior | Decision | Policy Team | First failure stops evaluation by default | +| Conflict penalty value | Decision | Policy Team | Using 0.25 (25%) per advisory | +| Reachability integration | Risk | Policy Team | Depends on Sprint 3500 reachability graphs | + +--- + +**Sprint Status**: TODO (0/9 tasks complete) diff --git a/docs/implplan/SPRINT_7100_0002_0002_source_defaults_calibration.md b/docs/implplan/SPRINT_7100_0002_0002_source_defaults_calibration.md new file mode 100644 index 000000000..5dac6f27b --- /dev/null +++ b/docs/implplan/SPRINT_7100_0002_0002_source_defaults_calibration.md @@ -0,0 +1,537 @@ +# Sprint 7100.0002.0002 — Source Defaults & Calibration + +## Topic & Scope +- Define default trust vectors for Vendor/Distro/Internal source classes. +- Implement calibration system for rolling trust weight adjustment. +- Create CalibrationManifest for auditable tuning history. +- **Working directory:** `src/Excititor/__Libraries/StellaOps.Excititor.Core/TrustVector/` and `src/Excititor/__Libraries/StellaOps.Excititor.Core/Calibration/` + +## Dependencies & Concurrency +- **Upstream**: Sprint 7100.0001.0001 (Trust Vector), Sprint 7100.0002.0001 (Policy Gates) +- **Downstream**: Sprint 7100.0003.0002 (Integration) +- **Safe to parallelize with**: Sprint 7100.0003.0001 (UI) + +## Documentation Prerequisites +- `docs/product-advisories/archived/22-Dec-2026 - Building a Trust Lattice for VEX Sources.md` +- `docs/modules/excititor/architecture.md` +- `docs/modules/excititor/scoring.md` + +--- + +## Tasks + +### T1: Default Trust Vectors + +**Assignee**: Excititor Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Define default trust vectors for the three major source classes. 
+ +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/TrustVector/DefaultTrustVectors.cs` + +**Acceptance Criteria**: +- [ ] `DefaultTrustVectors` static class with pre-defined vectors +- [ ] Vendor defaults: P=0.90, C=0.70, R=0.60 +- [ ] Distro defaults: P=0.80, C=0.85, R=0.60 +- [ ] Internal defaults: P=0.85, C=0.95, R=0.90 +- [ ] Hub defaults: P=0.60, C=0.50, R=0.40 +- [ ] Attestation defaults: P=0.95, C=0.80, R=0.70 + +**Implementation Spec**: +```csharp +public static class DefaultTrustVectors +{ + /// Software vendor (e.g., Microsoft, Red Hat as vendor). + public static TrustVector Vendor => new() + { + Provenance = 0.90, + Coverage = 0.70, // Often coarse-grained + Replayability = 0.60 + }; + + /// Distribution security team (e.g., Debian, Ubuntu, RHEL as distro). + public static TrustVector Distro => new() + { + Provenance = 0.80, + Coverage = 0.85, // Build-aware + Replayability = 0.60 + }; + + /// Internal organization source (org-signed, exact SBOM+reach). + public static TrustVector Internal => new() + { + Provenance = 0.85, + Coverage = 0.95, // Exact SBOM match + Replayability = 0.90 + }; + + /// Aggregator hubs (e.g., OSV, GitHub Advisory). + public static TrustVector Hub => new() + { + Provenance = 0.60, + Coverage = 0.50, + Replayability = 0.40 + }; + + /// OCI attestations. + public static TrustVector Attestation => new() + { + Provenance = 0.95, + Coverage = 0.80, + Replayability = 0.70 + }; + + public static TrustVector GetDefault(VexProviderKind kind) => kind switch + { + VexProviderKind.Vendor => Vendor, + VexProviderKind.Distro => Distro, + VexProviderKind.Platform => Internal, + VexProviderKind.Hub => Hub, + VexProviderKind.Attestation => Attestation, + _ => Hub // Conservative default + }; +} +``` + +--- + +### T2: Source Classification Service + +**Assignee**: Excititor Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Create service for auto-classifying VEX sources into source classes. 
+ +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/TrustVector/SourceClassificationService.cs` + +**Acceptance Criteria**: +- [ ] `ISourceClassificationService` interface +- [ ] Classify based on issuer domain, signature type, content format +- [ ] Known vendor/distro registry lookup +- [ ] Heuristic classification for unknown sources +- [ ] Override capability via configuration +- [ ] Audit trail of classification decisions + +**Interface Spec**: +```csharp +public sealed record SourceClassification +{ + public required VexProviderKind Kind { get; init; } + public required TrustVector DefaultVector { get; init; } + public required double Confidence { get; init; } + public required string Reason { get; init; } + public required bool IsOverride { get; init; } +} + +public interface ISourceClassificationService +{ + SourceClassification Classify( + string issuerId, + string? issuerDomain, + string? signatureType, + string contentFormat); + + void RegisterOverride(string issuerPattern, VexProviderKind kind); +} +``` + +--- + +### T3: Calibration Manifest Model + +**Assignee**: Excititor Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Create CalibrationManifest model for auditable trust weight tuning history. 
+ +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/Calibration/CalibrationManifest.cs` + +**Acceptance Criteria**: +- [ ] `CalibrationManifest` record with epoch, adjustments, signatures +- [ ] `CalibrationEpoch` with timestamp, baseline, and adjusted vectors +- [ ] `CalibrationAdjustment` with source, old/new values, reason +- [ ] Signed manifest for audit compliance +- [ ] Deterministic serialization + +**Domain Model Spec**: +```csharp +public sealed record CalibrationManifest +{ + public required string ManifestId { get; init; } + public required string Tenant { get; init; } + public required int EpochNumber { get; init; } + public required DateTimeOffset EpochStart { get; init; } + public required DateTimeOffset EpochEnd { get; init; } + public required ImmutableArray Adjustments { get; init; } + public required CalibrationMetrics Metrics { get; init; } + public required string ManifestDigest { get; init; } + public string? Signature { get; init; } +} + +public sealed record CalibrationAdjustment +{ + public required string SourceId { get; init; } + public required TrustVector OldVector { get; init; } + public required TrustVector NewVector { get; init; } + public required double Delta { get; init; } + public required string Reason { get; init; } + public required int SampleCount { get; init; } + public required double AccuracyBefore { get; init; } + public required double AccuracyAfter { get; init; } +} + +public sealed record CalibrationMetrics +{ + public required int TotalVerdicts { get; init; } + public required int CorrectVerdicts { get; init; } + public required int PostMortemReversals { get; init; } + public required double OverallAccuracy { get; init; } +} +``` + +--- + +### T4: Calibration Comparison Engine + +**Assignee**: Excititor Team +**Story Points**: 8 +**Status**: TODO + +**Description**: +Implement calibration comparison between VEX claims and post-mortem truth. 
+ +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/Calibration/CalibrationComparisonEngine.cs` + +**Acceptance Criteria**: +- [ ] Compare historical verdicts against post-mortem truth data +- [ ] Post-mortem sources: KEV confirmations, exploit publications, vendor patches +- [ ] Track prediction accuracy per source +- [ ] Identify sources with systematic bias +- [ ] Generate comparison report with confidence intervals + +**Interface Spec**: +```csharp +public sealed record ComparisonResult +{ + public required string SourceId { get; init; } + public required int TotalPredictions { get; init; } + public required int CorrectPredictions { get; init; } + public required int FalseNegatives { get; init; } // Said not_affected, was exploited + public required int FalsePositives { get; init; } // Said affected, never exploited + public required double Accuracy { get; init; } + public required double ConfidenceInterval { get; init; } + public required CalibrationBias? DetectedBias { get; init; } +} + +public enum CalibrationBias +{ + None, + OptimisticBias, // Tends to say not_affected when actually affected + PessimisticBias, // Tends to say affected when actually not_affected + ScopeBias // Coverage claims don't match actual scope +} + +public interface ICalibrationComparisonEngine +{ + Task> CompareAsync( + string tenant, + DateTimeOffset epochStart, + DateTimeOffset epochEnd, + CancellationToken ct = default); +} +``` + +--- + +### T5: Learning Rate Adjustment + +**Assignee**: Excititor Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Implement learning rate adjustment for trust vector calibration. 
+ +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/Calibration/TrustVectorCalibrator.cs` + +**Acceptance Criteria**: +- [ ] Configurable learning rate (default ±0.02/epoch) +- [ ] Bounded adjustments to prevent oscillation +- [ ] Separate learning rates for P/C/R components +- [ ] Momentum factor for stable convergence +- [ ] Roll back capability on accuracy regression + +**Implementation Spec**: +```csharp +public sealed class TrustVectorCalibrator +{ + public double LearningRate { get; init; } = 0.02; + public double MaxAdjustmentPerEpoch { get; init; } = 0.05; + public double MinValue { get; init; } = 0.10; + public double MaxValue { get; init; } = 1.00; + public double MomentumFactor { get; init; } = 0.9; + + public TrustVector Calibrate( + TrustVector current, + ComparisonResult comparison, + CalibrationBias? detectedBias) + { + if (comparison.Accuracy >= 0.95) + return current; // No adjustment needed + + var adjustment = CalculateAdjustment(comparison, detectedBias); + return ApplyAdjustment(current, adjustment); + } + + private CalibrationDelta CalculateAdjustment( + ComparisonResult comparison, + CalibrationBias? bias) + { + // Adjust based on bias type and accuracy + var delta = (1.0 - comparison.Accuracy) * LearningRate; + delta = Math.Min(delta, MaxAdjustmentPerEpoch); + + return bias switch + { + CalibrationBias.OptimisticBias => new(-delta, 0, 0), // Reduce P + CalibrationBias.PessimisticBias => new(+delta, 0, 0), // Increase P + CalibrationBias.ScopeBias => new(0, -delta, 0), // Reduce C + _ => new(-delta / 3, -delta / 3, -delta / 3) // Uniform + }; + } +} + +public sealed record CalibrationDelta(double DeltaP, double DeltaC, double DeltaR); +``` + +--- + +### T6: Calibration Service + +**Assignee**: Excititor Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Create orchestration service for running calibration epochs. 
+ +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/Calibration/TrustCalibrationService.cs` + +**Acceptance Criteria**: +- [ ] `ITrustCalibrationService` interface +- [ ] Run calibration epoch on schedule or demand +- [ ] Generate and sign CalibrationManifest +- [ ] Store calibration history +- [ ] Apply calibrated vectors to provider registry +- [ ] Rollback on accuracy regression + +**Interface Spec**: +```csharp +public interface ITrustCalibrationService +{ + Task RunEpochAsync( + string tenant, + DateTimeOffset? epochEnd = null, + CancellationToken ct = default); + + Task GetLatestAsync( + string tenant, + CancellationToken ct = default); + + Task ApplyCalibrationAsync( + string tenant, + string manifestId, + CancellationToken ct = default); + + Task RollbackAsync( + string tenant, + string manifestId, + CancellationToken ct = default); +} +``` + +--- + +### T7: PostgreSQL Schema for Calibration + +**Assignee**: Excititor Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Create database migration for calibration storage. 
+ +**Implementation Path**: `src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Migrations/` + +**Acceptance Criteria**: +- [ ] `excititor.calibration_manifests` table +- [ ] `excititor.calibration_adjustments` table +- [ ] `excititor.source_trust_vectors` table (current active vectors) +- [ ] Indexes for tenant + epoch queries +- [ ] Foreign key to source registry + +**Schema Spec**: +```sql +CREATE TABLE excititor.calibration_manifests ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + manifest_id TEXT NOT NULL UNIQUE, + tenant TEXT NOT NULL, + epoch_number INTEGER NOT NULL, + epoch_start TIMESTAMPTZ NOT NULL, + epoch_end TIMESTAMPTZ NOT NULL, + metrics_json JSONB NOT NULL, + manifest_digest TEXT NOT NULL, + signature TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + applied_at TIMESTAMPTZ, + + UNIQUE (tenant, epoch_number) +); + +CREATE TABLE excititor.calibration_adjustments ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + manifest_id TEXT NOT NULL REFERENCES excititor.calibration_manifests(manifest_id), + source_id TEXT NOT NULL, + old_provenance DOUBLE PRECISION NOT NULL, + old_coverage DOUBLE PRECISION NOT NULL, + old_replayability DOUBLE PRECISION NOT NULL, + new_provenance DOUBLE PRECISION NOT NULL, + new_coverage DOUBLE PRECISION NOT NULL, + new_replayability DOUBLE PRECISION NOT NULL, + delta DOUBLE PRECISION NOT NULL, + reason TEXT NOT NULL, + sample_count INTEGER NOT NULL, + accuracy_before DOUBLE PRECISION NOT NULL, + accuracy_after DOUBLE PRECISION NOT NULL +); + +CREATE TABLE excititor.source_trust_vectors ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + tenant TEXT NOT NULL, + source_id TEXT NOT NULL, + provenance DOUBLE PRECISION NOT NULL, + coverage DOUBLE PRECISION NOT NULL, + replayability DOUBLE PRECISION NOT NULL, + calibration_manifest_id TEXT REFERENCES excititor.calibration_manifests(manifest_id), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + UNIQUE (tenant, source_id) +); + +CREATE INDEX 
idx_calibration_tenant_epoch ON excititor.calibration_manifests(tenant, epoch_number DESC); +CREATE INDEX idx_calibration_adjustments_manifest ON excititor.calibration_adjustments(manifest_id); +CREATE INDEX idx_source_vectors_tenant ON excititor.source_trust_vectors(tenant); +``` + +--- + +### T8: Configuration for Calibration + +**Assignee**: Excititor Team +**Story Points**: 2 +**Status**: TODO + +**Description**: +Create configuration schema for calibration settings. + +**Implementation Path**: `etc/excititor-calibration.yaml.sample` + +**Acceptance Criteria**: +- [ ] YAML configuration for calibration policy +- [ ] Epoch duration settings +- [ ] Learning rate configuration +- [ ] Rollback thresholds +- [ ] Post-mortem source configuration + +**Sample Configuration**: +```yaml +# etc/excititor-calibration.yaml.sample +calibration: + enabled: true + schedule: + epochDuration: "30d" # 30-day calibration epochs + runAt: "02:00" # Run at 2 AM UTC + learning: + rate: 0.02 + maxAdjustmentPerEpoch: 0.05 + momentumFactor: 0.9 + rollback: + accuracyRegressionThreshold: 0.05 + autoRollbackEnabled: true + postMortem: + sources: + - type: kev + weight: 1.0 + - type: exploit-db + weight: 0.8 + - type: vendor-patch + weight: 0.9 +``` + +--- + +### T9: Unit Tests + +**Assignee**: Excititor Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Comprehensive unit tests for calibration system. 
+ +**Implementation Path**: `src/Excititor/__Tests/StellaOps.Excititor.Core.Tests/Calibration/` + +**Acceptance Criteria**: +- [ ] Default trust vector tests +- [ ] Source classification tests +- [ ] Calibration comparison tests +- [ ] Learning rate adjustment tests (convergence, bounds) +- [ ] Rollback tests +- [ ] Determinism tests (1000 iterations) +- [ ] Integration tests with PostgreSQL +- [ ] Test coverage ≥85% + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Excititor Team | Default Trust Vectors | +| 2 | T2 | TODO | T1 | Excititor Team | Source Classification Service | +| 3 | T3 | TODO | — | Excititor Team | Calibration Manifest Model | +| 4 | T4 | TODO | T3 | Excititor Team | Calibration Comparison Engine | +| 5 | T5 | TODO | T4 | Excititor Team | Learning Rate Adjustment | +| 6 | T6 | TODO | T4, T5 | Excititor Team | Calibration Service | +| 7 | T7 | TODO | T3 | Excititor Team | PostgreSQL Schema | +| 8 | T8 | TODO | T6 | Excititor Team | Configuration | +| 9 | T9 | TODO | T1-T8 | Excititor Team | Unit Tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint file created from advisory processing. 
| Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Default vector values | Decision | Excititor Team | Using advisory values: Vendor(0.9,0.7,0.6), Distro(0.8,0.85,0.6), Internal(0.85,0.95,0.9) | +| Learning rate | Decision | Excititor Team | Using ±0.02/epoch per advisory | +| Post-mortem sources | Risk | Excititor Team | Need reliable ground truth data for calibration | +| Calibration frequency | Decision | Excititor Team | 30-day epochs by default | + +--- + +**Sprint Status**: TODO (0/9 tasks complete) diff --git a/docs/implplan/SPRINT_7100_0003_0001_ui_trust_algebra.md b/docs/implplan/SPRINT_7100_0003_0001_ui_trust_algebra.md new file mode 100644 index 000000000..63c09c17f --- /dev/null +++ b/docs/implplan/SPRINT_7100_0003_0001_ui_trust_algebra.md @@ -0,0 +1,365 @@ +# Sprint 7100.0003.0001 — UI Trust Algebra Panel + +## Topic & Scope +- Implement the "Trust Algebra" visualization panel for explaining VEX verdicts. +- Create confidence meter, P/C/R stacked bars, and claim comparison table. +- Add replay button for verdict reproduction. +- **Working directory:** `src/Web/StellaOps.Web/src/app/features/vulnerabilities/components/trust-algebra/` + +## Dependencies & Concurrency +- **Upstream**: Sprint 7100.0001.0002 (Verdict Manifest), Sprint 7100.0002.0001 (Policy Gates) +- **Downstream**: Sprint 7100.0003.0002 (Integration) +- **Safe to parallelize with**: Sprint 7100.0002.0002 (Calibration) + +## Documentation Prerequisites +- `docs/product-advisories/archived/22-Dec-2026 - Building a Trust Lattice for VEX Sources.md` +- Angular v17 best practices +- Existing vulnerability detail views in `src/Web/StellaOps.Web/` + +--- + +## Tasks + +### T1: TrustAlgebraComponent + +**Assignee**: UI Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Create the main Trust Algebra Angular component for verdict explanation. 
+ +**Implementation Path**: `src/Web/StellaOps.Web/src/app/features/vulnerabilities/components/trust-algebra/trust-algebra.component.ts` + +**Acceptance Criteria**: +- [ ] Angular standalone component +- [ ] Input: VerdictManifest from API +- [ ] Header: CVE x Asset digest → final status + confidence meter +- [ ] Expandable/collapsible sections for detailed breakdown +- [ ] Integration with existing vulnerability detail view +- [ ] Responsive design for different screen sizes + +**Component Structure**: +```typescript +@Component({ + selector: 'app-trust-algebra', + standalone: true, + imports: [ + CommonModule, + ConfidenceMeterComponent, + TrustVectorBarsComponent, + ClaimTableComponent, + PolicyChipsComponent, + ReplayButtonComponent + ], + templateUrl: './trust-algebra.component.html', + styleUrls: ['./trust-algebra.component.scss'] +}) +export class TrustAlgebraComponent { + @Input() verdictManifest!: VerdictManifest; + @Input() isReplayMode = false; + + showConflicts = false; + expandedSections: Set = new Set(['summary']); + + toggleSection(section: string): void; + toggleConflicts(): void; +} +``` + +--- + +### T2: Confidence Meter Visualization + +**Assignee**: UI Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Create confidence meter visualization showing 0-1 scale with color coding. 
+ +**Implementation Path**: `src/Web/StellaOps.Web/src/app/features/vulnerabilities/components/trust-algebra/confidence-meter.component.ts` + +**Acceptance Criteria**: +- [ ] Circular or linear meter showing confidence 0-1 +- [ ] Color gradient: red (0-0.4) → yellow (0.4-0.7) → green (0.7-1.0) +- [ ] Numeric display with 2 decimal precision +- [ ] Threshold markers for policy gates (e.g., prod minimum at 0.75) +- [ ] Animation on value change +- [ ] Accessible: ARIA labels, keyboard navigation + +**Visual Spec**: +``` +┌─────────────────────────────────────┐ +│ ┌───────────────────────────┐ │ +│ │ ◐ 0.82 │ │ +│ │ CONFIDENCE │ │ +│ └───────────────────────────┘ │ +│ ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓░░░░ │0.75 │ +│ ↑ min-prod │ +└─────────────────────────────────────┘ +``` + +--- + +### T3: P/C/R Stacked Bar Chart + +**Assignee**: UI Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Create stacked bar visualization for trust vector components. + +**Implementation Path**: `src/Web/StellaOps.Web/src/app/features/vulnerabilities/components/trust-algebra/trust-vector-bars.component.ts` + +**Acceptance Criteria**: +- [ ] Horizontal stacked bar showing P/C/R contributions +- [ ] Color-coded segments: P=blue, C=green, R=purple +- [ ] Hover/click for detailed breakdown +- [ ] Show weighted vs. raw values +- [ ] Legend with component labels +- [ ] Responsive sizing + +**Visual Spec**: +``` +┌─────────────────────────────────────┐ +│ Trust Vector Breakdown │ +│ │ +│ ████████████▓▓▓▓▓▓▓▓░░░░░░ = 0.78 │ +│ └──P:0.41──┘└─C:0.26─┘└R:0.11┘ │ +│ │ +│ ○ Provenance (wP=0.45) 0.90 │ +│ ○ Coverage (wC=0.35) 0.75 │ +│ ○ Replayability (wR=0.20) 0.55 │ +└─────────────────────────────────────┘ +``` + +--- + +### T4: Claim Comparison Table + +**Assignee**: UI Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Create sortable table showing all claims with scores and conflict highlighting. 
+ +**Implementation Path**: `src/Web/StellaOps.Web/src/app/features/vulnerabilities/components/trust-algebra/claim-table.component.ts` + +**Acceptance Criteria**: +- [ ] Table columns: Source, Status, Reason, P/C/R, Strength, Freshness, ClaimScore +- [ ] Sortable by any column +- [ ] Winning claim highlighted +- [ ] Conflict toggle: show/hide conflicting claims +- [ ] Row expansion for full claim details +- [ ] Export to CSV/JSON + +**Visual Spec**: +``` +┌──────────────────────────────────────────────────────────────────────┐ +│ VEX Claims (3) [Toggle Conflicts ☐] │ +├──────────┬─────────────┬──────────────┬─────┬─────┬─────┬───────────┤ +│ Source │ Status │ Reason │ P │ C │ R │ ClaimScore│ +├──────────┼─────────────┼──────────────┼─────┼─────┼─────┼───────────┤ +│ ★redhat │ not_affected│ config_off │ 0.90│ 0.85│ 0.60│ 0.82 ▲ │ +│ ubuntu │ not_affected│ not_present │ 0.80│ 0.75│ 0.50│ 0.71 │ +│ ⚠internal│ affected │ under_invest │ 0.85│ 0.95│ 0.90│ 0.58* │ +└──────────┴─────────────┴──────────────┴─────┴─────┴─────┴───────────┘ +★ = Winner ⚠ = Conflict * = Penalty Applied +``` + +--- + +### T5: Policy Chips Display + +**Assignee**: UI Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Create chip/tag display showing which policy gates were applied. + +**Implementation Path**: `src/Web/StellaOps.Web/src/app/features/vulnerabilities/components/trust-algebra/policy-chips.component.ts` + +**Acceptance Criteria**: +- [ ] Chips for each applied gate (MinConfidence, SourceQuota, etc.) +- [ ] Color: green=passed, red=failed, gray=not applicable +- [ ] Click to open policy YAML/JSON viewer (read-only in replay mode) +- [ ] Tooltip with gate configuration +- [ ] Show policy_hash and lattice_version + +**Visual Spec**: +``` +┌─────────────────────────────────────────────────────────────┐ +│ Policy Gates │ +│ │ +│ [✓ MinConfidence] [✓ SourceQuota] [— Reachability] [✓ PASS]│ +│ │ +│ Policy: sha256:abc123... 
Lattice: v1.2.0 │ +│ [View Policy YAML] │ +└─────────────────────────────────────────────────────────────┘ +``` + +--- + +### T6: Replay Button Component + +**Assignee**: UI Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +Create "Reproduce Verdict" button that triggers replay verification. + +**Implementation Path**: `src/Web/StellaOps.Web/src/app/features/vulnerabilities/components/trust-algebra/replay-button.component.ts` + +**Acceptance Criteria**: +- [ ] Button triggers replay verification API call +- [ ] Loading state during verification +- [ ] Success: show checkmark + "Verdict Reproduced" +- [ ] Failure: show diff panel with discrepancies +- [ ] Download signed VerdictManifest option +- [ ] Copy manifest ID to clipboard + +**Visual Spec**: +``` +┌─────────────────────────────────────┐ +│ [🔄 Reproduce Verdict] [📋 Copy ID]│ +│ │ +│ After click (success): │ +│ [✓ Verdict Reproduced] [⬇ Download]│ +│ │ +│ After click (failure): │ +│ [✗ Mismatch Detected] │ +│ ┌─────────────────────────────────┐ │ +│ │ Differences: │ │ +│ │ - confidence: 0.82 → 0.81 │ │ +│ │ - freshness: 0.95 → 0.94 │ │ +│ └─────────────────────────────────┘ │ +└─────────────────────────────────────┘ +``` + +--- + +### T7: Trust Algebra API Service + +**Assignee**: UI Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Create Angular service for Trust Algebra API calls. 
+ +**Implementation Path**: `src/Web/StellaOps.Web/src/app/features/vulnerabilities/services/trust-algebra.service.ts` + +**Acceptance Criteria**: +- [ ] `TrustAlgebraService` for API integration +- [ ] Get verdict manifest by ID +- [ ] Get verdict by (asset, CVE) +- [ ] Trigger replay verification +- [ ] Download signed manifest +- [ ] Error handling with user-friendly messages + +**Service Spec**: +```typescript +@Injectable({ providedIn: 'root' }) +export class TrustAlgebraService { + constructor(private http: HttpClient) {} + + getVerdictManifest(manifestId: string): Observable<VerdictManifest>; + + getVerdictByScope( + assetDigest: string, + vulnerabilityId: string + ): Observable<VerdictManifest>; + + replayVerdict(manifestId: string): Observable<ReplayResult>; + + downloadManifest(manifestId: string): Observable<Blob>; +} +``` + +--- + +### T8: Accessibility & Keyboard Navigation + +**Assignee**: UI Team +**Story Points**: 3 +**Status**: TODO + +**Description**: +Ensure Trust Algebra panel meets accessibility standards. + +**Implementation Path**: All components in `trust-algebra/` + +**Acceptance Criteria**: +- [ ] WCAG 2.1 AA compliance +- [ ] Keyboard navigation for all interactive elements +- [ ] Screen reader support with ARIA labels +- [ ] High contrast mode support +- [ ] Focus indicators +- [ ] Color-blind friendly palette options + +--- + +### T9: E2E Tests + +**Assignee**: UI Team +**Story Points**: 5 +**Status**: TODO + +**Description**: +End-to-end tests for Trust Algebra panel. 
+ +**Implementation Path**: `src/Web/StellaOps.Web/e2e/trust-algebra/` + +**Acceptance Criteria**: +- [ ] Component rendering tests +- [ ] Confidence meter accuracy tests +- [ ] Claim table sorting/filtering tests +- [ ] Replay button flow tests +- [ ] Policy chips interaction tests +- [ ] Accessibility tests (axe-core) +- [ ] Responsive design tests +- [ ] Cross-browser tests (Chrome, Firefox, Safari) + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | UI Team | TrustAlgebraComponent | +| 2 | T2 | TODO | T1 | UI Team | Confidence Meter | +| 3 | T3 | TODO | T1 | UI Team | P/C/R Stacked Bars | +| 4 | T4 | TODO | T1 | UI Team | Claim Comparison Table | +| 5 | T5 | TODO | T1 | UI Team | Policy Chips Display | +| 6 | T6 | TODO | T1, T7 | UI Team | Replay Button | +| 7 | T7 | TODO | — | UI Team | API Service | +| 8 | T8 | TODO | T1-T6 | UI Team | Accessibility | +| 9 | T9 | TODO | T1-T8 | UI Team | E2E Tests | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint file created from advisory processing. 
| Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Angular standalone | Decision | UI Team | Using Angular 17 standalone components | +| Chart library | Decision | UI Team | Consider ngx-charts or custom SVG for visualizations | +| Real-time updates | Risk | UI Team | May need WebSocket for live verdict updates | +| UX wireframes | Dependency | Product | Wireframes needed before implementation | + +--- + +**Sprint Status**: TODO (0/9 tasks complete) diff --git a/docs/implplan/SPRINT_7100_0003_0002_integration_documentation.md b/docs/implplan/SPRINT_7100_0003_0002_integration_documentation.md new file mode 100644 index 000000000..f5fcaeffd --- /dev/null +++ b/docs/implplan/SPRINT_7100_0003_0002_integration_documentation.md @@ -0,0 +1,338 @@ +# Sprint 7100.0003.0002 — Integration & Documentation + +## Topic & Scope +- End-to-end integration of all Trust Lattice components. +- Create comprehensive documentation and specifications. +- Update sample configuration files. +- **Working directory:** `docs/` and cross-module integration + +## Dependencies & Concurrency +- **Upstream**: All prior sprints (7100.0001.0001 through 7100.0003.0001) +- **Downstream**: None (final sprint) +- **Safe to parallelize with**: None (integration sprint) + +## Documentation Prerequisites +- All prior sprint deliverables completed +- `docs/product-advisories/archived/22-Dec-2026 - Building a Trust Lattice for VEX Sources.md` + +--- + +## Tasks + +### T1: Update Excititor Architecture Documentation + +**Assignee**: Docs Guild +**Story Points**: 3 +**Status**: TODO + +**Description**: +Update Excititor architecture documentation to include trust lattice. 
+ +**Implementation Path**: `docs/modules/excititor/architecture.md` + +**Acceptance Criteria**: +- [ ] Add Trust Lattice section to architecture overview +- [ ] Document TrustVector model and scoring +- [ ] Document ClaimScore calculation pipeline +- [ ] Update data flow diagrams +- [ ] Cross-reference to trust-lattice.md specification + +--- + +### T2: Create Trust Lattice Specification + +**Assignee**: Docs Guild +**Story Points**: 8 +**Status**: TODO + +**Description**: +Create comprehensive trust lattice specification document. + +**Implementation Path**: `docs/modules/excititor/trust-lattice.md` + +**Acceptance Criteria**: +- [ ] Trust vector model (P/C/R components) +- [ ] Scoring formulas with examples +- [ ] Claim strength and freshness calculations +- [ ] Merge algorithm specification +- [ ] Conflict handling rules +- [ ] Policy gates reference +- [ ] Configuration reference +- [ ] API endpoint reference + +**Document Outline**: +```markdown +# VEX Trust Lattice Specification + +## 1. Overview +## 2. Trust Vector Model + 2.1 Provenance (P) + 2.2 Coverage (C) + 2.3 Replayability (R) + 2.4 Weight Configuration +## 3. Claim Scoring + 3.1 Base Trust Calculation + 3.2 Claim Strength Multipliers + 3.3 Freshness Decay + 3.4 ClaimScore Formula +## 4. Lattice Merge Algorithm + 4.1 Partial Ordering + 4.2 Conflict Detection + 4.3 Winner Selection + 4.4 Audit Trail Generation +## 5. Policy Gates + 5.1 MinimumConfidenceGate + 5.2 UnknownsBudgetGate + 5.3 SourceQuotaGate + 5.4 ReachabilityRequirementGate +## 6. Deterministic Replay + 6.1 Input Pinning + 6.2 Verdict Manifest + 6.3 Replay Verification +## 7. Configuration Reference +## 8. API Reference +## 9. Examples +``` + +--- + +### T3: Update Policy Architecture Documentation + +**Assignee**: Docs Guild +**Story Points**: 3 +**Status**: TODO + +**Description**: +Update Policy module documentation with gate specifications. 
+ +**Implementation Path**: `docs/modules/policy/architecture.md` + +**Acceptance Criteria**: +- [ ] Add Policy Gates section +- [ ] Document gate interface and registry +- [ ] Document gate configuration schema +- [ ] Include decision flow diagrams +- [ ] Cross-reference to trust-lattice.md + +--- + +### T4: Create Verdict Manifest Specification + +**Assignee**: Docs Guild +**Story Points**: 5 +**Status**: TODO + +**Description**: +Create specification for verdict manifest format and signing. + +**Implementation Path**: `docs/modules/authority/verdict-manifest.md` + +**Acceptance Criteria**: +- [ ] Verdict manifest schema +- [ ] Input pinning requirements +- [ ] DSSE signing process +- [ ] Storage and indexing +- [ ] Replay verification protocol +- [ ] JSON Schema definition + +**Document Outline**: +```markdown +# Verdict Manifest Specification + +## 1. Overview +## 2. Manifest Schema + 2.1 Identity Fields + 2.2 Input Pinning + 2.3 Verdict Result + 2.4 Policy Context +## 3. Deterministic Serialization + 3.1 Canonical JSON + 3.2 Digest Computation +## 4. Signing + 4.1 DSSE Envelope + 4.2 Predicate Type + 4.3 Rekor Integration +## 5. Storage + 5.1 PostgreSQL Schema + 5.2 Indexing Strategy +## 6. Replay Verification + 6.1 Verification Protocol + 6.2 Failure Handling +## 7. API Reference +## 8. JSON Schema +``` + +--- + +### T5: Create JSON Schemas + +**Assignee**: Docs Guild +**Story Points**: 3 +**Status**: TODO + +**Description**: +Create JSON Schemas for trust lattice data structures. 
+ +**Implementation Path**: `docs/attestor/schemas/` + +**Acceptance Criteria**: +- [ ] `verdict-manifest.schema.json` +- [ ] `calibration-manifest.schema.json` +- [ ] `trust-vector.schema.json` +- [ ] Schema validation tests +- [ ] Integration with OpenAPI specs + +**Schema Files**: +``` +docs/attestor/schemas/ +├── verdict-manifest.schema.json +├── calibration-manifest.schema.json +├── trust-vector.schema.json +└── claim-score.schema.json +``` + +--- + +### T6: Update API Reference + +**Assignee**: Docs Guild +**Story Points**: 3 +**Status**: TODO + +**Description**: +Update API reference documentation with new endpoints. + +**Implementation Path**: `docs/09_API_CLI_REFERENCE.md` and OpenAPI specs + +**Acceptance Criteria**: +- [ ] Document verdict manifest endpoints +- [ ] Document replay verification endpoint +- [ ] Document calibration endpoints +- [ ] Update OpenAPI specifications +- [ ] Add example requests/responses + +--- + +### T7: Create Sample Configuration Files + +**Assignee**: Docs Guild +**Story Points**: 2 +**Status**: TODO + +**Description**: +Create sample configuration files for trust lattice. + +**Implementation Path**: `etc/` + +**Acceptance Criteria**: +- [ ] `etc/trust-lattice.yaml.sample` - Trust vector defaults and weights +- [ ] `etc/policy-gates.yaml.sample` - Gate configuration +- [ ] `etc/excititor-calibration.yaml.sample` - Calibration settings +- [ ] Comments explaining each setting +- [ ] Environment variable overrides documented + +--- + +### T8: End-to-End Integration Tests + +**Assignee**: QA Team +**Story Points**: 8 +**Status**: TODO + +**Description**: +Create comprehensive E2E tests for trust lattice flow. 
+ +**Implementation Path**: `src/Scanner/__Tests/StellaOps.Scanner.Integration.Tests/TrustLattice/` + +**Acceptance Criteria**: +- [ ] Full flow: VEX ingest → score → merge → verdict → sign → replay +- [ ] Multi-source conflict scenarios +- [ ] Policy gate triggering scenarios +- [ ] Calibration epoch simulation +- [ ] UI integration verification +- [ ] Air-gap bundle verification +- [ ] Performance benchmarks + +**Test Scenarios**: +``` +1. Single source, high confidence → PASS +2. Multiple agreeing sources → PASS with corroboration boost +3. Conflicting sources → Conflict penalty applied +4. Below minimum confidence → FAIL gate +5. Source quota exceeded → FAIL gate (no corroboration) +6. Critical CVE without reachability → FAIL gate +7. Replay verification → Success (identical) +8. Replay with changed inputs → Failure (diff reported) +9. Calibration epoch → Adjustments applied correctly +``` + +--- + +### T9: Training and Handoff Documentation + +**Assignee**: Docs Guild +**Story Points**: 3 +**Status**: TODO + +**Description**: +Create training materials for support and operations teams. 
+ +**Implementation Path**: `docs/operations/` and `docs/training/` + +**Acceptance Criteria**: +- [ ] Operations runbook: `docs/operations/trust-lattice-runbook.md` +- [ ] Troubleshooting guide: `docs/operations/trust-lattice-troubleshooting.md` +- [ ] Support FAQ +- [ ] Architecture overview for new team members +- [ ] Claims index update: TRUST-001, VERDICT-001, CALIBRATION-001 + +--- + +## Delivery Tracker + +| # | Task ID | Status | Dependency | Owners | Task Definition | +|---|---------|--------|------------|--------|-----------------| +| 1 | T1 | TODO | — | Docs Guild | Excititor Architecture Update | +| 2 | T2 | TODO | T1 | Docs Guild | Trust Lattice Specification | +| 3 | T3 | TODO | — | Docs Guild | Policy Architecture Update | +| 4 | T4 | TODO | — | Docs Guild | Verdict Manifest Specification | +| 5 | T5 | TODO | T2, T4 | Docs Guild | JSON Schemas | +| 6 | T6 | TODO | T2, T4 | Docs Guild | API Reference Update | +| 7 | T7 | TODO | T2 | Docs Guild | Sample Configuration Files | +| 8 | T8 | TODO | All prior | QA Team | E2E Integration Tests | +| 9 | T9 | TODO | T1-T7 | Docs Guild | Training & Handoff | + +--- + +## Execution Log + +| Date (UTC) | Update | Owner | +|------------|--------|-------| +| 2025-12-22 | Sprint file created from advisory processing. 
| Agent | + +--- + +## Decisions & Risks + +| Item | Type | Owner | Notes | +|------|------|-------|-------| +| Documentation format | Decision | Docs Guild | Using existing markdown format | +| Schema validation | Decision | Docs Guild | Using JSON Schema draft 2020-12 | +| Training timing | Risk | Docs Guild | Training should happen before GA release | +| E2E test infrastructure | Dependency | QA Team | Requires all modules deployed together | + +--- + +## Definition of Done + +Before marking this sprint complete: +- [ ] All documentation reviewed by 2+ stakeholders +- [ ] All JSON schemas validate against sample data +- [ ] E2E tests pass in CI pipeline +- [ ] Sample configs tested in development environment +- [ ] Training materials reviewed by support team +- [ ] Advisory archived to `docs/product-advisories/archived/` + +--- + +**Sprint Status**: TODO (0/9 tasks complete) diff --git a/docs/implplan/SPRINT_7100_SUMMARY.md b/docs/implplan/SPRINT_7100_SUMMARY.md new file mode 100644 index 000000000..09cfdcd3f --- /dev/null +++ b/docs/implplan/SPRINT_7100_SUMMARY.md @@ -0,0 +1,268 @@ +# SPRINT_7100 Summary — VEX Trust Lattice + +**Epic**: VEX Trust Lattice for Explainable, Replayable Decisioning +**Total Duration**: 12 weeks (6 sprints) +**Status**: TODO +**Source Advisory**: `docs/product-advisories/archived/22-Dec-2026 - Building a Trust Lattice for VEX Sources.md` + +--- + +## Executive Summary + +Implement a sophisticated 3-component trust vector model (Provenance, Coverage, Replayability) for VEX sources, enabling explainable and deterministically replayable vulnerability decisioning. This replaces the current single-weight trust model with a mathematically rigorous lattice-based approach that produces signed, auditable verdict manifests. + +### Key Features + +1. **Trust Vector (P/C/R)**: 3-component scoring per VEX source +2. **Claim Scoring**: `ClaimScore = BaseTrust(S) * M * F` with strength and freshness multipliers +3. 
**Policy Gates**: Minimum confidence, unknowns budget, source quotas, reachability requirements +4. **Verdict Manifest**: DSSE-signed, indexed, replayable verdicts +5. **Trust Algebra UI**: Visual explanation panel for trust decisions +6. **Calibration**: Rolling trust weight adjustment based on post-mortem truth + +--- + +## Sprint Overview + +| Sprint ID | Topic | Duration | Status | Key Deliverables | +|-----------|-------|----------|--------|------------------| +| **7100.0001.0001** | Trust Vector Foundation | 2 weeks | TODO | TrustVector, ClaimStrength, FreshnessCalculator, ClaimScoreCalculator | +| **7100.0001.0002** | Verdict Manifest & Replay | 2 weeks | TODO | VerdictManifest, DSSE signing, PostgreSQL store, replay verification | +| **7100.0002.0001** | Policy Gates & Lattice Merge | 2 weeks | TODO | ClaimScoreMerger, MinimumConfidenceGate, SourceQuotaGate, UnknownsBudgetGate | +| **7100.0002.0002** | Source Defaults & Calibration | 2 weeks | TODO | DefaultTrustVectors, CalibrationManifest, TrustCalibrationService | +| **7100.0003.0001** | UI Trust Algebra Panel | 2 weeks | TODO | TrustAlgebraComponent, confidence meter, P/C/R bars, claim table | +| **7100.0003.0002** | Integration & Documentation | 2 weeks | TODO | Architecture docs, trust-lattice.md, verdict-manifest.md, API reference | + +--- + +## Gap Analysis (Advisory vs. 
Current Implementation) + +| Advisory Feature | Current State | Gap Severity | Sprint | +|-----------------|---------------|--------------|--------| +| 3-Component Trust Vector (P/C/R) | Single weight per provider | MAJOR | 7100.0001.0001 | +| Claim Strength Multiplier (M) | Status-based adjustments only | MEDIUM | 7100.0001.0001 | +| Freshness Decay (F) | Fixed staleness penalties (-5%/-10%) | MEDIUM | 7100.0001.0001 | +| ClaimScore = BaseTrust*M*F | Not implemented | MAJOR | 7100.0001.0001 | +| Conflict Mode + Replay Proof | K4 conflict detection, no down-weight | MINOR | 7100.0002.0001 | +| Verdict Manifest (DSSE-signed) | Not implemented | MAJOR | 7100.0001.0002 | +| Policy Gates (min confidence, quotas) | Partial (jurisdiction rules) | MEDIUM | 7100.0002.0001 | +| Deterministic Replay Pinning | Determinism prioritized, no manifest | MEDIUM | 7100.0001.0002 | +| UI Trust Algebra Panel | Not implemented | MEDIUM | 7100.0003.0001 | +| Calibration Manifest | Not implemented | MINOR | 7100.0002.0002 | + +--- + +## Batch A: Core Models (Sprints 7100.0001.0001–0002) + +### Sprint 7100.0001.0001: Trust Vector Foundation +**Owner**: Excititor Team + Policy Team +**Working Directory**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/` +**Deliverables**: +- [ ] `TrustVector` record with P/C/R components and configurable weights +- [ ] `ClaimStrength` enum with evidence-based multipliers (0.40–1.00) +- [ ] `FreshnessCalculator` with configurable half-life decay (default 90 days) +- [ ] `ClaimScoreCalculator` implementing `BaseTrust(S) * M * F` +- [ ] Extended `VexProvider` with TrustVector configuration +- [ ] Unit tests for scoring calculations (determinism validation) + +**Tests**: ≥90% coverage, determinism assertions + +--- + +### Sprint 7100.0001.0002: Verdict Manifest & Replay +**Owner**: Authority Team + Excititor Team +**Working Directory**: `src/Authority/__Libraries/StellaOps.Authority.Core/` +**Deliverables**: +- [ ] `VerdictManifest` model with inputs 
pinning +- [ ] `VerdictManifestBuilder` for deterministic assembly +- [ ] DSSE signing for verdict manifests via Signer module +- [ ] `IVerdictManifestStore` interface and PostgreSQL implementation +- [ ] Indexing by (asset_digest, CVE, policy_hash, lattice_version) +- [ ] Replay verification endpoint +- [ ] Integration tests with determinism assertions + +**Tests**: DSSE signing tests, replay verification tests + +--- + +## Batch B: Policy Integration (Sprints 7100.0002.0001–0002) + +### Sprint 7100.0002.0001: Policy Gates & Lattice Merge +**Owner**: Policy Team +**Working Directory**: `src/Policy/__Libraries/StellaOps.Policy/` +**Deliverables**: +- [ ] Extend `TrustLatticeEngine` with ClaimScore-based merge +- [ ] Implement conflict penalty (delta=0.25) on contradictory claims +- [ ] `MinimumConfidenceGate` policy hook (prod requires ≥0.75) +- [ ] `UnknownsBudgetGate` policy hook (fail if unknowns > N) +- [ ] `SourceQuotaGate` (cap influence at 60% unless corroborated) +- [ ] `ReachabilityRequirementGate` for criticals +- [ ] Policy configuration schema (YAML/JSON) +- [ ] Unit tests for all gates with edge cases + +**Tests**: Gate edge cases, conflict scenarios + +--- + +### Sprint 7100.0002.0002: Source Defaults & Calibration +**Owner**: Excititor Team +**Working Directory**: `src/Excititor/__Libraries/StellaOps.Excititor.Core/` +**Deliverables**: +- [ ] Default trust vectors for Vendor/Distro/Internal source classes +- [ ] `SourceClassification` service for auto-classification +- [ ] `CalibrationManifest` model for tuning history +- [ ] Calibration comparison (claim vs. 
post-mortem truth) +- [ ] Learning rate adjustment (±0.02/epoch) +- [ ] Configuration for calibration policy + +**Tests**: Default vector tests, calibration accuracy tests + +--- + +## Batch C: UI & Integration (Sprints 7100.0003.0001–0002) + +### Sprint 7100.0003.0001: UI Trust Algebra Panel +**Owner**: UI Team +**Working Directory**: `src/Web/StellaOps.Web/` +**Deliverables**: +- [ ] `TrustAlgebraComponent` Angular component +- [ ] Confidence meter visualization (0–1 scale) +- [ ] P/C/R stacked bar chart for winning claim +- [ ] Claim comparison table with conflict toggle +- [ ] Policy chips display with YAML viewer (read-only in replay) +- [ ] "Reproduce verdict" replay button +- [ ] E2E tests for trust algebra panel + +**Tests**: Component tests, accessibility tests + +--- + +### Sprint 7100.0003.0002: Integration & Documentation +**Owner**: Docs Guild + All Teams +**Working Directory**: `docs/` and cross-module integration +**Deliverables**: +- [ ] Update `docs/modules/excititor/architecture.md` with trust lattice +- [ ] Create `docs/modules/excititor/trust-lattice.md` specification +- [ ] Update `docs/modules/policy/architecture.md` with gates +- [ ] Create `docs/modules/authority/verdict-manifest.md` specification +- [ ] Add JSON Schema for VerdictManifest to `docs/attestor/` +- [ ] Update API reference with verdict manifest endpoints +- [ ] Integration tests: end-to-end trust lattice flow +- [ ] Update `etc/*.yaml.sample` configuration files + +**Tests**: Documentation review, E2E integration tests + +--- + +## Dependencies + +```mermaid +graph TD + A[7100.0001.0001 Trust Vector] --> B[7100.0001.0002 Verdict Manifest] + A --> C[7100.0002.0001 Policy Gates] + B --> D[7100.0002.0002 Calibration] + C --> D + B --> E[7100.0003.0001 UI Panel] + C --> E + D --> F[7100.0003.0002 Integration] + E --> F +``` + +--- + +## Technical Design + +### Trust Vector Formula + +``` +BaseTrust(S) = wP*P + wC*C + wR*R + +Where: +- P = Provenance score [0..1] +- C = Coverage 
score [0..1] +- R = Replayability score [0..1] +- wP = 0.45 (default) +- wC = 0.35 (default) +- wR = 0.20 (default) +``` + +### Claim Score Formula + +``` +ClaimScore = BaseTrust(S) * M * F + +Where: +- M = Claim strength multiplier [0.40..1.00] +- F = Freshness decay = max(exp(-ln(2) * age_days / half_life), floor) +- half_life = 90 days (default) +- floor = 0.35 (minimum freshness) +``` + +### Default Trust Vectors by Source Class + +| Source Class | P | C | R | +|-------------|---|---|---| +| Vendor | 0.90 | 0.70 | 0.60 | +| Distro | 0.80 | 0.85 | 0.60 | +| Internal | 0.85 | 0.95 | 0.90 | + +### Claim Strength Values + +| Evidence Type | Strength (M) | +|--------------|--------------| +| Exploitability analysis + reachability proof | 1.00 | +| Config/feature-flag reason with evidence | 0.80 | +| Vendor blanket statement | 0.60 | +| Under investigation | 0.40 | + +--- + +## Success Metrics + +### Technical Metrics +- **Determinism**: 100% bit-identical verdict manifests for same inputs +- **Performance**: Verdict computation <100ms for 1k claims (p95) +- **Accuracy**: Calibration drift <5% per epoch +- **Scalability**: 100k verdicts/day without degradation + +### Business Metrics +- **Explainability**: 100% of verdicts include full audit trail +- **Compliance**: DSSE-signed verdicts meet audit requirements +- **Customer adoption**: ≥30% enable trust algebra UI (12 months) + +--- + +## Architectural Decisions + +| Decision | Rationale | +|----------|-----------| +| Extend, don't replace | Build trust vectors alongside existing append-only linksets | +| Backward compatibility | Existing `VexProvider.Trust.Weight` maps to legacy mode | +| Scoring at evaluation time | No ingestion-time decisioning per AOC-19 | +| Air-gap support | Trust vectors work offline with local signature verification | +| Calibration as separate manifest | Allows auditable tuning history | + +--- + +## Quick Links + +**Sprint Files**: +- [SPRINT_7100_0001_0001 - Trust Vector 
Foundation](SPRINT_7100_0001_0001_trust_vector_foundation.md) +- [SPRINT_7100_0001_0002 - Verdict Manifest & Replay](SPRINT_7100_0001_0002_verdict_manifest_replay.md) +- [SPRINT_7100_0002_0001 - Policy Gates & Merge](SPRINT_7100_0002_0001_policy_gates_merge.md) +- [SPRINT_7100_0002_0002 - Source Defaults & Calibration](SPRINT_7100_0002_0002_source_defaults_calibration.md) +- [SPRINT_7100_0003_0001 - UI Trust Algebra Panel](SPRINT_7100_0003_0001_ui_trust_algebra.md) +- [SPRINT_7100_0003_0002 - Integration & Documentation](SPRINT_7100_0003_0002_integration_documentation.md) + +**Documentation**: +- [Trust Lattice Specification](../modules/excititor/trust-lattice.md) +- [Verdict Manifest Specification](../modules/authority/verdict-manifest.md) +- [Excititor Architecture](../modules/excititor/architecture.md) + +**Source Advisory**: +- [22-Dec-2026 - Building a Trust Lattice for VEX Sources](../product-advisories/archived/22-Dec-2026%20-%20Building%20a%20Trust%20Lattice%20for%20VEX%20Sources.md) + +--- + +**Last Updated**: 2025-12-22 +**Next Review**: Weekly during sprint execution diff --git a/docs/implplan/analysis/4300_explainable_triage_gap_analysis.md b/docs/implplan/analysis/4300_explainable_triage_gap_analysis.md new file mode 100644 index 000000000..3ccfe7ed4 --- /dev/null +++ b/docs/implplan/analysis/4300_explainable_triage_gap_analysis.md @@ -0,0 +1,305 @@ +# Gap Analysis: Explainable Triage and Proof-Linked Evidence + +**Date:** 2025-12-22 +**Advisory:** 18-Dec-2025 - Designing Explainable Triage and Proof-Linked Evidence +**Analyst:** Agent + +--- + +## 1. Executive Summary + +The advisory "Designing Explainable Triage and Proof-Linked Evidence" defines a comprehensive vision for making security triage **explainable** and approvals **provably evidence-linked**. This gap analysis compares the advisory requirements against the current StellaOps implementation. 
+ +**Key Finding:** ~85% of the advisory is already implemented through prior sprint work (3800, 3801, 4100, 4200 series). Six specific gaps remain, addressed by the SPRINT_4300 series. + +--- + +## 2. Advisory Requirements Summary + +### 2.1 Explainable Triage UX +- Every risk row shows: Score, CVE, service, package +- Expand panel shows: Path, Boundary, VEX, Last-seen, Actions +- Data contract for evidence retrieval + +### 2.2 Evidence-Linked Approvals +- Chain: SBOM → VEX → Policy Decision +- in-toto/DSSE attestations with signatures +- Gate merges/deploys on chain validation + +### 2.3 Backend Requirements +- `/findings/:id/evidence` endpoint +- `/approvals/:artifact/attestations` endpoint +- Proof bundles as content-addressed blobs +- DSSE envelopes for signatures + +### 2.4 CLI/API +- `stella verify image: --require sbom,vex,decision` +- Signed summary return +- Non-zero exit for CI/CD gates + +### 2.5 Invariants +- Artifact anchoring (no "latest tag" approvals) +- Evidence closure (decision refs exact evidence) +- Signature chain (DSSE, signed, verifiable) +- Staleness (last_seen, expires_at, TTL) + +### 2.6 Metrics +- % attestation completeness (target ≥95%) +- TTFE (time-to-first-evidence, target ≤30s) +- Post-deploy reversions (target: zero) + +--- + +## 3. 
Implementation Status + +### 3.1 Fully Implemented (No Action Needed) + +| Requirement | Implementation | Evidence | +|-------------|----------------|----------| +| **Triage DB Schema** | TriageDbContext with 8 entities | `src/Scanner/__Libraries/StellaOps.Scanner.Triage/` | +| **Evidence Bundle** | EvidenceBundle with 6 evidence types | `src/__Libraries/StellaOps.Evidence.Bundle/` | +| **VEX Decision Models** | OpenVEX output with x-stellaops-evidence | `src/Policy/StellaOps.Policy.Engine/Vex/` | +| **Score Explanation** | ScoreExplanationService, additive model | `src/Signals/StellaOps.Signals/Services/` | +| **Trust Lattice Engine** | K4 evaluation, claim aggregation | `src/Policy/__Libraries/StellaOps.Policy/TrustLattice/` | +| **Boundary Extractors** | K8s, Gateway, IaC extractors | SPRINT_3800_0002_* (archived, DONE) | +| **Human Approval Attestation** | stella.ops/human-approval@v1 | SPRINT_3801_0001_0004 (DONE) | +| **Risk Verdict Attestation** | RiskVerdictAttestation, RvaBuilder | SPRINT_4100_0003_0001 (DONE) | +| **OCI Referrer Push** | OciPushClient, RvaOciPublisher | SPRINT_4100_0003_0002 (DONE) | +| **Approve Button UI** | ApprovalButtonComponent (624 lines) | SPRINT_4100_0005_0001 (DONE) | +| **Decision Recording** | DecisionService, replay tokens | `src/Findings/StellaOps.Findings.Ledger/` | +| **Policy Gates** | PolicyGateEvaluator, Pass/Block/Warn | `src/Policy/StellaOps.Policy.Engine/Gates/` | +| **Exception Evaluation** | ExceptionEvaluator, compensating controls | SPRINT_3900 series (DONE) | +| **TTFS Telemetry** | TtfsIngestionService | `src/Telemetry/StellaOps.Telemetry.Core/Triage/` | + +### 3.2 Planned (In Progress) + +| Requirement | Sprint | Status | +|-------------|--------|--------| +| Proof Chain Verification UI | SPRINT_4200_0001_0001 | TODO | + +### 3.3 Gaps Identified + +| ID | Gap | Advisory Section | Priority | +|----|-----|------------------|----------| +| G1 | CLI Attestation Chain Verify | CLI/API, Pipeline gate | HIGH | +| G2 
| Evidence Privacy Controls | Evidence privacy | MEDIUM | +| G3 | Evidence TTL Strategy API | Staleness invariant | MEDIUM | +| G4 | Predicate Type JSON Schemas | Predicate types | LOW | +| G5 | Metrics Dashboard | Metrics | LOW | +| G6 | Findings Evidence API | Backend, Data contract | MEDIUM | + +--- + +## 4. Gap Details + +### G1: CLI Attestation Chain Verify Command + +**Advisory Requirement:** +``` +stella verify image: --require sbom,vex,decision +``` +Returns signed summary; pipelines fail on non-zero. + +**Current State:** +- `stella verify offline` exists for offline verification +- No image-based attestation chain verification +- No `--require` attestation type filtering + +**Gap:** Need online image verification with attestation requirements. + +**Resolution:** SPRINT_4300_0001_0001 + +--- + +### G2: Evidence Privacy Controls + +**Advisory Requirement:** +> Store file hashes, symbol names, and line ranges (no raw source required). Gate raw source behind elevated permissions. + +**Current State:** +- Evidence contains full details +- No redaction service +- No permission-based access control + +**Gap:** Need redaction levels and permission checks. + +**Resolution:** SPRINT_4300_0002_0001 + +--- + +### G3: Evidence TTL Strategy Enforcement + +**Advisory Requirement:** +> SBOM: long TTL (weeks/months). Boundary: short TTL (hours/days). Reachability: medium TTL. Staleness behavior in policy. + +**Current State:** +- TTL fields exist on evidence entities +- No enforcement in policy gate +- No staleness warnings + +**Gap:** Need TTL enforcer service integrated with policy. 
+ +**Resolution:** SPRINT_4300_0002_0002 + +--- + +### G4: Predicate Type JSON Schemas + +**Advisory Requirement:** +> Predicate types: stella/sbom@v1, stella/vex@v1, stella/reachability@v1, stella/boundary@v1, stella/policy-decision@v1, stella/human-approval@v1 + +**Current State:** +- C# models exist for all predicate types +- No formal JSON Schema definitions +- No schema validation on attestation creation + +**Gap:** Need JSON schemas and validation. + +**Resolution:** SPRINT_4300_0003_0001 + +--- + +### G5: Attestation Completeness Metrics + +**Advisory Requirement:** +> Metrics: % changes with complete attestations (target ≥95%), TTFE (target ≤30s), Post-deploy reversions (trend to zero) + +**Current State:** +- TTFS telemetry exists (time-to-first-skeleton) +- No attestation completeness ratio +- No reversion tracking +- No Grafana dashboard + +**Gap:** Need full metrics suite and dashboard. + +**Resolution:** SPRINT_4300_0003_0002 + +--- + +### G6: Findings Evidence API Endpoint + +**Advisory Requirement:** +> Backend: add `/findings/:id/evidence` (returns the contract). + +Contract: +```json +{ + "finding_id": "f-7b3c", + "cve": "CVE-2024-12345", + "component": {...}, + "reachable_path": [...], + "entrypoint": {...}, + "vex": {...}, + "last_seen": "...", + "attestation_refs": [...] +} +``` + +**Current State:** +- EvidenceCompositionService exists internally +- No REST endpoint exposing advisory contract +- Different internal response format + +**Gap:** Need REST endpoint with advisory-compliant contract. + +**Resolution:** SPRINT_4300_0001_0002 + +--- + +## 5. 
Coverage Matrix + +| Advisory Section | Subsection | Implemented | Gap Sprint | +|------------------|------------|-------------|------------| +| Explainable Triage UX | Row (collapsed) | ✅ | — | +| | Expand panel | ✅ | — | +| | Data contract | ⚠️ | 4300.0001.0002 | +| Evidence-Linked Approvals | Chain exists | ✅ | — | +| | in-toto/DSSE | ✅ | — | +| | Gate merges | ✅ | — | +| Backend | /findings/:id/evidence | ❌ | 4300.0001.0002 | +| | /approvals/:artifact/attestations | ✅ | — | +| | Proof bundles | ✅ | — | +| CLI/API | stella verify image | ❌ | 4300.0001.0001 | +| Invariants | Artifact anchoring | ✅ | — | +| | Evidence closure | ✅ | — | +| | Signature chain | ✅ | — | +| | Staleness | ⚠️ | 4300.0002.0002 | +| Data Model | artifacts table | ✅ | — | +| | findings table | ✅ | — | +| | evidence table | ✅ | — | +| | attestations table | ✅ | — | +| | approvals table | ✅ | — | +| Evidence Types | Reachable path proof | ✅ | — | +| | Boundary proof | ✅ | — | +| | VEX status | ✅ | — | +| | Score explanation | ✅ | — | +| Predicate Types | stella/sbom@v1 | ⚠️ | 4300.0003.0001 | +| | stella/vex@v1 | ⚠️ | 4300.0003.0001 | +| | stella/reachability@v1 | ⚠️ | 4300.0003.0001 | +| | stella/boundary@v1 | ⚠️ | 4300.0003.0001 | +| | stella/policy-decision@v1 | ⚠️ | 4300.0003.0001 | +| | stella/human-approval@v1 | ⚠️ | 4300.0003.0001 | +| Policy Gate | OPA/Rego | ✅ | — | +| | Signed decision | ✅ | — | +| Approve Button | Disabled until valid | ✅ | — | +| | Creates approval attestation | ✅ | — | +| Verification | Shared verifier library | ✅ | — | +| Privacy | Redacted proofs | ❌ | 4300.0002.0001 | +| | Elevated permissions | ❌ | 4300.0002.0001 | +| TTL Strategy | Per-type TTLs | ⚠️ | 4300.0002.0002 | +| Metrics | % completeness | ❌ | 4300.0003.0002 | +| | TTFE | ⚠️ | 4300.0003.0002 | +| | Reversions | ❌ | 4300.0003.0002 | +| UI Components | Findings list | ✅ | — | +| | Evidence drawer | ⏳ | 4200.0001.0001 | +| | Proof bundle viewer | ⏳ | 4200.0001.0001 | + +**Legend:** ✅ Implemented | ⚠️ 
Partial | ❌ Missing | ⏳ Planned + +--- + +## 6. Effort Estimation + +| Sprint | Effort | Team | Parallelizable | +|--------|--------|------|----------------| +| 4300.0001.0001 | M (2-3d) | CLI | Yes | +| 4300.0001.0002 | S (1-2d) | Scanner | Yes | +| 4300.0002.0001 | M (2-3d) | Scanner | Yes | +| 4300.0002.0002 | S (1-2d) | Policy | Yes | +| 4300.0003.0001 | S (1-2d) | Attestor | Yes | +| 4300.0003.0002 | M (2-3d) | Telemetry | Yes | + +**Total:** 10-14 days (can complete in 1-2 weeks with parallel execution) + +--- + +## 7. Recommendations + +1. **Prioritize G1 (CLI Verify)** - This is the only HIGH priority gap and enables CI/CD integration. + +2. **Bundle G2+G3** - Evidence privacy and TTL can share context in Scanner/Policy teams. + +3. **Defer G4+G5** - Predicate schemas and metrics are LOW priority; can follow after core functionality. + +4. **Leverage 4200.0001.0001** - Proof Chain UI sprint is already planned; ensure it consumes new evidence API. + +--- + +## 8. Appendix: Prior Sprint References + +| Sprint | Topic | Status | +|--------|-------|--------| +| 3800.0000.0000 | Explainable Triage Master | DONE | +| 3800.0002.0001 | RichGraph Boundary Extractor | DONE | +| 3800.0002.0002 | K8s Boundary Extractor | DONE | +| 3800.0003.0001 | Evidence API Endpoint | DONE | +| 3801.0001.0001 | Policy Decision Attestation | DONE | +| 3801.0001.0004 | Human Approval Attestation | DONE | +| 4100.0003.0001 | Risk Verdict Attestation | DONE | +| 4100.0003.0002 | OCI Referrer Push | DONE | +| 4100.0005.0001 | Approve Button UI | DONE | +| 4200.0001.0001 | Proof Chain Verification UI | TODO | + +--- + +**Analysis Complete:** 2025-12-22 diff --git a/docs/implplan/archived/2025-12-20/SPRINT_3600_0001_0001_trust_algebra_lattice.md b/docs/implplan/archived/2025-12-20/SPRINT_3600_0001_0001_trust_algebra_lattice.md index d14be04b0..ca1e0320d 100644 --- a/docs/implplan/archived/2025-12-20/SPRINT_3600_0001_0001_trust_algebra_lattice.md +++ 
b/docs/implplan/archived/2025-12-20/SPRINT_3600_0001_0001_trust_algebra_lattice.md @@ -23,7 +23,7 @@ ## Documentation Prerequisites -- `docs/product-advisories/unprocessed/19-Dec-2025 - Trust Algebra and Lattice Engine Specification.md` +- `docs/product-advisories/archived/19-Dec-2025 - Trust Algebra and Lattice Engine Specification.md` - `docs/modules/policy/architecture.md` - `docs/reachability/lattice.md` diff --git a/docs/implplan/archived/SPRINT_1200_001_000_router_rate_limiting_master.md b/docs/implplan/archived/SPRINT_1200_001_000_router_rate_limiting_master.md index 42222748c..372f6417f 100644 --- a/docs/implplan/archived/SPRINT_1200_001_000_router_rate_limiting_master.md +++ b/docs/implplan/archived/SPRINT_1200_001_000_router_rate_limiting_master.md @@ -2,7 +2,7 @@ **IMPLID:** 1200 (Router infrastructure) **Feature:** Centralized rate limiting for Stella Router as standalone product -**Advisory Source:** `docs/product-advisories/unprocessed/15-Dec-2025 - Designing 202 + Retry‑After Backpressure Control.md` +**Advisory Source:** `docs/product-advisories/archived/15-Dec-2025 - Designing 202 + Retry‑After Backpressure Control.md` **Owner:** Router Team **Status:** DONE (Sprints 1–6 closed; Sprint 4 closed N/A) **Priority:** HIGH - Core feature for Router product @@ -210,7 +210,7 @@ Each target can have multiple rules (AND logic): ## Related Documentation -- **Advisory:** `docs/product-advisories/unprocessed/15-Dec-2025 - Designing 202 + Retry‑After Backpressure Control.md` +- **Advisory:** `docs/product-advisories/archived/15-Dec-2025 - Designing 202 + Retry‑After Backpressure Control.md` - **Implementation:** `src/__Libraries/StellaOps.Router.Gateway/RateLimit/` - **Tests:** `tests/StellaOps.Router.Gateway.Tests/` - **Implementation Guides:** `docs/implplan/SPRINT_1200_001_00X_*.md` (see below) diff --git a/docs/implplan/archived/SPRINT_1200_001_IMPLEMENTATION_GUIDE.md b/docs/implplan/archived/SPRINT_1200_001_IMPLEMENTATION_GUIDE.md index 
996a28f6b..2cb4d5759 100644 --- a/docs/implplan/archived/SPRINT_1200_001_IMPLEMENTATION_GUIDE.md +++ b/docs/implplan/archived/SPRINT_1200_001_IMPLEMENTATION_GUIDE.md @@ -701,7 +701,7 @@ rate_limiting: ## References -- **Advisory:** `docs/product-advisories/unprocessed/15-Dec-2025 - Designing 202 + Retry‑After Backpressure Control.md` +- **Advisory:** `docs/product-advisories/archived/15-Dec-2025 - Designing 202 + Retry‑After Backpressure Control.md` - **Master Sprint Tracker:** `docs/implplan/SPRINT_1200_001_000_router_rate_limiting_master.md` - **Sprint Files:** `docs/implplan/SPRINT_1200_001_00X_*.md` - **HTTP 429 Semantics:** RFC 6585 diff --git a/docs/implplan/archived/SPRINT_1200_001_README.md b/docs/implplan/archived/SPRINT_1200_001_README.md index c30c8ee61..4a5f1dcdd 100644 --- a/docs/implplan/archived/SPRINT_1200_001_README.md +++ b/docs/implplan/archived/SPRINT_1200_001_README.md @@ -3,7 +3,7 @@ **Package Created:** 2025-12-17 **For:** Implementation agents / reviewers **Status:** DONE (Sprints 1–6 closed; Sprint 4 closed N/A) -**Advisory Source:** `docs/product-advisories/unprocessed/15-Dec-2025 - Designing 202 + Retry‑After Backpressure Control.md` +**Advisory Source:** `docs/product-advisories/archived/15-Dec-2025 - Designing 202 + Retry‑After Backpressure Control.md` --- @@ -114,7 +114,7 @@ Week 4+: Service Migration 1. `SPRINT_1200_001_000_router_rate_limiting_master.md` - Overview 2. `SPRINT_1200_001_IMPLEMENTATION_GUIDE.md` - Technical details -3. Original advisory: `docs/product-advisories/unprocessed/15-Dec-2025 - Designing 202 + Retry‑After Backpressure Control.md` +3. Original advisory: `docs/product-advisories/archived/15-Dec-2025 - Designing 202 + Retry‑After Backpressure Control.md` 4. Analysis plan: `C:\Users\VladimirMoushkov\.claude\plans\vectorized-kindling-rocket.md` ### 2. 
Environment Setup @@ -471,7 +471,7 @@ rate_limiting: ## Related Documentation ### Source Documents -- **Advisory:** `docs/product-advisories/unprocessed/15-Dec-2025 - Designing 202 + Retry‑After Backpressure Control.md` +- **Advisory:** `docs/product-advisories/archived/15-Dec-2025 - Designing 202 + Retry‑After Backpressure Control.md` - **Analysis Plan:** `C:\Users\VladimirMoushkov\.claude\plans\vectorized-kindling-rocket.md` - **Architecture:** `docs/modules/platform/architecture-overview.md` diff --git a/docs/implplan/archived/SPRINT_3401_0002_0001_score_replay_proof_bundle.md b/docs/implplan/archived/SPRINT_3401_0002_0001_score_replay_proof_bundle.md index 585c31bca..6efd539aa 100644 --- a/docs/implplan/archived/SPRINT_3401_0002_0001_score_replay_proof_bundle.md +++ b/docs/implplan/archived/SPRINT_3401_0002_0001_score_replay_proof_bundle.md @@ -9,7 +9,7 @@ Implement the score replay capability and proof bundle writer from the "Building 3. **Score Replay Endpoint** - `POST /score/replay` to recompute scores without rescanning 4. 
**Scan Manifest** - DSSE-signed manifest capturing all inputs affecting results -**Source Advisory**: `docs/product-advisories/unprocessed/16-Dec-2025 - Building a Deeper Moat Beyond Reachability.md` +**Source Advisory**: `docs/product-advisories/archived/16-Dec-2025 - Building a Deeper Moat Beyond Reachability.md` **Related Docs**: `docs/product-advisories/14-Dec-2025 - Determinism and Reproducibility Technical Reference.md` §11.2, §12 **Working Directory**: `src/Scanner/StellaOps.Scanner.WebService`, `src/Policy/__Libraries/StellaOps.Policy/` diff --git a/docs/implplan/archived/SPRINT_3500_0003_0001_ground_truth_corpus_ci_gates.md b/docs/implplan/archived/SPRINT_3500_0003_0001_ground_truth_corpus_ci_gates.md index 3f694d03a..19b514a4d 100644 --- a/docs/implplan/archived/SPRINT_3500_0003_0001_ground_truth_corpus_ci_gates.md +++ b/docs/implplan/archived/SPRINT_3500_0003_0001_ground_truth_corpus_ci_gates.md @@ -9,7 +9,7 @@ Establish the ground-truth corpus for binary-only reachability benchmarking and 3. **CI Regression Gates** - Fail build on precision/recall/determinism regressions 4. 
**Baseline Management** - Tooling to update baselines when improvements land -**Source Advisory**: `docs/product-advisories/unprocessed/16-Dec-2025 - Building a Deeper Moat Beyond Reachability.md` +**Source Advisory**: `docs/product-advisories/archived/16-Dec-2025 - Building a Deeper Moat Beyond Reachability.md` **Related Docs**: `docs/benchmarks/ground-truth-corpus.md` (new) **Working Directory**: `bench/reachability-benchmark/`, `datasets/reachability/`, `src/Scanner/` diff --git a/docs/implplan/archived/SPRINT_3600_0002_0001_unknowns_ranking_containment.md b/docs/implplan/archived/SPRINT_3600_0002_0001_unknowns_ranking_containment.md index 9da3ab8e0..e888d052e 100644 --- a/docs/implplan/archived/SPRINT_3600_0002_0001_unknowns_ranking_containment.md +++ b/docs/implplan/archived/SPRINT_3600_0002_0001_unknowns_ranking_containment.md @@ -9,7 +9,7 @@ Enhance the Unknowns ranking model with blast radius and runtime containment sig 3. **Unknown Proof Trail** - Emit proof nodes explaining rank factors 4. 
**API: `/unknowns/list?sort=score`** - Expose ranked unknowns -**Source Advisory**: `docs/product-advisories/unprocessed/16-Dec-2025 - Building a Deeper Moat Beyond Reachability.md` +**Source Advisory**: `docs/product-advisories/archived/16-Dec-2025 - Building a Deeper Moat Beyond Reachability.md` **Related Docs**: `docs/product-advisories/14-Dec-2025 - Triage and Unknowns Technical Reference.md` §17.5 **Working Directory**: `src/Scanner/__Libraries/StellaOps.Scanner.Unknowns/`, `src/Scanner/StellaOps.Scanner.WebService/` diff --git a/docs/implplan/archived/SPRINT_3700_0001_0001_triage_db_schema.md b/docs/implplan/archived/SPRINT_3700_0001_0001_triage_db_schema.md index 511c90fa3..baed855de 100644 --- a/docs/implplan/archived/SPRINT_3700_0001_0001_triage_db_schema.md +++ b/docs/implplan/archived/SPRINT_3700_0001_0001_triage_db_schema.md @@ -240,4 +240,4 @@ public class TriageSchemaTests : IAsyncLifetime - Schema definition: `docs/db/triage_schema.sql` - UX Guide: `docs/ux/TRIAGE_UX_GUIDE.md` - API Contract: `docs/api/triage.contract.v1.md` -- Advisory: `docs/product-advisories/unprocessed/16-Dec-2025 - Reimagining Proof-Linked UX in Security Workflows.md` +- Advisory: `docs/product-advisories/archived/16-Dec-2025 - Reimagining Proof-Linked UX in Security Workflows.md` diff --git a/docs/modules/authority/verdict-manifest.md b/docs/modules/authority/verdict-manifest.md new file mode 100644 index 000000000..49fdee150 --- /dev/null +++ b/docs/modules/authority/verdict-manifest.md @@ -0,0 +1,462 @@ +# Verdict Manifest Specification + +> **Status**: Draft (Sprint 7100) +> **Last Updated**: 2025-12-22 +> **Source Advisory**: `docs/product-advisories/archived/22-Dec-2025 - Building a Trust Lattice for VEX Sources.md` + +## 1. Overview + +A Verdict Manifest is a signed, immutable record of a VEX decisioning outcome. It captures all inputs used to produce a verdict, enabling deterministic replay and audit compliance. + +### Purpose + +1. 
**Auditability**: Prove exactly how a verdict was reached +2. **Reproducibility**: Replay the decision with identical results +3. **Compliance**: Meet regulatory requirements for security decisions +4. **Debugging**: Investigate unexpected verdict changes + +--- + +## 2. Manifest Schema + +### 2.1 Complete Schema + +```typescript +interface VerdictManifest { + // Identity + manifestId: string; // Unique identifier + tenant: string; // Tenant scope + + // Scope + assetDigest: string; // SHA256 of asset/SBOM + vulnerabilityId: string; // CVE/GHSA/vendor ID + + // Inputs (pinned for replay) + inputs: VerdictInputs; + + // Result + result: VerdictResult; + + // Policy context + policyHash: string; // SHA256 of policy file + latticeVersion: string; // Trust lattice version + + // Metadata + evaluatedAt: string; // ISO 8601 UTC timestamp + manifestDigest: string; // SHA256 of canonical manifest +} + +interface VerdictInputs { + sbomDigests: string[]; // SBOM document digests + vulnFeedSnapshotIds: string[]; // Feed snapshot identifiers + vexDocumentDigests: string[]; // VEX document digests + reachabilityGraphIds: string[]; // Call graph identifiers + clockCutoff: string; // Evaluation timestamp +} + +interface VerdictResult { + status: "affected" | "not_affected" | "fixed" | "under_investigation"; + confidence: number; // 0.0 to 1.0 + explanations: VerdictExplanation[]; + evidenceRefs: string[]; // Attestation/proof references +} + +interface VerdictExplanation { + sourceId: string; + reason: string; + provenanceScore: number; + coverageScore: number; + replayabilityScore: number; + strengthMultiplier: number; + freshnessMultiplier: number; + claimScore: number; +} +``` + +### 2.2 Identity Fields + +| Field | Type | Description | +|-------|------|-------------| +| `manifestId` | string | Format: `verd:{tenant}:{asset_short}:{vuln_id}:{timestamp}` | +| `tenant` | string | Tenant identifier for multi-tenancy | + +### 2.3 Input Pinning + +All inputs that affect the verdict 
must be pinned for deterministic replay: + +| Field | Description | +|-------|-------------| +| `sbomDigests` | SHA256 digests of SBOM documents used | +| `vulnFeedSnapshotIds` | Identifiers for vulnerability feed snapshots | +| `vexDocumentDigests` | SHA256 digests of VEX documents considered | +| `reachabilityGraphIds` | Identifiers for call graph snapshots | +| `clockCutoff` | Timestamp used for freshness calculations | + +### 2.4 Verdict Result + +The result section contains the actual verdict and full explanation: + +| Field | Description | +|-------|-------------| +| `status` | Final verdict: affected, not_affected, fixed, under_investigation | +| `confidence` | Numeric confidence score (0.0 to 1.0) | +| `explanations` | Per-source breakdown of scoring | +| `evidenceRefs` | Links to attestations and proof bundles | + +--- + +## 3. Deterministic Serialization + +### 3.1 Canonical JSON + +Manifests are serialized using canonical JSON rules: + +1. Keys sorted alphabetically (ASCII order) +2. No insignificant whitespace +3. UTF-8 encoding without BOM +4. Timestamps in ISO 8601 format with 'Z' suffix +5. Arrays sorted by natural key (sourceId, then score) +6. Numbers without trailing zeros + +### 3.2 Digest Computation + +The manifest digest is computed over the canonical JSON: + +```csharp +public static string ComputeDigest(VerdictManifest manifest) +{ + var json = CanonicalJsonSerializer.Serialize(manifest with { ManifestDigest = "" }); + var bytes = Encoding.UTF8.GetBytes(json); + var hash = SHA256.HashData(bytes); + return $"sha256:{Convert.ToHexString(hash).ToLowerInvariant()}"; +} +``` + +--- + +## 4. 
Signing + +### 4.1 DSSE Envelope + +Verdict manifests are signed using [DSSE (Dead Simple Signing Envelope)](https://github.com/secure-systems-lab/dsse): + +```json +{ + "payloadType": "application/vnd.stellaops.verdict+json", + "payload": "", + "signatures": [ + { + "keyid": "projects/stellaops/keys/verdict-signer-2025", + "sig": "" + } + ] +} +``` + +### 4.2 Predicate Type + +For in-toto attestations: + +``` +https://stella-ops.org/attestations/vex-verdict/1 +``` + +### 4.3 Rekor Integration + +Optionally, verdicts can be logged to Sigstore Rekor for transparency: + +```json +{ + "rekorLogId": "rekor.sigstore.dev", + "rekorLogIndex": 12345678, + "rekorEntryUrl": "https://rekor.sigstore.dev/api/v1/log/entries/..." +} +``` + +--- + +## 5. Storage + +### 5.1 PostgreSQL Schema + +```sql +CREATE TABLE authority.verdict_manifests ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + manifest_id TEXT NOT NULL UNIQUE, + tenant TEXT NOT NULL, + + -- Scope + asset_digest TEXT NOT NULL, + vulnerability_id TEXT NOT NULL, + + -- Inputs (JSONB) + inputs_json JSONB NOT NULL, + + -- Result + status TEXT NOT NULL, + confidence DOUBLE PRECISION NOT NULL, + result_json JSONB NOT NULL, + + -- Policy context + policy_hash TEXT NOT NULL, + lattice_version TEXT NOT NULL, + + -- Metadata + evaluated_at TIMESTAMPTZ NOT NULL, + manifest_digest TEXT NOT NULL, + + -- Signature + signature_json JSONB, + rekor_log_id TEXT, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); +``` + +### 5.2 Indexing Strategy + +| Index | Purpose | +|-------|---------| +| `(tenant, asset_digest, vulnerability_id)` | Primary lookup | +| `(tenant, policy_hash, lattice_version)` | Policy version queries | +| `(evaluated_at)` BRIN | Time-based queries | +| Unique: `(tenant, asset_digest, vulnerability_id, policy_hash, lattice_version)` | Ensure one verdict per configuration | + +--- + +## 6. Replay Verification + +### 6.1 Verification Protocol + +1. Retrieve stored manifest by ID +2. 
Fetch pinned inputs (SBOM, VEX docs, feeds) by digest +3. Re-execute trust lattice evaluation with identical inputs +4. Compare result with stored verdict +5. Verify signature if present + +### 6.2 Verification Request + +```http +POST /api/v1/authority/verdicts/{manifestId}/replay +Authorization: Bearer +``` + +### 6.3 Verification Response + +```json +{ + "success": true, + "originalManifest": { ... }, + "replayedManifest": { ... }, + "differences": [], + "signatureValid": true, + "verifiedAt": "2025-12-22T15:30:00Z" +} +``` + +### 6.4 Failure Handling + +When replay produces different results: + +```json +{ + "success": false, + "originalManifest": { ... }, + "replayedManifest": { ... }, + "differences": [ + { + "field": "result.confidence", + "original": 0.82, + "replayed": 0.79, + "reason": "VEX document digest mismatch" + } + ], + "signatureValid": true, + "error": "Verdict replay produced different confidence score" +} +``` + +--- + +## 7. API Reference + +### Get Verdict Manifest + +```http +GET /api/v1/authority/verdicts/{manifestId} +Authorization: Bearer + +Response: 200 OK +{ + "manifest": { ... }, + "signature": { ... }, + "rekorEntry": { ... } +} +``` + +### List Verdicts by Scope + +```http +GET /api/v1/authority/verdicts?assetDigest={digest}&vulnerabilityId={cve} +Authorization: Bearer + +Response: 200 OK +{ + "verdicts": [ ... ], + "pageToken": "..." +} +``` + +### Replay Verdict + +```http +POST /api/v1/authority/verdicts/{manifestId}/replay +Authorization: Bearer + +Response: 200 OK +{ + "success": true, + ... +} +``` + +### Download Signed Manifest + +```http +GET /api/v1/authority/verdicts/{manifestId}/download +Authorization: Bearer + +Response: 200 OK +Content-Type: application/vnd.stellaops.verdict+json +Content-Disposition: attachment; filename="verdict-{manifestId}.json" +``` + +--- + +## 8. 
JSON Schema + +### verdict-manifest.schema.json + +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://stella-ops.org/schemas/verdict-manifest/1.0.0", + "type": "object", + "required": [ + "manifestId", + "tenant", + "assetDigest", + "vulnerabilityId", + "inputs", + "result", + "policyHash", + "latticeVersion", + "evaluatedAt", + "manifestDigest" + ], + "properties": { + "manifestId": { + "type": "string", + "pattern": "^verd:[a-z0-9-]+:[a-f0-9]+:[A-Z0-9-]+:[0-9]+$" + }, + "tenant": { "type": "string", "minLength": 1 }, + "assetDigest": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$" + }, + "vulnerabilityId": { "type": "string", "minLength": 1 }, + "inputs": { "$ref": "#/$defs/VerdictInputs" }, + "result": { "$ref": "#/$defs/VerdictResult" }, + "policyHash": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$" + }, + "latticeVersion": { + "type": "string", + "pattern": "^[0-9]+\\.[0-9]+\\.[0-9]+$" + }, + "evaluatedAt": { + "type": "string", + "format": "date-time" + }, + "manifestDigest": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$" + } + }, + "$defs": { + "VerdictInputs": { + "type": "object", + "required": ["sbomDigests", "vulnFeedSnapshotIds", "vexDocumentDigests", "clockCutoff"], + "properties": { + "sbomDigests": { + "type": "array", + "items": { "type": "string", "pattern": "^sha256:[a-f0-9]{64}$" } + }, + "vulnFeedSnapshotIds": { + "type": "array", + "items": { "type": "string" } + }, + "vexDocumentDigests": { + "type": "array", + "items": { "type": "string", "pattern": "^sha256:[a-f0-9]{64}$" } + }, + "reachabilityGraphIds": { + "type": "array", + "items": { "type": "string" } + }, + "clockCutoff": { + "type": "string", + "format": "date-time" + } + } + }, + "VerdictResult": { + "type": "object", + "required": ["status", "confidence", "explanations"], + "properties": { + "status": { + "type": "string", + "enum": ["affected", "not_affected", "fixed", "under_investigation"] + }, + 
"confidence": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "explanations": { + "type": "array", + "items": { "$ref": "#/$defs/VerdictExplanation" } + }, + "evidenceRefs": { + "type": "array", + "items": { "type": "string" } + } + } + }, + "VerdictExplanation": { + "type": "object", + "required": ["sourceId", "reason", "claimScore"], + "properties": { + "sourceId": { "type": "string" }, + "reason": { "type": "string" }, + "provenanceScore": { "type": "number", "minimum": 0, "maximum": 1 }, + "coverageScore": { "type": "number", "minimum": 0, "maximum": 1 }, + "replayabilityScore": { "type": "number", "minimum": 0, "maximum": 1 }, + "strengthMultiplier": { "type": "number", "minimum": 0, "maximum": 1 }, + "freshnessMultiplier": { "type": "number", "minimum": 0, "maximum": 1 }, + "claimScore": { "type": "number", "minimum": 0, "maximum": 1 } + } + } + } +} +``` + +--- + +## Related Documentation + +- [Trust Lattice Specification](../excititor/trust-lattice.md) +- [Authority Architecture](./architecture.md) +- [DSSE Signing](../../dev/dsse-signing.md) +- [API Reference](../../09_API_CLI_REFERENCE.md) diff --git a/docs/modules/benchmark/architecture.md b/docs/modules/benchmark/architecture.md new file mode 100644 index 000000000..0dc4e5407 --- /dev/null +++ b/docs/modules/benchmark/architecture.md @@ -0,0 +1,444 @@ +# Benchmark Module Architecture + +## Overview + +The Benchmark module provides infrastructure for validating and demonstrating Stella Ops' competitive advantages through automated comparison against other container security scanners (Trivy, Grype, Syft, etc.). + +**Module Path**: `src/Scanner/__Libraries/StellaOps.Scanner.Benchmark/` +**Status**: PLANNED (Sprint 7000.0001.0001) + +--- + +## Mission + +Establish verifiable, reproducible benchmarks that: +1. Validate competitive claims with evidence +2. Detect regressions in accuracy or performance +3. Generate marketing-ready comparison materials +4. 
Provide ground-truth corpus for testing + +--- + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Benchmark Module │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ +│ │ Corpus │ │ Harness │ │ Metrics │ │ +│ │ Manager │───▶│ Runner │───▶│ Calculator │ │ +│ └─────────────┘ └─────────────┘ └─────────────┘ │ +│ │ │ │ │ +│ │ │ │ │ +│ ▼ ▼ ▼ │ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ +│ │Ground Truth │ │ Competitor │ │ Claims │ │ +│ │ Manifest │ │ Adapters │ │ Index │ │ +│ └─────────────┘ └─────────────┘ └─────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +--- + +## Components + +### 1. Corpus Manager + +**Namespace**: `StellaOps.Scanner.Benchmark.Corpus` + +Manages the ground-truth corpus of container images with known vulnerabilities. + +```csharp +public interface ICorpusManager +{ + Task LoadCorpusAsync(string corpusPath, CancellationToken ct); + Task GetImageAsync(string digest, CancellationToken ct); + Task GetGroundTruthAsync(string digest, CancellationToken ct); +} + +public record Corpus( + string Version, + DateTimeOffset CreatedAt, + ImmutableArray Images +); + +public record CorpusImage( + string Digest, + string Name, + string Tag, + CorpusCategory Category, + GroundTruth GroundTruth +); + +public record GroundTruth( + ImmutableArray TruePositives, + ImmutableArray KnownFalsePositives, + ImmutableArray Notes +); + +public enum CorpusCategory +{ + BaseOS, // Alpine, Debian, Ubuntu, RHEL + ApplicationNode, // Node.js applications + ApplicationPython,// Python applications + ApplicationJava, // Java applications + ApplicationDotNet,// .NET applications + BackportScenario, // Known backported fixes + Unreachable // Known unreachable vulns +} +``` + +### 2. 
Harness Runner + +**Namespace**: `StellaOps.Scanner.Benchmark.Harness` + +Executes scans using Stella Ops and competitor tools. + +```csharp +public interface IHarnessRunner +{ + Task RunAsync( + Corpus corpus, + ImmutableArray tools, + BenchmarkOptions options, + CancellationToken ct + ); +} + +public interface ITool +{ + string Name { get; } + string Version { get; } + Task ScanAsync(string imageRef, CancellationToken ct); +} + +public record BenchmarkRun( + string RunId, + DateTimeOffset StartedAt, + DateTimeOffset CompletedAt, + ImmutableArray Results +); + +public record ToolResult( + string ToolName, + string ToolVersion, + string ImageDigest, + ImmutableArray Findings, + TimeSpan Duration +); +``` + +### 3. Competitor Adapters + +**Namespace**: `StellaOps.Scanner.Benchmark.Adapters` + +Normalize output from competitor tools. + +```csharp +public interface ICompetitorAdapter : ITool +{ + Task> ParseOutputAsync( + string output, + CancellationToken ct + ); +} + +// Implementations +public class TrivyAdapter : ICompetitorAdapter { } +public class GrypeAdapter : ICompetitorAdapter { } +public class SyftAdapter : ICompetitorAdapter { } +public class StellaOpsAdapter : ICompetitorAdapter { } +``` + +### 4. Metrics Calculator + +**Namespace**: `StellaOps.Scanner.Benchmark.Metrics` + +Calculate precision, recall, F1, and other metrics. 
+ +```csharp +public interface IMetricsCalculator +{ + BenchmarkMetrics Calculate( + ToolResult result, + GroundTruth groundTruth + ); + + ComparativeMetrics Compare( + BenchmarkMetrics baseline, + BenchmarkMetrics comparison + ); +} + +public record BenchmarkMetrics( + int TruePositives, + int FalsePositives, + int TrueNegatives, + int FalseNegatives, + double Precision, + double Recall, + double F1Score, + ImmutableDictionary ByCategory +); + +public record ComparativeMetrics( + string BaselineTool, + string ComparisonTool, + double PrecisionDelta, + double RecallDelta, + double F1Delta, + ImmutableArray UniqueFindings, + ImmutableArray MissedFindings +); +``` + +### 5. Claims Index + +**Namespace**: `StellaOps.Scanner.Benchmark.Claims` + +Manage verifiable claims with evidence links. + +```csharp +public interface IClaimsIndex +{ + Task> GetAllClaimsAsync(CancellationToken ct); + Task VerifyClaimAsync(string claimId, CancellationToken ct); + Task UpdateClaimsAsync(BenchmarkRun run, CancellationToken ct); +} + +public record Claim( + string Id, + ClaimCategory Category, + string Statement, + string EvidencePath, + ClaimStatus Status, + DateTimeOffset LastVerified +); + +public enum ClaimStatus { Pending, Verified, Published, Disputed, Resolved } + +public record ClaimVerification( + string ClaimId, + bool IsValid, + string? Evidence, + string? 
FailureReason +); +``` + +--- + +## Data Flow + +``` +┌────────────────┐ +│ Corpus Images │ +│ (50+ images) │ +└───────┬────────┘ + │ + ▼ +┌────────────────┐ ┌────────────────┐ +│ Stella Ops Scan│ │ Trivy/Grype │ +│ │ │ Scan │ +└───────┬────────┘ └───────┬────────┘ + │ │ + ▼ ▼ +┌────────────────┐ ┌────────────────┐ +│ Normalized │ │ Normalized │ +│ Findings │ │ Findings │ +└───────┬────────┘ └───────┬────────┘ + │ │ + └──────────┬───────────┘ + │ + ▼ + ┌──────────────┐ + │ Ground Truth │ + │ Comparison │ + └──────┬───────┘ + │ + ▼ + ┌──────────────┐ + │ Metrics │ + │ (P/R/F1) │ + └──────┬───────┘ + │ + ▼ + ┌──────────────┐ + │ Claims Index │ + │ Update │ + └──────────────┘ +``` + +--- + +## Corpus Structure + +``` +bench/competitors/ +├── corpus/ +│ ├── manifest.json # Corpus metadata +│ ├── ground-truth/ +│ │ ├── alpine-3.18.json # Per-image ground truth +│ │ ├── debian-bookworm.json +│ │ └── ... +│ └── images/ +│ ├── base-os/ +│ ├── applications/ +│ └── edge-cases/ +├── results/ +│ ├── 2025-12-22/ +│ │ ├── stellaops.json +│ │ ├── trivy.json +│ │ ├── grype.json +│ │ └── comparison.json +│ └── latest -> 2025-12-22/ +└── fixtures/ + └── adapters/ # Test fixtures for adapters +``` + +--- + +## Ground Truth Format + +```json +{ + "imageDigest": "sha256:abc123...", + "imageName": "alpine:3.18", + "category": "BaseOS", + "groundTruth": { + "truePositives": [ + { + "cveId": "CVE-2024-1234", + "package": "openssl", + "version": "3.0.8", + "notes": "Fixed in 3.0.9" + } + ], + "knownFalsePositives": [ + { + "cveId": "CVE-2024-9999", + "package": "zlib", + "version": "1.2.13", + "reason": "Backported in alpine:3.18" + } + ], + "expectedUnreachable": [ + { + "cveId": "CVE-2024-5678", + "package": "curl", + "reason": "Vulnerable function not linked" + } + ] + }, + "lastVerified": "2025-12-01T00:00:00Z", + "verifiedBy": "security-team" +} +``` + +--- + +## CI Integration + +### Workflow: `benchmark-vs-competitors.yml` + +```yaml +name: Competitive Benchmark + +on: + schedule: + 
- cron: '0 2 * * 0' # Weekly Sunday 2 AM + workflow_dispatch: + push: + paths: + - 'src/Scanner/__Libraries/StellaOps.Scanner.Benchmark/**' + - 'bench/competitors/**' + +jobs: + benchmark: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Install competitor tools + run: | + # Install Trivy + curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh + # Install Grype + curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh + + - name: Run benchmark + run: stella benchmark run --corpus bench/competitors/corpus --output bench/competitors/results/$(date +%Y-%m-%d) + + - name: Update claims index + run: stella benchmark claims --output docs/claims-index.md + + - name: Upload results + uses: actions/upload-artifact@v4 + with: + name: benchmark-results + path: bench/competitors/results/ +``` + +--- + +## CLI Commands + +```bash +# Run full benchmark +stella benchmark run --corpus --competitors trivy,grype,syft + +# Verify a specific claim +stella benchmark verify + +# Generate claims index +stella benchmark claims --output docs/claims-index.md + +# Generate marketing battlecard +stella benchmark battlecard --output docs/marketing/battlecard.md + +# Show comparison summary +stella benchmark summary --format table|json|markdown +``` + +--- + +## Testing + +| Test Type | Location | Purpose | +|-----------|----------|---------| +| Unit | `StellaOps.Scanner.Benchmark.Tests/` | Adapter parsing, metrics calculation | +| Integration | `StellaOps.Scanner.Benchmark.Integration.Tests/` | Full benchmark flow | +| Golden | `bench/competitors/fixtures/` | Deterministic output verification | + +--- + +## Security Considerations + +1. **Competitor binaries**: Run in isolated containers, no network access during scan +2. **Corpus images**: Verified digests, no external pulls during benchmark +3. **Results**: Signed with DSSE before publishing +4. 
**Claims**: Require PR review before status change
+
+---
+
+## Dependencies
+
+- `StellaOps.Scanner.Core` - Normalized finding models
+- `StellaOps.Attestor.Dsse` - Result signing
+- Docker - Competitor tool execution
+- Ground-truth corpus (maintained separately)
+
+---
+
+## Related Documentation
+
+- [Claims Index](../../claims-index.md)
+- [Sprint 7000.0001.0001](../../implplan/SPRINT_7000_0001_0001_competitive_benchmarking.md)
+- [Testing Strategy](../../implplan/SPRINT_5100_SUMMARY.md)
+
+---
+
+*Document Version*: 1.0.0
+*Created*: 2025-12-22
diff --git a/docs/modules/excititor/trust-lattice.md b/docs/modules/excititor/trust-lattice.md
new file mode 100644
index 000000000..25009fa6e
--- /dev/null
+++ b/docs/modules/excititor/trust-lattice.md
@@ -0,0 +1,460 @@
+# VEX Trust Lattice Specification
+
+> **Status**: Draft (Sprint 7100)
+> **Last Updated**: 2025-12-22
+> **Source Advisory**: `docs/product-advisories/archived/22-Dec-2025 - Building a Trust Lattice for VEX Sources.md`
+
+## 1. Overview
+
+The VEX Trust Lattice provides a mathematically rigorous framework for converting heterogeneous VEX claims from multiple sources into a single, signed, reproducible verdict with a numeric confidence and a complete audit trail.
+
+### Goals
+
+1. **Explainability**: Every verdict includes a full breakdown of how it was computed
+2. **Reproducibility**: Same inputs always produce identical verdicts (deterministic)
+3. **Auditability**: Signed verdict manifests with pinned inputs for regulatory compliance
+4. **Tunability**: Per-tenant, per-source trust configuration without code changes
+
+### Non-Goals
+
+- Real-time vulnerability detection (handled by Scanner)
+- VEX document ingestion (handled by Excititor core)
+- Policy enforcement (handled by Policy Engine)
+
+---
+
+## 2. Trust Vector Model
+
+Each VEX source is assigned a 3-component trust vector scored in the range [0..1].
+ +### 2.1 Provenance (P) + +Measures cryptographic and process integrity of the source. + +| Score | Description | +|-------|-------------| +| 1.00 | DSSE-signed, timestamped, Rekor/Git anchored, key in allow-list, rotation policy OK | +| 0.75 | DSSE-signed + public key known, but no transparency log | +| 0.40 | Unsigned but retrieved via authenticated, immutable artifact repo | +| 0.10 | Opaque/CSV/email/manual import | + +### 2.2 Coverage (C) + +Measures how well the statement's scope maps to the target asset. + +| Score | Description | +|-------|-------------| +| 1.00 | Exact package + version/build digest + feature/flag context matched | +| 0.75 | Exact package + version range matched; partial feature context | +| 0.50 | Product-level only; maps via CPE/PURL family | +| 0.25 | Family-level heuristics; no version proof | + +### 2.3 Replayability (R) + +Measures whether the claim can be deterministically re-derived. + +| Score | Description | +|-------|-------------| +| 1.00 | All inputs pinned (feeds, SBOM hash, ruleset hash, lattice version); replays byte-identical | +| 0.60 | Inputs mostly pinned; non-deterministic ordering tolerated but stable outcome | +| 0.20 | Ephemeral APIs; no snapshot | + +### 2.4 Weight Configuration + +The base trust score is computed as: + +``` +BaseTrust(S) = wP * P + wC * C + wR * R +``` + +**Default weights:** +- `wP = 0.45` (Provenance) +- `wC = 0.35` (Coverage) +- `wR = 0.20` (Replayability) + +Weights are tunable per policy and sum to 1.0. + +--- + +## 3. 
Claim Scoring + +### 3.1 Base Trust Calculation + +```csharp +double BaseTrust(double P, double C, double R, TrustWeights W) + => W.wP * P + W.wC * C + W.wR * R; +``` + +### 3.2 Claim Strength Multipliers (M) + +Each VEX claim carries a strength multiplier based on evidence quality: + +| Strength | Value | Description | +|----------|-------|-------------| +| ExploitabilityWithReachability | 1.00 | Exploitability analysis + reachability proof subgraph provided | +| ConfigWithEvidence | 0.80 | Config/feature-flag reason with evidence | +| VendorBlanket | 0.60 | Vendor blanket statement | +| UnderInvestigation | 0.40 | "Under investigation" | + +### 3.3 Freshness Decay (F) + +Time-decay curve with configurable half-life: + +```csharp +double Freshness(DateTime issuedAt, DateTime cutoff, double halfLifeDays = 90, double floor = 0.35) +{ + var ageDays = (cutoff - issuedAt).TotalDays; + var decay = Math.Exp(-Math.Log(2) * ageDays / halfLifeDays); + return Math.Max(decay, floor); +} +``` + +**Parameters:** +- `halfLifeDays = 90` (default): Score halves every 90 days +- `floor = 0.35` (default): Minimum freshness unless revoked + +### 3.4 ClaimScore Formula + +``` +ClaimScore = BaseTrust(S) * M * F +``` + +**Example calculation:** +``` +Source: Red Hat (Vendor) + P = 0.90, C = 0.75, R = 0.60 + BaseTrust = 0.45*0.90 + 0.35*0.75 + 0.20*0.60 = 0.405 + 0.2625 + 0.12 = 0.7875 + +Claim: ConfigWithEvidence (M = 0.80) +Freshness: 30 days old (F = 0.79) + +ClaimScore = 0.7875 * 0.80 * 0.79 = 0.498 +``` + +--- + +## 4. Lattice Merge Algorithm + +### 4.1 Partial Ordering + +Claims are ordered by a tuple: `(scope_specificity, ClaimScore)`. + +Scope specificity levels: +1. Exact digest match (highest) +2. Exact version match +3. Version range match +4. Product family match +5. 
Platform match (lowest)
+
+### 4.2 Conflict Detection
+
+Conflicts occur when claims for the same (CVE, Asset) have different statuses:
+
+```csharp
+bool HasConflict(IEnumerable<VexClaim> claims)
+    => claims.Select(c => c.Status).Distinct().Count() > 1;
+```
+
+### 4.3 Conflict Penalty
+
+When conflicts exist, apply a penalty to weaker/older claims:
+
+```csharp
+const double ConflictPenalty = 0.25;
+
+if (contradictory)
+{
+    var strongest = claims.OrderByDescending(c => c.Score).First();
+    foreach (var claim in claims.Where(c => c.Status != strongest.Status))
+    {
+        claim.AdjustedScore = claim.Score * (1 - ConflictPenalty);
+    }
+}
+```
+
+### 4.4 Winner Selection
+
+Final verdict is selected by:
+
+```csharp
+var winner = scored
+    .OrderByDescending(x => (x.Claim.ScopeSpecificity, x.AdjustedScore))
+    .First();
+```
+
+### 4.5 Audit Trail Generation
+
+Every merge produces:
+
+```csharp
+public sealed record MergeResult
+{
+    public VexStatus Status { get; init; }
+    public double Confidence { get; init; }
+    public ImmutableArray<string> Explanations { get; init; }
+    public ImmutableArray<string> EvidenceRefs { get; init; }
+    public string PolicyHash { get; init; }
+    public string LatticeVersion { get; init; }
+}
+```
+
+---
+
+## 5. Policy Gates
+
+Gates are evaluated after merge to enforce policy requirements.
+
+### 5.1 MinimumConfidenceGate
+
+Requires minimum confidence by environment for certain statuses.
+
+```yaml
+gates:
+  minimumConfidence:
+    enabled: true
+    thresholds:
+      production: 0.75
+      staging: 0.60
+      development: 0.40
+    applyToStatuses:
+      - not_affected
+      - fixed
+```
+
+**Behavior**: Fails if confidence < threshold for specified statuses.
+
+### 5.2 UnknownsBudgetGate
+
+Limits exposure to unknown/unscored dependencies.
+ +```yaml +gates: + unknownsBudget: + enabled: true + maxUnknownCount: 5 + maxCumulativeUncertainty: 2.0 +``` + +**Behavior**: Fails if: +- `#unknown_deps > maxUnknownCount`, OR +- `sum(1 - ClaimScore) > maxCumulativeUncertainty` + +### 5.3 SourceQuotaGate + +Prevents single-source dominance without corroboration. + +```yaml +gates: + sourceQuota: + enabled: true + maxInfluencePercent: 60 + corroborationDelta: 0.10 +``` + +**Behavior**: Fails if single source influence > 60% AND no second source within delta=0.10. + +### 5.4 ReachabilityRequirementGate + +Requires reachability proof for critical vulnerabilities. + +```yaml +gates: + reachabilityRequirement: + enabled: true + severityThreshold: CRITICAL + requiredForStatuses: + - not_affected + bypassReasons: + - component_not_present +``` + +**Behavior**: Fails if `not_affected` on CRITICAL CVE without reachability proof (unless bypass reason applies). + +--- + +## 6. Deterministic Replay + +### 6.1 Input Pinning + +To guarantee "same inputs → same verdict", pin: + +- SBOM digest(s) +- Vuln feed snapshot IDs +- VEX document digests +- Reachability graph IDs +- Policy file hash +- Lattice version +- Clock cutoff (evaluation timestamp) + +### 6.2 Verdict Manifest + +```json +{ + "manifestId": "verd:tenant:asset:cve:1234567890", + "tenant": "acme-corp", + "assetDigest": "sha256:abc123...", + "vulnerabilityId": "CVE-2025-12345", + "inputs": { + "sbomDigests": ["sha256:..."], + "vulnFeedSnapshotIds": ["nvd:2025-12-22"], + "vexDocumentDigests": ["sha256:..."], + "reachabilityGraphIds": ["graph:..."], + "clockCutoff": "2025-12-22T12:00:00Z" + }, + "result": { + "status": "not_affected", + "confidence": 0.82, + "explanations": [...] + }, + "policyHash": "sha256:...", + "latticeVersion": "1.2.0", + "evaluatedAt": "2025-12-22T12:00:01Z", + "manifestDigest": "sha256:..." 
+} +``` + +### 6.3 Signing + +Verdict manifests are signed using DSSE with predicate type: + +``` +https://stella-ops.org/attestations/vex-verdict/1 +``` + +### 6.4 Replay Verification + +``` +POST /api/v1/authority/verdicts/{manifestId}/replay + +Response: +{ + "success": true, + "originalManifest": {...}, + "replayedManifest": {...}, + "differences": [], + "signatureValid": true +} +``` + +--- + +## 7. Configuration Reference + +### Full Configuration Example + +```yaml +# etc/trust-lattice.yaml +version: "1.0" + +trustLattice: + weights: + provenance: 0.45 + coverage: 0.35 + replayability: 0.20 + + freshness: + halfLifeDays: 90 + floor: 0.35 + + conflictPenalty: 0.25 + + defaults: + vendor: + provenance: 0.90 + coverage: 0.70 + replayability: 0.60 + distro: + provenance: 0.80 + coverage: 0.85 + replayability: 0.60 + internal: + provenance: 0.85 + coverage: 0.95 + replayability: 0.90 + +gates: + minimumConfidence: + enabled: true + thresholds: + production: 0.75 + staging: 0.60 + development: 0.40 + + unknownsBudget: + enabled: true + maxUnknownCount: 5 + maxCumulativeUncertainty: 2.0 + + sourceQuota: + enabled: true + maxInfluencePercent: 60 + corroborationDelta: 0.10 + + reachabilityRequirement: + enabled: true + severityThreshold: CRITICAL +``` + +--- + +## 8. API Reference + +### Endpoints + +| Method | Path | Description | +|--------|------|-------------| +| GET | `/api/v1/excititor/verdicts/{manifestId}` | Get verdict manifest | +| GET | `/api/v1/excititor/verdicts` | List verdicts (paginated) | +| POST | `/api/v1/authority/verdicts/{manifestId}/replay` | Verify replay | +| GET | `/api/v1/authority/verdicts/{manifestId}/download` | Download signed manifest | + +See `docs/09_API_CLI_REFERENCE.md` for complete API documentation. + +--- + +## 9. 
Examples + +### Example 1: High-Confidence Verdict + +**Input:** +- Red Hat VEX: `not_affected` with `component_not_present` +- Ubuntu VEX: `not_affected` with `component_not_present` + +**Calculation:** +``` +Red Hat: BaseTrust=0.78, M=0.80, F=0.95 → ClaimScore=0.59 +Ubuntu: BaseTrust=0.72, M=0.80, F=0.90 → ClaimScore=0.52 + +No conflict (both agree) +Winner: Red Hat (higher score) +Confidence: 0.59 +Gates: All pass (> 0.40 threshold) +``` + +### Example 2: Conflict Resolution + +**Input:** +- Vendor VEX: `not_affected` +- Internal scan: `affected` + +**Calculation:** +``` +Vendor: ClaimScore=0.65 +Internal: ClaimScore=0.55 + +Conflict detected → penalty applied +Internal adjusted: 0.55 * 0.75 = 0.41 + +Winner: Vendor +Confidence: 0.65 +Note: Conflict recorded in audit trail +``` + +--- + +## Related Documentation + +- [Excititor Architecture](./architecture.md) +- [Verdict Manifest Specification](../authority/verdict-manifest.md) +- [Policy Gates Configuration](../policy/architecture.md) +- [API Reference](../../09_API_CLI_REFERENCE.md) diff --git a/docs/modules/platform/explainable-triage-implementation-plan.md b/docs/modules/platform/explainable-triage-implementation-plan.md new file mode 100644 index 000000000..9393812b1 --- /dev/null +++ b/docs/modules/platform/explainable-triage-implementation-plan.md @@ -0,0 +1,212 @@ +# Explainable Triage Workflows - Implementation Plan + +## Executive Summary + +This document outlines the implementation plan for delivering **Explainable Triage Workflows** as defined in the product advisory dated 21-Dec-2025. The capability set enables vulnerability-first, policy-backed, reachability-informed verdicts with full explainability and auditability. + +## Vision + +> Every vulnerability finding must resolve to a **policy-backed, reachability-informed, runtime-corroborated verdict** that is **exportable as one signed attestation attached to the built artifact**. 
+ +## Current State Analysis + +### Already Implemented (75%) + +| Capability | Implementation | Completeness | +|------------|----------------|--------------| +| Reachability analysis | 10 language analyzers, binary, runtime | 95% | +| VEX processing | OpenVEX, CSAF, CycloneDX with lattice | 90% | +| Explainability | ExplainTrace with rule steps | 95% | +| Evidence generation | Path witnesses, rich graphs | 90% | +| Audit trails | Immutable ledger with chain integrity | 85% | +| Policy gates | 4-stage gate system | 95% | +| Attestations | 7 predicate types with DSSE | 90% | +| Runtime capture | eBPF, dyld, ETW | 85% | + +### Already Planned (15%) + +| Capability | Sprint | Status | +|------------|--------|--------| +| Risk Verdict Attestation | SPRINT_4100_0003_0001 | TODO | +| OCI Attachment | SPRINT_4100_0003_0002 | TODO | +| Counterfactuals | SPRINT_4200_0002_0005 | TODO | +| Replay Engine | SPRINT_4100_0002_0002 | TODO | +| Knowledge Snapshot | SPRINT_4100_0002_0001 | TODO | +| Audit Pack Export | SPRINT_5100_0006_0001 | TODO | +| Unknown Budgets | SPRINT_4100_0001_0002 | TODO | + +### Net New Gaps (10%) + +| Gap | Sprint | Story Points | +|-----|--------|--------------| +| Unified Confidence Model | 7000.0002.0001 | 13 | +| Vulnerability-First UX API | 7000.0002.0002 | 13 | +| Evidence Graph API | 7000.0003.0001 | 8 | +| Reachability Mini-Map | 7000.0003.0002 | 5 | +| Runtime Timeline | 7000.0003.0003 | 5 | +| Progressive Fidelity | 7000.0004.0001 | 13 | +| Evidence Size Budgets | 7000.0004.0002 | 8 | +| Quality KPIs | 7000.0005.0001 | 8 | + +## Implementation Roadmap + +### Phase 1: Foundation (Existing + New) + +**Objective**: Establish core verdict and confidence infrastructure. 
+ +**Sprints**: +- SPRINT_4100_0003_0001: Risk Verdict Attestation +- SPRINT_4100_0002_0001: Knowledge Snapshot Manifest +- SPRINT_7000_0002_0001: Unified Confidence Model +- SPRINT_7000_0004_0001: Progressive Fidelity (parallel) +- SPRINT_7000_0004_0002: Evidence Budgets (parallel) + +**Key Deliverables**: +- `RiskVerdictAttestation` model with PASS/FAIL/PASS_WITH_EXCEPTIONS/INDETERMINATE +- `ConfidenceScore` with 5-factor breakdown +- `FidelityLevel` enum with Quick/Standard/Deep modes +- `EvidenceBudget` with retention tiers + +### Phase 2: UX Layer + +**Objective**: Deliver vulnerability-first presentation layer. + +**Sprints**: +- SPRINT_7000_0002_0002: Vulnerability-First UX API + +**Key Deliverables**: +- `FindingSummaryResponse` with verdict chip, confidence, one-liner +- `ProofBadges` (Reachability, Runtime, Policy, Provenance) +- `GET /api/v1/findings` list endpoint +- `GET /api/v1/findings/{id}/summary` detail endpoint + +### Phase 3: Visualization APIs + +**Objective**: Enable evidence exploration and click-through. + +**Sprints** (parallelizable): +- SPRINT_7000_0003_0001: Evidence Graph API +- SPRINT_7000_0003_0002: Reachability Mini-Map API +- SPRINT_7000_0003_0003: Runtime Timeline API + +**Key Deliverables**: +- `GET /api/v1/findings/{id}/evidence-graph` +- `GET /api/v1/findings/{id}/reachability-map` +- `GET /api/v1/findings/{id}/runtime-timeline` + +### Phase 4: Metrics & Observability + +**Objective**: Track quality KPIs for continuous improvement. 
+ +**Sprints**: +- SPRINT_7000_0005_0001: Quality KPIs Tracking + +**Key Deliverables**: +- `TriageQualityKpis` model +- `GET /api/v1/metrics/kpis` dashboard endpoint +- Trend tracking over time + +## Architecture Changes + +### New Libraries + +``` +src/ +├── Policy/ +│ └── __Libraries/ +│ └── StellaOps.Policy.Confidence/ # NEW: Confidence model +│ ├── Models/ +│ ├── Services/ +│ └── Configuration/ +├── Scanner/ +│ └── __Libraries/ +│ └── StellaOps.Scanner.Orchestration/ # NEW: Fidelity orchestration +│ └── Fidelity/ +├── Findings/ +│ └── StellaOps.Findings.WebService/ # EXTEND: UX APIs +│ ├── Contracts/ +│ ├── Services/ +│ └── Endpoints/ +├── Evidence/ # NEW: Evidence management +│ └── StellaOps.Evidence/ +│ ├── Budgets/ +│ └── Retention/ +└── Metrics/ # NEW: KPI tracking + └── StellaOps.Metrics/ + └── Kpi/ +``` + +### Database Changes + +| Table | Purpose | +|-------|---------| +| `confidence_factors` | Store factor breakdown per verdict | +| `evidence_items` | Track evidence with size and tier | +| `kpi_counters` | Real-time KPI counters | +| `kpi_snapshots` | Daily KPI snapshots | + +### API Surface + +| Endpoint | Method | Purpose | +|----------|--------|---------| +| `/api/v1/findings` | GET | List findings with summaries | +| `/api/v1/findings/{id}/summary` | GET | Detailed finding summary | +| `/api/v1/findings/{id}/evidence-graph` | GET | Evidence graph | +| `/api/v1/findings/{id}/reachability-map` | GET | Reachability mini-map | +| `/api/v1/findings/{id}/runtime-timeline` | GET | Runtime timeline | +| `/api/v1/scan/analyze` | POST | Analyze with fidelity level | +| `/api/v1/scan/findings/{id}/upgrade` | POST | Upgrade fidelity | +| `/api/v1/metrics/kpis` | GET | Quality KPIs | + +## Non-Negotiables + +From the advisory: + +1. **Vulnerability-first UX**: Users start from CVE/finding and immediately see applicability, reachability, runtime corroboration, and policy rationale. + +2. 
**Single canonical verdict artifact**: One built-in, signed verdict attestation per subject (OCI digest), replayable. + +3. **Deterministic evidence**: Evidence objects are content-hashed and versioned. + +4. **Unknowns are first-class**: "Unknown reachability/runtime/config" is not hidden; it is budgeted and policy-controlled. + +## Quality KPIs + +| KPI | Target | Measurement | +|-----|--------|-------------| +| % non-UNKNOWN reachability | >80% | Weekly | +| % runtime corroboration | >50% (where sensor deployed) | Weekly | +| Explainability completeness | >95% | Weekly | +| Replay success rate | >99% | Weekly | +| Median time to verdict | <5 min | Daily | + +## Risk Management + +| Risk | Impact | Mitigation | +|------|--------|------------| +| Confidence model complexity | High | Start simple (3 factors), iterate | +| Deep analysis performance | Medium | Progressive fidelity with timeouts | +| Evidence storage growth | Medium | Budget enforcement + tier pruning | +| API backward compatibility | Low | Versioned endpoints | + +## Definition of Done + +Per advisory, a release is "done" only if: + +- [ ] Build produces OCI artifact with attached **signed verdict attestation** +- [ ] Each verdict is **explainable** (reason steps + proof pointers) +- [ ] Reachability evidence stored as **reproducible subgraph** (or explicitly UNKNOWN with reason) +- [ ] Replay verification reproduces same verdict with pinned inputs +- [ ] UX starts from vulnerabilities and links directly to proofs and audit export + +## References + +- **Advisory**: `docs/product-advisories/archived/21-Dec-2025 - Designing Explainable Triage Workflows.md` +- **Sprint Summary**: `docs/implplan/SPRINT_7000_SUMMARY.md` +- **Individual Sprints**: `docs/implplan/SPRINT_7000_*.md` + +## Revision History + +| Date | Change | Author | +|------|--------|--------| +| 2025-12-22 | Initial implementation plan | Claude | diff --git a/docs/modules/platform/moat-gap-analysis.md 
b/docs/modules/platform/moat-gap-analysis.md new file mode 100644 index 000000000..d6ffef7a8 --- /dev/null +++ b/docs/modules/platform/moat-gap-analysis.md @@ -0,0 +1,276 @@ +# Moat Gap Analysis: StellaOps Competitive Position + +> **Source Advisory**: 19-Dec-2025 - Stella Ops candidate features mapped to moat strength +> **Analysis Date**: 2025-12-22 +> **Status**: Sprints created, implementation pending + +--- + +## Executive Summary + +This document captures the gap analysis between the competitive moat advisory and StellaOps' current implementation, along with the sprint plan to address identified gaps. + +### Moat Scale Reference + +| Rating | Definition | +|--------|------------| +| **5** | Structural moat — new primitives, strong defensibility, durable switching cost | +| **4** | Strong moat — difficult multi-domain engineering; incumbents have partial analogs | +| **3** | Moderate moat — others can build; differentiation is execution + packaging | +| **2** | Weak moat — table-stakes soon; limited defensibility | +| **1** | Commodity — widely available in OSS / easy to replicate | + +--- + +## Feature Implementation Matrix + +| Feature | Moat | Current % | Key Gaps | Sprint Coverage | +|---------|------|-----------|----------|-----------------| +| Signed, replayable risk verdicts | 5 | 70% | OCI push, one-command replay | 4300_0001_* | +| VEX decisioning engine | 4 | 85% | Evidence hooks | Minimal | +| Reachability with proof | 4 | 75% | Standalone artifact | 4400_0001_0002 | +| Smart-Diff semantic delta | 4 | 80% | Signed delta verdict | 4400_0001_0001 | +| Unknowns as first-class state | 4 | 75% | Policy budgets, attestations | 4300_0002_* | +| Air-gapped epistemic mode | 4 | 70% | Sealed snapshot workflow | 4300_0003_0001 | +| SBOM ledger + lineage | 3 | 60% | Historical tracking, BYOS | 4600_0001_* | +| Policy engine with proofs | 3 | 85% | Compilation to artifact | Minimal | +| VEX distribution network | 3-4 | 30% | Hub layer entirely | 4500_0001_* | + 
+--- + +## Detailed Gap Analysis + +### 1. Signed, Replayable Risk Verdicts (Moat 5) + +**What exists:** +- `VerdictReceiptStatement` with in-toto predicate +- `ProofSpine` and `ProofChainBuilder` infrastructure +- `TrustLatticeEngine.Evaluate()` producing `ProofBundle` +- `ReplayManifest` and `ReplayVerifier` +- Input hashing (sbomDigest, feedsDigest, policyDigest) + +**Gaps:** +| Gap | Sprint | +|-----|--------| +| Verdict as OCI-attached attestation | 4300_0001_0001 | +| One-command audit replay CLI | 4300_0001_0002 | +| Formal replay determinism tests | 4300_0001_0002 | + +**Moat Thesis**: "We don't output findings; we output an attestable decision that can be replayed." + +--- + +### 2. VEX Decisioning Engine (Moat 4) + +**What exists:** +- `VexConsensusEngine` with 5 modes +- `TrustLatticeEngine` with K4 lattice atoms +- `TrustWeightEngine` for issuer weighting +- VEX normalizers for CycloneDX, OpenVEX, CSAF +- `VexLens` module with consensus rationale + +**Gaps:** +| Gap | Sprint | +|-----|--------| +| Configurable evidence hooks | Minor enhancement | + +**Moat Thesis**: "We treat VEX as a logical claim system, not a suppression file." + +--- + +### 3. Reachability with Proof (Moat 4) + +**What exists:** +- `ReachabilityWitnessStatement` attestation type +- `PathWitnessBuilder` for call-path proofs +- `CallPath` models with entrypoint → symbol chain +- `ReachabilityLattice` for state management +- `CompositeGateDetector` for boundary extraction + +**Gaps:** +| Gap | Sprint | +|-----|--------| +| Standalone reachability subgraph as OCI artifact | 4400_0001_0002 | +| Binary-level reachability proof | 6000_* (existing) | + +**Moat Thesis**: "We provide proof of exploitability in *this* artifact, not just a badge." + +--- + +### 4. 
Smart-Diff Semantic Risk Delta (Moat 4) + +**What exists:** +- `MaterialRiskChangeDetector` with R1-R4 rules +- `RiskStateSnapshot` capturing full finding state +- Detection of all flip types +- Priority scoring algorithm +- SARIF output generation + +**Gaps:** +| Gap | Sprint | +|-----|--------| +| Signed delta verdict attestation | 4400_0001_0001 | +| Diff over reachability graphs | Future | + +**Moat Thesis**: "We explain what changed in exploitable surface area, not what changed in CVE count." + +--- + +### 5. Unknowns as First-Class State (Moat 4) + +**What exists:** +- `UncertaintyTier` (T1-T4) with entropy classification +- `UnknownStateLedger` tracking marker kinds +- Risk modifiers from uncertainty +- `BlocksNotAffected()` gate on T1 tier + +**Gaps:** +| Gap | Sprint | +|-----|--------| +| Policy rule: "fail if unknowns > N" | 4300_0002_0001 | +| Unknown budgets with decay | 4100_0001_0002 (existing) | +| Unknowns in attestations | 4300_0002_0002 | + +**Moat Thesis**: "We quantify uncertainty and gate on it." + +--- + +### 6. Air-Gapped Epistemic Mode (Moat 4) + +**What exists:** +- `AirGap.Controller` with state management +- `ReplayVerifier` with depth levels +- `TrustStore` and `TufMetadataValidator` +- `EgressPolicy` enforcement +- `TimeAnchor` for offline time validation + +**Gaps:** +| Gap | Sprint | +|-----|--------| +| Sealed knowledge snapshot export CLI | 4300_0003_0001 | +| One-command import + replay validation | 4300_0003_0001 | +| Feed snapshot versioning with merkle roots | 4300_0003_0001 | + +**Moat Thesis**: Air-gapped "runtime" is common; air-gapped **reproducibility** is not. + +--- + +### 7. 
SBOM Ledger + Lineage (Moat 3) + +**What exists:** +- `SbomService` with versioning events +- `CatalogRecord` for storage +- `Graph` module for dependency indexing +- `SbomVersionEvents` + +**Gaps:** +| Gap | Sprint | +|-----|--------| +| Historical SBOM tracking with diff lineage | 4600_0001_0001 | +| BYOS ingestion workflow with validation | 4600_0001_0002 | +| SBOM grouping by artifact family | 4600_0001_0001 | + +**Moat Strategy**: Make the ledger valuable via **semantic diff, evidence joins, and provenance**. + +--- + +### 8. Policy Engine with Proofs (Moat 3) + +**What exists:** +- `PolicyEvaluation` with `PolicyExplanation` +- OPA/Rego integration +- `ProofBundle` generation from TrustLattice +- Evidence pointers in verdict statements + +**Gaps:** +| Gap | Sprint | +|-----|--------| +| Policy compilation to standalone decision artifact | Minor enhancement | + +**Moat Strategy**: Keep policy language small but rigorous; always emit evidence pointers. + +--- + +### 9. VEX Distribution Network (Moat 3-4) + +**What exists:** +- Excititor ingests from 7+ VEX sources +- `VexConnectorMetadata` for source tracking + +**Gaps:** +| Gap | Sprint | +|-----|--------| +| VEX Hub aggregation layer | 4500_0001_0001 | +| Trust scoring of VEX sources | 4500_0001_0002 | +| VEX verification + validation pipeline | 4500_0001_0001 | +| API for VEX discovery/subscription | 4500_0001_0001 | + +**Moat Strategy**: Differentiate with **verification + trust scoring** of VEX sources. 
+ +--- + +## Sprint Roadmap + +### Phase 1: Moat 5 Anchor (P0) +``` +4300_0001_0001 → 4300_0001_0002 + │ + └── Verdict becomes portable, replayable +``` + +### Phase 2: Moat 4 Hardening (P1) +``` +4300_0002_0001 → 4300_0002_0002 + │ + └── Unknowns become actionable + +4300_0003_0001 + │ + └── Air-gap becomes reproducible + +4500_0001_0001 → 4500_0001_0002 + │ + └── VEX becomes distributable +``` + +### Phase 3: Moat 4 Extensions (P2) +``` +4400_0001_0001 (Delta Verdict) +4400_0001_0002 (Reachability Artifact) +``` + +### Phase 4: Moat 3 Foundation (P2) +``` +4600_0001_0001 → 4600_0001_0002 + │ + └── SBOM becomes historical +``` + +--- + +## Competitive Positioning Summary + +### Where StellaOps Is Strong +1. **VEX decisioning** — Multi-mode consensus engine is ahead of competitors +2. **Smart-Diff** — R1-R4 rules with priority scoring is unique +3. **Policy engine** — OPA/Rego with proof output is mature +4. **Attestor** — in-toto/DSSE infrastructure is complete + +### Where StellaOps Must Improve +1. **Verdict portability** — OCI push makes verdicts first-class artifacts +2. **Audit replay** — One-command replay is essential for compliance +3. **VEX distribution** — Hub layer creates network effects +4. 
**Unknown governance** — Policy budgets make uncertainty actionable + +### Avoid Head-On Fights +- **Snyk**: Don't compete on developer UX; compete on proof-carrying reachability +- **Prisma**: Don't compete on CNAPP breadth; compete on decision integrity +- **Anchore**: Don't compete on SBOM storage; compete on semantic diff + VEX reasoning + +--- + +## References + +- **Sprints**: `docs/implplan/SPRINT_4300_*.md`, `SPRINT_4400_*.md`, `SPRINT_4500_*.md`, `SPRINT_4600_*.md` +- **Original Advisory**: `docs/product-advisories/archived/19-Dec-2025 - Stella Ops candidate features mapped to moat strength.md` +- **Architecture**: `docs/07_HIGH_LEVEL_ARCHITECTURE.md` diff --git a/docs/modules/scanner/reachability-drift.md b/docs/modules/scanner/reachability-drift.md new file mode 100644 index 000000000..ebc707e3d --- /dev/null +++ b/docs/modules/scanner/reachability-drift.md @@ -0,0 +1,371 @@ +# Reachability Drift Detection - Architecture + +**Module:** Scanner +**Version:** 1.0 +**Status:** Implemented (Sprint 3600.2-3600.3) +**Last Updated:** 2025-12-22 + +--- + +## 1. Overview + +Reachability Drift Detection tracks function-level reachability changes between scans to identify when code modifications create new paths to vulnerable sinks or mitigate existing risks. This enables security teams to: + +- **Detect regressions** when previously unreachable vulnerabilities become exploitable +- **Validate fixes** by confirming vulnerable code paths are removed +- **Prioritize triage** based on actual exploitability rather than theoretical risk +- **Automate VEX** by generating evidence-backed justifications + +--- + +## 2. 
Key Concepts + +### 2.1 Call Graph + +A directed graph representing function/method call relationships in source code: + +- **Nodes**: Functions, methods, lambdas with metadata (file, line, visibility) +- **Edges**: Call relationships with call kind (direct, virtual, delegate, reflection, dynamic) +- **Entrypoints**: Public-facing functions (HTTP handlers, CLI commands, message consumers) +- **Sinks**: Security-sensitive APIs (command execution, SQL, file I/O, deserialization) + +### 2.2 Reachability Analysis + +Multi-source BFS traversal from entrypoints to determine which sinks are exploitable: + +``` +Entrypoints (HTTP handlers, CLI) + │ + ▼ BFS traversal + [Application Code] + │ + ▼ + Sinks (exec, query, writeFile) + │ + ▼ + Reachable = TRUE if path exists +``` + +### 2.3 Drift Detection + +Compares reachability between two scans (base vs head): + +| Transition | Direction | Risk Impact | +|------------|-----------|-------------| +| Unreachable → Reachable | `became_reachable` | **Increased** - New exploit path | +| Reachable → Unreachable | `became_unreachable` | **Decreased** - Mitigation applied | + +### 2.4 Cause Attribution + +Explains *why* drift occurred by correlating with code changes: + +| Cause Kind | Description | Example | +|------------|-------------|---------| +| `guard_removed` | Conditional check removed | `if (!authorized)` deleted | +| `guard_added` | New conditional blocks path | Added null check | +| `new_public_route` | New entrypoint created | Added `/api/admin` endpoint | +| `visibility_escalated` | Internal → Public | Method made public | +| `dependency_upgraded` | Library update changed behavior | lodash 4.x → 5.x | +| `symbol_removed` | Function deleted | Removed vulnerable helper | +| `unknown` | Cannot determine | Multiple simultaneous changes | + +--- + +## 3. 
Data Flow
+
+```mermaid
+flowchart TD
+    subgraph Scan["Scan Execution"]
+        A[Source Code] --> B[Call Graph Extractor]
+        B --> C[CallGraphSnapshot]
+    end
+
+    subgraph Analysis["Drift Analysis"]
+        C --> D[Reachability Analyzer]
+        D --> E[ReachabilityResult]
+
+        F[Base Scan Graph] --> G[Drift Detector]
+        E --> G
+        H[Code Changes] --> G
+        G --> I[ReachabilityDriftResult]
+    end
+
+    subgraph Output["Output"]
+        I --> J[Path Compressor]
+        J --> K[Compressed Paths]
+        I --> L[Cause Explainer]
+        L --> M[Drift Causes]
+
+        K --> N[Storage/API]
+        M --> N
+    end
+
+    subgraph Integration["Integration"]
+        N --> O[Policy Gates]
+        N --> P[VEX Emission]
+        N --> Q[Web UI]
+    end
+```
+
+---
+
+## 4. Component Architecture
+
+### 4.1 Call Graph Extractors
+
+Per-language AST analysis producing `CallGraphSnapshot`:
+
+| Language | Extractor | Technology | Status |
+|----------|-----------|------------|--------|
+| .NET | `DotNetCallGraphExtractor` | Roslyn semantic model | **Done** |
+| Java | `JavaCallGraphExtractor` | ASM bytecode analysis | **Done** |
+| Go | `GoCallGraphExtractor` | golang.org/x/tools SSA | **Done** |
+| Python | `PythonCallGraphExtractor` | Python AST | **Done** |
+| Node.js | `NodeCallGraphExtractor` | Babel (planned) | Skeleton |
+| PHP | `PhpCallGraphExtractor` | php-parser | **Done** |
+| Ruby | `RubyCallGraphExtractor` | parser gem | **Done** |
+
+**Location:** `src/Scanner/__Libraries/StellaOps.Scanner.CallGraph/Extraction/`
+
+### 4.2 Reachability Analyzer
+
+Multi-source BFS from entrypoints to sinks:
+
+```csharp
+public sealed class ReachabilityAnalyzer
+{
+    public ReachabilityResult Analyze(CallGraphSnapshot graph);
+}
+
+public record ReachabilityResult
+{
+    ImmutableHashSet<string> ReachableNodes { get; }
+    ImmutableArray<string> ReachableSinks { get; }
+    ImmutableDictionary<string, ImmutableArray<string>> ShortestPaths { get; }
+}
+```
+
+**Location:** `src/Scanner/__Libraries/StellaOps.Scanner.CallGraph/Analysis/`
+
+### 4.3 Drift Detector
+
+Compares base and head graphs:
+
+```csharp
+public sealed
class ReachabilityDriftDetector
+{
+    public ReachabilityDriftResult Detect(
+        CallGraphSnapshot baseGraph,
+        CallGraphSnapshot headGraph,
+        IReadOnlyList<CodeChange> codeChanges);
+}
+```
+
+**Location:** `src/Scanner/__Libraries/StellaOps.Scanner.ReachabilityDrift/Services/`
+
+### 4.4 Path Compressor
+
+Reduces full paths to key nodes for storage/display:
+
+```
+Full Path (20 nodes):
+  entrypoint → A → B → C → ... → X → Y → sink
+
+Compressed Path:
+  entrypoint → [changed: B] → [changed: X] → sink
+  (intermediateCount: 17)
+```
+
+**Location:** `src/Scanner/__Libraries/StellaOps.Scanner.ReachabilityDrift/Services/PathCompressor.cs`
+
+### 4.5 Cause Explainer
+
+Correlates drift with code changes:
+
+```csharp
+public sealed class DriftCauseExplainer
+{
+    public DriftCause Explain(...);
+    public DriftCause ExplainUnreachable(...);
+}
+```
+
+**Location:** `src/Scanner/__Libraries/StellaOps.Scanner.ReachabilityDrift/Services/DriftCauseExplainer.cs`
+
+---
+
+## 5. Language Support Matrix
+
+| Feature | .NET | Java | Go | Python | Node.js | PHP | Ruby |
+|---------|------|------|-------|--------|---------|-----|------|
+| Function extraction | Yes | Yes | Yes | Yes | Partial | Yes | Yes |
+| Call edge extraction | Yes | Yes | Yes | Yes | Partial | Yes | Yes |
+| HTTP entrypoints | ASP.NET | Spring | net/http | Flask/Django | Express* | Laravel | Rails |
+| gRPC entrypoints | Yes | Yes | Yes | Yes | No | No | No |
+| CLI entrypoints | Yes | Yes | Yes | Yes | Partial | Yes | Yes |
+| Sink detection | Yes | Yes | Yes | Yes | Partial | Yes | Yes |
+
+*Requires Sprint 3600.4 completion
+
+---
+
+## 6. 
Storage Schema + +### 6.1 PostgreSQL Tables + +**call_graph_snapshots:** +```sql +CREATE TABLE call_graph_snapshots ( + id UUID PRIMARY KEY, + tenant_id UUID NOT NULL, + scan_id TEXT NOT NULL, + language TEXT NOT NULL, + graph_digest TEXT NOT NULL, + node_count INT NOT NULL, + edge_count INT NOT NULL, + entrypoint_count INT NOT NULL, + sink_count INT NOT NULL, + extracted_at TIMESTAMPTZ NOT NULL, + snapshot_json JSONB NOT NULL +); +``` + +**reachability_drift_results:** +```sql +CREATE TABLE reachability_drift_results ( + id UUID PRIMARY KEY, + tenant_id UUID NOT NULL, + base_scan_id TEXT NOT NULL, + head_scan_id TEXT NOT NULL, + language TEXT NOT NULL, + newly_reachable_count INT NOT NULL, + newly_unreachable_count INT NOT NULL, + detected_at TIMESTAMPTZ NOT NULL, + result_digest TEXT NOT NULL +); +``` + +**drifted_sinks:** +```sql +CREATE TABLE drifted_sinks ( + id UUID PRIMARY KEY, + tenant_id UUID NOT NULL, + drift_result_id UUID NOT NULL REFERENCES reachability_drift_results(id), + sink_node_id TEXT NOT NULL, + symbol TEXT NOT NULL, + sink_category TEXT NOT NULL, + direction TEXT NOT NULL, + cause_kind TEXT NOT NULL, + cause_description TEXT NOT NULL, + compressed_path JSONB NOT NULL, + associated_vulns JSONB +); +``` + +**code_changes:** +```sql +CREATE TABLE code_changes ( + id UUID PRIMARY KEY, + tenant_id UUID NOT NULL, + scan_id TEXT NOT NULL, + base_scan_id TEXT NOT NULL, + language TEXT NOT NULL, + file TEXT NOT NULL, + symbol TEXT NOT NULL, + change_kind TEXT NOT NULL, + details JSONB, + detected_at TIMESTAMPTZ NOT NULL +); +``` + +### 6.2 Valkey Caching + +``` +stella:callgraph:{scan_id}:{lang}:{digest} → Compressed CallGraphSnapshot +stella:callgraph:{scan_id}:{lang}:reachable → Set of reachable sink IDs +stella:callgraph:{scan_id}:{lang}:paths:{sink} → Shortest path to sink +``` + +TTL: Configurable (default 24h) +Circuit breaker: 5 failures → 30s timeout + +--- + +## 7. 
API Endpoints + +| Method | Path | Description | +|--------|------|-------------| +| GET | `/scans/{scanId}/drift` | Get drift results for a scan | +| GET | `/drift/{driftId}/sinks` | List drifted sinks (paginated) | +| POST | `/scans/{scanId}/compute-reachability` | Trigger reachability computation | +| GET | `/scans/{scanId}/reachability/components` | List components with reachability | +| GET | `/scans/{scanId}/reachability/findings` | Get reachable vulnerable sinks | +| GET | `/scans/{scanId}/reachability/explain` | Explain why a sink is reachable | + +See: `docs/api/scanner-drift-api.md` + +--- + +## 8. Integration Points + +### 8.1 Policy Module + +Drift results feed into policy gates for CI/CD blocking: + +```yaml +smart_diff: + gates: + - condition: "delta_reachable > 0 AND is_kev = true" + action: block +``` + +### 8.2 VEX Emission + +Automatic VEX candidate generation on drift: + +| Drift Direction | VEX Status | Justification | +|-----------------|------------|---------------| +| became_unreachable | `not_affected` | `vulnerable_code_not_in_execute_path` | +| became_reachable | — | Requires manual review | + +### 8.3 Attestation + +DSSE-signed drift attestations: + +```json +{ + "_type": "https://in-toto.io/Statement/v1", + "predicateType": "stellaops.dev/predicates/reachability-drift@v1", + "predicate": { + "baseScanId": "abc123", + "headScanId": "def456", + "newlyReachable": [...], + "newlyUnreachable": [...], + "resultDigest": "sha256:..." + } +} +``` + +--- + +## 9. Performance Characteristics + +| Metric | Target | Notes | +|--------|--------|-------| +| Graph extraction (100K LOC) | < 60s | Per language | +| Reachability analysis | < 5s | BFS traversal | +| Drift detection | < 10s | Graph comparison | +| Memory usage | < 2GB | Large projects | +| Cache hit improvement | 10x | Valkey lookup vs recompute | + +--- + +## 10. 
References + +- **Implementation Sprints:** + - `docs/implplan/SPRINT_3600_0002_0001_call_graph_infrastructure.md` + - `docs/implplan/SPRINT_3600_0003_0001_drift_detection_engine.md` +- **API Reference:** `docs/api/scanner-drift-api.md` +- **Operations Guide:** `docs/operations/reachability-drift-guide.md` +- **Original Advisory:** `docs/product-advisories/archived/17-Dec-2025 - Reachability Drift Detection.md` +- **Source Code:** `src/Scanner/__Libraries/StellaOps.Scanner.ReachabilityDrift/` diff --git a/docs/modules/web/smart-diff-ui-architecture.md b/docs/modules/web/smart-diff-ui-architecture.md new file mode 100644 index 000000000..4574ff1ca --- /dev/null +++ b/docs/modules/web/smart-diff-ui-architecture.md @@ -0,0 +1,358 @@ +# Smart-Diff UI Architecture + +**Version:** 1.0 +**Status:** Draft +**Last Updated:** 2025-12-22 +**Sprint Reference:** SPRINT_4200_0002_0003 + +## Overview + +The Smart-Diff UI provides a dedicated comparison experience for analyzing material risk changes between container image versions. It implements a "diff-first" approach to vulnerability triage, enabling users to focus on what changed rather than reviewing entire vulnerability lists. + +## Design Principles + +### 1. Diff-First Triage +The primary question in any release is: *"What changed that affects risk?"* The UI defaults to showing delta information rather than full vulnerability lists. + +### 2. Proof-Carrying Evidence +Every verdict and comparison includes cryptographic evidence. Users can verify determinism, trace decisions to policy rules, and replay computations. + +### 3. Baseline Transparency +Comparisons require explicit baselines with auditor-friendly rationale. The system never uses "magic" to select baselines without explanation. + +### 4. Role-Based Defaults +Different personas (Developer, Security, Audit) see different default views while retaining access to all information. 
+ +## Component Architecture + +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ SMART-DIFF UI ARCHITECTURE │ +├─────────────────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────────────────────────────────────────────────────────────┐ │ +│ │ COMPARE VIEW CONTAINER │ │ +│ │ ┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐ │ │ +│ │ │ Baseline │ │ Trust │ │ Export │ │ │ +│ │ │ Selector │ │ Indicators │ │ Actions │ │ │ +│ │ └──────────────────┘ └──────────────────┘ └──────────────────┘ │ │ +│ │ │ │ │ │ │ +│ │ ┌────────────────────────────────────────────────────────────────┐ │ │ +│ │ │ DELTA SUMMARY STRIP │ │ │ +│ │ │ [+N added] [-N removed] [~N changed] [Policy: v1.2] [Feed: 2h]│ │ │ +│ │ └────────────────────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ ┌───────────────────────────────────────────────────────────────┐ │ │ +│ │ │ THREE-PANE LAYOUT │ │ │ +│ │ │ ┌──────────┐ ┌────────────────┐ ┌────────────────────────┐ │ │ │ +│ │ │ │Categories│ │ Items │ │ Proof Panel │ │ │ │ +│ │ │ │ │ │ │ │ │ │ │ │ +│ │ │ │ ● SBOM │ │ CVE-2024-1234 │ │ ┌────────────────────┐ │ │ │ │ +│ │ │ │ ● Reach │ │ lodash@4.17.20 │ │ │ Witness Path │ │ │ │ │ +│ │ │ │ ● VEX │ │ +reachable │ │ │ main() → parse() │ │ │ │ │ +│ │ │ │ ● Policy │ │ Priority: 0.85 │ │ │ → vuln_func() │ │ │ │ │ +│ │ │ │ ● Unknwn │ │ │ │ └────────────────────┘ │ │ │ │ +│ │ │ │ │ │ CVE-2024-5678 │ │ ┌────────────────────┐ │ │ │ │ +│ │ │ │ │ │ requests@2.28 │ │ │ VEX Merge │ │ │ │ │ +│ │ │ │ │ │ +KEV │ │ │ vendor: affected │ │ │ │ │ +│ │ │ │ │ │ Priority: 0.95 │ │ │ distro: not_aff │ │ │ │ │ +│ │ │ │ │ │ │ │ │ → Result: affected │ │ │ │ │ +│ │ │ │ │ │ │ │ └────────────────────┘ │ │ │ │ +│ │ │ └──────────┘ └────────────────┘ └────────────────────────┘ │ │ │ +│ │ └───────────────────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ ┌────────────────────────────────────────────────────────────────┐ │ │ +│ │ 
│ ACTIONABLES PANEL │ │ │ +│ │ │ ┌─────────────────────────────────────────────────────────┐ │ │ │ +│ │ │ │ What to do next: │ │ │ │ +│ │ │ │ 1. [CRITICAL] Upgrade lodash → 4.17.21 │ │ │ │ +│ │ │ │ 2. [HIGH] Add VEX statement for urllib3 (not affected) │ │ │ │ +│ │ │ │ 3. [MEDIUM] Resolve unknown: missing SBOM for module A │ │ │ │ +│ │ │ └─────────────────────────────────────────────────────────┘ │ │ │ +│ │ └────────────────────────────────────────────────────────────────┘ │ │ +│ └──────────────────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────────────────┘ +``` + +## Component Hierarchy + +``` +CompareViewComponent +├── BaselineSelectorComponent +│ └── BaselineRationaleComponent +├── TrustIndicatorsComponent +│ ├── DeterminismHashDisplay +│ ├── PolicyVersionDisplay +│ ├── FeedSnapshotDisplay +│ ├── SignatureStatusDisplay +│ └── PolicyDriftIndicator +├── DeltaSummaryStripComponent +├── ThreePaneLayoutComponent +│ ├── CategoriesPaneComponent +│ ├── ItemsPaneComponent +│ └── ProofPaneComponent +│ ├── WitnessPathComponent +│ ├── VexMergeExplanationComponent +│ └── EnvelopeHashesComponent +├── ActionablesPanelComponent +└── ExportActionsComponent +``` + +## State Management + +### Signals-Based State + +The compare view uses Angular signals for reactive state management: + +```typescript +// Core state +currentTarget = signal(null); +baselineTarget = signal(null); +delta = signal(null); + +// UI state +selectedCategory = signal(null); +selectedItem = signal(null); +viewMode = signal<'side-by-side' | 'unified'>('side-by-side'); +userRole = signal<'developer' | 'security' | 'audit'>('developer'); + +// Computed state +filteredItems = computed(() => { + const cat = this.selectedCategory(); + const items = this.delta()?.Items ?? []; + return cat ? 
items.filter(i => i.category === cat) : items; +}); + +deltaSummary = computed(() => this.delta()?.Summary); +trustIndicators = computed(() => this.delta()?.TrustIndicators); +``` + +### Data Flow + +``` +┌─────────────┐ ┌─────────────┐ ┌─────────────┐ +│ Route │───►│ Component │───►│ Service │ +│ Params │ │ Init │ │ Calls │ +└─────────────┘ └─────────────┘ └─────────────┘ + │ │ + ▼ ▼ + ┌─────────────┐ ┌─────────────┐ + │ Signals │◄───│ Backend │ + │ Update │ │ Response │ + └─────────────┘ └─────────────┘ + │ + ▼ + ┌─────────────┐ + │ Computed │ + │ Values │ + └─────────────┘ + │ + ▼ + ┌─────────────┐ + │ Template │ + │ Render │ + └─────────────┘ +``` + +## API Integration + +### Backend Endpoints + +| Endpoint | Purpose | +|----------|---------| +| `GET /api/v1/baselines/recommendations/{digest}` | Get recommended baselines | +| `GET /api/v1/baselines/rationale/{base}/{head}` | Get baseline selection rationale | +| `POST /api/v1/delta/compute` | Compute delta (idempotent) | +| `GET /api/v1/delta/{deltaId}` | Get delta results | +| `GET /api/v1/delta/{deltaId}/trust-indicators` | Get trust indicators | +| `GET /api/v1/actionables/delta/{deltaId}` | Get actionable recommendations | +| `GET /api/v1/evidence/delta/{deltaId}/items/{itemId}` | Get item evidence | +| `GET /api/v1/evidence/delta/{deltaId}/witness-paths` | Get witness paths | +| `GET /api/v1/evidence/delta/{deltaId}/vex-merge/{vulnId}` | Get VEX merge explanation | + +### Service Layer + +```typescript +@Injectable({ providedIn: 'root' }) +export class CompareService { + constructor(private http: HttpClient) {} + + getRecommendedBaselines(digest: string): Observable { + return this.http.get( + `/api/v1/baselines/recommendations/${digest}` + ); + } + + computeDelta(request: DeltaComputeRequest): Observable { + return this.http.post('/api/v1/delta/compute', request); + } + + getActionables(deltaId: string): Observable { + return this.http.get(`/api/v1/actionables/delta/${deltaId}`); + } + + 
getItemEvidence(deltaId: string, itemId: string): Observable { + return this.http.get( + `/api/v1/evidence/delta/${deltaId}/items/${itemId}` + ); + } +} +``` + +## Routing + +```typescript +// app.routes.ts additions +{ + path: 'releases/:releaseId', + children: [ + { path: '', redirectTo: 'detail', pathMatch: 'full' }, + { path: 'detail', component: ReleaseFlowComponent }, + { + path: 'compare', + component: CompareViewComponent, + data: { requireBaseline: false } + }, + { + path: 'compare/:baselineId', + component: CompareViewComponent, + data: { requireBaseline: true } + } + ] +}, +{ + path: 'compare', + children: [ + { + path: ':currentDigest', + component: CompareViewComponent + }, + { + path: ':currentDigest/:baselineDigest', + component: CompareViewComponent + } + ] +} +``` + +## Role-Based Views + +### Default Tab by Role + +| Role | Default Tab | Visible Features | +|------|-------------|------------------| +| Developer | Actionables | Actionables, Witness Paths, Upgrade Suggestions | +| Security | Claims | VEX Merge, Policy Reasoning, Claim Sources, Actionables | +| Audit | Attestations | Signatures, Replay, Evidence Pack, Envelope Hashes | + +### Implementation + +```typescript +const ROLE_DEFAULTS: Record = { + developer: { + defaultTab: 'actionables', + showFeatures: ['actionables', 'witness-paths', 'upgrade-suggestions'], + expandedPanels: ['actionables'] + }, + security: { + defaultTab: 'claims', + showFeatures: ['vex-merge', 'policy-reasoning', 'claim-sources', 'actionables'], + expandedPanels: ['vex-merge', 'policy'] + }, + audit: { + defaultTab: 'attestations', + showFeatures: ['signatures', 'replay', 'evidence-pack', 'envelope-hashes'], + expandedPanels: ['trust-indicators', 'signatures'] + } +}; +``` + +## Trust Indicators + +### Determinism Verification + +The UI displays and enables verification of: + +1. **Determinism Hash** - SHA-256 of normalized delta output +2. **Policy Version/Hash** - Active policy at scan time +3. 
**Feed Snapshot** - Vulnerability feed timestamp and hash +4. **Signature Status** - DSSE envelope verification result + +### Degraded Mode + +When signature verification fails, the UI: +- Displays a prominent warning banner +- Disables "Approve" actions +- Shows detailed verification failure reason +- Provides replay command for local verification + +## Accessibility + +### Keyboard Navigation + +- `Tab` / `Shift+Tab`: Navigate between panes +- `Arrow Up/Down`: Navigate items within pane +- `Enter`: Select item / expand detail +- `Escape`: Close expanded detail +- `C`: Copy replay command (when focused on trust indicators) + +### Screen Reader Support + +- ARIA labels on all interactive elements +- Live regions for delta summary updates +- Semantic heading structure + +## Performance Considerations + +### Lazy Loading + +- Evidence panel loads on-demand when item selected +- Witness paths collapse by default (expand on click) +- VEX merge details in expansion panel + +### Caching + +- Delta computations cached by (base_hash, head_hash, policy_hash) +- Baseline recommendations cached per session +- Trust indicators cached with delta + +### Virtual Scrolling + +For large deltas (> 100 items), the items pane uses virtual scrolling: + +```html + + + + + +``` + +## Testing Strategy + +### Unit Tests + +- Component behavior (selection, filtering, expansion) +- Computed signal derivations +- Role-based view switching + +### Integration Tests + +- API service calls and response handling +- Navigation and routing +- State persistence across route changes + +### E2E Tests + +- Full comparison workflow +- Baseline selection and rationale display +- Export functionality +- Role-based default verification + +## Related Documentation + +- [Sprint: Delta Compare View UI](../../implplan/SPRINT_4200_0002_0003_delta_compare_view.md) +- [Sprint: Delta Compare Backend API](../../implplan/SPRINT_4200_0002_0006_delta_compare_api.md) +- [Smart-Diff CLI 
Reference](../../cli/smart-diff-cli.md) +- [Advisory: Smart Diff - Reproducibility as a Feature](../../product-advisories/archived/22-Dec-2025/21-Dec-2025%20-%20Smart%20Diff%20-%20Reproducibility%20as%20a%20Feature.md) diff --git a/docs/operations/reachability-drift-guide.md b/docs/operations/reachability-drift-guide.md new file mode 100644 index 000000000..569b99572 --- /dev/null +++ b/docs/operations/reachability-drift-guide.md @@ -0,0 +1,519 @@ +# Reachability Drift Detection - Operations Guide + +**Module:** Scanner +**Version:** 1.0 +**Last Updated:** 2025-12-22 + +--- + +## 1. Prerequisites + +### 1.1 Infrastructure Requirements + +| Component | Minimum | Recommended | Notes | +|-----------|---------|-------------|-------| +| CPU | 4 cores | 8 cores | For call graph extraction | +| Memory | 4 GB | 8 GB | Large projects need more | +| PostgreSQL | 16+ | 16+ | With RLS enabled | +| Valkey/Redis | 7.0+ | 7.0+ | For caching (optional) | +| .NET Runtime | 10.0 | 10.0 | Preview features enabled | + +### 1.2 Network Requirements + +| Direction | Endpoints | Notes | +|-----------|-----------|-------| +| Inbound | Scanner API (8080) | Load balancer health checks | +| Outbound | PostgreSQL (5432) | Database connections | +| Outbound | Valkey (6379) | Cache connections (optional) | +| Outbound | Signer service | For DSSE attestations | + +### 1.3 Dependencies + +- Scanner WebService deployed and healthy +- PostgreSQL database with Scanner schema migrations applied +- (Optional) Valkey cluster for caching +- (Optional) Signer service for attestation signing + +--- + +## 2. 
Configuration + +### 2.1 Scanner Service Configuration + +**File:** `etc/scanner.yaml` + +```yaml +scanner: + reachability: + # Enable reachability drift detection + enabled: true + + # Languages to analyze (empty = all supported) + languages: + - dotnet + - java + - node + - python + - go + + # Call graph extraction options + extraction: + max_depth: 100 + max_nodes: 100000 + timeout_seconds: 300 + include_test_code: false + include_vendored: false + + # Drift detection options + drift: + # Auto-compute on scan completion + auto_compute: true + # Base scan selection (previous, tagged, specific) + base_selection: previous + # Emit VEX candidates for unreachable sinks + emit_vex_candidates: true + + storage: + postgres: + connection_string: "Host=localhost;Database=stellaops;Username=scanner;Password=${SCANNER_DB_PASSWORD}" + schema: scanner + pool_size: 20 + + cache: + valkey: + enabled: true + connection: "localhost:6379" + bucket: "stella-callgraph" + ttl_hours: 24 + circuit_breaker: + failure_threshold: 5 + timeout_seconds: 30 +``` + +### 2.2 Valkey Cache Configuration + +```yaml +# Valkey-specific settings +cache: + valkey: + enabled: true + connection: "valkey-cluster.internal:6379" + bucket: "stella-callgraph" + ttl_hours: 24 + + # Circuit breaker prevents cache storms + circuit_breaker: + failure_threshold: 5 + timeout_seconds: 30 + half_open_max_attempts: 3 + + # Compression reduces memory usage + compression: + enabled: true + algorithm: gzip + level: fastest +``` + +### 2.3 Policy Gate Configuration + +**File:** `etc/policy.yaml` + +```yaml +smart_diff: + gates: + # Block on KEV becoming reachable + - id: drift_block_kev + condition: "delta_reachable > 0 AND is_kev = true" + action: block + severity: critical + message: "Known Exploited Vulnerability now reachable" + + # Block on high-severity sink becoming reachable + - id: drift_block_critical + condition: "delta_reachable > 0 AND max_cvss >= 9.0" + action: block + severity: critical + message: 
"Critical vulnerability now reachable" + + # Warn on any new reachable paths + - id: drift_warn_new_paths + condition: "delta_reachable > 0" + action: warn + severity: medium + message: "New reachable paths detected" + + # Auto-allow mitigated paths + - id: drift_allow_mitigated + condition: "delta_unreachable > 0 AND delta_reachable = 0" + action: allow + auto_approve: true +``` + +--- + +## 3. Deployment Modes + +### 3.1 Standalone Deployment + +```bash +# Run Scanner WebService with drift detection +docker run -d \ + --name scanner \ + -p 8080:8080 \ + -e SCANNER_DB_PASSWORD=secret \ + -v /etc/scanner:/etc/scanner:ro \ + stellaops/scanner:latest + +# Verify health +curl http://localhost:8080/health +``` + +### 3.2 Kubernetes Deployment + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: scanner + namespace: stellaops +spec: + replicas: 3 + selector: + matchLabels: + app: scanner + template: + metadata: + labels: + app: scanner + spec: + containers: + - name: scanner + image: stellaops/scanner:latest + ports: + - containerPort: 8080 + env: + - name: SCANNER_DB_PASSWORD + valueFrom: + secretKeyRef: + name: scanner-secrets + key: db-password + volumeMounts: + - name: config + mountPath: /etc/scanner + readOnly: true + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "8Gi" + cpu: "4" + livenessProbe: + httpGet: + path: /health/live + port: 8080 + initialDelaySeconds: 30 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health/ready + port: 8080 + initialDelaySeconds: 10 + periodSeconds: 5 + volumes: + - name: config + configMap: + name: scanner-config +``` + +### 3.3 Air-Gapped Deployment + +For air-gapped environments: + +1. **Disable external lookups:** + ```yaml + scanner: + reachability: + offline_mode: true + # No external advisory fetching + ``` + +2. 
**Pre-load call graph caches:** + ```bash + # Export from connected environment + stella cache export --type callgraph --output graphs.tar.gz + + # Import in air-gapped environment + stella cache import --input graphs.tar.gz + ``` + +3. **Use local VEX sources:** + ```yaml + excititor: + sources: + - type: local + path: /data/vex-bundles/ + ``` + +--- + +## 4. Monitoring & Metrics + +### 4.1 Key Metrics + +| Metric | Type | Description | Alert Threshold | +|--------|------|-------------|-----------------| +| `scanner_callgraph_extraction_duration_seconds` | histogram | Time to extract call graph | p99 > 300s | +| `scanner_callgraph_node_count` | gauge | Nodes in extracted graph | > 100,000 | +| `scanner_reachability_analysis_duration_seconds` | histogram | BFS analysis time | p99 > 30s | +| `scanner_drift_newly_reachable_total` | counter | Count of newly reachable sinks | > 0 (alert) | +| `scanner_drift_newly_unreachable_total` | counter | Count of mitigated sinks | (info) | +| `scanner_cache_hit_ratio` | gauge | Valkey cache hit rate | < 0.5 | +| `scanner_cache_circuit_breaker_open` | gauge | Circuit breaker state | = 1 (alert) | + +### 4.2 Grafana Dashboard + +Import dashboard JSON from: `deploy/grafana/scanner-drift-dashboard.json` + +Key panels: +- Drift detection rate over time +- Newly reachable sinks by category +- Call graph extraction latency +- Cache hit/miss ratio +- Circuit breaker state + +### 4.3 Alert Rules + +```yaml +# Prometheus alerting rules +groups: + - name: scanner-drift + rules: + - alert: KevBecameReachable + expr: increase(scanner_drift_kev_reachable_total[5m]) > 0 + for: 0m + labels: + severity: critical + annotations: + summary: "KEV vulnerability became reachable" + description: "A Known Exploited Vulnerability is now reachable from public entrypoints" + + - alert: HighDriftRate + expr: rate(scanner_drift_newly_reachable_total[1h]) > 10 + for: 15m + labels: + severity: warning + annotations: + summary: "High rate of new reachable 
vulnerabilities" + + - alert: CacheCircuitOpen + expr: scanner_cache_circuit_breaker_open == 1 + for: 5m + labels: + severity: warning + annotations: + summary: "Valkey cache circuit breaker is open" +``` + +--- + +## 5. Troubleshooting + +### 5.1 Call Graph Extraction Failures + +**Symptom:** `GRAPH_NOT_EXTRACTED` error + +**Causes & Solutions:** + +| Cause | Solution | +|-------|----------| +| Missing SDK/runtime | Install required SDK (.NET, Node.js, JDK) | +| Build errors in project | Fix compilation errors first | +| Timeout exceeded | Increase `extraction.timeout_seconds` | +| Memory exhaustion | Increase container memory limits | +| Unsupported language | Check language support matrix | + +**Debugging:** + +```bash +# Check extraction logs +kubectl logs -f deployment/scanner | grep -i extraction + +# Manual extraction test +stella scan callgraph \ + --project /path/to/project \ + --language dotnet \ + --verbose +``` + +### 5.2 Drift Detection Issues + +**Symptom:** Drift not computed or incorrect results + +**Causes & Solutions:** + +| Cause | Solution | +|-------|----------| +| No base scan available | Ensure previous scan exists | +| Different languages | Base and head must have same language | +| Graph digest unchanged | No material code changes detected | +| Cache stale | Clear Valkey cache for scan | + +**Debugging:** + +```bash +# Check drift computation status +curl "http://scanner:8080/api/scanner/scans/{scanId}/drift" + +# Force recomputation +curl -X POST \ + "http://scanner:8080/api/scanner/scans/{scanId}/compute-reachability" \ + -d '{"forceRecompute": true}' + +# View graph digests +psql -c "SELECT scan_id, graph_digest FROM scanner.call_graph_snapshots ORDER BY extracted_at DESC LIMIT 10" +``` + +### 5.3 Cache Problems + +**Symptom:** Slow performance, cache misses, circuit breaker open + +**Solutions:** + +```bash +# Check Valkey connectivity +redis-cli -h valkey.internal ping + +# Check circuit breaker state +curl 
"http://scanner:8080/health/ready" | jq '.checks.cache'
+
+# Clear specific scan cache (DEL takes literal keys, not glob patterns)
+redis-cli --scan --pattern "stella-callgraph:scanId:*" | xargs redis-cli DEL
+
+# Reset circuit breaker (restart scanner)
+kubectl rollout restart deployment/scanner
+```
+
+### 5.4 Common Error Messages
+
+| Error | Meaning | Action |
+|-------|---------|--------|
+| `ERR_GRAPH_TOO_LARGE` | > 100K nodes | Increase `max_nodes` or split project |
+| `ERR_EXTRACTION_TIMEOUT` | Analysis timed out | Increase timeout or reduce scope |
+| `ERR_NO_ENTRYPOINTS` | No public entrypoints found | Check framework detection |
+| `ERR_BASE_SCAN_MISSING` | Base scan not found | Specify valid `baseScanId` |
+| `ERR_CACHE_UNAVAILABLE` | Valkey unreachable | Check network, circuit breaker will activate |
+
+---
+
+## 6. Performance Tuning
+
+### 6.1 Call Graph Extraction
+
+```yaml
+scanner:
+  reachability:
+    extraction:
+      # Exclude test code (reduces graph size)
+      include_test_code: false
+
+      # Exclude vendored dependencies
+      include_vendored: false
+
+      # Limit analysis depth
+      max_depth: 50  # Default: 100
+
+      # Parallel project analysis
+      parallelism: 4
+```
+
+### 6.2 Caching Strategy
+
+```yaml
+cache:
+  valkey:
+    # Longer TTL for stable projects
+    ttl_hours: 72
+
+    # Aggressive compression for large graphs
+    compression:
+      level: optimal  # vs 'fastest'
+
+    # Larger connection pool
+    pool_size: 20
+```
+
+### 6.3 Database Optimization
+
+```sql
+-- Ensure indexes exist
+CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_callgraph_scan_lang
+  ON scanner.call_graph_snapshots(scan_id, language);
+
+CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_drift_head_scan
+  ON scanner.reachability_drift_results(head_scan_id);
+
+-- Vacuum after large imports
+VACUUM ANALYZE scanner.call_graph_snapshots;
+VACUUM ANALYZE scanner.reachability_drift_results;
+```
+
+---
+
+## 7. 
Backup & Recovery + +### 7.1 Database Backup + +```bash +# Backup drift-related tables +pg_dump -h postgres.internal -U stellaops \ + -t scanner.call_graph_snapshots \ + -t scanner.reachability_results \ + -t scanner.reachability_drift_results \ + -t scanner.drifted_sinks \ + -t scanner.code_changes \ + > scanner_drift_backup.sql +``` + +### 7.2 Cache Recovery + +```bash +# Export cache to file (if needed) +redis-cli -h valkey.internal --rdb /backup/callgraph-cache.rdb + +# Cache is ephemeral - can be regenerated from database +# Recompute after cache loss: +stella scan recompute-reachability --all-pending +``` + +--- + +## 8. Security Considerations + +### 8.1 Database Access + +- Scanner service uses dedicated PostgreSQL user with schema-limited permissions +- Row-Level Security (RLS) enforces tenant isolation +- Connection strings use secrets management (not plaintext) + +### 8.2 API Authentication + +- All drift endpoints require valid Bearer token +- Scopes: `scanner:read`, `scanner:write`, `scanner:admin` +- Rate limiting prevents abuse + +### 8.3 Attestation Signing + +- Drift results can be DSSE-signed for audit trails +- Signing keys managed by Signer service +- Optional Rekor transparency logging + +--- + +## 9. 
References + +- **Architecture:** `docs/modules/scanner/reachability-drift.md` +- **API Reference:** `docs/api/scanner-drift-api.md` +- **PostgreSQL Guide:** `docs/operations/postgresql-guide.md` +- **Air-Gap Operations:** `docs/operations/airgap-operations-runbook.md` +- **Reachability Runbook:** `docs/operations/reachability-runbook.md` diff --git a/docs/product-advisories/18-Dec-2025 - Designing Explainable Triage and Proof‑Linked Evidence.md b/docs/product-advisories/18-Dec-2025 - Designing Explainable Triage and Proof‑Linked Evidence.md deleted file mode 100644 index 316b8528d..000000000 --- a/docs/product-advisories/18-Dec-2025 - Designing Explainable Triage and Proof‑Linked Evidence.md +++ /dev/null @@ -1,751 +0,0 @@ -Here’s a practical, first‑time‑friendly blueprint for making your security workflow both **explainable** and **provable**—from triage to approval. - -# Explainable triage UX (what & why) - -Show every risk score with the minimum evidence a responder needs to trust it: - -* **Reachable path:** the concrete call‑chain (or network path) proving the vuln is actually hit. -* **Entrypoint boundary:** the external surface (HTTP route, CLI verb, cron, message topic) that leads to that path. -* **VEX status:** the exploitability decision (Affected/Not Affected/Under Investigation/Fixed) with rationale. -* **Last‑seen timestamp:** when this evidence was last observed/generated. 
- -## UI pattern (compact, 1‑click expand) - -* **Row (collapsed):** `Score 72 • CVE‑2024‑12345 • service: api-gateway • package: x.y.z` -* **Expand panel (evidence):** - - * **Path:** `POST /billing/charge → BillingController.Pay() → StripeClient.Create()` - * **Boundary:** `Ingress: /billing/charge (JWT: required, scope: payments:write)` - * **VEX:** `Not Affected (runtime guard strips untrusted input before sink)` - * **Last seen:** `2025‑12‑18T09:22Z` (scan: sbomer#c1a2, policy run: lattice#9f0d) - * **Actions:** “Open proof bundle”, “Re-run check”, “Create exception (time‑boxed)” - -## Data contract (what the panel needs) - -```json -{ - "finding_id": "f-7b3c", - "cve": "CVE-2024-12345", - "component": {"name": "stripe-sdk", "version": "6.1.2"}, - "reachable_path": [ - "HTTP POST /billing/charge", - "BillingController.Pay", - "StripeClient.Create" - ], - "entrypoint": {"type":"http","route":"/billing/charge","auth":"jwt:payments:write"}, - "vex": {"status":"not_affected","justification":"runtime_sanitizer_blocks_sink","timestamp":"2025-12-18T09:22:00Z"}, - "last_seen":"2025-12-18T09:22:00Z", - "attestation_refs": ["sha256:…sbom", "sha256:…vex", "sha256:…policy"] -} -``` - -# Evidence‑linked approvals (what & why) - -Make “Approve to ship” contingent on **verifiable proof**, not screenshots: - -* **Chain** must exist and be machine‑verifiable: **SBOM → VEX → policy decision**. -* Use **in‑toto/DSSE** attestations or **SLSA provenance** so each link has a signature, subject digest, and predicate. -* **Gate** merges/deploys only when the chain validates. - -## Pipeline gate (simple policy) - -* Require: - - 1. **SBOM attestation** referencing the exact image digest - 2. **VEX attestation** covering all listed components (or explicit allow‑gaps) - 3. 
**Policy decision attestation** (e.g., “risk ≤ threshold AND all reachable vulns = Not Affected/Fixed”) - -### Minimal decision attestation (DSSE envelope → JSON payload) - -```json -{ - "predicateType": "stella/policy-decision@v1", - "subject": [{"name":"registry/org/app","digest":{"sha256":""}}], - "predicate": { - "policy": "risk_threshold<=75 && reachable_vulns.all(v => v.vex in ['not_affected','fixed'])", - "inputs": { - "sbom_ref": "sha256:", - "vex_ref": "sha256:" - }, - "result": {"allowed": true, "score": 61, "exemptions":[]}, - "evidence_refs": ["sha256:"], - "run_at": "2025-12-18T09:23:11Z" - } -} -``` - -# How this lands in your product (concrete moves) - -* **Backend:** add `/findings/:id/evidence` (returns the contract above) + `/approvals/:artifact/attestations`. -* **Storage:** keep **proof bundles** (graphs, call stacks, logs) as content‑addressed blobs; store DSSE envelopes alongside. -* **UI:** one list → expandable rows; chips for VEX status; “Open proof” shows the call graph and boundary in 1 view. -* **CLI/API:** `stella verify image: --require sbom,vex,decision` returns a signed summary; pipelines fail on non‑zero. -* **Metrics:** - - * **% changes with complete attestations** (target ≥95%) - * **TTFE (time‑to‑first‑evidence)** from alert → panel open (target ≤30s) - * **Post‑deploy reversions** due to missing proof (trend to zero) - -# Starter acceptance checklist - -* [ ] Every risk row expands to path, boundary, VEX, last‑seen in <300 ms. -* [ ] “Approve” button disabled until SBOM+VEX+Decision attestations validate for the **exact artifact digest**. -* [ ] One‑click “Show DSSE chain” renders the three envelopes with subject digests and signers. -* [ ] Audit log captures who approved, which digests, and which evidence hashes. - -If you want, I can turn this into ready‑to‑drop **.NET 10** endpoints + a small React panel with mocked data so your team can wire it up fast. 
-Below is a “build‑it” guide for Stella Ops that goes past the concept level: concrete services, schemas, pipelines, signing/storage choices, UI components, and the exact invariants you should enforce so triage is **explainable** and approvals are **provably evidence‑linked**. - ---- - -## 1) Start with the invariants (the rules your system must never violate) - -If you implement nothing else, implement these invariants—they’re what make the UX trustworthy and the approvals auditable. - -### Artifact anchoring invariant - -Every finding, every piece of evidence, and every approval must be anchored to an immutable **subject digest** (e.g., container image digest `sha256:…`, binary SHA, or SBOM digest). - -* No “latest tag” approvals. -* No “approve commit” without mapping to the built artifact digest. - -### Evidence closure invariant - -A policy decision is only valid if it references **exactly** the evidence it used: - -* `inputs.sbom_ref` -* `inputs.vex_ref` -* `inputs.reachability_ref` (optional but recommended) -* `inputs.scan_ref` (optional) -* and any config/IaC refs used for boundary/exposure. - -### Signature chain invariant - -Evidence is only admissible if it is: - -1. structured (machine readable), -2. signed (DSSE/in‑toto), -3. verifiable (trusted identity/keys), -4. retrievable by digest. - -DSSE is specifically designed to authenticate both the message and its type (payload type) and avoid canonicalization pitfalls. ([GitHub][1]) - -### Staleness invariant - -Evidence must have: - -* `last_seen` and `expires_at` (or TTL), -* a “stale evidence” behavior in policy (deny or degrade score). - ---- - -## 2) Choose the canonical formats and where you’ll store “proof” - -### Attestation envelope: DSSE + in‑toto Statement - -Use: - -* **in‑toto Attestation Framework** “Statement” as the payload model (“subject + predicateType + predicate”). ([GitHub][2]) -* Wrap it in **DSSE** for signing. 
([GitHub][1]) -* If you use Sigstore bundles, the DSSE envelope is expected to carry an in‑toto statement and uses `payloadType` like `application/vnd.in-toto+json`. ([Sigstore][3]) - -### SBOM format: CycloneDX or SPDX - -* SPDX is an ISO/IEC standard and has v3.0 and v2.3 lines in the ecosystem. ([spdx.dev][4]) -* CycloneDX is an ECMA standard (ECMA‑424) and widely used for application security contexts. ([GitHub][5]) - -Pick one as **your canonical** (internally), but ingest both. - -### VEX format: OpenVEX (practical) + map to “classic” VEX statuses - -VEX’s value is triage noise reduction: vendors can assert whether a product is affected, fixed, under investigation, or not affected. ([NTIA][6]) -OpenVEX is a minimal, embeddable implementation of VEX intended for interoperability. ([GitHub][7]) - -### Where to store proof: OCI registry referrers - -Use OCI “subject/referrers” so proofs travel with the artifact: - -* OCI 1.1 introduces an explicit `subject` field and referrers graph for signatures/attestations/SBOMs. ([opencontainers.org][8]) -* ORAS documentation explains linking artifacts via `subject`. ([Oras][9]) -* Microsoft docs show `oras attach … --artifact-type …` patterns (works across registries that support referrers). ([Microsoft Learn][10]) - ---- - -## 3) System architecture (services + data flow) - -### Services (minimum set) - -1. **Ingestor** - - * Pulls scanner outputs (SCA/SAST/IaC), SBOM, runtime signals. -2. **Evidence Builder** - - * Computes reachability, entrypoints, boundary/auth context, score explanation. -3. **Attestation Service** - - * Creates in‑toto statements, wraps DSSE, signs (cosign/KMS), stores to registry. -4. **Policy Engine** - - * Evaluates allow/deny + reason codes, emits signed decision attestation. - * Use OPA/Rego for maintainable declarative policies. ([openpolicyagent.org][11]) -5. **Stella Ops API** - - * Serves findings + evidence panels to the UI (fast, cached). -6. 
**UI** - - * Explainable triage panel + chain viewer + approve button. - -### Event flow (artifact‑centric) - -1. Build produces `image@sha256:X` -2. Generate SBOM → sign + attach -3. Run vuln scan → sign + attach (optional but useful) -4. Evidence Builder creates: - - * reachability proof - * boundary proof - * vex doc (or imports vendor VEX + adds your context) -5. Policy engine evaluates → emits “decision attestation” -6. UI shows explainable triage + “approve” gating - ---- - -## 4) Data model (the exact objects you need) - -### Core IDs you should standardize - -* `subject_digest`: `sha256:` -* `subject_name`: `registry/org/app` -* `finding_key`: `(subject_digest, detector, cve, component_purl, location)` stable hash -* `component_purl`: package URL (PURL) canonical component identifier - -### Tables (Postgres suggested) - -**artifacts** - -* `id (uuid)` -* `name` -* `digest` (unique) -* `created_at` - -**findings** - -* `id (uuid)` -* `artifact_digest` -* `cve` -* `component_purl` -* `severity` -* `raw_score` -* `risk_score` -* `status` (open/triaged/accepted/fixed) -* `first_seen`, `last_seen` - -**evidence** - -* `id (uuid)` -* `finding_id` -* `kind` (reachable_path | boundary | score_explain | vex | ...) 
-* `payload_json` (jsonb, small) -* `blob_ref` (content-addressed URI for big payloads) -* `last_seen` -* `expires_at` -* `confidence` (0–1) -* `source_attestation_digest` (nullable) - -**attestations** - -* `id (uuid)` -* `artifact_digest` -* `predicate_type` -* `attestation_digest` (sha256 of DSSE envelope) -* `signer_identity` (OIDC subject / cert identity) -* `issued_at` -* `registry_ref` (where attached) - -**approvals** - -* `id (uuid)` -* `artifact_digest` -* `decision_attestation_digest` -* `approver` -* `approved_at` -* `expires_at` -* `reason` - ---- - -## 5) Explainable triage: how to compute the “Path + Boundary + VEX + Last‑seen” - -### 5.1 Reachable path proof (call chain / flow) - -You need a uniform reachability result type: - -* `reachable = true` with an explicit path -* `reachable = false` with justification (e.g., symbol absent, dead code) -* `reachable = unknown` with reason (insufficient symbols, dynamic dispatch) - -**Implementation strategy** - -1. **Symbol mapping**: map CVE → vulnerable symbols/functions/classes - - * Use one or more: - - * vendor advisory → patched functions - * diff mining (commit that fixes CVE) to extract changed symbols - * curated mapping in your DB for high volume CVEs -2. **Program graph extraction** at build time: - - * Produce a call graph or dependency graph per language. - * Store as compact adjacency list (or protobuf) keyed by `subject_digest`. -3. **Entrypoint discovery**: - - * HTTP routes (framework metadata) - * gRPC service methods - * queue/stream consumers - * cron/CLI handlers -4. **Path search**: - - * BFS/DFS from entrypoints to vulnerable symbols. - * Record the shortest path + top‑K alternatives. -5. 
**Proof bundle**: - - * path nodes with stable IDs - * file hashes + line ranges (no raw source required) - * tool version + config hash - * graph digest - -**Reachability evidence JSON (UI‑friendly)** - -```json -{ - "kind": "reachable_path", - "result": "reachable", - "confidence": 0.86, - "entrypoints": [ - {"type":"http","route":"POST /billing/charge","auth":"jwt:payments:write"} - ], - "paths": [{ - "path_id": "p-1", - "steps": [ - {"node":"BillingController.Pay","file_hash":"sha256:aaa","lines":[41,88]}, - {"node":"StripeClient.Create","file_hash":"sha256:bbb","lines":[10,52]}, - {"node":"stripe-sdk.vulnFn","symbol":"stripe-sdk::parseWebhook","evidence":"symbol-match"} - ] - }], - "graph": {"digest":"sha256:callgraph...", "format":"stella-callgraph-v1"}, - "last_seen": "2025-12-18T09:22:00Z", - "expires_at": "2025-12-25T09:22:00Z" -} -``` - -**UI rule:** never show “reachable” without a concrete, replayable path ID. - ---- - -### 5.2 Boundary proof (the “why this is exposed” part) - -Boundary proof answers: “Even if reachable, who can trigger it?” - -**Data sources** - -* Kubernetes ingress/service (exposure) -* API gateway routes and auth policies -* service mesh auth (mTLS, JWT) -* IAM policies (for cloud events) -* network policies (deny/allow) - -**Boundary evidence schema** - -```json -{ - "kind": "boundary", - "surface": {"type":"http","route":"POST /billing/charge"}, - "exposure": {"internet": true, "ports":[443]}, - "auth": { - "mechanism":"jwt", - "required_scopes":["payments:write"], - "audience":"billing-api" - }, - "rate_limits": {"enabled": true, "rps": 20}, - "controls": [ - {"type":"waf","status":"enabled"}, - {"type":"input_validation","status":"enabled","location":"BillingController.Pay"} - ], - "last_seen": "2025-12-18T09:22:00Z", - "confidence": 0.74 -} -``` - -**How to build it** - -* Create a “Surface Extractor” plugin per environment: - - * `k8s-extractor`: reads ingress + service + annotations - * `gateway-extractor`: reads API gateway 
config - * `iac-extractor`: parses Terraform/CloudFormation -* Normalize into the schema above. - ---- - -### 5.3 VEX in Stella: statuses + justifications - -VEX statuses you should support in UI: - -* Not affected -* Affected -* Fixed -* Under investigation ([NTIA][6]) - -OpenVEX will carry the machine readable structure. ([GitHub][7]) - -**Practical approach** - -* Treat VEX as **the decision record** for exploitability. -* Your policy can require VEX coverage for all “reachable” high severity vulns. - -**Rule of thumb** - -* If `reachable=true` AND boundary shows reachable surface + auth weak → VEX defaults to `affected` until mitigations proven. -* If `reachable=false` with high confidence and stable proof → VEX may be `not_affected`. - ---- - -### 5.4 Explainable risk score (don’t hide the formula) - -Make score explainability first‑class. - -**Recommended implementation** - -* Store risk score as an additive model: - - * `base = CVSS normalized` - * `+ reachability_bonus` - * `+ exposure_bonus` - * `+ privilege_bonus` - * `- mitigation_discount` -* Emit a `score_explain` evidence object: - -```json -{ - "kind": "score_explain", - "risk_score": 72, - "contributions": [ - {"factor":"cvss","value":41,"reason":"CVSS 9.8"}, - {"factor":"reachability","value":18,"reason":"reachable path p-1"}, - {"factor":"exposure","value":10,"reason":"internet-facing route"}, - {"factor":"auth","value":3,"reason":"scope required lowers impact"} - ], - "last_seen":"2025-12-18T09:22:00Z" -} -``` - -**UI rule:** “Score 72” must always be clickable to a stable breakdown. - ---- - -## 6) The UI you should build (components + interaction rules) - -### 6.1 Findings list row (collapsed) - -Show only what helps scanning: - -* Score badge -* CVE + component -* service -* reachability chip: Reachable / Not reachable / Unknown -* VEX chip -* last_seen indicator (green/yellow/red) - -### 6.2 Evidence drawer (expanded) - -Tabs: - -1. 
**Path** - - * show entrypoint(s) - * render call chain (simple list first; graph view optional) -2. **Boundary** - - * exposure, auth, controls -3. **VEX** - - * status + justification + issuer identity -4. **Score** - - * breakdown bar/list -5. **Proof** - - * attestation chain viewer (SBOM → VEX → Decision) - * “Verify locally” action - -### 6.3 “Open proof bundle” viewer - -Must display: - -* subject digest -* signer identity -* predicate type -* digest of proof bundle -* last_seen + tool versions - -**This is where trust is built:** responders can see that the evidence is signed, tied to the artifact, and recent. - ---- - -## 7) Proof‑linked evidence: how to generate and attach attestations - -### 7.1 Statement format: in‑toto Attestation Framework - -in‑toto’s model is: - -* **Subjects** (the artifact digests) -* **Predicate type** (schema ID) -* **Predicate** (your actual data) ([GitHub][2]) - -### 7.2 DSSE envelope - -Wrap statements using DSSE so payload type is signed too. ([GitHub][1]) - -### 7.3 Attach to OCI image via referrers - -OCI “subject/referrers” makes attestations discoverable from the image digest. ([opencontainers.org][8]) -ORAS provides the operational model (“attach artifacts to an image”). ([Microsoft Learn][10]) - -### 7.4 Practical signing: cosign attest + verify - -Cosign has built‑in in‑toto attestation support and can sign custom predicates. ([Sigstore][12]) - -Typical patterns (example only; adapt to your environment): - -```bash -# Attach an attestation -cosign attest --predicate reachability.json \ - --type stella/reachability/v1 \ - - -# Verify attestation -cosign verify-attestation --type stella/reachability/v1 \ - -``` - -(Use keyless OIDC or KMS keys depending on your org.) - ---- - -## 8) Define your predicate types (this is the “contract” Stella enforces) - -You’ll want at least these predicate types: - -1. `stella/sbom@v1` - - * embeds CycloneDX/SPDX (or references blob digest) - -2. 
`stella/vex@v1` - - * embeds OpenVEX document or references it ([GitHub][7]) - -3. `stella/reachability@v1` - - * the reachability evidence above - * includes `graph.digest`, `paths`, `confidence`, `expires_at` - -4. `stella/boundary@v1` - - * exposure/auth proof and `last_seen` - -5. `stella/policy-decision@v1` - - * the gating result, references all input attestation digests - -6. Optional: `stella/human-approval@v1` - - * “I approve deploy of subject digest X based on decision attestation Y” - * keep it time‑boxed - ---- - -## 9) The policy gate (how approvals become proof‑linked) - -### 9.1 Use OPA/Rego for the gate - -OPA policies are written in Rego. ([openpolicyagent.org][11]) - -**Gate input** should be a single JSON document assembled from verified attestations: - -```json -{ - "subject": {"name":"registry/org/app","digest":"sha256:..."}, - "sbom": {...}, - "vex": {...}, - "reachability": {...}, - "boundary": {...}, - "org_policy": {"max_risk": 75, "max_age_hours": 168} -} -``` - -**Example Rego (deny‑by‑default)** - -```rego -package stella.gate - -default allow := false - -# deny if evidence is stale -stale_evidence { - now := time.now_ns() - exp := time.parse_rfc3339_ns(input.reachability.expires_at) - now > exp -} - -# deny if any high severity reachable vuln is not resolved by VEX -unresolved_reachable[v] { - v := input.reachability.findings[_] - v.severity in {"critical","high"} - v.reachable == true - not input.vex.resolution[v.cve] in {"not_affected","fixed"} -} - -allow { - input.risk_score <= input.org_policy.max_risk - not stale_evidence - count(unresolved_reachable) == 0 -} -``` - -### 9.2 Emit a signed policy decision attestation - -When OPA returns `allow=true`, emit **another attestation**: - -* predicate includes the policy version/hash and all input refs. -* that’s what the UI “Approve” button targets. - -This is the “evidence‑linked approval”: approval references the signed decision, and the decision references the signed evidence. 
- ---- - -## 10) “Approve” button behavior (what Stella Ops should enforce) - -### Disabled until… - -* subject digest known -* SBOM attestation found + signature verified -* VEX attestation found + signature verified -* Decision attestation found + signature verified -* Decision’s `inputs` digests match the actual retrieved evidence - -### When clicked… - -1. Stella Ops creates a `stella/human-approval@v1` statement: - - * `subject` = artifact digest - * `predicate.decision_ref` = decision attestation digest - * `predicate.expires_at` = short TTL (e.g., 7–30 days) -2. Signs it with the approver identity -3. Attaches it to the artifact (OCI referrer) - -### Audit view must show - -* approver identity -* exact artifact digest -* exact decision attestation digest -* timestamp and expiry - ---- - -## 11) Implementation details that matter in production - -### 11.1 Verification library (shared by UI backend + CI gate) - -Write one verifier module used everywhere: - -**Inputs** - -* image digest -* expected predicate types -* trust policy (allowed identities/issuers, keyless rules, KMS keys) - -**Steps** - -1. Discover referrers for `image@sha256:…` -2. Filter by `predicateType` -3. Verify DSSE + signature + identity -4. Validate JSON schema for predicate -5. Check `subject.digest` matches image digest -6. Return “verified evidence set” + “errors” - -### 11.2 Evidence privacy - -Reachability proofs can leak implementation details. - -* Store file hashes, symbol names, and line ranges -* Gate raw source behind elevated permissions -* Provide redacted proofs by default - -### 11.3 Evidence TTL strategy - -* SBOM: long TTL (weeks/months) if digest immutable -* Boundary: short TTL (hours/days) because env changes -* Reachability: medium TTL (days/weeks) depending on code churn -* VEX: must be renewed if boundary/reachability changes - -### 11.4 Handling “Unknown reachability” - -Don’t force false certainty. 
- -* Mark as `unknown` and show why (missing symbols, dynamic reflection, stripped binaries) -* Policy can treat unknown as “reachable” for critical CVEs in internet‑facing services. - ---- - -## 12) A concrete MVP path that still delivers value - -If you want a minimal but real first release: - -### MVP (2–3 deliverables) - -1. **Evidence drawer** fed by: - - * scanner output + SBOM + a simple “entrypoint map” -2. **VEX workflow** - - * allow engineers to set VEX status + justification -3. **Signed decision gating** - - * even if reachability is heuristic, the chain is real - -Then iterate: - -* add reachability graphs -* add boundary extraction from IaC/K8s -* tighten policy (staleness, confidence thresholds) - ---- - -## 13) Quick checklist for “done enough to trust” - -* [ ] Every finding expands to: Path, Boundary, VEX, Score, Proof -* [ ] Every evidence tab shows `last_seen` + confidence -* [ ] “Verify chain” works: SBOM → VEX → Decision all signed and bound to the artifact digest -* [ ] Approve button signs a human approval attestation tied to the decision digest -* [ ] CI gate verifies the same chain before deploy - ---- - -If you want, I can also drop in: - -* a full set of JSON Schemas for `stella/*@v1` predicates, -* a reference verifier implementation outline in .NET 10 (Minimal API + a verifier class), -* and a sample UI component tree (React) that renders path/boundary graphs and attestation chains. 
-
-[1]: https://github.com/secure-systems-lab/dsse?utm_source=chatgpt.com "DSSE: Dead Simple Signing Envelope"
-[2]: https://github.com/in-toto/attestation?utm_source=chatgpt.com "in-toto Attestation Framework"
-[3]: https://docs.sigstore.dev/about/bundle/?utm_source=chatgpt.com "Sigstore Bundle Format"
-[4]: https://spdx.dev/use/specifications/?utm_source=chatgpt.com "Specifications"
-[5]: https://github.com/CycloneDX/specification?utm_source=chatgpt.com "CycloneDX/specification"
-[6]: https://www.ntia.gov/sites/default/files/publications/vex_one-page_summary_0.pdf "VEX one-page summary"
-[7]: https://github.com/openvex/spec?utm_source=chatgpt.com "OpenVEX Specification"
-[8]: https://opencontainers.org/posts/blog/2024-03-13-image-and-distribution-1-1/?utm_source=chatgpt.com "OCI Image and Distribution Specs v1.1 Releases"
-[9]: https://oras.land/docs/concepts/reftypes/?utm_source=chatgpt.com "Attached Artifacts | OCI Registry As Storage"
-[10]: https://learn.microsoft.com/en-us/azure/container-registry/container-registry-manage-artifact?utm_source=chatgpt.com "Manage OCI Artifacts and Supply Chain Artifacts with ORAS"
-[11]: https://openpolicyagent.org/docs/policy-language?utm_source=chatgpt.com "Policy Language"
-[12]: https://docs.sigstore.dev/cosign/verifying/attestation/?utm_source=chatgpt.com "In-Toto Attestations"
diff --git a/docs/product-advisories/22-Dec-2025 - UI Patterns for Triage and Replay.md b/docs/product-advisories/22-Dec-2025 - UI Patterns for Triage and Replay.md
new file mode 100644
index 000000000..485595fa0
--- /dev/null
+++ b/docs/product-advisories/22-Dec-2025 - UI Patterns for Triage and Replay.md
@@ -0,0 +1,140 @@
+Here’s a tight, first‑time‑friendly blueprint for two Stella Ops UX pillars—**Triage & Exceptions** and **Knowledge Snapshots & Merge Semantics**—with just enough background plus concrete specs your PMs/devs can ship.
+ +--- + +# Triage & Exceptions (quiet‑by‑design) + +**Why it matters (plain English):** Most scanners drown users in alerts. “Quiet‑by‑design” shows only *provable, reachable* risks and lets you create **auditable exceptions** (temporary waivers) that auto‑feed compliance packs. + +**User flow** + +1. **Inbox grouped by exploit path** + + * Group key = `(artifact → package → vulnerable symbol → runtime path)`. + * Each group shows: risk score, blast radius (count of dependents), EPSS/CVSS, and a “Proof” button. +2. **Open a path → Proof bundle** + + * **Reach subgraph** (who calls what). + * **Symbol map** (function/offsets; source or binary map). + * **VEX claims** (vendor/distro/internal) with trust score + signatures. +3. **Raise Exception** (time‑boxed) + + * **Required fields:** attested reason (dropdown + free text), expiry date, recheck policy (e.g., “fail build if new reachable path appears”, “fail if EPSS > X”). + * **Attestation:** DSSE‑signed exception object, OCI‑attached to artifact digest. + * Auto‑lands in **Audit Pack** (PDF/JSON bundle) and **Timeline**. + +**Data model (C# POCO sketch)** + +```csharp +record ExploitPathId(string ArtifactDigest, string PackagePurl, string CveId, string Symbol, string EntryPathHash); + +record ExceptionObj( + string Id, ExploitPathId Path, string ReasonCode, string ReasonText, + DateTimeOffset IssuedAt, DateTimeOffset ExpiresAt, + RecheckPolicy Policy, EvidenceRef[] Evidence, AttestationMeta Att); +``` + +**RecheckPolicy (examples)** + +* `ReachGraphChange=Block` +* `EPSSAbove=0.5` +* `EnvScope=prod-only` +* `UnknownsAbove=N → Block` + +**UI essentials** + +* **Inbox:** 3‑pane (List • Details • Proof). +* **Proof drawer:** collapsible reach subgraph; one‑click JSON export. +* **Exception modal:** expiry presets (7/30/90 days), reason templates (backport, feature‑flag‑off, compensating control). 
+ +**APIs (REST-ish)** + +* `GET /triage/inbox?scope=env:prod&quiet=true` +* `GET /triage/path/{id}/proof` +* `POST /exceptions` (body = ExceptionObj sans AttestationMeta; server returns DSSE envelope) +* `GET /audit-packs/{releaseDigest}` + +**Guardrails** + +* Exceptions must **never** outlive a release line by default (force renewal). +* Creating an exception **requires evidence hooks** (see below). +* Build gates read exceptions; if proof no longer holds (reach diff changed), gate fails. + +--- + +# Knowledge Snapshots & Merge Semantics + +**Plain English:** Take a sealed “photo” of everything you *know* at a point in time—SBOM, VEX, attestations, policies—so audits and incident reviews can be replayed exactly. + +**Lifecycle: Snapshot → Seal → Export** + +1. **Snapshot** + + * Capture: SBOM (CycloneDX 1.6 + SPDX 3.0.1), VEX docs, reach subgraphs, exception objects, policies, trust scores, feed versions, rules hashes. +2. **Seal** + + * Compute **Manifest of Manifests** (Merkle root) + DSSE signature (with PQ option). + * Store to **Authority** (Postgres) and attach to image digest (OCI ref). +3. **Export** + + * Produce a single **Replay Bundle** (`.stella-replay.tgz`): data + `REPLAY.yaml` (inputs, versions, lattice rules). + * Offline‑ready. + +**Policy pane with merge semantics** + +* Default preview: **vendor ⊕ distro ⊕ internal** (not “vendor > distro > internal”). +* **Lattice rules** define resolution (e.g., `NOT_AFFECTED ⊕ AFFECTED → AFFECTED unless Evidence(feature_flag_off)`). +* **Evidence hooks (required):** + + * “Not affected because feature X off” → must include **feature‑flag attestation** (env‑scoped, signed). + * “Backported patch” → must include **patch‑index** mapping (`fixed‑symbols`, commit OIDs). + * “Compensating control” → must include **control attestation** (control ID, monitoring link, SLO). + +**UI essentials** + +* **Snapshot panel:** shows inputs (feed versions, rules hash), diff vs last snapshot, “Seal & Export” button. 
+* **Policy pane:** interactive merge preview; failed hooks highlighted with “Add evidence” CTA.
+* **Replay check:** “Verify determinism” runs local re‑eval; shows PASS/FAIL badge.
+
+**APIs**
+
+* `POST /snapshots` → returns `SnapshotId` + content hashes
+* `POST /snapshots/{id}/seal` → returns DSSE + Merkle root
+* `GET /snapshots/{id}/export` → stream bundle
+* `POST /policy/preview` (inputs: sources+claims) → resolved verdict + missing-evidence list
+
+**Storage**
+
+* **Postgres** = system of record (immutable rows for sealed snapshots).
+* **Valkey** (optional) = cache (diffs, precomputed subgraphs).
+* **OCI** = distribution of attestations & snapshots alongside images.
+
+---
+
+# PM & Dev acceptance checklist (short)
+
+* **Triage Inbox**
+
+  * [ ] Groups by exploit path, not by CVE alone.
+  * [ ] Proof bundle includes reach subgraph + symbol map + VEX claims.
+  * [ ] Exception modal enforces reason + expiry + recheck policy + evidence.
+  * [ ] Exceptions are DSSE‑signed and appear in Audit Pack.
+
+* **Knowledge Snapshot**
+
+  * [ ] Snapshot records *all* inputs (feeds, rules, versions, hashes).
+  * [ ] Seal produces a verifiable Merkle root + DSSE.
+  * [ ] Export bundle replays deterministically offline.
+  * [ ] Policy pane supports lattice merges + evidence hooks with blocking states.
+
+---
+
+# Tiny implementation notes (.NET 10)
+
+* Use **record structs** for small immutable IDs; keep hash fields as `ReadOnlyMemory<byte>`.
+* Attest with **in‑toto/DSSE**; provide PQ toggle (Dilithium/Falcon) at key‑policy level.
+* Graphs: store **reach subgraph** as compressed adjacency lists; index by `(digest, symbol)`.
+
+---
+
+If you want, I can turn this into: (a) Swagger stubs, (b) EF Core schema + migrations, or (c) a Figma‑ready UI spec with screen flows and copy.
diff --git a/docs/product-advisories/archive b/docs/product-advisories/archive deleted file mode 100644 index 3f7b7ca7b..000000000 --- a/docs/product-advisories/archive +++ /dev/null @@ -1,1028 +0,0 @@ -Here’s a compact, practical blueprint to fuse **Smart‑Diff** with a **Call‑Stack Reachability** engine and emit **DSSE‑signed diff attestations** that carry a **weighted impact index**—so Stella Ops can prove not just “what changed,” but “how risky the change is in runtime‑reachable code.” - ---- - -# What this does (in plain words) - -* **Smart‑Diff**: computes semantic diffs between two artifact states (SBOMs, lockfiles, symbols, call maps). -* **Reachability**: measures whether a changed function/package is actually on a path that executes in your services (based on call graph + entrypoints). -* **Weighted Impact Index (WII)**: one number (0–100) that rises when the change lands on short, highly‑used, externally‑facing, or privileged call paths—and when known vulns become more “deterministic” (exploitably reachable). -* **DSSE Attestation**: a signed, portable JSON (in‑toto/DSSE) that binds the diff + WII to build/run evidence. 
- ---- - -# Signals that feed the Weighted Impact Index - -**Per‑delta features** (each contributes a weight; defaults in brackets): - -* `Δreach_len` – change in **shortest reachable path length** to an entrypoint (−∞..+∞) [w=0.25] -* `Δlib_depth` – change in **library call depth** (indirection layers) [w=0.1] -* `exposure` – whether the touched symbol is **public/external‑facing** (API, RPC, HTTP route) [w=0.15] -* `privilege` – whether path crosses **privileged sinks** (deserialization, shell, fs/net, crypto) [w=0.15] -* `hot_path` – historical runtime evidence (pprof, APM, eBPF) showing **frequent execution** [w=0.15] -* `cvss_v4` – normalized CVSS v4.0 severity for affected CVEs (0–10 → 0–1) [w=0.1] -* `epss_v4` – exploit probability (0–1) [w=0.1] -* `guard_coverage` – presence of sanitizers/validations on the path (reduces score) [w=−0.1] - -**Determinism nudge** -If `reachability == true` and `(cvss_v4 > 0.7 || epss_v4 > 0.5)`, add a +5 bonus to reflect “evidence‑linked determinism.” - -**Final WII** - -``` -WII = clamp01( Σ (w_i * feature_i_normalized) ) * 100 -``` - ---- - -# Minimal data you need in the engines - -## 1) Smart‑Diff (inputs/outputs) - -**Inputs:** SBOM(CycloneDX), symbol graph (per‑lang indexers), lockfiles, route maps. -**Outputs:** `DiffUnit[]` with: - -```json -{ - "unitId": "pkg:npm/lodash@4.17.21#function:merge", - "change": "modified|added|removed", - "before": {"hash":"...", "attrs": {...}}, - "after": {"hash":"...", "attrs": {...}} -} -``` - -## 2) Reachability Engine - -**Inputs:** call graph (nodes: symbols; edges: calls), entrypoints (HTTP routes, jobs, message handlers), runtime heat (optional). -**Queries:** `isReachable(symbol)`, `shortestPathLen(symbol)`, `libCallDepth(symbol)`, `hasPrivilegedSink(path)`, `hasGuards(path)`. - ---- - -# Putting it together (pipeline) - -1. **Collect**: For image/artifact A→B, build call graph, import SBOMs, CVE map, EPSS/CVSS data, routes, runtime heat. -2. 
**Diff**: Run Smart‑Diff → `DiffUnit[]`. -3. **Enrich per DiffUnit** using Reachability: - - * `reachable = isReachable(unit.symbol)` - * `Δreach_len = shortestPathLen_B - shortestPathLen_A` - * `Δlib_depth = libCallDepth_B - libCallDepth_A` - * `exposure/privilege/hot_path/guard_coverage` booleans from path analysis - * `cvss_v4/epss_v4` from Feed (Concelier) + Excititor -4. **Score**: Compute `WII` per unit; also compute **artifact‑level WII** as: - - * `max(WII_unit)` and `p95(WII_unit)` for “spike” vs “broad” impact. -5. **Attest**: Emit DSSE statement with diff + scores + evidence URIs (SBOM digest, call‑graph digest, logs). -6. **Publish/Store**: Rekor(v2) mirror (Proof‑Market Ledger), and PostgreSQL as system‑of‑record. - ---- - -# DSSE statement (example) - -```json -{ - "_type": "https://in-toto.io/Statement/v1", - "subject": [{"name":"ghcr.io/acme/payments:1.9.3","digest":{"sha256":"..."} }], - "predicateType": "https://stella-ops.org/attestations/smart-diff-wii@v1", - "predicate": { - "artifactBefore": {"digest":{"sha256":"..."}}, - "artifactAfter": {"digest":{"sha256":"..."}}, - "evidence": { - "sbomBefore":{"mediaType":"application/vnd.cdx+json","digest":{"sha256":"..." }}, - "sbomAfter": {"mediaType":"application/vnd.cdx+json","digest":{"sha256":"..." 
}}, - "callGraph": {"mediaType":"application/vnd.stella.callgraph+json","digest":{"sha256":"..."}}, - "runtimeHeat": {"mediaType":"application/json","optional":true,"digest":{"sha256":"..."}} - }, - "units": [{ - "unitId":"pkg:nuget/Newtonsoft.Json@13.0.3#type:JsonSerializer", - "change":"modified", - "features":{ - "reachable":true, - "deltaReachLen":-2, - "deltaLibDepth":-1, - "exposure":true, - "privilege":true, - "hotPath":true, - "guardCoverage":false, - "cvssV4":0.84, - "epssV4":0.61 - }, - "wii": 78.4, - "paths":[ - {"entry":"HTTP POST /api/import","shortestLen":3,"privSinks":["fs.write"] } - ] - }], - "aggregate": {"maxWii": 78.4, "p95Wii": 42.1} - }, - "dsse": {"alg":"ed25519","keyid":"stella-authority:kid:abc123","sig":"..."} -} -``` - ---- - -# .NET 10 integration (skeletal but end‑to‑end) - -## Contracts - -```csharp -public record DiffUnit(string UnitId, ChangeKind Change, Attr? Before, Attr? After); - -public interface IReachabilityService { - bool IsReachable(SymbolId s); - int? ShortestPathLen(SymbolId s); - int LibCallDepth(SymbolId s); - bool PathHasPrivilegedSinks(SymbolId s); - bool PathHasGuards(SymbolId s); - bool IsHotPath(SymbolId s); -} - -public sealed class WiiScorer { - public double Score(WiiFeatures f) { - double sum = - 0.25 * NormalizeDelta(f.DeltaReachLen) + - 0.10 * NormalizeDelta(f.DeltaLibDepth) + - 0.15 * Bool(f.Exposure) + - 0.15 * Bool(f.Privilege) + - 0.15 * Bool(f.HotPath) + - 0.10 * Clamp01(f.CvssV4) + - 0.10 * Clamp01(f.EpssV4) - - 0.10 * Bool(f.GuardCoverage); - if (f.Reachable && (f.CvssV4 > 0.7 || f.EpssV4 > 0.5)) sum += 0.05; - return Math.Round(Clamp01(sum) * 100, 1); - } - // helper normalizers (Δ capped to ±5 for stability) - static double NormalizeDelta(int? d) => Clamp01(((d ?? 0) + 5) / 10.0); - static double Bool(bool b) => b ? 
1.0 : 0.0; - static double Clamp01(double x) => Math.Min(1, Math.Max(0, x)); -} -``` - -## Orchestrator (Scanner.WebService or Scheduled.Worker) - -```csharp -public async Task RunAsync(Artifact before, Artifact after) { - var diffs = await _smartDiff.ComputeAsync(before, after); - var scorer = new WiiScorer(); - var units = new List(); - - foreach (var d in diffs) { - var s = SymbolId.Parse(d.UnitId); - var feat = new WiiFeatures { - Reachable = _reach.IsReachable(s), - DeltaReachLen = SafeDelta(_reach.ShortestPathLen(s), _baseline.ShortestPathLen(s)), - DeltaLibDepth = _reach.LibCallDepth(s) - _baseline.LibCallDepth(s), - Exposure = _exposure.IsExternalFacing(s), - Privilege = _reach.PathHasPrivilegedSinks(s), - HotPath = _reach.IsHotPath(s), - GuardCoverage = _reach.PathHasGuards(s), - CvssV4 = _vuln.CvssV4For(s), - EpssV4 = _vuln.EpssV4For(s) - }; - var wii = scorer.Score(feat); - units.Add(new AttestedUnit(d.UnitId, d.Change, feat, wii, _reach.PathPreview(s))); - } - - var agg = new { - maxWii = units.Max(u => u.Wii), - p95Wii = Percentile(units.Select(u => u.Wii), 0.95) - }; - - var stmt = _attestor.BuildDsse(before, after, units, agg, _evidence.Hashes()); - return await _attestor.SignAsync(stmt); -} -``` - ---- - -# Where it lives in Stella Ops - -* **Concelier** (feeds): CVE → CVSS v4.0 and EPSS v4 hydration. -* **Excititor** (VEX): accepts WII + reachability to mark *Affected/Not Affected/Under Investigation* with evidence. -* **Scanner.WebService & Scanner.Workers**: build call graphs, compute diffs, ask Concelier/Excititor for scores, produce attestations. -* **Notify.WebService**: triggers when `aggregate.maxWii >= threshold` or when `reachable && epss_v4 > X`. - ---- - -# Developer checklist (DoD) - -* [ ] Per‑language call‑graph adapters: .NET, Java, Node, Python (symbol → entrypoint reach; shortest path). -* [ ] Smart‑Diff emits `unitId` at **function/method granularity** (fall back to package). 
-* [ ] Concelier endpoints to fetch **CVSS v4** and **EPSS v4** per `purl`/symbol. -* [ ] WiiScorer unit tests with frozen fixtures (golden files). -* [ ] DSSE attestation schema + JSON Schema validation. -* [ ] Rekor(v2) publish & local verification; offline bundle export. -* [ ] Policy gates: block deploy if `maxWii ≥ 70` **and** `reachable == true`. -* [ ] UI panel: show **top 5 WII units**, path preview, “why this score” explainer. -* [ ] CLI: `stella attest diff --before A --after B --emit dsse.json`. - ---- - -# Why this strengthens the moat - -* Turns **reachability** into a **determinism signal**, not just “found a CVE.” -* Binds **evidence** (graphs, SBOMs, metrics) into **signed, portable** proofs. -* Produces a **single, comparable index** that procurement/compliance and SREs can act on in seconds. - -If you want, I can drop in a ready‑to‑run `.NET 10` sample project (Scanner plug‑in + WiiScorer tests) and a JSON‑Schema for the DSSE predicate. -Below is a **full, end‑to‑end implementation plan** to ship “Smart‑Diff + Call‑Stack Reachability + Weighted Impact Index + DSSE‑signed diff attestations” into production inside Stella Ops (Scanner / Concelier / Excititor / Notify), with clear sequencing, deliverables, and the concrete engineering work breakdown. - ---- - -## 1) Target outcomes and success criteria - -### Outcomes - -1. **For any artifact upgrade (A → B)**, produce a **DSSE‑signed in‑toto Statement** that includes: - - * What changed (Smart‑Diff) - * What is runtime reachable (Call‑Stack Reachability) - * A **per‑change Weighted Impact Index (WII)** and artifact aggregates (max, p95) - * Evidence digests (SBOMs, call graph, runtime heat, vuln feeds) bound to the attestation - -2. **Policy gates** can block or require approval based on: - - * `maxWII`, `reachable == true`, `epss/cvss thresholds`, “privileged path” flags, etc. - -3. **Operators can explain the score**: top changes, entrypoints, shortest path, sinks, guards, heat. 
- -### Success criteria (Definition of Done) - -* ✅ Deterministic diff attestations (same inputs → same statement bytes before signing) -* ✅ Signature verification succeeds offline using published key(s) -* ✅ Correct reachability on representative services (route hits match predicted reachable set) -* ✅ Low noise: “unreachable changes” do not page -* ✅ Scoring is configurable and versioned (weights tracked) -* ✅ Works in CI and/or cluster scanning pipeline -* ✅ Stored in ledger + queryable DB + searchable UI - ---- - -## 2) System architecture and data flow - -### High-level data flow - -``` -Artifact A/B (OCI digest, build metadata) - ├─ SBOM A/B (CycloneDX) - ├─ Symbol Index A/B (function/type identifiers) - ├─ Call Graph A/B (nodes=symbols, edges=calls, entrypoints) - ├─ Runtime Heat (optional; APM/eBPF) - ├─ Vuln Intelligence (CVSS v4, EPSS) from Concelier - │ - └─ Smart-Diff (A vs B) -> DiffUnit[] - └─ Reachability enrich (A/B) -> features per unit - └─ WII scoring - └─ DSSE in-toto Statement (predicate) - └─ Sign (KMS/HSM/Key vault) -> DSSE envelope - ├─ Publish to Ledger (Rekor-like) - ├─ Store in Postgres + Object store - └─ Notify/Policy evaluation -``` - -### Services / modules to implement or extend - -* **Scanner.Workers**: build evidence (SBOM, call graph), compute diff, compute reachability features, compute WII -* **Scanner.WebService**: APIs to request attestations, query results, verify -* **Concelier**: CVE → package/symbol mapping, CVSS v4 + EPSS hydration -* **Excititor**: produce/ingest VEX decisions using WII + reachability evidence -* **Notify**: alerting rules and policy gates for CI/CD and runtime - ---- - -## 3) Core data contracts (must come first) - -### 3.1 Stable identifiers - -You need **stable IDs** for everything so diffs and reachability join correctly: - -* **Artifact ID**: OCI digest (sha256) + image name + tag (tag is not trusted as primary) -* **Package ID**: PURL (package-url standard) -* **Symbol ID**: language-specific but 
normalized: - - * `.NET`: `assembly::namespace.type::method(signature)` or `pdb mvid + token` - * Java: `class#method(desc)` - * Node: `module:path#exportName` (best-effort) - * Python: `module:function` (best-effort) - -Rule: **Symbol IDs must remain stable across rebuilds** where possible (prefer token-based or fully-qualified signature). - -### 3.2 Predicate schema v1 - -Lock the predicate shape early: - -* `artifactBefore`, `artifactAfter` digests -* `evidence` digests/URIs (sbom, callGraph, runtimeHeat, vulnSnapshot) -* `units[]` (diff units with features, paths, wii) -* `aggregate` (max/p95 etc.) -* `scoring` (weights + version + normalizer caps) -* `generator` info (scanner version, build id) - -Deliverables: - -* JSON Schema for predicate `smart-diff-wii@v1` -* Protobuf (optional) for internal transport -* Golden-file fixtures for serialization determinism - ---- - -## 4) Phase plan (sequenced deliverables, no guessy timelines) - -### Milestone M0 — Foundations (must be completed before “real scoring”) - -**Goal:** You can sign/verify attestations and store evidence. - -**Work items** - -1. **DSSE + in-toto Statement implementation** - - * Choose signing primitive: Ed25519 or ECDSA P‑256 - * Implement: - - * Statement builder (canonical JSON) - * DSSE envelope wrapper - * Signature verify endpoint + CLI - * Add key rotation fields: `keyid`, `predicateType version`, `scanner build version` - -2. **Evidence store** - - * Object storage bucket layout: - - * `/sbom/{artifactDigest}.cdx.json` - * `/callgraph/{artifactDigest}.cg.json` - * `/runtimeheat/{service}/{date}.json` - * `/vuln-snapshot/{date}.json` - * Every evidence object has digest recorded in DB - -3. 
**Database schema** - - * `artifacts(id, digest, name, createdAt, buildMetaJson)` - * `evidence(id, artifactDigest, kind, digest, uri, createdAt)` - * `attestations(id, subjectDigest, beforeDigest, afterDigest, predicateType, dsseDigest, createdAt, aggregateJson)` - * `attestation_units(attestationId, unitId, changeKind, reachable, wii, featuresJson, pathsJson)` - -4. **Ledger integration** - - * Minimal: append-only table + hash chaining (if you want quickly) - * Full: publish to Rekor-like transparency log if already present in your ecosystem - -**DoD** - -* `stella verify dsse.json` returns OK -* Stored attestations can be fetched by subject digest -* Evidence digests validate - ---- - -### Milestone M1 — Smart‑Diff v1 (package + file level) - -**Goal:** Produce a signed attestation that captures “what changed” even before reachability. - -**Work items** - -1. **SBOM ingestion & normalization** - - * Parse CycloneDX SBOM - * Normalize component identity to PURL - * Extract versions, hashes, scopes, dependencies edges - -2. **Diff engine (SBOM-level)** - - * Added/removed/updated packages - * Transitive dependency changes - * License changes (optional) - * Output `DiffUnit[]` at package granularity first - -3. **Attestation emitter** - - * Populate predicate with: - - * units (packages) - * aggregate metrics: number of changed packages; “risk placeholders” (no reachability yet) - -**DoD** - -* For a dependency bump PR, the system emits DSSE attestation with package diffs - ---- - -### Milestone M2 — Call graph & reachability for .NET (first “real value”) - -**Goal:** For .NET services, determine whether changed symbols/packages are reachable from entrypoints. - -**Work items** - -1. 
**.NET call graph builder** - - * Implement Roslyn-based static analysis for: - - * method invocations - * virtual calls (best-effort: conservative edges) - * async/await (capture call relations) - * Capture: - - * nodes: methods/functions - * edges: caller → callee - * metadata: assembly, namespace, accessibility, source location (if available) - -2. **Entrypoint extractor (.NET)** - - * ASP.NET Core minimal APIs: - - * `MapGet/MapPost/...` route mapping to delegate targets - * MVC/WebAPI: - - * controller action methods - * gRPC endpoints - * Background workers: - - * `IHostedService`, Hangfire jobs, Quartz jobs - * Message handlers: - - * MassTransit / Kafka consumers (pattern match + config hooks) - -3. **Reachability index** - - * Store adjacency lists - * For each entrypoint, compute: - - * reachable set - * shortest path length to each reachable node (BFS on unweighted graph) - * path witness (store 1–3 representative paths for explainability) - * Store: - - * `distToNearestEntrypoint[node]` - * `nearestEntrypoint[node]` - * (optional) `countEntrypointsReaching[node]` - -4. **Join Smart‑Diff with reachability** - - * If you only have package diffs at this stage: - - * Map package → symbol set using symbol index - * Mark unit reachable if any symbol reachable - * If you already have symbol diffs: - - * Directly query reachability per symbol - -**DoD** - -* For a PR that changes a controller path or core code, top diffs show reachable paths -* For a PR that only touches unreachable code (dead feature flags), system marks unreachable - ---- - -### Milestone M3 — Smart‑Diff v2 (symbol-level diffs) - -**Goal:** Move from “package changed” to “what functions/types changed.” - -**Work items** - -1. **Symbol indexer (.NET)** - - * Extract public/internal symbols - * Map symbol → file/module + hash of IL/body (or semantic hash) - * Record signature + accessibility + attributes - -2. 
**Symbol-level diff** - - * Added/removed/modified methods/types - * Semantic hashing to avoid noise from non-functional rebuild differences - * Generate unit IDs like: - - * `pkg:nuget/Newtonsoft.Json@13.0.3#method:Namespace.Type::Method(args)` - -3. **Unit grouping** - - * Group symbol deltas under: - - * package delta - * assembly delta - * “API surface delta” (public symbol changes) for exposure - -**DoD** - -* Attestation units list individual changed symbols with reachable evidence - ---- - -### Milestone M4 — Feature extraction for WII - -**Goal:** Compute the features that make WII meaningful and explainable. - -**Work items** - -1. **Exposure classification** - - * `exposure=true` if symbol is: - - * directly an entrypoint method - * in the shortest path to an entrypoint - * part of public API surface changes - * Store explanation: “reachable from HTTP POST /x” - -2. **Privilege sink detection** - - * Maintain a versioned sink catalog: - - * deserialization entrypoints - * process execution - * filesystem writes - * network dial / SSRF primitives - * crypto key handling - * dynamic code evaluation - * Mark if any witness path crosses sinks - * Store sink list in `paths[]` - -3. **Guard coverage detection** - - * Catalog of guard functions: - - * input validation, sanitizers, authz checks, schema validators - * Heuristic: on witness path, detect guard call before sink - * `guardCoverage=true` reduces WII - -4. **Library depth** - - * Compute “lib call depth” heuristics: - - * number of frames from entrypoint to unit - * number of boundary crossings (app → lib → lib) - * Use in scoring normalization - -5. 
**Runtime heat integration (optional but high impact)** - - * Ingest APM sampling / pprof / eBPF: - - * `(symbolId → invocationCount or CPU%)` - * Normalize to 0..1 `hotPath` - * Add mapping strategy: - - * route names → controller action symbols - -**DoD** - -* Every unit has a features object with enough fields to justify its score - ---- - -### Milestone M5 — Vulnerability intelligence + determinism linkage - -**Goal:** Use Concelier data to raise score when reachable changes align with exploitable vulns. - -**Work items** - -1. **Vuln snapshot service (Concelier)** - - * Provide API: - - * `GET /vuln/by-purl?purl=...` → CVEs + CVSS v4 + EPSS - * `GET /vuln/snapshot/{date}` for reproducibility - -2. **Package ↔ symbol ↔ vuln mapping** - - * Map CVE affected package versions to `DiffUnit` packages - * (Optional advanced) map to symbols if your feed provides function-level info - -3. **Determinism rule** - - * If `reachable=true` AND (cvss>threshold OR epss>threshold) add bonus - * Record “why” in unit explanation metadata - -**DoD** - -* A dependency bump that introduces/removes a CVE changes WII accordingly -* Attestation includes vuln snapshot digest - ---- - -### Milestone M6 — WII scoring engine v1 + calibration - -**Goal:** Produce a stable numeric index and calibrate thresholds. - -**Work items** - -1. **Scoring engine** - - * Implement WII as: - - * weighted sum of normalized features - * clamp to 0..100 - * Make scoring config **external + versioned**: - - * weights - * normalization caps (e.g., delta path len capped at ±5) - * determinism bonus amounts - * Include `scoring.version` and config hash in predicate - -2. **Golden tests** - - * Fixture diffs with expected WII - * Regression tests for scoring changes (if weights change, version bumps) - -3. 
**Calibration workflow** - - * Backtest on historical PRs/incidents: - - * correlate WII with incidents / rollbacks / sev tickets - * Produce recommended initial gate thresholds: - - * block: `maxWII >= X` and reachable and privileged - * warn: `p95WII >= Y` - * Store calibration report as an artifact (so you can justify policy) - -**DoD** - -* Score doesn’t oscillate due to minor code movement -* Thresholds are defensible and adjustable - ---- - -### Milestone M7 — Policy engine + CI/CD integration - -**Goal:** Make it enforceable. - -**Work items** - -1. **Policy evaluation component** - - * Inputs: - - * DSSE attestation - * verification result - * environment context (prod/stage) - * Output: - - * allow / warn / block + reason - -2. **CI integration** - - * Pipeline step: - - * build artifact - * generate evidence - * compute diff against deployed baseline - * emit + sign attestation - * run policy gate - * Attach attestation to build metadata / OCI registry (as referrers if supported in your ecosystem) - -3. **Deployment integration** - - * Admission controller / deploy-time check: - - * verify signature - * enforce policy - -**DoD** - -* A deployment with `reachable && maxWII >= threshold` is blocked or requires approval - ---- - -### Milestone M8 — UI/UX and operator experience - -**Goal:** People can understand and act quickly. - -**Work items** - -1. **Diff attestation viewer** - - * Show: - - * aggregate WII (max/p95) - * top units by WII - * per unit: features + witness path(s) - * sinks/guards - * vuln evidence (CVSS/EPSS) with snapshot date - -2. **Explainability** - - * “Why this score” breakdown: - - * weights * feature values - * determinism bonus - * Link to evidence objects (SBOM digest, call graph digest) - -3. 
**Notifications** - - * Rules: - - * page if `maxWII >= hard` and reachable and privileged - * slack/email if `maxWII >= warn` - * Include the top 3 units with paths - -**DoD** - -* Operators can make a decision within ~1–2 minutes reading the UI (no digging through logs) - ---- - -### Milestone M9 — Multi-language expansion + runtime reachability improvements - -**Goal:** Expand coverage beyond .NET and reduce static-analysis blind spots. - -**Work items** - -1. **Language adapters** - - * Java: bytecode analyzer (ASM/Soot-like approach), Spring entrypoints - * Node: TypeScript compiler graph, Express/Koa routes (heuristics) - * Python: AST + import graph; Django/FastAPI routes (heuristics) - -2. **Dynamic call handling** - - * Reflection / DI / dynamic dispatch: - - * conservative edges in static - * supplement with runtime traces to confirm - -3. **Distributed reachability** - - * Cross-service edges inferred from: - - * OpenTelemetry traces (service A → service B endpoint) - * Build “service-level call graph” overlay: - - * entrypoints + downstream calls - -**DoD** - -* Coverage reaches your top N services and languages -* False negatives reduced by runtime evidence - ---- - -## 5) Detailed engineering work breakdown (by component) - -### A) Smart‑Diff engine - -**Deliverables** - -* `ISmartDiff` interface with pluggable diff sources: - - * SBOM diff - * lockfile diff - * symbol diff - * route diff (entrypoints changed) - -**Key implementation tasks** - -* Normalization layer (PURL, symbol IDs) -* Diff computation: - - * add/remove/update - * semantic hash comparison -* Output: - - * stable `DiffUnit` list - * deterministic ordering (sort by unitId) - -**Risk controls** - -* Deterministic hashing and ordering to keep DSSE stable -* “Noise filters” for rebuild-only diffs - ---- - -### B) Call graph builder - -**Deliverables** - -* `CallGraph` object: - - * nodes, edges - * node metadata - * entrypoints list - -**Key tasks** - -* Static analysis per language -* 
Entrypoint extraction (routes/jobs/consumers) -* Graph serialization format: - - * versioned - * compressed adjacency lists - -**Risk controls** - -* Expect incomplete graphs; never treat as perfect truth -* Maintain confidence score per edge if desired - ---- - -### C) Reachability service - -**Deliverables** - -* `IReachabilityService` with: - - * `IsReachable(symbol)` - * `ShortestPathLen(symbol)` - * `PathPreview(symbol)` (witness) - * `LibCallDepth(symbol)` - * `PathHasPrivilegedSinks(symbol)` - * `PathHasGuards(symbol)` - -**Key tasks** - -* BFS from entrypoints -* Store distances and witnesses -* Cache per artifact digest -* Incremental updates: - - * recompute only impacted parts when call graph changes (optional optimization) - ---- - -### D) Feature extraction + WII scorer - -**Deliverables** - -* `WiiFeatures` + `WiiScorer` -* Versioned `ScoringConfig` (weights/normalizers) - -**Key tasks** - -* Normalization functions (caps and monotonic transforms) -* Determinism bonus logic -* Aggregation (max, p95, counts by changeKind) - -**Risk controls** - -* Scoring changes require a version bump -* Golden tests + backtests - ---- - -### E) Attestation service - -**Deliverables** - -* `BuildStatement(...)` -* `SignDsse(...)` -* `VerifyDsse(...)` - -**Key tasks** - -* Canonical JSON serialization (avoid map-order randomness) -* Key management: - - * key IDs - * rotation and revocation list handling -* Attach evidence digest set - -**Risk controls** - -* Sign only canonical bytes -* Record scanner version and config hash - ---- - -### F) Persistence + ledger - -**Deliverables** - -* DB migrations -* Object store client -* Ledger publish/verify integration - -**Key tasks** - -* Store DSSE envelope bytes and computed digest -* Index by: - - * subject digest - * before/after - * maxWII - * reachable count -* Retention policies - ---- - -### G) Policy + Notifications - -**Deliverables** - -* Policy rules (OPA/Rego or internal DSL) -* CI step and deploy-time verifier -* 
Notify workflows - -**Key tasks** - -* Verification must be mandatory before policy evaluation -* Provide human-readable reasons - ---- - -## 6) Testing strategy (ship safely) - -### Unit tests - -* Smart‑Diff normalization and diff correctness -* Reachability BFS correctness -* WII scoring determinism -* Predicate schema validation -* DSSE sign/verify roundtrip - -### Integration tests - -* Build sample .NET service → generate call graph → diff two versions → attest -* Concelier mocked responses for CVSS/EPSS - -### End-to-end tests - -* In CI: build → attest → store → verify → policy gate -* In deployment: admission check verifies signature and policy - -### Performance tests - -* Large call graph (100k+ nodes) BFS time and memory -* Batch scoring of thousands of diff units - -### Security tests - -* Tampered evidence digest detection -* Signature replay attempts (wrong subject digest) -* Key rotation tests - ---- - -## 7) Rollout plan and operational guardrails - -### Rollout stages - -1. **Observe-only mode** - - * Generate attestations, no gates - * Tune scoring weights and thresholds - -2. **Warn mode** - - * Notify only for high WII or reachable vuln combos - -3. **Enforce mode** - - * Block only on clear high-risk conditions (reachable + privileged + high WII) - * Add “break glass” path with audit logging - -### Operational metrics - -* Attestation generation success rate -* Verification failure rate -* Reachability coverage (% units with reachable computation) -* False positive/negative reports (human feedback) -* Policy gate blocks over time - -### Playbooks - -* What to do when: - - * call graph generation fails - * Concelier feed unavailable - * signature verification fails - * scoring config mismatch - ---- - -## 8) Concrete “first 10 commits” checklist - -1. Add predicate JSON schema + canonical JSON serializer -2. Implement DSSE sign/verify library + CLI command -3. Create DB schema + evidence storage plumbing -4. 
Implement SBOM ingestion + SBOM diff -> `DiffUnit[]` -5. Emit DSSE attestation for SBOM diffs only -6. Implement .NET entrypoint extraction (minimal API + controllers) -7. Implement .NET call graph builder (basic invocations) -8. Implement reachability BFS + path witness extraction -9. Add WII scoring with config + golden tests -10. Add CI policy step (verify + evaluate thresholds) in warn-only mode - ---- - -## 9) Deliverables bundle (what you should end up with) - -* **Code** - - * Smart‑Diff engine + plugins - * Call graph builders (starting with .NET) - * Reachability service + caching - * WII scoring service + config - * Attestation builder + DSSE signer/verifier - * Policy evaluation step - * UI endpoints + viewer - -* **Schemas and specs** - - * `smart-diff-wii@v1` JSON schema - * Evidence media types and versioning rules - * Scoring config format + versioning policy - -* **Ops** - - * Playbooks and runbooks - * Metrics dashboards - * Key rotation procedure - * Backtest/calibration report - ---- - -If you want the plan converted into a **Jira-ready epic/story breakdown** (with each story having acceptance criteria and dependencies), tell me whether you’re implementing **only .NET first** or **multi-language from day one**—and I’ll output the backlog in that format. 
diff --git a/docs/product-advisories/17-Dec-2025 - Reachability Drift Detection.md b/docs/product-advisories/archived/17-Dec-2025 - Reachability Drift Detection.md similarity index 91% rename from docs/product-advisories/17-Dec-2025 - Reachability Drift Detection.md rename to docs/product-advisories/archived/17-Dec-2025 - Reachability Drift Detection.md index 8d17b3a7c..1bcf5d98a 100644 --- a/docs/product-advisories/17-Dec-2025 - Reachability Drift Detection.md +++ b/docs/product-advisories/archived/17-Dec-2025 - Reachability Drift Detection.md @@ -1,11 +1,22 @@ # Reachability Drift Detection **Date**: 2025-12-17 -**Status**: ANALYZED - Ready for Implementation Planning +**Status**: ARCHIVED - Implementation Complete (Sprints 3600.2-3600.3) +**Archived**: 2025-12-22 **Related Advisories**: - 14-Dec-2025 - Smart-Diff Technical Reference - 14-Dec-2025 - Reachability Analysis Technical Reference +**Implementation Documentation**: +- Architecture: `docs/modules/scanner/reachability-drift.md` +- API Reference: `docs/api/scanner-drift-api.md` +- Operations Guide: `docs/operations/reachability-drift-guide.md` + +**Follow-up Sprints**: +- `SPRINT_3600_0004_0001` - Node.js Babel Integration (TODO) +- `SPRINT_3600_0005_0001` - Policy CI Gate Integration (TODO) +- `SPRINT_3600_0006_0001` - Documentation Finalization (TODO) + --- ## 1. 
EXECUTIVE SUMMARY @@ -39,17 +50,17 @@ This advisory proposes extending StellaOps' Smart-Diff capabilities to detect ** ### 2.2 What's Missing (New Implementation Required) -| Component | Advisory Ref | Gap Description | -|-----------|-------------|-----------------| -| **Call Graph Extractor (.NET)** | §7 C# Roslyn | No MSBuildWorkspace/Roslyn analysis exists | -| **Call Graph Extractor (Go)** | §7 Go SSA | No golang.org/x/tools/go/ssa integration | -| **Call Graph Extractor (Java)** | §7 | No Soot/WALA integration | -| **Call Graph Extractor (Node)** | §7 | No @babel/traverse integration | -| **`scanner.code_changes` table** | §4 Smart-Diff | AST-level diff facts not persisted | -| **Drift Cause Explainer** | §6 Timeline | No causal attribution on path nodes | -| **Path Viewer UI** | §UX | No Angular component for call path visualization | -| **Cross-scan Function-level Drift** | §6 | State drift exists, function-level doesn't | -| **Entrypoint Discovery (per-framework)** | §3 | Limited beyond package.json/manifest parsing | +| Component | Advisory Ref | Gap Description | **Post-Implementation Status** | +|-----------|-------------|-----------------|-------------------------------| +| **Call Graph Extractor (.NET)** | §7 C# Roslyn | No MSBuildWorkspace/Roslyn analysis exists | **DONE** - `DotNetCallGraphExtractor` | +| **Call Graph Extractor (Go)** | §7 Go SSA | No golang.org/x/tools/go/ssa integration | **DONE** - `GoCallGraphExtractor` | +| **Call Graph Extractor (Java)** | §7 | No Soot/WALA integration | **DONE** - `JavaCallGraphExtractor` (ASM) | +| **Call Graph Extractor (Node)** | §7 | No @babel/traverse integration | **PARTIAL** - Skeleton exists | +| **`scanner.code_changes` table** | §4 Smart-Diff | AST-level diff facts not persisted | **DONE** - Migration 010 | +| **Drift Cause Explainer** | §6 Timeline | No causal attribution on path nodes | **DONE** - `DriftCauseExplainer` | +| **Path Viewer UI** | §UX | No Angular component for call path visualization 
| **DONE** - `path-viewer.component.ts` | +| **Cross-scan Function-level Drift** | §6 | State drift exists, function-level doesn't | **DONE** - `ReachabilityDriftDetector` | +| **Entrypoint Discovery (per-framework)** | §3 | Limited beyond package.json/manifest parsing | **DONE** - 9 entrypoint types | ### 2.3 Terminology Mapping @@ -378,11 +389,13 @@ smart_diff: ### 7.2 Sub-Sprints -| ID | Topic | Priority | Effort | Dependencies | -|----|-------|----------|--------|--------------| -| SPRINT_3600_0002_0001 | Call Graph Infrastructure | P0 | Large | Master | -| SPRINT_3600_0003_0001 | Drift Detection Engine | P0 | Medium | 3600.2 | -| SPRINT_3600_0004_0001 | UI and Evidence Chain | P1 | Medium | 3600.3 | +| ID | Topic | Priority | Effort | Dependencies | **Status** | +|----|-------|----------|--------|--------------|------------| +| SPRINT_3600_0002_0001 | Call Graph Infrastructure | P0 | Large | Master | **DONE** | +| SPRINT_3600_0003_0001 | Drift Detection Engine | P0 | Medium | 3600.2 | **DONE** | +| SPRINT_3600_0004_0001 | Node.js Babel Integration | P1 | Medium | 3600.3 | TODO | +| SPRINT_3600_0005_0001 | Policy CI Gate Integration | P1 | Small | 3600.3 | TODO | +| SPRINT_3600_0006_0001 | Documentation Finalization | P0 | Medium | 3600.3 | TODO | --- diff --git a/docs/product-advisories/19-Dec-2025 - Stella Ops candidate features mapped to moat strength.md b/docs/product-advisories/archived/19-Dec-2025 - Stella Ops candidate features mapped to moat strength.md similarity index 100% rename from docs/product-advisories/19-Dec-2025 - Stella Ops candidate features mapped to moat strength.md rename to docs/product-advisories/archived/19-Dec-2025 - Stella Ops candidate features mapped to moat strength.md diff --git a/docs/product-advisories/20-Dec-2025 - Layered binary + call‑stack reachability.md b/docs/product-advisories/archived/2025-12-22-binary-reachability/20-Dec-2025 - Layered binary + call‑stack reachability.md similarity index 100% rename from 
docs/product-advisories/20-Dec-2025 - Layered binary + call‑stack reachability.md rename to docs/product-advisories/archived/2025-12-22-binary-reachability/20-Dec-2025 - Layered binary + call‑stack reachability.md diff --git a/docs/product-advisories/archived/2025-12-22-explainable-triage/18-Dec-2025 - Designing Explainable Triage and Proof-Linked Evidence.md b/docs/product-advisories/archived/2025-12-22-explainable-triage/18-Dec-2025 - Designing Explainable Triage and Proof-Linked Evidence.md new file mode 100644 index 000000000..edc89f32d --- /dev/null +++ b/docs/product-advisories/archived/2025-12-22-explainable-triage/18-Dec-2025 - Designing Explainable Triage and Proof-Linked Evidence.md @@ -0,0 +1,231 @@ +# ARCHIVED + +> **Archived:** 2025-12-22 +> **Reason:** Gap analysis complete; implementation planned via SPRINT_4300 series +> **Analysis:** `docs/implplan/analysis/4300_explainable_triage_gap_analysis.md` +> **Sprints:** `docs/implplan/SPRINT_4300_*.md` +> **Coverage:** ~85% already implemented via prior sprints (3800, 3801, 4100, 4200) +> **Remaining Gaps:** 6 sprints created to close gaps (CLI verify, Evidence API, Privacy, TTL, Schemas, Metrics) + +--- + +Here's a practical, first-time-friendly blueprint for making your security workflow both **explainable** and **provable**-from triage to approval. + +# Explainable triage UX (what & why) + +Show every risk score with the minimum evidence a responder needs to trust it: + +* **Reachable path:** the concrete call-chain (or network path) proving the vuln is actually hit. +* **Entrypoint boundary:** the external surface (HTTP route, CLI verb, cron, message topic) that leads to that path. +* **VEX status:** the exploitability decision (Affected/Not Affected/Under Investigation/Fixed) with rationale. +* **Last-seen timestamp:** when this evidence was last observed/generated. 
+ +## UI pattern (compact, 1-click expand) + +* **Row (collapsed):** `Score 72 - CVE-2024-12345 - service: api-gateway - package: x.y.z` +* **Expand panel (evidence):** + + * **Path:** `POST /billing/charge -> BillingController.Pay() -> StripeClient.Create()` + * **Boundary:** `Ingress: /billing/charge (JWT: required, scope: payments:write)` + * **VEX:** `Not Affected (runtime guard strips untrusted input before sink)` + * **Last seen:** `2025-12-18T09:22Z` (scan: sbomer#c1a2, policy run: lattice#9f0d) + * **Actions:** "Open proof bundle", "Re-run check", "Create exception (time-boxed)" + +## Data contract (what the panel needs) + +```json +{ + "finding_id": "f-7b3c", + "cve": "CVE-2024-12345", + "component": {"name": "stripe-sdk", "version": "6.1.2"}, + "reachable_path": [ + "HTTP POST /billing/charge", + "BillingController.Pay", + "StripeClient.Create" + ], + "entrypoint": {"type":"http","route":"/billing/charge","auth":"jwt:payments:write"}, + "vex": {"status":"not_affected","justification":"runtime_sanitizer_blocks_sink","timestamp":"2025-12-18T09:22:00Z"}, + "last_seen":"2025-12-18T09:22:00Z", + "attestation_refs": ["sha256:...sbom", "sha256:...vex", "sha256:...policy"] +} +``` + +# Evidence-linked approvals (what & why) + +Make "Approve to ship" contingent on **verifiable proof**, not screenshots: + +* **Chain** must exist and be machine-verifiable: **SBOM -> VEX -> policy decision**. +* Use **in-toto/DSSE** attestations or **SLSA provenance** so each link has a signature, subject digest, and predicate. +* **Gate** merges/deploys only when the chain validates. + +## Pipeline gate (simple policy) + +* Require: + + 1. **SBOM attestation** referencing the exact image digest + 2. **VEX attestation** covering all listed components (or explicit allow-gaps) + 3. 
**Policy decision attestation** (e.g., "risk <= threshold AND all reachable vulns = Not Affected/Fixed") + +### Minimal decision attestation (DSSE envelope -> JSON payload) + +```json +{ + "predicateType": "stella/policy-decision@v1", + "subject": [{"name":"registry/org/app","digest":{"sha256":""}}], + "predicate": { + "policy": "risk_threshold<=75 && reachable_vulns.all(v => v.vex in ['not_affected','fixed'])", + "inputs": { + "sbom_ref": "sha256:", + "vex_ref": "sha256:" + }, + "result": {"allowed": true, "score": 61, "exemptions":[]}, + "evidence_refs": ["sha256:"], + "run_at": "2025-12-18T09:23:11Z" + } +} +``` + +# How this lands in your product (concrete moves) + +* **Backend:** add `/findings/:id/evidence` (returns the contract above) + `/approvals/:artifact/attestations`. +* **Storage:** keep **proof bundles** (graphs, call stacks, logs) as content-addressed blobs; store DSSE envelopes alongside. +* **UI:** one list -> expandable rows; chips for VEX status; "Open proof" shows the call graph and boundary in 1 view. +* **CLI/API:** `stella verify image: --require sbom,vex,decision` returns a signed summary; pipelines fail on non-zero. +* **Metrics:** + + * **% changes with complete attestations** (target >=95%) + * **TTFE (time-to-first-evidence)** from alert -> panel open (target <=30s) + * **Post-deploy reversions** due to missing proof (trend to zero) + +# Starter acceptance checklist + +* [x] Every risk row expands to path, boundary, VEX, last-seen in <300 ms. *(SPRINT_4200_0001_0001)* +* [x] "Approve" button disabled until SBOM+VEX+Decision attestations validate for the **exact artifact digest**. *(SPRINT_4100_0005_0001)* +* [x] One-click "Show DSSE chain" renders the three envelopes with subject digests and signers. *(SPRINT_4200_0001_0001)* +* [x] Audit log captures who approved, which digests, and which evidence hashes. 
*(SPRINT_3801_0001_0004)* + +--- + +## Implementation Coverage Summary + +| Section | Coverage | Sprint(s) | +|---------|----------|-----------| +| Explainable Triage UX | 85% | 3800.*, 4200.0001.0001 | +| Evidence-Linked Approvals | 100% | 3801.*, 4100.* | +| Backend APIs | 85% | 3800.0003.0001, **4300.0001.0002** | +| CLI/API | 50% | 3500.0004.*, **4300.0001.0001** | +| Invariants | 90% | 4100.0003.*, **4300.0002.0002** | +| Data Model | 100% | Scanner.Triage | +| Evidence Types | 100% | 3800.0002.*, Evidence.Bundle | +| Predicate Types | 80% | 3801.*, **4300.0003.0001** | +| Policy Gate | 100% | Policy.Engine | +| Approve Button | 100% | 4100.0005.0001 | +| Privacy | 0% | **4300.0002.0001** | +| TTL Strategy | 50% | **4300.0002.0002** | +| Metrics | 30% | **4300.0003.0002** | + +--- + +*Original advisory content preserved below for reference.* + +--- + +## 1) Start with the invariants (the rules your system must never violate) + +If you implement nothing else, implement these invariants-they're what make the UX trustworthy and the approvals auditable. + +### Artifact anchoring invariant + +Every finding, every piece of evidence, and every approval must be anchored to an immutable **subject digest** (e.g., container image digest `sha256:...`, binary SHA, or SBOM digest). + +* No "latest tag" approvals. +* No "approve commit" without mapping to the built artifact digest. + +### Evidence closure invariant + +A policy decision is only valid if it references **exactly** the evidence it used: + +* `inputs.sbom_ref` +* `inputs.vex_ref` +* `inputs.reachability_ref` (optional but recommended) +* `inputs.scan_ref` (optional) +* and any config/IaC refs used for boundary/exposure. + +### Signature chain invariant + +Evidence is only admissible if it is: + +1. structured (machine readable), +2. signed (DSSE/in-toto), +3. verifiable (trusted identity/keys), +4. retrievable by digest. 
+ +### Staleness invariant + +Evidence must have: + +* `last_seen` and `expires_at` (or TTL), +* a "stale evidence" behavior in policy (deny or degrade score). + +--- + +## 2) Choose the canonical formats and where you'll store "proof" + +### Attestation envelope: DSSE + in-toto Statement + +Use: + +* **in-toto Attestation Framework** "Statement" as the payload model ("subject + predicateType + predicate"). +* Wrap it in **DSSE** for signing. + +### SBOM format: CycloneDX or SPDX + +* SPDX is an ISO/IEC standard and has v3.0 and v2.3 lines in the ecosystem. +* CycloneDX is an ECMA standard (ECMA-424) and widely used for application security contexts. + +Pick one as **your canonical** (internally), but ingest both. + +### VEX format: OpenVEX (practical) + map to "classic" VEX statuses + +VEX's value is triage noise reduction: vendors can assert whether a product is affected, fixed, under investigation, or not affected. +OpenVEX is a minimal, embeddable implementation of VEX intended for interoperability. + +### Where to store proof: OCI registry referrers + +Use OCI "subject/referrers" so proofs travel with the artifact: + +* OCI 1.1 introduces an explicit `subject` field and referrers graph for signatures/attestations/SBOMs. 
+ +--- + +## 3-13) [Additional Sections] + +*See original advisory for full content on:* +- System architecture +- Data model +- Explainable triage computation +- UI components +- Proof-linked evidence generation +- Predicate types +- Policy gate +- Approve button behavior +- Implementation details +- MVP path +- Quick checklist + +--- + +## References + +[1]: https://github.com/secure-systems-lab/dsse "DSSE: Dead Simple Signing Envelope" +[2]: https://github.com/in-toto/attestation "in-toto Attestation Framework" +[3]: https://docs.sigstore.dev/about/bundle/ "Sigstore Bundle Format" +[4]: https://spdx.dev/use/specifications/ "Specifications" +[5]: https://github.com/CycloneDX/specification "CycloneDX/specification" +[6]: https://www.ntia.gov/sites/default/files/publications/vex_one-page_summary_0.pdf "VEX one-page summary" +[7]: https://github.com/openvex/spec "OpenVEX Specification" +[8]: https://opencontainers.org/posts/blog/2024-03-13-image-and-distribution-1-1/ "OCI Image and Distribution Specs v1.1 Releases" +[9]: https://oras.land/docs/concepts/reftypes/ "Attached Artifacts | OCI Registry As Storage" +[10]: https://learn.microsoft.com/en-us/azure/container-registry/container-registry-manage-artifact "Manage OCI Artifacts and Supply Chain Artifacts with ORAS" +[11]: https://openpolicyagent.org/docs/policy-language "Policy Language" +[12]: https://docs.sigstore.dev/cosign/verifying/attestation/ "In-Toto Attestations" diff --git a/docs/product-advisories/21-Dec-2025 - Designing Explainable Triage Workflows.md b/docs/product-advisories/archived/21-Dec-2025 - Designing Explainable Triage Workflows.md similarity index 100% rename from docs/product-advisories/21-Dec-2025 - Designing Explainable Triage Workflows.md rename to docs/product-advisories/archived/21-Dec-2025 - Designing Explainable Triage Workflows.md diff --git a/docs/product-advisories/archived/22-Dec-2025 - Getting Distro Backport Logic Right.md b/docs/product-advisories/archived/22-Dec-2025 - Getting 
Distro Backport Logic Right.md new file mode 100644 index 000000000..095b39f45 --- /dev/null +++ b/docs/product-advisories/archived/22-Dec-2025 - Getting Distro Backport Logic Right.md @@ -0,0 +1,93 @@ +# ARCHIVED + +> **Archived:** 2025-12-22 +> **Reason:** Gap analysis complete. Recommendations incorporated into sprints and documentation. +> +> **Implementation Artifacts:** +> - SPRINT_2000_0003_0001: Alpine connector and APK comparator +> - SPRINT_2000_0003_0002: Comprehensive distro version tests (50-100 per distro) +> - SPRINT_4000_0002_0001: Backport UX explainability ("Compared with" badge, "Why Fixed" popover) +> - SPRINT_6000_SUMMARY.md: Updated to reference existing Concelier comparators +> - `src/Concelier/AGENTS.md`: Added distro backport version handling section +> +> **Existing Implementations Validated:** +> - `src/Concelier/__Libraries/StellaOps.Concelier.Merge/Comparers/Nevra.cs` (RPM) +> - `src/Concelier/__Libraries/StellaOps.Concelier.Merge/Comparers/DebianEvr.cs` (Debian/Ubuntu) +> - Distro connectors: Debian, Ubuntu, RedHat, SUSE + +--- + +Here's a quick, practical heads-up on **patch-aware backport handling** so your vulnerability verdicts don't go sideways. + +![Package versions concept diagram](attachment:image) + +### Why this matters + +Distros often **backport fixes** without bumping the upstream version. If you compare versions with a generic SemVer library, you can mislabel **fixed** builds as **vulnerable** (or the reverse). + +### Use distro-native comparators (not SemVer) + +* **RPM (RHEL/CentOS/Fedora/openSUSE):** compare using **EVR** (`epoch:version-release`) via `rpmvercmp`. Tilde `~` sorts **before** anything; releases matter (e.g., `1.2-3.el9_2` > `1.2-3`). +* **Debian/Ubuntu:** compare **epoch >> upstream_version >> debian_revision** using `dpkg --compare-versions` rules. Tilde `~` sorts **lower** than empty, so `2.0~rc1` < `2.0`. 
+* **Alpine (APK):** follows its own comparator; treat `-r` (pkgrel) as part of ordering, similar in spirit to RPM release. + +### Practical rules for your scanner (Stella Ops / Feedser -> Vexer) + +1. **Normalize the package coordinate** + + * RPM: `name:evr.arch` (epoch default 0 if missing). + * DEB: `name:epoch:upstream_version-debian_revision arch`. + * Keep the **distro release**/revision; it encodes backports. + +2. **Compare with native engines** + + * On Linux hosts/containers, call the system tool when possible: + + * RPM: `rpm --qf '%{EPOCH}:%{VERSION}-%{RELEASE}\n' -q ` then use `rpmdev-vercmp`/`rpmvercmp`. + * DEB/Ubuntu: `dpkg-query -W -f='${Version}\n' ` and `dpkg --compare-versions`. + * In offline analysis, embed battle-tested comparators (ports of `rpmvercmp` and `dpkg` logic) in your evaluator. + +3. **Model advisories with distro ranges** + + * Store **per-ecosystem fixed ranges**: + + * RPM example: `fixed >= 2:1.4.3-5.el9_3` + * DEB example: `fixed >= 1:1.4.3-5+deb12u2` + * Do **not** rewrite to SemVer; keep native forms. + +4. **VEX/decisioning** + + * When upstream says "fixed in 1.4.4" but **distro claims fixed in 1.4.3-5~deb12u2**, prefer distro channel **if source is trusted**. + * Record **evidence**: source (DSA/RHSA/USN), comparator used, installed EVR/DEB version, fixed threshold, and result. Attach to the verdict. + +5. **Edge cases to test** + + * Epoch jumps: `1:1.2-1` > `0:9.9-9`. + * Tilde pre-releases: `2.0~rc1` < `2.0`. + * Release qualifiers: `1.2-3.el9_2` < `1.2-3.el9_3`. + * Rebuilds/backports: `1.2-3ubuntu0.1` vs `1.2-3`. + +### Minimal implementation sketch (C#) + +* **Strategy pattern**: `IVersionComparator` with implementations `RpmComparator`, `DpkgComparator`, `ApkComparator`. +* **Selector** by package source (`rpmdb`, `dpkg-status`, `apk info`). 
+* **Evidence struct**: + + ```csharp + record VersionVerdict( + string Pkg, string Distro, string Installed, string Fixed, + string Comparator, bool IsFixed, string EvidenceSource, string[] ProofLines); + ``` +* **Fallback**: If native comparator unavailable, use embedded ports of `rpmvercmp` and Debian's algorithm; never SemVer. + +### CI tests you should pin + +* A table-driven test set with 50-100 cases covering epochs, tildes, and distro revisions. +* Golden files per distro to prevent regressions. +* Cross-check installed values from real images (e.g., `ubi9`, `debian:12`, `ubuntu:22.04`, `alpine:3.20`). + +### UX nudge + +* In the UI, show **"Compared with: RPM EVR / dpkg rules"** and link the **exact fixed threshold** that matched. Provide a "why fixed" popover showing the string compare steps. + +If you like, I can drop in ready-to-use C# comparators (rpmvercmp/dpkg) and a test corpus so you can wire this straight into Feedser/Vexer. diff --git a/docs/product-advisories/19-Dec-2025 - Benchmarking Container Scanners Against Stella Ops.md b/docs/product-advisories/archived/22-Dec-2025/19-Dec-2025 - Benchmarking Container Scanners Against Stella Ops.md similarity index 100% rename from docs/product-advisories/19-Dec-2025 - Benchmarking Container Scanners Against Stella Ops.md rename to docs/product-advisories/archived/22-Dec-2025/19-Dec-2025 - Benchmarking Container Scanners Against Stella Ops.md diff --git a/docs/product-advisories/21-Dec-2025 - Smart Diff - Reproducibility as a Feature.md b/docs/product-advisories/archived/22-Dec-2025/21-Dec-2025 - Smart Diff - Reproducibility as a Feature.md similarity index 100% rename from docs/product-advisories/21-Dec-2025 - Smart Diff - Reproducibility as a Feature.md rename to docs/product-advisories/archived/22-Dec-2025/21-Dec-2025 - Smart Diff - Reproducibility as a Feature.md diff --git a/docs/product-advisories/archived/22-Dec-2026 - Building a Trust Lattice for VEX Sources.md 
b/docs/product-advisories/archived/22-Dec-2026 - Building a Trust Lattice for VEX Sources.md new file mode 100644 index 000000000..8425dbc9f --- /dev/null +++ b/docs/product-advisories/archived/22-Dec-2026 - Building a Trust Lattice for VEX Sources.md @@ -0,0 +1,184 @@ +Here's a compact, practical way to make VEX trust decisions explainable and replayable across vendor, distro, and internal sources—without adding human-in-the-loop friction. + +--- + +# VEX Trust Lattice (compact) + +**Goal:** turn messy/contradictory VEX claims into a single, signed, reproducible verdict with a numeric confidence and an audit trail. + +## 1) Trust vector per source + +Each VEX source S gets a 3‑component trust vector scored in [0..1]: + +* **Provenance (P):** cryptographic & process integrity + + * 1.00 = DSSE‑signed, timestamped, Rekor/Git tag anchored, org DKIM/Sigstore OIDC, key in allow‑list, rotation policy OK + * 0.75 = DSSE‑signed + public key known, but no transparency log + * 0.40 = unsigned but retrieved via authenticated, immutable artifact repo + * 0.10 = opaque/CSV/email/manual import +* **Coverage (C):** how well the statement's scope maps to your asset + + * 1.00 = exact package + version/build digest + feature/flag context matched + * 0.75 = exact pkg + version range matched; partial feature context + * 0.50 = product‑level only; maps via CPE/PURL family + * 0.25 = family‑level heuristics; no version proof +* **Replayability (R):** can we deterministically re‑derive the claim? + + * 1.00 = all inputs pinned (feeds, SBOM hash, ruleset hash, lattice version); replays byte‑identical + * 0.60 = inputs mostly pinned; non‑deterministic ordering tolerated but stable outcome + * 0.20 = ephemeral APIs; no snapshot + +**BaseTrust(S) = wP·P + wC·C + wR·R** (defaults: wP=0.45, wC=0.35, wR=0.20; tunable per policy). 
+ +## 2) Claim strength & freshness + +Every individual VEX claim `S asserts X` carries multipliers: + +* **Strength (M):** *not*‑affected‑because‑{reason} + + * Exploitability analysis + reachability proof subgraph provided → 1.00 + * Config/feature‑flag reason with evidence → 0.80 + * Vendor blanket statement → 0.60 + * "Under investigation" → 0.40 +* **Freshness (F):** time‑decay curve; default half‑life 90 days + + * `F = exp(- ln(2) · age_days / 90)`; floor at 0.35 unless revoked + +**ClaimScore = BaseTrust(S) · M · F.** + +## 3) Lattice ordering & merge + +Define a **partial order** on claims by (scope specificity, ClaimScore). More specific scope wins ties. + +For a given CVE×Asset, gather all claims `{Ci}`: + +* If any **revocation/contradiction** exists, keep both and trigger **conflict mode**: require replay proof; otherwise down‑weight older/weaker by Δ=0.25. +* Final **verdict** chosen by **argmax(ClaimScore)** after conflict adjustments. + +Return tuple: + +``` +Verdict = { + status: {affected|not_affected|under_investigation|fixed}, + confidence: ClaimScore*, + expl: list of (source, reason, P/C/R, M, F), + evidence_refs: [attestations, SBOM hash, reachability subgraph id], + policy_hash, lattice_version +} +``` + +## 4) Policy hooks (explainable gates) + +* **Minimum confidence by environment:** e.g., prod requires ≥0.75 to accept "not_affected". +* **Unknowns budget:** fail if (#unknown deps > N) OR (Σ(1–ClaimScore) over unknowns > T). +* **Source quotas:** cap influence from any single vendor at 60% unless a second independent source supports within Δ=0.1. +* **Reason allow‑list:** forbid blanket vendor claims for criticals unless reachability proof exists. + +## 5) Deterministic replay + +To guarantee "same inputs → same verdict": + +* Pin: SBOM digest(s), vuln feed snapshot ids, VEX document digests, reachability graph ids, policy file, lattice version, clock cutoff. 
+* Sort: stable topological order on inputs (by `(issuer_did, statement_digest)`). +* Serialize verdict + inputs into a **Verdict Manifest** (JSON/CBOR) and sign (DSSE). +* Store in **Authority** with index: `(asset_digest, CVE, policy_hash, lattice_version)`. + +## 6) Minimal data model (for Vexer/Policy Engine) + +```json +{ + "source": { + "id": "did:web:vendor.example", + "provenance": {"sig_type":"dsse","rekor_log_id":"...","key_alias":"vendor_k1"}, + "provenance_score": 0.90, + "coverage_score": 0.75, + "replay_score": 0.60, + "weights": {"wP":0.45,"wC":0.35,"wR":0.20} + }, + "claim": { + "scope": {"purl":"pkg:rpm/openssl@3.0.12-5","digest":"sha256:...","features":{"fips":true}}, + "cve": "CVE-2025-12345", + "status": "not_affected", + "reason": "feature_flag_off", + "strength": 0.80, + "issued_at": "2025-11-28T10:12:00Z", + "evidence": {"reach_subgraph_id":"reg:subg/abcd","attestation":"sha256:..."} + }, + "policy": {"min_confidence_prod":0.75,"unknown_budget":5,"require_reachability_for_criticals":true}, + "lattice_version": "1.2.0" +} +``` + +## 7) Deterministic evaluation (C# sketch) + +```csharp +public record TrustWeights(double wP=0.45, double wC=0.35, double wR=0.20); + +double BaseTrust(double P, double C, double R, TrustWeights W) + => W.wP*P + W.wC*C + W.wR*R; + +double Freshness(DateTime issuedAt, DateTime cutoff, double halfLifeDays=90, double floor=0.35) +{ + var age = (cutoff - issuedAt).TotalDays; + var f = Math.Exp(-Math.Log(2) * age / halfLifeDays); + return Math.Max(f, floor); +} + +double ClaimScore(Source s, Claim c, TrustWeights W, DateTime cutoffUtc) +{ + var baseTrust = BaseTrust(s.P, s.C, s.R, W); + var freshness = Freshness(c.IssuedAt, cutoffUtc); + return baseTrust * c.Strength * freshness; +} + +// Merge: pick best score; apply conflict penalty if contradictory present +Verdict Merge(IEnumerable<(Source S, Claim C)> claims, Policy policy, DateTime cutoffUtc) +{ + var scored = claims.Select(t => new { + t.S, t.C, Score = 
ClaimScore(t.S, t.C, t.S.Weights, cutoffUtc) + }).ToList(); + + bool contradictory = scored.Select(x=>x.C.Status).Distinct().Count() > 1; + if (contradictory) { + scored = scored.Select(x => new { + x.S, x.C, Score = x.Score * 0.75 // conflict penalty + }).ToList(); + } + + var winner = scored.OrderByDescending(x => (x.C.Scope.Specificity, x.Score)).First(); + if (policy.RequireReachForCriticals && winner.C.IsCritical && !winner.C.HasReachabilityProof) + return Verdict.FailGate("No reachability proof for critical"); + + if (policy.MinConfidenceProd.HasValue && winner.Score < policy.MinConfidenceProd) + return Verdict.FailGate("Below minimum confidence"); + + return Verdict.Accept(winner.C.Status, winner.Score, AuditTrail.From(scored)); +} +``` + +## 8) UI: "Trust Algebra" panel (1 screen, no new page) + +* **Header:** CVE × Asset digest → final status + confidence meter. +* **Stacked bars:** P/C/R contributions for the winning claim. +* **Claim table:** source, status, reason, P/C/R, strength, freshness, ClaimScore; toggle "show conflicts". +* **Policy chips:** which gates applied; click to open policy YAML/JSON (read‑only if in replay). +* **Replay button:** "Reproduce verdict" → emits a signed **Verdict Manifest** and logs proof ids. + +## 9) Defaults for source classes + +* **Vendor:** P=0.9, C=0.7 (often coarse), R=0.6 +* **Distro:** P=0.8, C=0.85 (build‑aware), R=0.6 +* **Internal:** P=0.85 (org‑signed), C=0.95 (exact SBOM+reach), R=0.9 + +Tune per issuer using rolling calibration: compare past ClaimScores vs. post‑mortem truth; adjust via small learning rate (±0.02/epoch) under a signed **calibration manifest** (also replayable). 
+ +--- + +If you want, I can drop this into your Stella Ops modules today as: + +* **Vexer:** trust‑vector store + claim normalizer +* **Policy Engine:** lattice evaluator + gates +* **Authority:** verdict manifest signer/indexer +* **UI:** single "Trust Algebra" panel wired to evidence ids + +Say the word and I'll generate the concrete JSON schemas, C# interfaces, and a seed policy file. diff --git a/docs/reachability/cve-symbol-mapping.md b/docs/reachability/cve-symbol-mapping.md new file mode 100644 index 000000000..a5c29acf3 --- /dev/null +++ b/docs/reachability/cve-symbol-mapping.md @@ -0,0 +1,296 @@ +# CVE → Symbol Mapping + +_Last updated: 2025-12-22. Owner: Scanner Guild + Concelier Guild._ + +This document describes how Stella Ops maps CVE identifiers to specific binary symbols/functions for precise reachability analysis. + +--- + +## 1. Overview + +To determine if a vulnerability is reachable, we need to know which specific functions are affected. The **CVE→Symbol Mapping** service bridges: + +- **CVE identifiers** (e.g., `CVE-2024-1234`) +- **Package coordinates** (e.g., `pkg:npm/lodash@4.17.21`) +- **Affected symbols** (e.g., `lodash.template`, `openssl:EVP_PKEY_decrypt`) + +--- + +## 2. Data Sources + +### 2.1 Patch Diff Analysis + +The highest-fidelity source: analyze git commits that fix vulnerabilities. 
+ +``` +CVE-2024-1234 fixed in commit abc123 + → Diff shows changes to: + - src/crypto.c: EVP_PKEY_decrypt() [modified] + - src/crypto.c: decrypt_internal() [added guard] + → Affected symbols: EVP_PKEY_decrypt, decrypt_internal +``` + +**Implementation**: `StellaOps.Scanner.VulnSurfaces.PatchDiffAnalyzer` + +### 2.2 Advisory Metadata + +Structured advisories with function-level detail: + +- **OSV** (`affected[].ranges[].events[].introduced/fixed`) +- **NVD CPE** with CWE → typical affected patterns +- **Vendor advisories** (GitHub, npm, PyPI security advisories) + +**Implementation**: `StellaOps.Concelier.Connectors.*` + +### 2.3 Heuristic Inference + +When precise mappings unavailable: + +1. **All public exports** of affected package version +2. **CWE-based patterns** (e.g., CWE-79 XSS → output functions) +3. **Function name patterns** (e.g., `*_decrypt*`, `*_parse*`) + +**Implementation**: `StellaOps.Scanner.VulnSurfaces.HeuristicMapper` + +--- + +## 3. Mapping Confidence Tiers + +| Tier | Source | Confidence | Example | +|------|--------|------------|---------| +| **Confirmed** | Patch diff analysis | 0.95–1.0 | Exact function from git diff | +| **Likely** | Advisory with function names | 0.7–0.9 | OSV with `affected.functions[]` | +| **Inferred** | CWE/pattern heuristics | 0.4–0.6 | All exports of vulnerable version | +| **Unknown** | No data available | 0.0–0.3 | Package-level only | + +--- + +## 4. Query Interface + +### 4.1 Service Contract + +```csharp +public interface IVulnSurfaceService +{ + /// + /// Get symbols affected by a CVE for a specific package. + /// + Task GetAffectedSymbolsAsync( + string cveId, + string purl, + VulnSurfaceOptions? options = null, + CancellationToken ct = default); + + /// + /// Batch query for multiple CVE+PURL pairs. 
+ /// + Task> GetAffectedSymbolsBatchAsync( + IEnumerable<(string CveId, string Purl)> queries, + CancellationToken ct = default); +} +``` + +### 4.2 Result Model + +```csharp +public sealed record VulnSurfaceResult +{ + public required string CveId { get; init; } + public required string Purl { get; init; } + public required ImmutableArray Symbols { get; init; } + public required VulnSurfaceSource Source { get; init; } + public required double Confidence { get; init; } + public DateTimeOffset? CachedAt { get; init; } +} + +public sealed record AffectedSymbol +{ + public required string Name { get; init; } + public required string SymbolId { get; init; } + public string? File { get; init; } + public int? Line { get; init; } + public string? Signature { get; init; } + public SymbolChangeType ChangeType { get; init; } +} + +public enum VulnSurfaceSource +{ + PatchDiff, + Advisory, + Heuristic, + Unknown +} + +public enum SymbolChangeType +{ + Modified, // Function code changed + Added, // New guard/check added + Removed, // Vulnerable code removed + Renamed // Function renamed +} +``` + +--- + +## 5. 
Integration with Concelier + +The CVE→Symbol mapping service integrates with Concelier's advisory feed: + +``` +┌─────────────────┐ ┌──────────────────┐ ┌───────────────────┐ +│ Scanner │────►│ VulnSurface │────►│ Concelier │ +│ (Query) │ │ Service │ │ Advisory API │ +└─────────────────┘ └──────────────────┘ └───────────────────┘ + │ + ▼ + ┌──────────────────┐ + │ Patch Diff │ + │ Analyzer │ + └──────────────────┘ +``` + +### 5.1 Advisory Client + +```csharp +public interface IAdvisoryClient +{ + Task GetAdvisoryAsync(string cveId, CancellationToken ct); + Task> GetAffectedPackagesAsync( + string cveId, + CancellationToken ct); +} +``` + +### 5.2 Caching Strategy + +| Data | TTL | Invalidation | +|------|-----|--------------| +| Advisory metadata | 1 hour | On feed update | +| Patch diff results | 24 hours | On new CVE revision | +| Heuristic mappings | 15 minutes | On query | + +--- + +## 6. Offline Support + +For air-gapped environments: + +### 6.1 Pre-computed Bundles + +``` +offline-bundles/ + vuln-surfaces/ + cve-2024-*.json # Pre-computed mappings + ecosystem-npm.json # NPM ecosystem mappings + ecosystem-pypi.json # PyPI ecosystem mappings +``` + +### 6.2 Bundle Format + +```json +{ + "version": "1.0.0", + "generatedAt": "2025-12-22T00:00:00Z", + "mappings": { + "CVE-2024-1234": { + "pkg:npm/lodash@4.17.21": { + "symbols": ["template", "templateSettings"], + "source": "patch_diff", + "confidence": 0.95 + } + } + } +} +``` + +--- + +## 7. Fallback Behavior + +When no mapping is available: + +1. **Ecosystem-specific defaults**: + - npm: All `exports` from package.json + - PyPI: All public functions (`__all__`) + - Native: All exported symbols (`.dynsym`) + +2. **Conservative approach**: + - Mark all public APIs as potentially affected + - Set confidence = 0.3 (Inferred tier) + - Include explanation in verdict reasons + +3. **Manual override**: + - Allow user-provided symbol lists via policy + - Support suppression rules for known false positives + +--- + +## 8. 
Performance Considerations + +| Metric | Target | Notes | +|--------|--------|-------| +| Cache hit rate | >90% | Most queries hit cache | +| Cold query latency | <500ms | Concelier API call | +| Batch throughput | >100 queries/sec | Parallel execution | + +--- + +## 9. Example Queries + +### Simple Query + +```http +POST /api/vuln-surfaces/query +Content-Type: application/json + +{ + "cveId": "CVE-2024-1234", + "purl": "pkg:npm/lodash@4.17.21" +} +``` + +Response: +```json +{ + "cveId": "CVE-2024-1234", + "purl": "pkg:npm/lodash@4.17.21", + "symbols": [ + { + "name": "template", + "symbolId": "js:lodash/template", + "file": "lodash.js", + "line": 14850, + "changeType": "modified" + } + ], + "source": "patch_diff", + "confidence": 0.95 +} +``` + +### Batch Query + +```http +POST /api/vuln-surfaces/batch +Content-Type: application/json + +{ + "queries": [ + {"cveId": "CVE-2024-1234", "purl": "pkg:npm/lodash@4.17.21"}, + {"cveId": "CVE-2024-5678", "purl": "pkg:pypi/requests@2.28.0"} + ] +} +``` + +--- + +## 10. Related Documentation + +- [Slice Schema](./slice-schema.md) +- [Patch Oracles](./patch-oracles.md) +- [Concelier Architecture](../modules/concelier/architecture.md) +- [Vulnerability Surfaces](../modules/scanner/vuln-surfaces.md) + +--- + +_Created: 2025-12-22. See Sprint 3810 for implementation details._ diff --git a/docs/reachability/replay-verification.md b/docs/reachability/replay-verification.md new file mode 100644 index 000000000..6ac338883 --- /dev/null +++ b/docs/reachability/replay-verification.md @@ -0,0 +1,332 @@ +# Replay Verification + +_Last updated: 2025-12-22. Owner: Scanner Guild._ + +This document describes the **replay verification** workflow that ensures reachability slices are reproducible and tamper-evident. + +--- + +## 1. 
Overview + +Replay verification answers: *"Given the same inputs, do we get the exact same slice?"* + +This is critical for: +- **Audit trails**: Prove analysis results are genuine +- **Tamper detection**: Detect modified inputs or results +- **Debugging**: Identify sources of non-determinism +- **Compliance**: Demonstrate reproducible security analysis + +--- + +## 2. Replay Workflow + +``` +┌─────────────────┐ ┌──────────────────┐ ┌───────────────────┐ +│ Original │ │ Rehydrate │ │ Recompute │ +│ Slice │────►│ Inputs │────►│ Slice │ +│ (with digest) │ │ from CAS │ │ (fresh) │ +└─────────────────┘ └──────────────────┘ └───────────────────┘ + │ + ▼ + ┌───────────────────┐ + │ Compare │ + │ byte-for-byte │ + └───────────────────┘ + │ + ┌─────────────┴─────────────┐ + ▼ ▼ + ┌──────────┐ ┌──────────┐ + │ MATCH │ │ MISMATCH │ + │ ✓ │ │ + diff │ + └──────────┘ └──────────┘ +``` + +--- + +## 3. API Reference + +### 3.1 Replay Endpoint + +```http +POST /api/slices/replay +Content-Type: application/json + +{ + "sliceDigest": "blake3:a1b2c3d4..." 
+} +``` + +### 3.2 Response Format + +**Match Response (200 OK)**: +```json +{ + "match": true, + "originalDigest": "blake3:a1b2c3d4...", + "recomputedDigest": "blake3:a1b2c3d4...", + "replayedAt": "2025-12-22T10:00:00Z", + "inputsVerified": true +} +``` + +**Mismatch Response (200 OK)**: +```json +{ + "match": false, + "originalDigest": "blake3:a1b2c3d4...", + "recomputedDigest": "blake3:e5f6g7h8...", + "replayedAt": "2025-12-22T10:00:00Z", + "diff": { + "missingNodes": ["node:5"], + "extraNodes": ["node:6"], + "missingEdges": [{"from": "node:1", "to": "node:5"}], + "extraEdges": [{"from": "node:1", "to": "node:6"}], + "verdictDiff": { + "original": "unreachable", + "recomputed": "reachable" + }, + "confidenceDiff": { + "original": 0.95, + "recomputed": 0.72 + } + }, + "possibleCauses": [ + "Input graph may have been modified", + "Analyzer version mismatch: 1.2.0 vs 1.2.1", + "Feed version changed: nvd-2025-12-20 vs nvd-2025-12-22" + ] +} +``` + +**Error Response (404 Not Found)**: +```json +{ + "error": "slice_not_found", + "message": "Slice with digest blake3:a1b2c3d4... not found in CAS", + "sliceDigest": "blake3:a1b2c3d4..." +} +``` + +--- + +## 4. 
Input Rehydration + +All inputs must be CAS-addressed for replay: + +### 4.1 Required Inputs + +| Input | CAS Key | Description | +|-------|---------|-------------| +| Graph | `cas://graphs/{digest}` | Full RichGraph JSON | +| Binaries | `cas://binaries/{digest}` | Binary file hashes | +| SBOM | `cas://sboms/{digest}` | CycloneDX/SPDX document | +| Policy | `cas://policies/{digest}` | Policy DSL | +| Feeds | `cas://feeds/{version}` | Advisory feed snapshot | + +### 4.2 Manifest Contents + +```json +{ + "manifest": { + "analyzerVersion": "scanner.native:1.2.0", + "rulesetHash": "sha256:abc123...", + "feedVersions": { + "nvd": "2025-12-20", + "osv": "2025-12-20", + "ghsa": "2025-12-20" + }, + "createdAt": "2025-12-22T10:00:00Z", + "toolchain": "iced-x86:1.21.0", + "environment": { + "os": "linux", + "arch": "x86_64" + } + } +} +``` + +--- + +## 5. Determinism Requirements + +For byte-for-byte reproducibility: + +### 5.1 JSON Canonicalization + +``` +1. Keys sorted alphabetically at all levels +2. No whitespace (compact JSON) +3. UTF-8 encoding +4. Lowercase hex for all hashes +5. Numbers: no trailing zeros, scientific notation for large values +``` + +### 5.2 Graph Ordering + +``` +Nodes: sorted by symbolId (lexicographic) +Edges: sorted by (from, to) tuple (lexicographic) +Paths: sorted by first node, then path length +``` + +### 5.3 Timestamp Handling + +``` +All timestamps: UTC, ISO-8601, with 'Z' suffix +Example: "2025-12-22T10:00:00Z" +No milliseconds unless significant +``` + +### 5.4 Floating Point + +``` +Confidence values: round to 6 decimal places +Example: 0.950000, not 0.95 or 0.9500001 +``` + +--- + +## 6. 
Diff Computation + +When slices don't match: + +### 6.1 Diff Algorithm + +```python +def compute_diff(original, recomputed): + diff = SliceDiff() + + # Node diff + orig_nodes = set(n.id for n in original.subgraph.nodes) + new_nodes = set(n.id for n in recomputed.subgraph.nodes) + diff.missing_nodes = list(orig_nodes - new_nodes) + diff.extra_nodes = list(new_nodes - orig_nodes) + + # Edge diff + orig_edges = set((e.from, e.to) for e in original.subgraph.edges) + new_edges = set((e.from, e.to) for e in recomputed.subgraph.edges) + diff.missing_edges = list(orig_edges - new_edges) + diff.extra_edges = list(new_edges - orig_edges) + + # Verdict diff + if original.verdict.status != recomputed.verdict.status: + diff.verdict_diff = { + "original": original.verdict.status, + "recomputed": recomputed.verdict.status + } + + return diff +``` + +### 6.2 Cause Analysis + +```python +def analyze_causes(original, recomputed, manifest): + causes = [] + + if manifest.analyzerVersion != current_version(): + causes.append(f"Analyzer version mismatch") + + if manifest.feedVersions != current_feed_versions(): + causes.append(f"Feed version changed") + + if original.inputs.graphDigest != fetch_graph_digest(): + causes.append(f"Input graph may have been modified") + + return causes +``` + +--- + +## 7. CLI Usage + +### 7.1 Replay Command + +```bash +# Replay and verify a slice +stella slice replay --digest blake3:a1b2c3d4... + +# Output: +# ✓ Slice verified: digest matches +# Original: blake3:a1b2c3d4... +# Recomputed: blake3:a1b2c3d4... +``` + +### 7.2 Verbose Mode + +```bash +stella slice replay --digest blake3:a1b2c3d4... --verbose + +# Output: +# Fetching slice from CAS... +# Rehydrating inputs: +# - Graph: cas://graphs/blake3:xyz... ✓ +# - SBOM: cas://sboms/sha256:abc... ✓ +# - Policy: cas://policies/sha256:def... ✓ +# Recomputing slice... +# Comparing results... +# ✓ Match confirmed +``` + +### 7.3 Mismatch Handling + +```bash +stella slice replay --digest blake3:a1b2c3d4... 
+ +# Output: +# ✗ Slice mismatch detected! +# +# Differences: +# Nodes: 1 missing, 0 extra +# Edges: 1 missing, 1 extra +# Verdict: unreachable → reachable +# +# Possible causes: +# - Input graph may have been modified +# - Analyzer version: 1.2.0 → 1.2.1 +# +# Run with --diff-file to export detailed diff +``` + +--- + +## 8. Error Handling + +| Error | Cause | Resolution | +|-------|-------|------------| +| `slice_not_found` | Slice not in CAS | Check digest, verify upload | +| `input_not_found` | Referenced input missing | Reupload inputs | +| `version_mismatch` | Analyzer version differs | Pin version or accept drift | +| `feed_stale` | Feed snapshot unavailable | Use latest or pin version | + +--- + +## 9. Security Considerations + +1. **Input integrity**: Verify CAS digests before replay +2. **Audit logging**: Log all replay attempts +3. **Rate limiting**: Prevent replay DoS +4. **Access control**: Same permissions as slice access + +--- + +## 10. Performance Targets + +| Metric | Target | +|--------|--------| +| Replay latency | <5s for typical slice | +| Input fetch | <2s (parallel CAS fetches) | +| Comparison | <100ms | + +--- + +## 11. Related Documentation + +- [Slice Schema](./slice-schema.md) +- [Binary Reachability Schema](./binary-reachability-schema.md) +- [Determinism Requirements](../contracts/determinism.md) +- [CAS Architecture](../modules/platform/cas.md) + +--- + +_Created: 2025-12-22. See Sprint 3820 for implementation details._ diff --git a/docs/reachability/slice-schema.md b/docs/reachability/slice-schema.md new file mode 100644 index 000000000..de7fdec8f --- /dev/null +++ b/docs/reachability/slice-schema.md @@ -0,0 +1,287 @@ +# Reachability Slice Schema + +_Last updated: 2025-12-22. Owner: Scanner Guild._ + +This document defines the **Reachability Slice** schema—a minimal, attestable proof unit that answers whether a vulnerable symbol is reachable from application entrypoints. + +--- + +## 1. 
Overview + +A **slice** is a focused subgraph extracted from a full reachability graph, containing only the nodes and edges relevant to answering a specific reachability query (e.g., "Is CVE-2024-1234's vulnerable function reachable?"). + +### Key Properties + +| Property | Description | +|----------|-------------| +| **Minimal** | Contains only nodes/edges on paths between entrypoints and targets | +| **Attestable** | DSSE-signed with in-toto predicate format | +| **Reproducible** | Same inputs → same bytes (deterministic) | +| **Content-addressed** | Retrieved by BLAKE3 digest | + +--- + +## 2. Schema Definition + +### 2.1 DSSE Predicate Type + +``` +https://stellaops.dev/predicates/reachability-slice/v1 +``` + +### 2.2 Full Schema + +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://stellaops.dev/schemas/reachability-slice.v1.schema.json", + "title": "Reachability Slice", + "type": "object", + "required": ["_type", "inputs", "query", "subgraph", "verdict", "manifest"], + "properties": { + "_type": { + "const": "https://stellaops.dev/predicates/reachability-slice/v1" + }, + "inputs": { "$ref": "#/$defs/SliceInputs" }, + "query": { "$ref": "#/$defs/SliceQuery" }, + "subgraph": { "$ref": "#/$defs/SliceSubgraph" }, + "verdict": { "$ref": "#/$defs/SliceVerdict" }, + "manifest": { "$ref": "#/$defs/ScanManifest" } + }, + "$defs": { + "SliceInputs": { + "type": "object", + "required": ["graphDigest", "binaryDigests"], + "properties": { + "graphDigest": { "type": "string", "pattern": "^blake3:[a-f0-9]{64}$" }, + "binaryDigests": { + "type": "array", + "items": { "type": "string", "pattern": "^sha256:[a-f0-9]{64}$" } + }, + "sbomDigest": { "type": "string" }, + "layerDigests": { "type": "array", "items": { "type": "string" } } + } + }, + "SliceQuery": { + "type": "object", + "properties": { + "cveId": { "type": "string", "pattern": "^CVE-\\d{4}-\\d+$" }, + "targetSymbols": { "type": "array", "items": { "type": "string" } }, + 
"entrypoints": { "type": "array", "items": { "type": "string" } }, + "policyHash": { "type": "string" } + } + }, + "SliceSubgraph": { + "type": "object", + "required": ["nodes", "edges"], + "properties": { + "nodes": { + "type": "array", + "items": { "$ref": "#/$defs/SliceNode" } + }, + "edges": { + "type": "array", + "items": { "$ref": "#/$defs/SliceEdge" } + } + } + }, + "SliceNode": { + "type": "object", + "required": ["id", "symbol", "kind"], + "properties": { + "id": { "type": "string" }, + "symbol": { "type": "string" }, + "kind": { "enum": ["entrypoint", "intermediate", "target", "unknown"] }, + "file": { "type": "string" }, + "line": { "type": "integer" }, + "purl": { "type": "string" }, + "attributes": { "type": "object" } + } + }, + "SliceEdge": { + "type": "object", + "required": ["from", "to", "confidence"], + "properties": { + "from": { "type": "string" }, + "to": { "type": "string" }, + "kind": { "enum": ["direct", "plt", "iat", "dynamic", "unknown"] }, + "confidence": { "type": "number", "minimum": 0, "maximum": 1 }, + "evidence": { "type": "string" }, + "gate": { "$ref": "#/$defs/GateInfo" }, + "observed": { "$ref": "#/$defs/ObservedInfo" } + } + }, + "GateInfo": { + "type": "object", + "properties": { + "type": { "enum": ["feature_flag", "auth", "config", "admin_only"] }, + "condition": { "type": "string" }, + "satisfied": { "type": "boolean" } + } + }, + "ObservedInfo": { + "type": "object", + "properties": { + "firstObserved": { "type": "string", "format": "date-time" }, + "lastObserved": { "type": "string", "format": "date-time" }, + "count": { "type": "integer" } + } + }, + "SliceVerdict": { + "type": "object", + "required": ["status", "confidence"], + "properties": { + "status": { "enum": ["reachable", "unreachable", "unknown", "gated"] }, + "confidence": { "type": "number", "minimum": 0, "maximum": 1 }, + "reasons": { "type": "array", "items": { "type": "string" } }, + "pathWitnesses": { "type": "array", "items": { "type": "string" } }, + 
"unknownCount": { "type": "integer" }, + "gatedPaths": { "type": "array", "items": { "$ref": "#/$defs/GateInfo" } } + } + }, + "ScanManifest": { + "type": "object", + "required": ["analyzerVersion", "createdAt"], + "properties": { + "analyzerVersion": { "type": "string" }, + "rulesetHash": { "type": "string" }, + "feedVersions": { "type": "object" }, + "createdAt": { "type": "string", "format": "date-time" }, + "toolchain": { "type": "string" } + } + } + } +} +``` + +--- + +## 3. Verdict Status Definitions + +| Status | Meaning | Confidence Range | +|--------|---------|------------------| +| `reachable` | Path exists from entrypoint to target | ≥0.7 | +| `unreachable` | No path found, no unknowns | ≥0.9 | +| `unknown` | Unknowns present on potential paths | 0.3–0.7 | +| `gated` | Path exists but gated by feature flag/auth | 0.5–0.8 | + +### Verdict Computation Rules + +``` +reachable := path_exists AND min(path_confidence) ≥ 0.7 AND unknown_edges = 0 +unreachable := NOT path_exists AND unknown_edges = 0 +gated := path_exists AND all_paths_gated AND gates_not_satisfied +unknown := unknown_edges > 0 OR min(path_confidence) < 0.5 +``` + +--- + +## 4. Example Slice + +```json +{ + "_type": "https://stellaops.dev/predicates/reachability-slice/v1", + "inputs": { + "graphDigest": "blake3:a1b2c3d4e5f6789012345678901234567890123456789012345678901234abcd", + "binaryDigests": ["sha256:deadbeef..."], + "sbomDigest": "sha256:cafebabe..." 
+ }, + "query": { + "cveId": "CVE-2024-1234", + "targetSymbols": ["openssl:EVP_PKEY_decrypt"], + "entrypoints": ["main", "http_handler"] + }, + "subgraph": { + "nodes": [ + {"id": "node:1", "symbol": "main", "kind": "entrypoint", "file": "/app/main.c", "line": 42}, + {"id": "node:2", "symbol": "process_request", "kind": "intermediate", "file": "/app/handler.c", "line": 100}, + {"id": "node:3", "symbol": "decrypt_data", "kind": "intermediate", "file": "/app/crypto.c", "line": 55}, + {"id": "node:4", "symbol": "EVP_PKEY_decrypt", "kind": "target", "purl": "pkg:generic/openssl@3.0.0"} + ], + "edges": [ + {"from": "node:1", "to": "node:2", "kind": "direct", "confidence": 1.0}, + {"from": "node:2", "to": "node:3", "kind": "direct", "confidence": 0.95}, + {"from": "node:3", "to": "node:4", "kind": "plt", "confidence": 0.9} + ] + }, + "verdict": { + "status": "reachable", + "confidence": 0.9, + "reasons": ["Direct call path from main() to EVP_PKEY_decrypt()"], + "pathWitnesses": ["main → process_request → decrypt_data → EVP_PKEY_decrypt"] + }, + "manifest": { + "analyzerVersion": "scanner.native:1.2.0", + "rulesetHash": "sha256:...", + "createdAt": "2025-12-22T10:00:00Z", + "toolchain": "iced-x86:1.21.0" + } +} +``` + +--- + +## 5. DSSE Envelope Format + +Slices are wrapped in DSSE envelopes for attestation: + +```json +{ + "payloadType": "application/vnd.in-toto+json", + "payload": "", + "signatures": [ + { + "keyid": "sha256:abc123...", + "sig": "" + } + ] +} +``` + +--- + +## 6. Storage & Retrieval + +### CAS URI Format + +``` +cas://slices/blake3: +``` + +### OCI Artifact Format + +```json +{ + "mediaType": "application/vnd.stellaops.slice.v1+json", + "digest": "sha256:...", + "annotations": { + "org.stellaops.slice.cve": "CVE-2024-1234", + "org.stellaops.slice.verdict": "reachable" + } +} +``` + +--- + +## 7. Determinism Requirements + +For reproducible slices: + +1. **Node ordering**: Sort by `id` lexicographically +2. 
**Edge ordering**: Sort by `(from, to)` tuple +3. **Timestamps**: Use UTC ISO-8601 with Z suffix +4. **Floating point**: Round to 6 decimal places +5. **JSON serialization**: No whitespace, sorted keys + +--- + +## 8. Related Documentation + +- [Binary Reachability Schema](./binary-reachability-schema.md) +- [RichGraph Contract](../contracts/richgraph-v1.md) +- [Function-Level Evidence](./function-level-evidence.md) +- [Replay Verification](./replay-verification.md) + +--- + +_Created: 2025-12-22. See Sprint 3810 for implementation details._ diff --git a/src/Concelier/AGENTS.md b/src/Concelier/AGENTS.md index d37d52dcd..a8d549cbd 100644 --- a/src/Concelier/AGENTS.md +++ b/src/Concelier/AGENTS.md @@ -32,6 +32,35 @@ - **Cross-module edits:** none without sprint note; if needed, log in sprint Execution Log and Decisions & Risks. - **CVSS v4.0 ingest:** when vendor advisories ship CVSS v4.0 vectors, parse without mutation, store provenance (source id + observation path), and emit vectors unchanged to Policy receipts. Do not derive fields; attach DSSE/observation refs for Policy reuse. +## Distro Backport Version Handling + +> **Reference:** `docs/product-advisories/archived/22-Dec-2025 - Getting Distro Backport Logic Right.md` + +When working with OS package advisories, follow these rules: + +### Version Comparators +- **RPM (RHEL/CentOS/Fedora/openSUSE):** Use `NevraComparer` in `StellaOps.Concelier.Merge.Comparers`. Compares EVR (`epoch:version-release`) using `rpmvercmp` semantics. Tilde `~` sorts before anything. +- **Debian/Ubuntu:** Use `DebianEvrComparer`. Compares `epoch:upstream_version-debian_revision` using dpkg rules. Tilde `~` sorts lower than empty. +- **Alpine (APK):** Use `ApkVersionComparer` (via SPRINT_2000_0003_0001). Handles `-r` and suffix ordering (`_alpha` < `_beta` < `_pre` < `_rc` < none < `_p`). + +### Key Rules +1. **Never convert distro versions to SemVer.** Preserve native EVR/NEVRA/APK strings in `AffectedVersionRange`. +2. 
**Use `RangeKind` correctly:** `nevra` for RPM, `evr` for Debian/Ubuntu, `apk` for Alpine. +3. **Preserve release qualifiers:** `.el8_5`, `+deb12u2`, `-r0` encode backport information. +4. **Distro advisories take precedence:** When upstream says "fixed in 1.4.4" but distro claims "fixed in 1.4.3-5~deb12u2", prefer distro channel if source is trusted. +5. **Record evidence:** Store source (DSA/RHSA/USN), comparator used, installed version, fixed threshold, and result. + +### Edge Cases to Handle +- Epoch jumps: `1:1.2-1` > `0:9.9-9` +- Tilde pre-releases: `2.0~rc1` < `2.0` +- Release qualifiers: `1.2-3.el9_2` < `1.2-3.el9_3` +- Rebuilds/backports: `1.2-3ubuntu0.1` vs `1.2-3` + +### Test Corpus +Version comparators must be tested with 50+ cases per distro. See: +- `src/Concelier/__Tests/StellaOps.Concelier.Merge.Tests/Comparers/` +- SPRINT_2000_0003_0002 for comprehensive test requirements + ## Coding & Observability Standards - Target **.NET 10**; prefer latest C# preview features already enabled in repo. - Npgsql driver for PostgreSQL; canonical JSON mapping in Storage.Postgres. 
diff --git a/src/Scanner/AGENTS.md b/src/Scanner/AGENTS.md index 051ced3cd..9cf9f7b70 100644 --- a/src/Scanner/AGENTS.md +++ b/src/Scanner/AGENTS.md @@ -110,6 +110,53 @@ Compute vulnerability surfaces by diffing vulnerable vs fixed package versions: - Confidence tiers: Confirmed (trigger reachable) > Likely (API reachable) > Present (dep only) - Path witnesses include surface evidence for audit trail +## Binary + Call-Stack Reachability (Sprint 3800 Series) + +Layered binary reachability with attestable slices for CVE triage: + +### Sprint Summary +- **3800**: Binary call-edge enhancement (disassembly, PLT/IAT, dynamic loading) +- **3810**: CVE→Symbol mapping and slice format +- **3820**: Slice query and replay APIs +- **3830**: VEX integration and policy binding +- **3840**: Runtime trace merge (eBPF/ETW) +- **3850**: OCI storage and CLI commands + +See: `docs/implplan/SPRINT_3800_SUMMARY.md` + +### Libraries +- `StellaOps.Scanner.Reachability.Slices` - Slice extraction, DSSE signing, verdict computation +- `StellaOps.Scanner.Advisory` - CVE→symbol mapping integration with Concelier +- `StellaOps.Scanner.Runtime` - eBPF/ETW runtime trace collectors +- `StellaOps.Scanner.Storage.Oci` - OCI artifact storage for slices + +### Key Types +- `ReachabilitySlice` - Minimal attestable proof unit for CVE reachability +- `SliceQuery` - Query parameters (CVE, symbols, entrypoints, policy) +- `SliceVerdict` - Result status (reachable/unreachable/unknown/gated) +- `VulnSurfaceResult` - CVE→symbol mapping result with confidence + +### Predicate Schema +- URI: `stellaops.dev/predicates/reachability-slice@v1` +- Schema: `docs/schemas/stellaops-slice.v1.schema.json` +- DSSE-signed slices for audit trail + +### Slice API Endpoints +- `POST /api/slices/query` - Query reachability for CVE/symbols +- `GET /api/slices/{digest}` - Retrieve attested slice +- `POST /api/slices/replay` - Verify slice reproducibility + +### CLI Commands (Sprint 3850) +- `stella binary submit` - Submit binary 
graph +- `stella binary info` - Display graph info +- `stella binary symbols` - List symbols +- `stella binary verify` - Verify attestation + +### Documentation +- `docs/reachability/slice-schema.md` - Slice format specification +- `docs/reachability/cve-symbol-mapping.md` - CVE→symbol service design +- `docs/reachability/replay-verification.md` - Replay workflow guide + ## Engineering Rules - Target `net10.0`; prefer latest C# preview allowed in repo. - Offline-first: no new external network calls; use cached feeds (`/local-nugets`). diff --git a/src/__Libraries/StellaOps.VersionComparison/Comparers/DebianVersionComparer.cs b/src/__Libraries/StellaOps.VersionComparison/Comparers/DebianVersionComparer.cs new file mode 100644 index 000000000..de4bfcee7 --- /dev/null +++ b/src/__Libraries/StellaOps.VersionComparison/Comparers/DebianVersionComparer.cs @@ -0,0 +1,261 @@ +using System.Collections.Immutable; +using StellaOps.VersionComparison.Models; + +namespace StellaOps.VersionComparison.Comparers; + +/// +/// Compares Debian/Ubuntu package versions using dpkg semantics. +/// Handles epoch:upstream_version-debian_revision with tilde pre-release support. +/// +public sealed class DebianVersionComparer : IVersionComparator, IComparer, IComparer +{ + /// + /// Singleton instance. + /// + public static DebianVersionComparer Instance { get; } = new(); + + private DebianVersionComparer() { } + + /// + public ComparatorType ComparatorType => ComparatorType.Dpkg; + + /// + public int Compare(string? x, string? y) + { + if (ReferenceEquals(x, y)) return 0; + if (x is null) return -1; + if (y is null) return 1; + + var xParsed = DebianVersion.TryParse(x, out var xVer); + var yParsed = DebianVersion.TryParse(y, out var yVer); + + if (xParsed && yParsed) + { + return Compare(xVer!, yVer!); + } + + if (xParsed) return 1; + if (yParsed) return -1; + + return string.Compare(x, y, StringComparison.Ordinal); + } + + /// + /// Compare two parsed Debian versions. 
+ /// + public int Compare(DebianVersion? x, DebianVersion? y) + { + if (ReferenceEquals(x, y)) return 0; + if (x is null) return -1; + if (y is null) return 1; + + // Epoch first + var compare = x.Epoch.CompareTo(y.Epoch); + if (compare != 0) return compare; + + // Upstream version + compare = CompareSegment(x.Version, y.Version); + if (compare != 0) return compare; + + // Debian revision + compare = CompareSegment(x.Revision, y.Revision); + return compare; + } + + /// + public VersionComparisonResult CompareWithProof(string? left, string? right) + { + var proofLines = new List(); + + if (left is null && right is null) + { + proofLines.Add("Both versions are null: equal"); + return new VersionComparisonResult(0, [.. proofLines], ComparatorType.Dpkg); + } + + if (left is null) + { + proofLines.Add("Left version is null: less than right"); + return new VersionComparisonResult(-1, [.. proofLines], ComparatorType.Dpkg); + } + + if (right is null) + { + proofLines.Add("Right version is null: left is greater"); + return new VersionComparisonResult(1, [.. proofLines], ComparatorType.Dpkg); + } + + var leftParsed = DebianVersion.TryParse(left, out var leftVer); + var rightParsed = DebianVersion.TryParse(right, out var rightVer); + + if (!leftParsed || !rightParsed) + { + if (!leftParsed && !rightParsed) + { + var cmp = string.Compare(left, right, StringComparison.Ordinal); + proofLines.Add($"Both versions invalid, fallback to string comparison: {ResultString(cmp)}"); + return new VersionComparisonResult(cmp, [.. proofLines], ComparatorType.Dpkg); + } + + if (!leftParsed) + { + proofLines.Add("Left version invalid, right valid: left is less"); + return new VersionComparisonResult(-1, [.. proofLines], ComparatorType.Dpkg); + } + + proofLines.Add("Right version invalid, left valid: left is greater"); + return new VersionComparisonResult(1, [.. 
proofLines], ComparatorType.Dpkg); + } + + // Compare epoch + var epochCmp = leftVer!.Epoch.CompareTo(rightVer!.Epoch); + if (epochCmp != 0) + { + proofLines.Add($"Epoch: {leftVer.Epoch} {CompareSymbol(epochCmp)} {rightVer.Epoch} ({ResultString(epochCmp)})"); + return new VersionComparisonResult(epochCmp, [.. proofLines], ComparatorType.Dpkg); + } + proofLines.Add($"Epoch: {leftVer.Epoch} == {rightVer.Epoch} (equal)"); + + // Compare upstream version + var versionCmp = CompareSegmentWithProof(leftVer.Version, rightVer.Version, "Upstream version", proofLines); + if (versionCmp != 0) + { + return new VersionComparisonResult(versionCmp, [.. proofLines], ComparatorType.Dpkg); + } + + // Compare revision + var revisionCmp = CompareSegmentWithProof(leftVer.Revision, rightVer.Revision, "Debian revision", proofLines); + return new VersionComparisonResult(revisionCmp, [.. proofLines], ComparatorType.Dpkg); + } + + private static int CompareSegmentWithProof(string left, string right, string segmentName, List proofLines) + { + var cmp = CompareSegment(left, right); + if (cmp == 0) + { + if (string.IsNullOrEmpty(left) && string.IsNullOrEmpty(right)) + { + proofLines.Add($"{segmentName}: (empty) == (empty) (equal)"); + } + else + { + proofLines.Add($"{segmentName}: {left} == {right} (equal)"); + } + } + else + { + var leftDisplay = string.IsNullOrEmpty(left) ? "(empty)" : left; + var rightDisplay = string.IsNullOrEmpty(right) ? "(empty)" : right; + proofLines.Add($"{segmentName}: {leftDisplay} {CompareSymbol(cmp)} {rightDisplay} ({ResultString(cmp)})"); + } + return cmp; + } + + private static string CompareSymbol(int cmp) => cmp < 0 ? "<" : cmp > 0 ? ">" : "=="; + + private static string ResultString(int cmp) => cmp < 0 ? "left is older" : cmp > 0 ? "left is newer" : "equal"; + + /// + /// Compare two version/revision segments using dpkg semantics. 
+ /// + internal static int CompareSegment(string left, string right) + { + var i = 0; + var j = 0; + + while (i < left.Length || j < right.Length) + { + // Skip non-alphanumeric (except tilde) + while (i < left.Length && !IsAlphaNumeric(left[i]) && left[i] != '~') + { + i++; + } + + while (j < right.Length && !IsAlphaNumeric(right[j]) && right[j] != '~') + { + j++; + } + + var leftChar = i < left.Length ? left[i] : '\0'; + var rightChar = j < right.Length ? right[j] : '\0'; + + // Tilde sorts before everything (including empty) + if (leftChar == '~' || rightChar == '~') + { + if (leftChar != rightChar) + { + return leftChar == '~' ? -1 : 1; + } + + if (leftChar == '~') i++; + if (rightChar == '~') j++; + continue; + } + + var leftIsDigit = char.IsDigit(leftChar); + var rightIsDigit = char.IsDigit(rightChar); + + // Both numeric + if (leftIsDigit && rightIsDigit) + { + var leftStart = i; + while (i < left.Length && char.IsDigit(left[i])) i++; + + var rightStart = j; + while (j < right.Length && char.IsDigit(right[j])) j++; + + // Trim leading zeros + var leftTrimmed = leftStart; + while (leftTrimmed < i && left[leftTrimmed] == '0') leftTrimmed++; + + var rightTrimmed = rightStart; + while (rightTrimmed < j && right[rightTrimmed] == '0') rightTrimmed++; + + var leftLength = i - leftTrimmed; + var rightLength = j - rightTrimmed; + + if (leftLength != rightLength) + { + return leftLength.CompareTo(rightLength); + } + + var comparison = left.AsSpan(leftTrimmed, leftLength) + .CompareTo(right.AsSpan(rightTrimmed, rightLength), StringComparison.Ordinal); + if (comparison != 0) return comparison; + + continue; + } + + // Digits sort after letters + if (leftIsDigit) return 1; + if (rightIsDigit) return -1; + + // Character ordering + var leftOrder = CharOrder(leftChar); + var rightOrder = CharOrder(rightChar); + + var orderComparison = leftOrder.CompareTo(rightOrder); + if (orderComparison != 0) return orderComparison; + + if (leftChar != rightChar) return 
/// <summary>
/// Compares RPM package versions using rpmvercmp semantics.
/// Epoch is compared numerically first, then version, then release.
/// Segments honour tilde (sorts before everything, so <c>1.0~rc1</c> &lt; <c>1.0</c>)
/// and the rpm &gt;= 4.14 caret separator (sorts after end-of-string but before
/// any other character, so <c>1.0^post1</c> &gt; <c>1.0</c> yet &lt; <c>1.0.1</c>).
/// </summary>
public sealed class RpmVersionComparer : IVersionComparator, IComparer<string?>, IComparer<RpmVersion?>
{
    /// <summary>
    /// Singleton instance; the comparer is stateless and thread-safe.
    /// </summary>
    public static RpmVersionComparer Instance { get; } = new();

    private RpmVersionComparer() { }

    /// <inheritdoc />
    public ComparatorType ComparatorType => ComparatorType.RpmEvr;

    /// <summary>
    /// Compares two raw version strings. A parseable version sorts above an
    /// unparseable one; two unparseable versions fall back to ordinal string
    /// comparison so the ordering stays total and deterministic.
    /// </summary>
    public int Compare(string? x, string? y)
    {
        if (ReferenceEquals(x, y)) return 0;
        if (x is null) return -1;
        if (y is null) return 1;

        var xParsed = RpmVersion.TryParse(x, out var xVer);
        var yParsed = RpmVersion.TryParse(y, out var yVer);

        if (xParsed && yParsed)
        {
            return Compare(xVer!, yVer!);
        }

        if (xParsed) return 1;
        if (yParsed) return -1;

        return string.Compare(x, y, StringComparison.Ordinal);
    }

    /// <summary>
    /// Compares two parsed RPM versions: epoch first (numeric), then version,
    /// then release (both via rpmvercmp segment rules). Null sorts below any
    /// non-null value.
    /// </summary>
    public int Compare(RpmVersion? x, RpmVersion? y)
    {
        if (ReferenceEquals(x, y)) return 0;
        if (x is null) return -1;
        if (y is null) return 1;

        var compare = x.Epoch.CompareTo(y.Epoch);
        if (compare != 0) return compare;

        compare = CompareSegment(x.Version, y.Version);
        if (compare != 0) return compare;

        return CompareSegment(x.Release, y.Release);
    }

    /// <summary>
    /// Compares two version strings and records human-readable proof lines
    /// explaining which component (epoch / version / release) decided the result.
    /// </summary>
    public VersionComparisonResult CompareWithProof(string? left, string? right)
    {
        var proofLines = new List<string>();

        if (left is null && right is null)
        {
            proofLines.Add("Both versions are null: equal");
            return new VersionComparisonResult(0, [.. proofLines], ComparatorType.RpmEvr);
        }

        if (left is null)
        {
            proofLines.Add("Left version is null: less than right");
            return new VersionComparisonResult(-1, [.. proofLines], ComparatorType.RpmEvr);
        }

        if (right is null)
        {
            proofLines.Add("Right version is null: left is greater");
            return new VersionComparisonResult(1, [.. proofLines], ComparatorType.RpmEvr);
        }

        var leftParsed = RpmVersion.TryParse(left, out var leftVer);
        var rightParsed = RpmVersion.TryParse(right, out var rightVer);

        if (!leftParsed || !rightParsed)
        {
            if (!leftParsed && !rightParsed)
            {
                // Keep the ordering total even for garbage input.
                var cmp = string.Compare(left, right, StringComparison.Ordinal);
                proofLines.Add($"Both versions invalid, fallback to string comparison: {ResultString(cmp)}");
                return new VersionComparisonResult(cmp, [.. proofLines], ComparatorType.RpmEvr);
            }

            if (!leftParsed)
            {
                proofLines.Add("Left version invalid, right valid: left is less");
                return new VersionComparisonResult(-1, [.. proofLines], ComparatorType.RpmEvr);
            }

            proofLines.Add("Right version invalid, left valid: left is greater");
            return new VersionComparisonResult(1, [.. proofLines], ComparatorType.RpmEvr);
        }

        // Epoch decides first; it is a plain integer comparison.
        var epochCmp = leftVer!.Epoch.CompareTo(rightVer!.Epoch);
        if (epochCmp != 0)
        {
            proofLines.Add($"Epoch: {leftVer.Epoch} {CompareSymbol(epochCmp)} {rightVer.Epoch} ({ResultString(epochCmp)})");
            return new VersionComparisonResult(epochCmp, [.. proofLines], ComparatorType.RpmEvr);
        }
        proofLines.Add($"Epoch: {leftVer.Epoch} == {rightVer.Epoch} (equal)");

        var versionCmp = CompareSegmentWithProof(leftVer.Version, rightVer.Version, "Version", proofLines);
        if (versionCmp != 0)
        {
            return new VersionComparisonResult(versionCmp, [.. proofLines], ComparatorType.RpmEvr);
        }

        var releaseCmp = CompareSegmentWithProof(leftVer.Release, rightVer.Release, "Release", proofLines);
        return new VersionComparisonResult(releaseCmp, [.. proofLines], ComparatorType.RpmEvr);
    }

    /// <summary>
    /// Compares one segment and appends a proof line describing the outcome.
    /// </summary>
    private static int CompareSegmentWithProof(string left, string right, string segmentName, List<string> proofLines)
    {
        var cmp = CompareSegment(left, right);
        if (cmp == 0)
        {
            proofLines.Add($"{segmentName}: {left} == {right} (equal)");
        }
        else
        {
            proofLines.Add($"{segmentName}: {left} {CompareSymbol(cmp)} {right} ({ResultString(cmp)})");
        }
        return cmp;
    }

    private static string CompareSymbol(int cmp) => cmp < 0 ? "<" : cmp > 0 ? ">" : "==";

    private static string ResultString(int cmp) => cmp < 0 ? "left is older" : cmp > 0 ? "left is newer" : "equal";

    /// <summary>
    /// Compares two version/release segments using rpmvercmp semantics:
    /// separators are skipped, tilde sorts before everything, caret sorts
    /// after end-of-string but before anything else, numeric segments beat
    /// alphabetic ones, and segments compare numerically / ordinally.
    /// </summary>
    /// <remarks>
    /// The previous implementation treated '^' as a plain separator, making
    /// "1.0^" compare equal to "1.0"; rpm &gt;= 4.14 defines "1.0^" &gt; "1.0".
    /// </remarks>
    internal static int CompareSegment(string? left, string? right)
    {
        left ??= string.Empty;
        right ??= string.Empty;

        var i = 0;
        var j = 0;

        while (i < left.Length || j < right.Length)
        {
            // Skip separator characters (anything not alphanumeric, '~' or '^').
            while (i < left.Length && !IsSignificant(left[i])) i++;
            while (j < right.Length && !IsSignificant(right[j])) j++;

            var leftChar = i < left.Length ? left[i] : '\0';
            var rightChar = j < right.Length ? right[j] : '\0';

            // Tilde sorts before everything, including end of string.
            if (leftChar == '~' || rightChar == '~')
            {
                if (leftChar != '~') return 1;
                if (rightChar != '~') return -1;
                i++;
                j++;
                continue;
            }

            // Caret: like tilde, except that an ended string is the OLDER one,
            // so "1.0^" > "1.0" while "1.0^" < "1.0.1".
            if (leftChar == '^' || rightChar == '^')
            {
                if (leftChar == '\0') return -1;
                if (rightChar == '\0') return 1;
                if (leftChar != '^') return 1;
                if (rightChar != '^') return -1;
                i++;
                j++;
                continue;
            }

            // One side exhausted: the side with content remaining is newer.
            if (leftChar == '\0' || rightChar == '\0')
            {
                if (leftChar == rightChar) return 0;
                return leftChar == '\0' ? -1 : 1;
            }

            var leftDigit = char.IsAsciiDigit(leftChar);
            var rightDigit = char.IsAsciiDigit(rightChar);

            // A numeric segment always beats an alphabetic one in rpmvercmp.
            if (leftDigit && !rightDigit) return 1;
            if (!leftDigit && rightDigit) return -1;

            var compare = leftDigit
                ? CompareNumericSegment(left, ref i, right, ref j)
                : CompareAlphaSegment(left, ref i, right, ref j);

            if (compare != 0) return compare;
        }

        return 0;
    }

    /// <summary>
    /// Characters that participate in comparison; everything else is a separator.
    /// </summary>
    private static bool IsSignificant(char value) =>
        char.IsAsciiLetterOrDigit(value) || value == '~' || value == '^';

    /// <summary>
    /// Compares the maximal digit runs starting at each index (leading zeros
    /// ignored; longer run wins; same length compares ordinally) and advances
    /// both indices past their runs.
    /// </summary>
    private static int CompareNumericSegment(string value, ref int index, string other, ref int otherIndex)
    {
        var start = index;
        while (index < value.Length && char.IsAsciiDigit(value[index])) index++;

        var otherStart = otherIndex;
        while (otherIndex < other.Length && char.IsAsciiDigit(other[otherIndex])) otherIndex++;

        // Trim leading zeros so "05" == "5".
        var trimmedStart = start;
        while (trimmedStart < index && value[trimmedStart] == '0') trimmedStart++;

        var otherTrimmedStart = otherStart;
        while (otherTrimmedStart < otherIndex && other[otherTrimmedStart] == '0') otherTrimmedStart++;

        var length = index - trimmedStart;
        var otherLength = otherIndex - otherTrimmedStart;

        // More significant digits means a larger number.
        if (length != otherLength) return length.CompareTo(otherLength);

        return value.AsSpan(trimmedStart, length)
            .CompareTo(other.AsSpan(otherTrimmedStart, otherLength), StringComparison.Ordinal);
    }

    /// <summary>
    /// Compares the maximal letter runs starting at each index ordinally and
    /// advances both indices past their runs.
    /// </summary>
    private static int CompareAlphaSegment(string value, ref int index, string other, ref int otherIndex)
    {
        var start = index;
        while (index < value.Length && char.IsAsciiLetter(value[index])) index++;

        var otherStart = otherIndex;
        while (otherIndex < other.Length && char.IsAsciiLetter(other[otherIndex])) otherIndex++;

        return value.AsSpan(start, index - start)
            .CompareTo(other.AsSpan(otherStart, otherIndex - otherStart), StringComparison.Ordinal);
    }
}
/// <summary>
/// Identifies which comparison algorithm produced a result; recorded in
/// evidence and shown in the UI.
/// </summary>
public enum ComparatorType
{
    /// <summary>RPM EVR comparison using rpmvercmp semantics.</summary>
    RpmEvr,

    /// <summary>Debian/Ubuntu version comparison using dpkg semantics.</summary>
    Dpkg,

    /// <summary>Alpine APK version comparison.</summary>
    Apk,

    /// <summary>Semantic versioning (SemVer 2.0).</summary>
    SemVer
}

/// <summary>
/// Outcome of a version comparison together with the proof lines that
/// explain, step by step, how the result was reached.
/// </summary>
/// <param name="Comparison">Negative when left precedes right, zero when equal, positive when left follows right.</param>
/// <param name="ProofLines">Human-readable explanation of each comparison step.</param>
/// <param name="Comparator">The algorithm that produced the result.</param>
public readonly record struct VersionComparisonResult(
    int Comparison,
    ImmutableArray<string> ProofLines,
    ComparatorType Comparator)
{
    /// <summary>True when the left version sorts before the right version.</summary>
    public bool IsLessThan => Comparison is < 0;

    /// <summary>True when both versions compare as equal.</summary>
    public bool IsEqual => Comparison is 0;

    /// <summary>True when the left version sorts after the right version.</summary>
    public bool IsGreaterThan => Comparison is > 0;

    /// <summary>
    /// True when the left version sorts at or after the right version;
    /// convenient for "installed &gt;= fixed" checks.
    /// </summary>
    public bool IsGreaterThanOrEqual => Comparison is >= 0;
}

/// <summary>
/// Distro-native version comparison that can also emit proof lines for
/// explainability.
/// </summary>
public interface IVersionComparator
{
    /// <summary>
    /// The algorithm this comparator implements (for UI display and
    /// evidence recording).
    /// </summary>
    ComparatorType ComparatorType { get; }

    /// <summary>
    /// Compares two version strings using distro-native semantics.
    /// </summary>
    /// <param name="left">First version string.</param>
    /// <param name="right">Second version string.</param>
    /// <returns>Negative when left precedes right, zero when equal, positive when left follows right.</returns>
    int Compare(string? left, string? right);

    /// <summary>
    /// Compares two version strings and additionally produces proof lines
    /// explaining the outcome.
    /// </summary>
    /// <param name="left">First version string (typically the installed version).</param>
    /// <param name="right">Second version string (typically the fixed version).</param>
    /// <returns>The comparison result with its human-readable proof lines.</returns>
    VersionComparisonResult CompareWithProof(string? left, string? right);
}
// ---- file: src/__Libraries/StellaOps.VersionComparison/Models/ApkVersion.cs ----
using System.Globalization;
using System.Text.RegularExpressions;

namespace StellaOps.VersionComparison.Models;

/// <summary>
/// Represents a parsed Alpine APK version: <c>version[_suffix[N]][-rN]</c>.
/// Recognised suffixes, in order: _alpha, _beta, _pre, _rc, (none), _p.
/// </summary>
public sealed partial class ApkVersion
{
    private ApkVersion(string version, string? suffix, int suffixNum, int pkgRel, string original)
    {
        Version = version;
        Suffix = suffix;
        SuffixNum = suffixNum;
        PkgRel = pkgRel;
        Original = original;
    }

    /// <summary>Base version component (without suffix or pkgrel).</summary>
    public string Version { get; }

    /// <summary>Suffix (_alpha, _beta, _pre, _rc, _p) or null for a plain release.</summary>
    public string? Suffix { get; }

    /// <summary>Numeric value following the suffix (e.g. 2 in _rc2); 0 when absent.</summary>
    public int SuffixNum { get; }

    /// <summary>Package release number (the N in -rN); 0 when absent.</summary>
    public int PkgRel { get; }

    /// <summary>Original version string supplied to TryParse.</summary>
    public string Original { get; }

    /// <summary>
    /// Ordering weight for an APK suffix:
    /// _alpha &lt; _beta &lt; _pre &lt; _rc &lt; (none/release) &lt; _p (patch).
    /// Unknown suffixes are treated as a plain release.
    /// </summary>
    public static int GetSuffixOrder(string? suffix) => suffix switch
    {
        "_alpha" => -4,
        "_beta" => -3,
        "_pre" => -2,
        "_rc" => -1,
        null or "" => 0, // Release version
        "_p" => 1,       // Patch (post-release)
        _ => 0           // Unknown suffix treated as release
    };

    /// <summary>Returns a human-readable name for a suffix.</summary>
    public static string GetSuffixName(string? suffix) => suffix switch
    {
        "_alpha" => "alpha",
        "_beta" => "beta",
        "_pre" => "pre-release",
        "_rc" => "release candidate",
        "_p" => "patch",
        null or "" => "release",
        _ => suffix
    };

    // Grammar: version[_suffix[num]][-r<pkgrel>]
    // NOTE(review): Alpine also supports _cvs/_svn/_git/_hg style suffixes;
    // those currently fall through to the lenient fallback below — confirm
    // whether they need first-class ordering support.
    [GeneratedRegex(@"^(?<version>[^_-]+(?:\.[^_-]+)*)(?:_(?<suffix>alpha|beta|pre|rc|p)(?<suffixnum>\d+)?)?(?:-r(?<pkgrel>\d+))?$", RegexOptions.Compiled)]
    private static partial Regex ApkVersionRegex();

    /// <summary>
    /// Attempts to parse the provided APK version string. Never throws: blank
    /// input, oversized numeric components, and empty versions yield false.
    /// </summary>
    public static bool TryParse(string? value, out ApkVersion? result)
    {
        result = null;
        if (string.IsNullOrWhiteSpace(value))
        {
            return false;
        }

        var trimmed = value.Trim();

        // Strict grammar first.
        var match = ApkVersionRegex().Match(trimmed);
        if (match.Success)
        {
            // Bug fix: int.Parse could throw OverflowException for very long
            // digit runs, violating the TryParse contract. Parse defensively;
            // NumberStyles.None means bare digits only (the regex guarantees
            // digits, this only adds overflow safety).
            var suffixNum = 0;
            if (match.Groups["suffixnum"].Success &&
                !int.TryParse(match.Groups["suffixnum"].Value, NumberStyles.None, CultureInfo.InvariantCulture, out suffixNum))
            {
                return false;
            }

            var pkgRel = 0;
            if (match.Groups["pkgrel"].Success &&
                !int.TryParse(match.Groups["pkgrel"].Value, NumberStyles.None, CultureInfo.InvariantCulture, out pkgRel))
            {
                return false;
            }

            var suffix = match.Groups["suffix"].Success ? "_" + match.Groups["suffix"].Value : null;
            result = new ApkVersion(match.Groups["version"].Value, suffix, suffixNum, pkgRel, trimmed);
            return true;
        }

        // Lenient fallback for versions the strict grammar rejects: split off a
        // trailing "-r<digits>" when present, otherwise keep the whole string.
        var versionPart = trimmed;
        var parsedPkgRel = 0;

        var pkgRelIndex = trimmed.LastIndexOf("-r", StringComparison.Ordinal);
        if (pkgRelIndex > 0)
        {
            // Bug fix: NumberStyles.Integer accepted a sign, so "1.0-r-1"
            // produced PkgRel = -1. NumberStyles.None restricts to bare digits.
            var tail = trimmed[(pkgRelIndex + 2)..];
            if (int.TryParse(tail, NumberStyles.None, CultureInfo.InvariantCulture, out parsedPkgRel))
            {
                versionPart = trimmed[..pkgRelIndex];
            }
        }

        if (string.IsNullOrEmpty(versionPart))
        {
            return false;
        }

        result = new ApkVersion(versionPart, null, 0, parsedPkgRel, trimmed);
        return true;
    }

    /// <summary>
    /// Parses the provided APK version string or throws <see cref="FormatException"/>.
    /// </summary>
    public static ApkVersion Parse(string value)
    {
        if (!TryParse(value, out var result))
        {
            throw new FormatException($"Input '{value}' is not a valid APK version string.");
        }
        return result!;
    }

    /// <summary>
    /// Returns a canonical APK version string (suffix and pkgrel emitted only
    /// when present/non-zero).
    /// </summary>
    public string ToCanonicalString()
    {
        var suffixPart = Suffix != null
            ? (SuffixNum > 0 ? $"{Suffix}{SuffixNum}" : Suffix)
            : string.Empty;
        var pkgRelPart = PkgRel > 0 ? $"-r{PkgRel}" : string.Empty;
        return $"{Version}{suffixPart}{pkgRelPart}";
    }

    /// <inheritdoc />
    public override string ToString() => Original;
}
// ---- file: src/__Libraries/StellaOps.VersionComparison/Models/DebianVersion.cs ----
using System.Globalization;

namespace StellaOps.VersionComparison.Models;

/// <summary>
/// Represents a parsed Debian <c>[epoch:]version[-revision]</c> tuple.
/// </summary>
public sealed class DebianVersion
{
    private DebianVersion(int epoch, bool hasExplicitEpoch, string version, string revision, string original)
    {
        Epoch = epoch;
        HasExplicitEpoch = hasExplicitEpoch;
        Version = version;
        Revision = revision;
        Original = original;
    }

    /// <summary>Epoch segment (defaults to 0 when omitted).</summary>
    public int Epoch { get; }

    /// <summary>Indicates whether an epoch segment was present explicitly.</summary>
    public bool HasExplicitEpoch { get; }

    /// <summary>Upstream version portion (without epoch or revision).</summary>
    public string Version { get; }

    /// <summary>Debian revision portion (after the last dash). Empty when omitted.</summary>
    public string Revision { get; }

    /// <summary>Original EVR string supplied to TryParse.</summary>
    public string Original { get; }

    /// <summary>
    /// Attempts to parse the provided EVR string. Returns false for blank input,
    /// an empty, non-numeric, or signed epoch, or an empty upstream version.
    /// </summary>
    public static bool TryParse(string? value, out DebianVersion? result)
    {
        result = null;
        if (string.IsNullOrWhiteSpace(value))
        {
            return false;
        }

        var trimmed = value.Trim();
        var epoch = 0;
        var hasExplicitEpoch = false;
        var remainder = trimmed;

        // The epoch is everything before the FIRST colon; Debian permits colons
        // inside the upstream version once an epoch is present.
        var colonIndex = remainder.IndexOf(':');
        if (colonIndex >= 0)
        {
            if (colonIndex == 0)
            {
                return false; // ":1.0" — epoch marker present but empty
            }

            // Bug fix: NumberStyles.Integer accepted signed epochs ("-1:1.0");
            // Debian policy defines the epoch as an unsigned integer, so
            // restrict parsing to bare digits.
            if (!int.TryParse(remainder[..colonIndex], NumberStyles.None, CultureInfo.InvariantCulture, out epoch))
            {
                return false;
            }

            hasExplicitEpoch = true;
            remainder = colonIndex < remainder.Length - 1 ? remainder[(colonIndex + 1)..] : string.Empty;
        }

        if (string.IsNullOrEmpty(remainder))
        {
            return false;
        }

        // The revision is everything after the LAST dash; native packages
        // (no dash) have an empty revision.
        var version = remainder;
        var revision = string.Empty;

        var dashIndex = remainder.LastIndexOf('-');
        if (dashIndex > 0)
        {
            version = remainder[..dashIndex];
            revision = dashIndex < remainder.Length - 1 ? remainder[(dashIndex + 1)..] : string.Empty;
        }

        if (string.IsNullOrEmpty(version))
        {
            return false;
        }

        result = new DebianVersion(epoch, hasExplicitEpoch, version, revision, trimmed);
        return true;
    }

    /// <summary>
    /// Parses the provided EVR string or throws <see cref="FormatException"/>.
    /// </summary>
    public static DebianVersion Parse(string value)
    {
        if (!TryParse(value, out var result))
        {
            throw new FormatException($"Input '{value}' is not a valid Debian EVR string.");
        }
        return result!;
    }

    /// <summary>
    /// Returns a canonical EVR string; the epoch is emitted only when it was
    /// explicit or is non-zero.
    /// </summary>
    public string ToCanonicalString()
    {
        var epochSegment = HasExplicitEpoch || Epoch > 0 ? $"{Epoch}:" : string.Empty;
        var revisionSegment = string.IsNullOrEmpty(Revision) ? string.Empty : $"-{Revision}";
        return $"{epochSegment}{Version}{revisionSegment}";
    }

    /// <inheritdoc />
    public override string ToString() => Original;
}
// ---- file: src/__Libraries/StellaOps.VersionComparison/Models/RpmVersion.cs ----
using System.Globalization;

namespace StellaOps.VersionComparison.Models;

/// <summary>
/// Represents a parsed RPM EVR (Epoch:Version-Release) identifier.
/// </summary>
public sealed class RpmVersion
{
    private RpmVersion(int epoch, bool hasExplicitEpoch, string version, string release, string original)
    {
        Epoch = epoch;
        HasExplicitEpoch = hasExplicitEpoch;
        Version = version;
        Release = release;
        Original = original;
    }

    /// <summary>Epoch segment (defaults to 0 when omitted).</summary>
    public int Epoch { get; }

    /// <summary>Indicates whether an epoch segment was present explicitly.</summary>
    public bool HasExplicitEpoch { get; }

    /// <summary>Version component (without epoch or release).</summary>
    public string Version { get; }

    /// <summary>Release component; empty when omitted.</summary>
    public string Release { get; }

    /// <summary>Original EVR string supplied to TryParse.</summary>
    public string Original { get; }

    /// <summary>
    /// Attempts to parse the provided EVR string. Returns false for blank input,
    /// a non-numeric or signed epoch, or an empty version.
    /// </summary>
    public static bool TryParse(string? value, out RpmVersion? result)
    {
        result = null;
        if (string.IsNullOrWhiteSpace(value))
        {
            return false;
        }

        var trimmed = value.Trim();
        var epoch = 0;
        var hasExplicitEpoch = false;
        var remainder = trimmed;

        // Parse epoch (text before the first colon).
        var colonIndex = remainder.IndexOf(':');
        if (colonIndex >= 0)
        {
            if (colonIndex == 0)
            {
                // ":1.0" is tolerated as an explicit zero epoch here.
                // NOTE(review): DebianVersion rejects this form — confirm
                // whether the leniency is intentional for RPM input.
                hasExplicitEpoch = true;
                remainder = remainder[1..];
            }
            else
            {
                // Bug fix: NumberStyles.Integer accepted signed epochs
                // ("-1:1.0"); RPM epochs are unsigned, so restrict parsing to
                // bare digits.
                if (!int.TryParse(remainder[..colonIndex], NumberStyles.None, CultureInfo.InvariantCulture, out epoch))
                {
                    return false;
                }
                hasExplicitEpoch = true;
                remainder = colonIndex < remainder.Length - 1 ? remainder[(colonIndex + 1)..] : string.Empty;
            }
        }

        if (string.IsNullOrEmpty(remainder))
        {
            return false;
        }

        // Parse version and release: the release is everything after the LAST dash.
        var version = remainder;
        var release = string.Empty;

        var dashIndex = remainder.LastIndexOf('-');
        if (dashIndex > 0)
        {
            version = remainder[..dashIndex];
            release = dashIndex < remainder.Length - 1 ? remainder[(dashIndex + 1)..] : string.Empty;
        }

        if (string.IsNullOrEmpty(version))
        {
            return false;
        }

        result = new RpmVersion(epoch, hasExplicitEpoch, version, release, trimmed);
        return true;
    }

    /// <summary>
    /// Parses the provided EVR string or throws <see cref="FormatException"/>.
    /// </summary>
    public static RpmVersion Parse(string value)
    {
        if (!TryParse(value, out var result))
        {
            throw new FormatException($"Input '{value}' is not a valid RPM EVR string.");
        }
        return result!;
    }

    /// <summary>
    /// Returns a canonical EVR string; the epoch is emitted only when it was
    /// explicit or is non-zero.
    /// </summary>
    public string ToCanonicalString()
    {
        var epochSegment = HasExplicitEpoch || Epoch > 0 ? $"{Epoch}:" : string.Empty;
        var releaseSegment = string.IsNullOrEmpty(Release) ? string.Empty : $"-{Release}";
        return $"{epochSegment}{Version}{releaseSegment}";
    }

    /// <inheritdoc />
    public override string ToString() => Original;
}
// ---- file: src/__Libraries/__Tests/StellaOps.VersionComparison.Tests/DebianVersionComparerTests.cs ----
using FluentAssertions;
using StellaOps.VersionComparison;
using StellaOps.VersionComparison.Comparers;

namespace StellaOps.VersionComparison.Tests;

/// <summary>
/// Exercises <see cref="DebianVersionComparer"/>: dpkg ordering rules,
/// proof-line generation, and advisory-style installed-vs-fixed checks.
/// </summary>
public class DebianVersionComparerTests
{
    private readonly DebianVersionComparer _sut = DebianVersionComparer.Instance;

    #region Basic Comparison Tests

    public static TheoryData<string, string, int> DebianComparisonCases => new()
    {
        // Epoch dominates everything after it
        { "0:1.0-1", "1:0.1-1", -1 },
        { "1:1.0-1", "0:9.9-9", 1 },
        { "1.0-1", "0:1.0-1", 0 }, // an omitted epoch is epoch 0
        { "2:1.0-1", "1:9.9-9", 1 },

        // Upstream version ordering
        { "1.9-1", "1.10-1", -1 },
        { "1.02-1", "1.2-1", 0 }, // leading zeros are insignificant
        { "1.0-1", "1.0.1-1", -1 },

        // Tilde sorts before anything, including end of string
        { "1.0~rc1-1", "1.0-1", -1 },
        { "1.0~alpha-1", "1.0~beta-1", -1 },
        { "2.0~rc1", "2.0", -1 },
        { "1.0~-1", "1.0-1", -1 },

        // Debian revision ordering
        { "1.0-1", "1.0-2", -1 },
        { "1.0-1ubuntu1", "1.0-1ubuntu2", -1 },
        { "1.0-1+deb11u1", "1.0-1+deb11u2", -1 },

        // Ubuntu backport / rebuild patterns
        { "1.0-1", "1.0-1ubuntu0.1", -1 },
        { "1.0-1ubuntu0.1", "1.0-1ubuntu0.2", -1 },
        { "1.0-1build1", "1.0-1build2", -1 },

        // Native packages carry no revision at all
        { "1.0", "1.0-1", -1 },
        { "1.1", "1.0-99", 1 },
    };

    [Theory]
    [MemberData(nameof(DebianComparisonCases))]
    public void Compare_DebianVersions_ReturnsExpectedOrder(string left, string right, int expected)
        => Math.Sign(_sut.Compare(left, right))
            .Should().Be(expected, because: $"comparing '{left}' with '{right}'");

    [Fact]
    public void Compare_SameVersion_ReturnsZero()
        => _sut.Compare("1:1.1.1k-1+deb11u1", "1:1.1.1k-1+deb11u1").Should().Be(0);

    [Fact]
    public void Compare_NullLeft_ReturnsNegative()
        => _sut.Compare(null, "1.0-1").Should().BeNegative();

    [Fact]
    public void Compare_NullRight_ReturnsPositive()
        => _sut.Compare("1.0-1", null).Should().BePositive();

    #endregion

    #region Proof Line Tests

    [Fact]
    public void CompareWithProof_EpochDifference_ReturnsEpochProof()
    {
        var outcome = _sut.CompareWithProof("0:1.0-1", "1:0.1-1");

        outcome.Comparison.Should().BeNegative();
        outcome.Comparator.Should().Be(ComparatorType.Dpkg);
        outcome.ProofLines.Should().Contain(line => line.Contains("Epoch:") && line.Contains("left is older"));
    }

    [Fact]
    public void CompareWithProof_SameEpochDifferentVersion_ReturnsVersionProof()
    {
        var outcome = _sut.CompareWithProof("1:1.1.1k-1", "1:1.1.1l-1");

        outcome.Comparison.Should().BeNegative();
        outcome.ProofLines.Should().Contain(line => line.Contains("Epoch:") && line.Contains("equal"));
        outcome.ProofLines.Should().Contain(line => line.Contains("Upstream version:") && line.Contains("left is older"));
    }

    [Fact]
    public void CompareWithProof_SameVersionDifferentRevision_ReturnsRevisionProof()
    {
        var outcome = _sut.CompareWithProof("1:1.1.1k-1+deb11u1", "1:1.1.1k-1+deb11u2");

        outcome.Comparison.Should().BeNegative();
        outcome.ProofLines.Should().Contain(line => line.Contains("Upstream version:") && line.Contains("equal"));
        outcome.ProofLines.Should().Contain(line => line.Contains("Debian revision:") && line.Contains("left is older"));
    }

    [Fact]
    public void CompareWithProof_EqualVersions_ReturnsEqualProof()
    {
        var outcome = _sut.CompareWithProof("1:1.1.1k-1+deb11u1", "1:1.1.1k-1+deb11u1");

        outcome.Comparison.Should().Be(0);
        outcome.IsEqual.Should().BeTrue();
        outcome.ProofLines.Should().AllSatisfy(line => line.Should().Contain("equal"));
    }

    [Fact]
    public void CompareWithProof_TildePreRelease_ReturnsCorrectProof()
    {
        var outcome = _sut.CompareWithProof("2.0~rc1-1", "2.0-1");

        outcome.Comparison.Should().BeNegative();
        outcome.ProofLines.Should().Contain(line => line.Contains("Upstream version:") && line.Contains("left is older"));
    }

    [Fact]
    public void CompareWithProof_NativePackage_HandlesEmptyRevision()
    {
        var outcome = _sut.CompareWithProof("1.0", "1.0-1");

        outcome.Comparison.Should().BeNegative();
        outcome.ProofLines.Should().Contain(line => line.Contains("Debian revision:"));
    }

    #endregion

    #region Edge Cases

    [Theory]
    [InlineData("1:1.2-1", "0:9.9-9", 1)]              // epoch jump
    [InlineData("2.0~rc1", "2.0", -1)]                 // tilde pre-release
    [InlineData("1.2-3+deb12u1", "1.2-3+deb12u2", -1)] // Debian stable update
    [InlineData("1.2-3ubuntu0.1", "1.2-3", 1)]         // Ubuntu backport
    public void Compare_EdgeCases_ReturnsExpectedOrder(string left, string right, int expected)
        => Math.Sign(_sut.Compare(left, right)).Should().Be(expected);

    #endregion

    #region Real-World Advisory Scenarios

    [Fact]
    public void Compare_OpenSSL_DebianBackport_CorrectlyIdentifiesVulnerable()
    {
        // Installed version vs fixed version from a DSA advisory.
        var installed = "1:1.1.1k-1+deb11u1";
        var fixedVersion = "1:1.1.1k-1+deb11u2";

        var outcome = _sut.CompareWithProof(installed, fixedVersion);

        outcome.IsLessThan.Should().BeTrue("installed version should be less than fixed");
        outcome.IsGreaterThanOrEqual.Should().BeFalse("installed is VULNERABLE");
    }

    [Fact]
    public void Compare_OpenSSL_DebianBackport_CorrectlyIdentifiesFixed()
    {
        // Installed version >= fixed version.
        var installed = "1:1.1.1k-1+deb11u2";
        var fixedVersion = "1:1.1.1k-1+deb11u2";

        var outcome = _sut.CompareWithProof(installed, fixedVersion);

        outcome.IsGreaterThanOrEqual.Should().BeTrue("installed version is FIXED");
    }

    [Fact]
    public void Compare_UbuntuSecurityBackport_CorrectlyIdentifies()
    {
        // Ubuntu security backport pattern.
        var installed = "1.0-1ubuntu0.1";
        var fixedVersion = "1.0-1ubuntu0.2";

        var outcome = _sut.CompareWithProof(installed, fixedVersion);

        outcome.IsLessThan.Should().BeTrue("installed is older security backport");
    }

    #endregion
}
// ---- file: src/__Libraries/__Tests/StellaOps.VersionComparison.Tests/RpmVersionComparerTests.cs ----
using FluentAssertions;
using StellaOps.VersionComparison;
using StellaOps.VersionComparison.Comparers;

namespace StellaOps.VersionComparison.Tests;

/// <summary>
/// Exercises <see cref="RpmVersionComparer"/>: rpmvercmp ordering rules and
/// proof-line generation.
/// </summary>
public class RpmVersionComparerTests
{
    private readonly RpmVersionComparer _sut = RpmVersionComparer.Instance;

    #region Basic Comparison Tests

    public static TheoryData<string, string, int> RpmComparisonCases => new()
    {
        // Epoch dominates everything after it
        { "0:1.0-1", "1:0.1-1", -1 },
        { "1:1.0-1", "0:9.9-9", 1 },
        { "1.0-1", "0:1.0-1", 0 }, // an omitted epoch is epoch 0
        { "2:1.0-1", "1:9.9-9", 1 },

        // Numeric segments compare as whole numbers
        { "1.9-1", "1.10-1", -1 },
        { "1.02-1", "1.2-1", 0 }, // leading zeros are insignificant
        { "1.0-1", "1.0.1-1", -1 },
        { "2.0-1", "1.999-1", 1 },

        // Tilde sorts before anything, including end of string
        { "1.0~rc1-1", "1.0-1", -1 },
        { "1.0~alpha-1", "1.0~beta-1", -1 },
        { "1.0~-1", "1.0-1", -1 },
        { "1.0~~-1", "1.0~-1", -1 }, // double tilde < single tilde

        // Release qualifiers as seen in RHEL backports
        { "1.0-1.el8", "1.0-1.el8_5", -1 }, // base < security update
        { "1.0-1.el8_5", "1.0-1.el8_5.1", -1 },
        { "1.0-1.el8", "1.0-1.el9", -1 },
        { "1.0-1.el9_2", "1.0-1.el9_3", -1 },

        // A trailing alpha segment counts as an extra segment, so 1.0a > 1.0
        { "1.0a-1", "1.0-1", 1 },
        { "1.0-1", "1.0a-1", -1 },
    };

    [Theory]
    [MemberData(nameof(RpmComparisonCases))]
    public void Compare_RpmVersions_ReturnsExpectedOrder(string left, string right, int expected)
        => Math.Sign(_sut.Compare(left, right))
            .Should().Be(expected, because: $"comparing '{left}' with '{right}'");

    [Fact]
    public void Compare_SameVersion_ReturnsZero()
        => _sut.Compare("1.0-1.el8", "1.0-1.el8").Should().Be(0);

    [Fact]
    public void Compare_NullLeft_ReturnsNegative()
        => _sut.Compare(null, "1.0-1").Should().BeNegative();

    [Fact]
    public void Compare_NullRight_ReturnsPositive()
        => _sut.Compare("1.0-1", null).Should().BePositive();

    #endregion

    #region Proof Line Tests

    [Fact]
    public void CompareWithProof_EpochDifference_ReturnsEpochProof()
    {
        var outcome = _sut.CompareWithProof("0:1.0-1", "1:0.1-1");

        outcome.Comparison.Should().BeNegative();
        outcome.Comparator.Should().Be(ComparatorType.RpmEvr);
        outcome.ProofLines.Should().Contain(line => line.Contains("Epoch:") && line.Contains("left is older"));
    }

    [Fact]
    public void CompareWithProof_SameEpochDifferentVersion_ReturnsVersionProof()
    {
        var outcome = _sut.CompareWithProof("1:1.0-1", "1:2.0-1");

        outcome.Comparison.Should().BeNegative();
        outcome.ProofLines.Should().Contain(line => line.Contains("Epoch:") && line.Contains("equal"));
        outcome.ProofLines.Should().Contain(line => line.Contains("Version:") && line.Contains("left is older"));
    }

    [Fact]
    public void CompareWithProof_SameVersionDifferentRelease_ReturnsReleaseProof()
    {
        var outcome = _sut.CompareWithProof("1.0-1.el8", "1.0-1.el8_5");

        outcome.Comparison.Should().BeNegative();
        outcome.ProofLines.Should().Contain(line => line.Contains("Release:") && line.Contains("left is older"));
    }

    [Fact]
    public void CompareWithProof_EqualVersions_ReturnsEqualProof()
    {
        var outcome = _sut.CompareWithProof("1.0-1.el8", "1.0-1.el8");

        outcome.Comparison.Should().Be(0);
        outcome.IsEqual.Should().BeTrue();
        outcome.ProofLines.Should().AllSatisfy(line => line.Should().Contain("equal"));
    }

    [Fact]
    public void CompareWithProof_TildePreRelease_ReturnsCorrectProof()
    {
        var outcome = _sut.CompareWithProof("1.0~rc1-1", "1.0-1");

        outcome.Comparison.Should().BeNegative();
        outcome.ProofLines.Should().Contain(line => line.Contains("Version:") && line.Contains("left is older"));
    }

    #endregion

    #region Edge Cases

    [Theory]
    [InlineData("1:1.2-1", "0:9.9-9", 1)]            // epoch jump
    [InlineData("2.0~rc1", "2.0", -1)]               // tilde pre-release
    [InlineData("1.2-3.el9_2", "1.2-3.el9_3", -1)]   // release qualifier
    [InlineData("1.2-3ubuntu0.1", "1.2-3", 1)]       // longer release wins
    public void Compare_EdgeCases_ReturnsExpectedOrder(string left, string right, int expected)
        => Math.Sign(_sut.Compare(left, right)).Should().Be(expected);

    #endregion
}
@@ + + + + net10.0 + enable + enable + preview + false + + + + + + + + + + +