Add unit and integration tests for VexCandidateEmitter and SmartDiff repositories
- Implemented comprehensive unit tests for VexCandidateEmitter to validate candidate emission logic based on various scenarios including absent and present APIs, confidence thresholds, and rate limiting.
- Added integration tests for SmartDiff PostgreSQL repositories, covering snapshot storage and retrieval, candidate storage, and material risk change handling.
- Ensured tests validate correct behavior for storing, retrieving, and querying snapshots and candidates, including edge cases and expected outcomes.
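For illustration only, a minimal xUnit sketch of the confidence-threshold behaviour the message describes; VexCandidateEmitter's real API is not part of this diff, so the emitter surface below is a hypothetical stand-in.

using Xunit;

public sealed class VexCandidateEmitterThresholdSketch
{
    // Hypothetical stand-in mirroring the threshold rule named in the commit message.
    private sealed class FakeEmitter
    {
        private readonly double _minConfidence;
        public FakeEmitter(double minConfidence) => _minConfidence = minConfidence;
        public bool ShouldEmit(double confidence) => confidence >= _minConfidence;
    }

    [Theory]
    [InlineData(0.95, true)]   // above threshold: candidate emitted
    [InlineData(0.10, false)]  // below threshold: suppressed
    public void Emit_RespectsConfidenceThreshold(double confidence, bool expected)
    {
        var emitter = new FakeEmitter(minConfidence: 0.5);
        Assert.Equal(expected, emitter.ShouldEmit(confidence));
    }
}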
@@ -7,7 +7,7 @@
- **Controller engineer (ASP.NET Core)**: seal/unseal state machine, status APIs, Authority scope enforcement.
- **Importer engineer**: bundle verification (TUF/DSSE), catalog repositories, object-store loaders.
- **Time engineer**: time anchor parsing/verification (Roughtime, RFC3161), staleness calculators.
- **QA/Automation**: API + storage tests (Mongo2Go/in-memory), deterministic ordering, sealed/offline paths.
- **QA/Automation**: API + storage tests (Testcontainers/in-memory), deterministic ordering, sealed/offline paths.
- **Docs/Runbooks**: keep air-gap ops guides, scaffolds, and schemas aligned with behavior.

## Required Reading (treat as read before DOING)

@@ -33,10 +33,9 @@
- Cross-module edits require sprint note; otherwise stay within `src/AirGap`.

## Testing Rules

- Use Mongo2Go/in-memory stores; no network.
- Use Testcontainers (PostgreSQL)/in-memory stores; no network.
- Cover sealed/unsealed transitions, staleness budgets, trust-root failures, deterministic ordering.
- API tests via WebApplicationFactory; importer tests use local fixture bundles (no downloads).
- If Mongo2Go fails to start (OpenSSL 1.1 missing), see `tests/AirGap/README.md` for the shim note.
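A minimal sketch of the Testcontainers (PostgreSQL) pattern the new rule refers to, assuming the Testcontainers.PostgreSql NuGet package and xUnit; the fixture class name is illustrative.

using Testcontainers.PostgreSql;
using Xunit;

public sealed class PostgresFixture : IAsyncLifetime
{
    // Spins up a disposable PostgreSQL container for the test class.
    private readonly PostgreSqlContainer _container = new PostgreSqlBuilder().Build();

    public string ConnectionString => _container.GetConnectionString();

    public Task InitializeAsync() => _container.StartAsync();
    public Task DisposeAsync() => _container.DisposeAsync().AsTask();
}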
## Delivery Discipline

- Update sprint tracker statuses (`TODO → DOING → DONE/BLOCKED`); log decisions in Execution Log and Decisions & Risks.
@@ -17,7 +17,7 @@ Operate the StellaOps Attestor service: accept signed DSSE envelopes from the Si
## Key Directories
- `src/Attestor/StellaOps.Attestor/StellaOps.Attestor.WebService/` — Minimal API host and HTTP surface.
- `src/Attestor/StellaOps.Attestor/StellaOps.Attestor.Core/` — Domain contracts, submission/verification pipelines.
- `src/Attestor/StellaOps.Attestor/StellaOps.Attestor.Infrastructure/` — Mongo, Redis, Rekor, and archival implementations.
- `src/Attestor/StellaOps.Attestor/StellaOps.Attestor.Infrastructure/` — PostgreSQL, Redis, Rekor, and archival implementations.
- `src/Attestor/StellaOps.Attestor/StellaOps.Attestor.Tests/` — Unit and integration tests.

---
@@ -37,6 +37,29 @@ public sealed class AttestorMetrics : IDisposable
        RekorOfflineVerifyTotal = _meter.CreateCounter<long>("attestor.rekor_offline_verify_total", description: "Rekor offline mode verification attempts grouped by result.");
        RekorCheckpointCacheHits = _meter.CreateCounter<long>("attestor.rekor_checkpoint_cache_hits", description: "Rekor checkpoint cache hits.");
        RekorCheckpointCacheMisses = _meter.CreateCounter<long>("attestor.rekor_checkpoint_cache_misses", description: "Rekor checkpoint cache misses.");

        // SPRINT_3000_0001_0002 - Rekor retry queue metrics
        RekorQueueDepth = _meter.CreateObservableGauge("attestor.rekor_queue_depth",
            () => _queueDepthCallback?.Invoke() ?? 0,
            description: "Current Rekor queue depth (pending + retrying items).");
        RekorRetryAttemptsTotal = _meter.CreateCounter<long>("attestor.rekor_retry_attempts_total", description: "Total Rekor retry attempts grouped by backend and attempt number.");
        RekorSubmissionStatusTotal = _meter.CreateCounter<long>("attestor.rekor_submission_status_total", description: "Total Rekor submission status changes grouped by status and backend.");
        RekorQueueWaitTime = _meter.CreateHistogram<double>("attestor.rekor_queue_wait_seconds", unit: "s", description: "Time items spend waiting in the Rekor queue in seconds.");
        RekorDeadLetterTotal = _meter.CreateCounter<long>("attestor.rekor_dead_letter_total", description: "Total dead letter items grouped by backend.");

        // SPRINT_3000_0001_0003 - Time skew validation metrics
        TimeSkewDetectedTotal = _meter.CreateCounter<long>("attestor.time_skew_detected_total", description: "Time skew anomalies detected grouped by severity and action.");
        TimeSkewSeconds = _meter.CreateHistogram<double>("attestor.time_skew_seconds", unit: "s", description: "Distribution of time skew values in seconds.");
    }

    private Func<int>? _queueDepthCallback;

    /// <summary>
    /// Register a callback to provide the current queue depth.
    /// </summary>
    public void RegisterQueueDepthCallback(Func<int> callback)
    {
        _queueDepthCallback = callback;
    }

    public Counter<long> SubmitTotal { get; }
@@ -107,6 +130,43 @@ public sealed class AttestorMetrics : IDisposable
    /// </summary>
    public Counter<long> RekorCheckpointCacheMisses { get; }

    // SPRINT_3000_0001_0002 - Rekor retry queue metrics
    /// <summary>
    /// Current Rekor queue depth (pending + retrying items).
    /// </summary>
    public ObservableGauge<int> RekorQueueDepth { get; }

    /// <summary>
    /// Total Rekor retry attempts grouped by backend and attempt number.
    /// </summary>
    public Counter<long> RekorRetryAttemptsTotal { get; }

    /// <summary>
    /// Total Rekor submission status changes grouped by status and backend.
    /// </summary>
    public Counter<long> RekorSubmissionStatusTotal { get; }

    /// <summary>
    /// Time items spend waiting in the Rekor queue in seconds.
    /// </summary>
    public Histogram<double> RekorQueueWaitTime { get; }

    /// <summary>
    /// Total dead letter items grouped by backend.
    /// </summary>
    public Counter<long> RekorDeadLetterTotal { get; }

    // SPRINT_3000_0001_0003 - Time skew validation metrics
    /// <summary>
    /// Time skew anomalies detected grouped by severity and action.
    /// </summary>
    public Counter<long> TimeSkewDetectedTotal { get; }

    /// <summary>
    /// Distribution of time skew values in seconds.
    /// </summary>
    public Histogram<double> TimeSkewSeconds { get; }

    public void Dispose()
    {
        if (_disposed)
@@ -1,4 +1,5 @@
using System.Collections.Generic;
using StellaOps.Attestor.Core.Verification;
using StellaOps.Cryptography;

namespace StellaOps.Attestor.Core.Options;
@@ -32,6 +33,11 @@ public sealed class AttestorOptions
    public TransparencyWitnessOptions TransparencyWitness { get; set; } = new();
    public VerificationOptions Verification { get; set; } = new();

    /// <summary>
    /// Time skew validation options per SPRINT_3000_0001_0003.
    /// </summary>
    public TimeSkewOptions TimeSkew { get; set; } = new();

public sealed class SecurityOptions
{
@@ -0,0 +1,114 @@
// -----------------------------------------------------------------------------
// IRekorSubmissionQueue.cs
// Sprint: SPRINT_3000_0001_0002_rekor_retry_queue_metrics
// Task: T3
// Description: Interface for the Rekor submission queue
// -----------------------------------------------------------------------------

namespace StellaOps.Attestor.Core.Queue;

/// <summary>
/// Interface for the durable Rekor submission queue.
/// </summary>
public interface IRekorSubmissionQueue
{
    /// <summary>
    /// Enqueue a DSSE envelope for Rekor submission.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="bundleSha256">SHA-256 hash of the bundle being attested.</param>
    /// <param name="dssePayload">Serialized DSSE envelope payload.</param>
    /// <param name="backend">Target Rekor backend ('primary' or 'mirror').</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The ID of the created queue item.</returns>
    Task<Guid> EnqueueAsync(
        string tenantId,
        string bundleSha256,
        byte[] dssePayload,
        string backend,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Dequeue items ready for submission/retry.
    /// Items are atomically transitioned to Submitting status.
    /// </summary>
    /// <param name="batchSize">Maximum number of items to dequeue.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>List of items ready for processing.</returns>
    Task<IReadOnlyList<RekorQueueItem>> DequeueAsync(
        int batchSize,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Mark item as successfully submitted.
    /// </summary>
    /// <param name="id">Queue item ID.</param>
    /// <param name="rekorUuid">UUID from Rekor.</param>
    /// <param name="logIndex">Log index from Rekor.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task MarkSubmittedAsync(
        Guid id,
        string rekorUuid,
        long? logIndex,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Mark item for retry with exponential backoff.
    /// </summary>
    /// <param name="id">Queue item ID.</param>
    /// <param name="error">Error message from the failed attempt.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task MarkRetryAsync(
        Guid id,
        string error,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Move item to dead letter after max retries.
    /// </summary>
    /// <param name="id">Queue item ID.</param>
    /// <param name="error">Error message from the final failed attempt.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task MarkDeadLetterAsync(
        Guid id,
        string error,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Get item by ID.
    /// </summary>
    /// <param name="id">Queue item ID.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The queue item, or null if not found.</returns>
    Task<RekorQueueItem?> GetByIdAsync(
        Guid id,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Get current queue depth by status.
    /// </summary>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Snapshot of queue depth.</returns>
    Task<QueueDepthSnapshot> GetQueueDepthAsync(
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Purge dead letter items older than the retention period.
    /// </summary>
    /// <param name="retentionDays">Items older than this are purged.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Number of items purged.</returns>
    Task<int> PurgeDeadLetterAsync(
        int retentionDays,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Re-enqueue a dead letter item for retry.
    /// </summary>
    /// <param name="id">Queue item ID.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>True if the item was re-enqueued.</returns>
    Task<bool> RequeueDeadLetterAsync(
        Guid id,
        CancellationToken cancellationToken = default);
}
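Illustrative only: a minimal enqueue/dequeue round trip against the interface above. `queue` is any IRekorSubmissionQueue implementation (such as the PostgreSQL one later in this commit); the hash and payload bytes are placeholders.

using StellaOps.Attestor.Core.Queue;

public static class RekorQueueUsageSketch
{
    public static async Task RunAsync(IRekorSubmissionQueue queue, CancellationToken ct)
    {
        // Enqueue a DSSE envelope for the primary backend.
        var id = await queue.EnqueueAsync(
            tenantId: "tenant-a",
            bundleSha256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
            dssePayload: new byte[] { 0x7b, 0x7d }, // "{}" placeholder payload
            backend: "primary",
            cancellationToken: ct);

        // A worker drains ready items; DequeueAsync moves them to Submitting atomically.
        foreach (var item in await queue.DequeueAsync(batchSize: 10, ct))
        {
            // On success, record the Rekor coordinates; on failure, MarkRetryAsync
            // or MarkDeadLetterAsync would be called instead.
            await queue.MarkSubmittedAsync(item.Id, rekorUuid: "placeholder-uuid", logIndex: 42, ct);
        }
    }
}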
@@ -10,34 +10,47 @@ namespace StellaOps.Attestor.Core.Queue;
/// <summary>
/// Represents an item in the Rekor submission queue.
/// </summary>
/// <param name="Id">Unique identifier for the queue item.</param>
/// <param name="TenantId">Tenant identifier.</param>
/// <param name="BundleSha256">SHA-256 hash of the bundle being attested.</param>
/// <param name="DssePayload">Serialized DSSE envelope payload.</param>
/// <param name="Backend">Target Rekor backend ('primary' or 'mirror').</param>
/// <param name="Status">Current submission status.</param>
/// <param name="AttemptCount">Number of submission attempts made.</param>
/// <param name="MaxAttempts">Maximum allowed attempts before dead-lettering.</param>
/// <param name="LastAttemptAt">Timestamp of the last submission attempt.</param>
/// <param name="LastError">Error message from the last failed attempt.</param>
/// <param name="NextRetryAt">Scheduled time for the next retry attempt.</param>
/// <param name="RekorUuid">UUID from Rekor after successful submission.</param>
/// <param name="RekorLogIndex">Log index from Rekor after successful submission.</param>
/// <param name="CreatedAt">Timestamp when the item was created.</param>
/// <param name="UpdatedAt">Timestamp when the item was last updated.</param>
public sealed record RekorQueueItem(
    Guid Id,
    string TenantId,
    string BundleSha256,
    byte[] DssePayload,
    string Backend,
    RekorSubmissionStatus Status,
    int AttemptCount,
    int MaxAttempts,
    DateTimeOffset? LastAttemptAt,
    string? LastError,
    DateTimeOffset? NextRetryAt,
    string? RekorUuid,
    long? RekorLogIndex,
    DateTimeOffset CreatedAt,
    DateTimeOffset UpdatedAt);
public sealed class RekorQueueItem
{
    /// <summary>Unique identifier for the queue item.</summary>
    public required Guid Id { get; init; }

    /// <summary>Tenant identifier.</summary>
    public required string TenantId { get; init; }

    /// <summary>SHA-256 hash of the bundle being attested.</summary>
    public required string BundleSha256 { get; init; }

    /// <summary>Serialized DSSE envelope payload.</summary>
    public required byte[] DssePayload { get; init; }

    /// <summary>Target Rekor backend ('primary' or 'mirror').</summary>
    public required string Backend { get; init; }

    /// <summary>Current submission status.</summary>
    public required RekorSubmissionStatus Status { get; init; }

    /// <summary>Number of submission attempts made.</summary>
    public required int AttemptCount { get; init; }

    /// <summary>Maximum allowed attempts before dead-lettering.</summary>
    public required int MaxAttempts { get; init; }

    /// <summary>Scheduled time for the next retry attempt.</summary>
    public DateTimeOffset? NextRetryAt { get; init; }

    /// <summary>Timestamp when the item was created.</summary>
    public required DateTimeOffset CreatedAt { get; init; }

    /// <summary>Timestamp when the item was last updated.</summary>
    public required DateTimeOffset UpdatedAt { get; init; }

    /// <summary>Error message from the last failed attempt.</summary>
    public string? LastError { get; init; }

    /// <summary>UUID from Rekor after successful submission.</summary>
    public string? RekorUuid { get; init; }

    /// <summary>Log index from Rekor after successful submission.</summary>
    public long? RekorIndex { get; init; }
}
@@ -92,6 +92,20 @@ public sealed class AttestorEntry
    public string Url { get; init; } = string.Empty;

    public string? LogId { get; init; }

    /// <summary>
    /// Unix timestamp when entry was integrated into the Rekor log.
    /// Used for time skew validation (SPRINT_3000_0001_0003).
    /// </summary>
    public long? IntegratedTime { get; init; }

    /// <summary>
    /// Gets the integrated time as UTC DateTimeOffset.
    /// </summary>
    public DateTimeOffset? IntegratedTimeUtc =>
        IntegratedTime.HasValue
            ? DateTimeOffset.FromUnixTimeSeconds(IntegratedTime.Value)
            : null;
}

public sealed class SignerIdentityDescriptor
@@ -0,0 +1,102 @@
// -----------------------------------------------------------------------------
// InstrumentedTimeSkewValidator.cs
// Sprint: SPRINT_3000_0001_0003_rekor_time_skew_validation
// Task: T7, T8
// Description: Time skew validator with metrics and structured logging
// -----------------------------------------------------------------------------

using Microsoft.Extensions.Logging;
using StellaOps.Attestor.Core.Observability;

namespace StellaOps.Attestor.Core.Verification;

/// <summary>
/// Time skew validator with integrated metrics and structured logging.
/// Wraps the base TimeSkewValidator with observability.
/// </summary>
public sealed class InstrumentedTimeSkewValidator : ITimeSkewValidator
{
    private readonly TimeSkewValidator _inner;
    private readonly AttestorMetrics _metrics;
    private readonly ILogger<InstrumentedTimeSkewValidator> _logger;

    public InstrumentedTimeSkewValidator(
        TimeSkewOptions options,
        AttestorMetrics metrics,
        ILogger<InstrumentedTimeSkewValidator> logger)
    {
        _inner = new TimeSkewValidator(options ?? throw new ArgumentNullException(nameof(options)));
        _metrics = metrics ?? throw new ArgumentNullException(nameof(metrics));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <inheritdoc />
    public TimeSkewValidationResult Validate(DateTimeOffset? integratedTime, DateTimeOffset? localTime = null)
    {
        var result = _inner.Validate(integratedTime, localTime);

        // Record skew distribution for all validations (except skipped)
        if (result.Status != TimeSkewStatus.Skipped)
        {
            _metrics.TimeSkewSeconds.Record(Math.Abs(result.SkewSeconds));
        }

        // Record anomalies and log structured events
        switch (result.Status)
        {
            case TimeSkewStatus.Warning:
                _metrics.TimeSkewDetectedTotal.Add(1,
                    new("severity", "warning"),
                    new("action", "warned"));

                _logger.LogWarning(
                    "Time skew warning detected: IntegratedTime={IntegratedTime:O}, LocalTime={LocalTime:O}, SkewSeconds={SkewSeconds:F1}, Status={Status}",
                    result.IntegratedTime,
                    result.LocalTime,
                    result.SkewSeconds,
                    result.Status);
                break;

            case TimeSkewStatus.Rejected:
                _metrics.TimeSkewDetectedTotal.Add(1,
                    new("severity", "rejected"),
                    new("action", "rejected"));

                _logger.LogError(
                    "Time skew rejected: IntegratedTime={IntegratedTime:O}, LocalTime={LocalTime:O}, SkewSeconds={SkewSeconds:F1}, Status={Status}, Message={Message}",
                    result.IntegratedTime,
                    result.LocalTime,
                    result.SkewSeconds,
                    result.Status,
                    result.Message);
                break;

            case TimeSkewStatus.FutureTimestamp:
                _metrics.TimeSkewDetectedTotal.Add(1,
                    new("severity", "future"),
                    new("action", "rejected"));

                _logger.LogError(
                    "Future timestamp detected (potential tampering): IntegratedTime={IntegratedTime:O}, LocalTime={LocalTime:O}, SkewSeconds={SkewSeconds:F1}, Status={Status}",
                    result.IntegratedTime,
                    result.LocalTime,
                    result.SkewSeconds,
                    result.Status);
                break;

            case TimeSkewStatus.Ok:
                _logger.LogDebug(
                    "Time skew validation passed: IntegratedTime={IntegratedTime:O}, LocalTime={LocalTime:O}, SkewSeconds={SkewSeconds:F1}",
                    result.IntegratedTime,
                    result.LocalTime,
                    result.SkewSeconds);
                break;

            case TimeSkewStatus.Skipped:
                _logger.LogDebug("Time skew validation skipped: {Message}", result.Message);
                break;
        }

        return result;
    }
}
@@ -0,0 +1,35 @@
namespace StellaOps.Attestor.Core.Verification;

/// <summary>
/// Exception thrown when time skew validation fails and is configured to reject.
/// Per SPRINT_3000_0001_0003.
/// </summary>
public sealed class TimeSkewValidationException : Exception
{
    /// <summary>
    /// Gets the validation result that caused the exception.
    /// </summary>
    public TimeSkewValidationResult ValidationResult { get; }

    /// <summary>
    /// Gets the time skew in seconds.
    /// </summary>
    public double SkewSeconds => ValidationResult.SkewSeconds;

    /// <summary>
    /// Gets the validation status.
    /// </summary>
    public TimeSkewStatus Status => ValidationResult.Status;

    public TimeSkewValidationException(TimeSkewValidationResult result)
        : base(result.Message)
    {
        ValidationResult = result;
    }

    public TimeSkewValidationException(TimeSkewValidationResult result, Exception innerException)
        : base(result.Message, innerException)
    {
        ValidationResult = result;
    }
}
@@ -0,0 +1,69 @@
-- -----------------------------------------------------------------------------
-- Migration: 20251216_001_create_rekor_submission_queue.sql
-- Sprint: SPRINT_3000_0001_0002_rekor_retry_queue_metrics
-- Task: T1
-- Description: Create the Rekor submission queue table for durable retry
-- -----------------------------------------------------------------------------

-- Create attestor schema if not exists
CREATE SCHEMA IF NOT EXISTS attestor;

-- Create the queue table
CREATE TABLE IF NOT EXISTS attestor.rekor_submission_queue (
    id UUID PRIMARY KEY,
    tenant_id TEXT NOT NULL,
    bundle_sha256 TEXT NOT NULL,
    dsse_payload BYTEA NOT NULL,
    backend TEXT NOT NULL DEFAULT 'primary',

    -- Status lifecycle: pending -> submitting -> submitted | retrying -> dead_letter
    status TEXT NOT NULL DEFAULT 'pending'
        CHECK (status IN ('pending', 'submitting', 'retrying', 'submitted', 'dead_letter')),

    attempt_count INTEGER NOT NULL DEFAULT 0,
    max_attempts INTEGER NOT NULL DEFAULT 5,
    next_retry_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Populated on success
    rekor_uuid TEXT,
    rekor_index BIGINT,

    -- Populated on failure
    last_error TEXT
);

-- Comments
COMMENT ON TABLE attestor.rekor_submission_queue IS
    'Durable retry queue for Rekor transparency log submissions';
COMMENT ON COLUMN attestor.rekor_submission_queue.status IS
    'Submission lifecycle: pending -> submitting -> (submitted | retrying -> dead_letter)';
COMMENT ON COLUMN attestor.rekor_submission_queue.backend IS
    'Target Rekor backend (primary or mirror)';
COMMENT ON COLUMN attestor.rekor_submission_queue.dsse_payload IS
    'Serialized DSSE envelope to submit';

-- Index for dequeue operations (status + next_retry_at for SKIP LOCKED queries)
CREATE INDEX IF NOT EXISTS idx_rekor_queue_dequeue
    ON attestor.rekor_submission_queue (status, next_retry_at)
    WHERE status IN ('pending', 'retrying');

-- Index for tenant-scoped queries
CREATE INDEX IF NOT EXISTS idx_rekor_queue_tenant
    ON attestor.rekor_submission_queue (tenant_id);

-- Index for bundle lookup (deduplication check)
CREATE INDEX IF NOT EXISTS idx_rekor_queue_bundle
    ON attestor.rekor_submission_queue (tenant_id, bundle_sha256);

-- Index for dead letter management
CREATE INDEX IF NOT EXISTS idx_rekor_queue_dead_letter
    ON attestor.rekor_submission_queue (status, updated_at)
    WHERE status = 'dead_letter';

-- Index for cleanup of completed submissions
CREATE INDEX IF NOT EXISTS idx_rekor_queue_completed
    ON attestor.rekor_submission_queue (status, updated_at)
    WHERE status = 'submitted';
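For orientation, a hedged walk through the status lifecycle that the CHECK constraint and comments above encode; the UUID literal and error text are placeholders, not part of the migration.

-- pending -> submitting: a worker claims the item
UPDATE attestor.rekor_submission_queue
SET status = 'submitting', updated_at = NOW()
WHERE id = '00000000-0000-0000-0000-000000000001';

-- submitting -> retrying: the attempt failed, so schedule a backoff
UPDATE attestor.rekor_submission_queue
SET status = 'retrying', attempt_count = attempt_count + 1,
    next_retry_at = NOW() + INTERVAL '30 seconds', last_error = 'timeout'
WHERE id = '00000000-0000-0000-0000-000000000001';

-- retrying -> dead_letter: retries exhausted
UPDATE attestor.rekor_submission_queue
SET status = 'dead_letter', updated_at = NOW()
WHERE id = '00000000-0000-0000-0000-000000000001'
  AND attempt_count >= max_attempts;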
@@ -0,0 +1,524 @@
// -----------------------------------------------------------------------------
// PostgresRekorSubmissionQueue.cs
// Sprint: SPRINT_3000_0001_0002_rekor_retry_queue_metrics
// Task: T3
// Description: PostgreSQL implementation of the Rekor submission queue
// -----------------------------------------------------------------------------

using System.Text.Json;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using Npgsql;
using NpgsqlTypes;
using StellaOps.Attestor.Core.Observability;
using StellaOps.Attestor.Core.Options;
using StellaOps.Attestor.Core.Queue;

namespace StellaOps.Attestor.Infrastructure.Queue;

/// <summary>
/// PostgreSQL implementation of the Rekor submission queue.
/// Uses a dedicated table for queue persistence with optimistic locking.
/// </summary>
public sealed class PostgresRekorSubmissionQueue : IRekorSubmissionQueue
{
    private readonly NpgsqlDataSource _dataSource;
    private readonly RekorQueueOptions _options;
    private readonly AttestorMetrics _metrics;
    private readonly TimeProvider _timeProvider;
    private readonly ILogger<PostgresRekorSubmissionQueue> _logger;

    private const int DefaultCommandTimeoutSeconds = 30;

    public PostgresRekorSubmissionQueue(
        NpgsqlDataSource dataSource,
        IOptions<RekorQueueOptions> options,
        AttestorMetrics metrics,
        TimeProvider timeProvider,
        ILogger<PostgresRekorSubmissionQueue> logger)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
        _options = options?.Value ?? throw new ArgumentNullException(nameof(options));
        _metrics = metrics ?? throw new ArgumentNullException(nameof(metrics));
        _timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <inheritdoc />
    public async Task<Guid> EnqueueAsync(
        string tenantId,
        string bundleSha256,
        byte[] dssePayload,
        string backend,
        CancellationToken cancellationToken = default)
    {
        var now = _timeProvider.GetUtcNow();
        var id = Guid.NewGuid();

        const string sql = """
            INSERT INTO attestor.rekor_submission_queue (
                id, tenant_id, bundle_sha256, dsse_payload, backend,
                status, attempt_count, max_attempts, next_retry_at,
                created_at, updated_at
            )
            VALUES (
                @id, @tenant_id, @bundle_sha256, @dsse_payload, @backend,
                @status, 0, @max_attempts, @next_retry_at,
                @created_at, @updated_at
            )
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(cancellationToken);
        await using var command = new NpgsqlCommand(sql, connection)
        {
            CommandTimeout = DefaultCommandTimeoutSeconds
        };

        command.Parameters.AddWithValue("@id", id);
        command.Parameters.AddWithValue("@tenant_id", tenantId);
        command.Parameters.AddWithValue("@bundle_sha256", bundleSha256);
        command.Parameters.AddWithValue("@dsse_payload", dssePayload);
        command.Parameters.AddWithValue("@backend", backend);
        command.Parameters.AddWithValue("@status", RekorSubmissionStatus.Pending.ToString().ToLowerInvariant());
        command.Parameters.AddWithValue("@max_attempts", _options.MaxAttempts);
        command.Parameters.AddWithValue("@next_retry_at", now);
        command.Parameters.AddWithValue("@created_at", now);
        command.Parameters.AddWithValue("@updated_at", now);

        await command.ExecuteNonQueryAsync(cancellationToken);

        _metrics.RekorSubmissionStatusTotal.Add(1,
            new("status", "pending"),
            new("backend", backend));

        _logger.LogDebug(
            "Enqueued Rekor submission {Id} for bundle {BundleSha256} to {Backend}",
            id, bundleSha256, backend);

        return id;
    }
    /// <inheritdoc />
    public async Task<IReadOnlyList<RekorQueueItem>> DequeueAsync(
        int batchSize,
        CancellationToken cancellationToken = default)
    {
        var now = _timeProvider.GetUtcNow();

        // Use FOR UPDATE SKIP LOCKED for concurrent-safe dequeue
        const string sql = """
            UPDATE attestor.rekor_submission_queue
            SET status = 'submitting', updated_at = @now
            WHERE id IN (
                SELECT id FROM attestor.rekor_submission_queue
                WHERE status IN ('pending', 'retrying')
                  AND next_retry_at <= @now
                ORDER BY next_retry_at ASC
                LIMIT @batch_size
                FOR UPDATE SKIP LOCKED
            )
            RETURNING id, tenant_id, bundle_sha256, dsse_payload, backend,
                      status, attempt_count, max_attempts, next_retry_at,
                      created_at, updated_at, last_error
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(cancellationToken);
        await using var command = new NpgsqlCommand(sql, connection)
        {
            CommandTimeout = DefaultCommandTimeoutSeconds
        };

        command.Parameters.AddWithValue("@now", now);
        command.Parameters.AddWithValue("@batch_size", batchSize);

        var results = new List<RekorQueueItem>();

        await using var reader = await command.ExecuteReaderAsync(cancellationToken);
        while (await reader.ReadAsync(cancellationToken))
        {
            var queuedAt = reader.GetDateTime(reader.GetOrdinal("created_at"));
            var waitTime = (now - queuedAt).TotalSeconds;
            _metrics.RekorQueueWaitTime.Record(waitTime);

            results.Add(ReadQueueItem(reader));
        }

        return results;
    }

    /// <inheritdoc />
    public async Task MarkSubmittedAsync(
        Guid id,
        string rekorUuid,
        long? rekorIndex,
        CancellationToken cancellationToken = default)
    {
        var now = _timeProvider.GetUtcNow();

        const string sql = """
            UPDATE attestor.rekor_submission_queue
            SET status = 'submitted',
                rekor_uuid = @rekor_uuid,
                rekor_index = @rekor_index,
                updated_at = @updated_at,
                last_error = NULL
            WHERE id = @id
            RETURNING backend
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(cancellationToken);
        await using var command = new NpgsqlCommand(sql, connection)
        {
            CommandTimeout = DefaultCommandTimeoutSeconds
        };

        command.Parameters.AddWithValue("@id", id);
        command.Parameters.AddWithValue("@rekor_uuid", rekorUuid);
        command.Parameters.AddWithValue("@rekor_index", (object?)rekorIndex ?? DBNull.Value);
        command.Parameters.AddWithValue("@updated_at", now);

        var backend = await command.ExecuteScalarAsync(cancellationToken) as string ?? "unknown";

        _metrics.RekorSubmissionStatusTotal.Add(1,
            new("status", "submitted"),
            new("backend", backend));

        _logger.LogInformation(
            "Marked Rekor submission {Id} as submitted with UUID {RekorUuid}",
            id, rekorUuid);
    }
    /// <inheritdoc />
    public async Task MarkFailedAsync(
        Guid id,
        string errorMessage,
        CancellationToken cancellationToken = default)
    {
        var now = _timeProvider.GetUtcNow();

        // Fetch current state to determine next action
        const string fetchSql = """
            SELECT attempt_count, max_attempts, backend
            FROM attestor.rekor_submission_queue
            WHERE id = @id
            FOR UPDATE
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(cancellationToken);
        await using var transaction = await connection.BeginTransactionAsync(cancellationToken);

        int attemptCount;
        int maxAttempts;
        string backend;

        await using (var fetchCommand = new NpgsqlCommand(fetchSql, connection, transaction))
        {
            fetchCommand.Parameters.AddWithValue("@id", id);

            await using var reader = await fetchCommand.ExecuteReaderAsync(cancellationToken);
            if (!await reader.ReadAsync(cancellationToken))
            {
                _logger.LogWarning("Attempted to mark non-existent queue item {Id} as failed", id);
                return;
            }

            attemptCount = reader.GetInt32(0);
            maxAttempts = reader.GetInt32(1);
            backend = reader.GetString(2);
        }

        attemptCount++;
        var isDeadLetter = attemptCount >= maxAttempts;

        if (isDeadLetter)
        {
            const string deadLetterSql = """
                UPDATE attestor.rekor_submission_queue
                SET status = 'dead_letter',
                    attempt_count = @attempt_count,
                    last_error = @last_error,
                    updated_at = @updated_at
                WHERE id = @id
                """;

            await using var command = new NpgsqlCommand(deadLetterSql, connection, transaction);
            command.Parameters.AddWithValue("@id", id);
            command.Parameters.AddWithValue("@attempt_count", attemptCount);
            command.Parameters.AddWithValue("@last_error", errorMessage);
            command.Parameters.AddWithValue("@updated_at", now);

            await command.ExecuteNonQueryAsync(cancellationToken);

            _metrics.RekorSubmissionStatusTotal.Add(1,
                new("status", "dead_letter"),
                new("backend", backend));
            _metrics.RekorDeadLetterTotal.Add(1, new("backend", backend));

            _logger.LogError(
                "Moved Rekor submission {Id} to dead letter after {Attempts} attempts: {Error}",
                id, attemptCount, errorMessage);
        }
        else
        {
            var nextRetryAt = CalculateNextRetryTime(now, attemptCount);

            const string retrySql = """
                UPDATE attestor.rekor_submission_queue
                SET status = 'retrying',
                    attempt_count = @attempt_count,
                    next_retry_at = @next_retry_at,
                    last_error = @last_error,
                    updated_at = @updated_at
                WHERE id = @id
                """;

            await using var command = new NpgsqlCommand(retrySql, connection, transaction);
            command.Parameters.AddWithValue("@id", id);
            command.Parameters.AddWithValue("@attempt_count", attemptCount);
            command.Parameters.AddWithValue("@next_retry_at", nextRetryAt);
            command.Parameters.AddWithValue("@last_error", errorMessage);
            command.Parameters.AddWithValue("@updated_at", now);

            await command.ExecuteNonQueryAsync(cancellationToken);

            _metrics.RekorSubmissionStatusTotal.Add(1,
                new("status", "retrying"),
                new("backend", backend));
            _metrics.RekorRetryAttemptsTotal.Add(1,
                new("backend", backend),
                new("attempt", attemptCount.ToString()));

            _logger.LogWarning(
                "Marked Rekor submission {Id} for retry (attempt {Attempt}/{Max}): {Error}",
                id, attemptCount, maxAttempts, errorMessage);
        }

        await transaction.CommitAsync(cancellationToken);
    }
    /// <inheritdoc />
    public async Task<RekorQueueItem?> GetByIdAsync(
        Guid id,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, tenant_id, bundle_sha256, dsse_payload, backend,
                   status, attempt_count, max_attempts, next_retry_at,
                   created_at, updated_at, last_error, rekor_uuid, rekor_index
            FROM attestor.rekor_submission_queue
            WHERE id = @id
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(cancellationToken);
        await using var command = new NpgsqlCommand(sql, connection)
        {
            CommandTimeout = DefaultCommandTimeoutSeconds
        };

        command.Parameters.AddWithValue("@id", id);

        await using var reader = await command.ExecuteReaderAsync(cancellationToken);
        if (!await reader.ReadAsync(cancellationToken))
        {
            return null;
        }

        return ReadQueueItem(reader);
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<RekorQueueItem>> GetByBundleShaAsync(
        string tenantId,
        string bundleSha256,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, tenant_id, bundle_sha256, dsse_payload, backend,
                   status, attempt_count, max_attempts, next_retry_at,
                   created_at, updated_at, last_error, rekor_uuid, rekor_index
            FROM attestor.rekor_submission_queue
            WHERE tenant_id = @tenant_id AND bundle_sha256 = @bundle_sha256
            ORDER BY created_at DESC
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(cancellationToken);
        await using var command = new NpgsqlCommand(sql, connection)
        {
            CommandTimeout = DefaultCommandTimeoutSeconds
        };

        command.Parameters.AddWithValue("@tenant_id", tenantId);
        command.Parameters.AddWithValue("@bundle_sha256", bundleSha256);

        var results = new List<RekorQueueItem>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken);
        while (await reader.ReadAsync(cancellationToken))
        {
            results.Add(ReadQueueItem(reader));
        }

        return results;
    }
    /// <inheritdoc />
    public async Task<int> GetQueueDepthAsync(CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT COUNT(*)
            FROM attestor.rekor_submission_queue
            WHERE status IN ('pending', 'retrying', 'submitting')
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(cancellationToken);
        await using var command = new NpgsqlCommand(sql, connection)
        {
            CommandTimeout = DefaultCommandTimeoutSeconds
        };

        var result = await command.ExecuteScalarAsync(cancellationToken);
        return Convert.ToInt32(result);
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<RekorQueueItem>> GetDeadLetterItemsAsync(
        int limit,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, tenant_id, bundle_sha256, dsse_payload, backend,
                   status, attempt_count, max_attempts, next_retry_at,
                   created_at, updated_at, last_error, rekor_uuid, rekor_index
            FROM attestor.rekor_submission_queue
            WHERE status = 'dead_letter'
            ORDER BY updated_at DESC
            LIMIT @limit
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(cancellationToken);
        await using var command = new NpgsqlCommand(sql, connection)
        {
            CommandTimeout = DefaultCommandTimeoutSeconds
        };

        command.Parameters.AddWithValue("@limit", limit);

        var results = new List<RekorQueueItem>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken);
        while (await reader.ReadAsync(cancellationToken))
        {
            results.Add(ReadQueueItem(reader));
        }

        return results;
    }
    /// <inheritdoc />
    public async Task<bool> RequeueDeadLetterAsync(
        Guid id,
        CancellationToken cancellationToken = default)
    {
        var now = _timeProvider.GetUtcNow();

        const string sql = """
            UPDATE attestor.rekor_submission_queue
            SET status = 'pending',
                attempt_count = 0,
                next_retry_at = @now,
                last_error = NULL,
                updated_at = @now
            WHERE id = @id AND status = 'dead_letter'
            RETURNING backend
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(cancellationToken);
        await using var command = new NpgsqlCommand(sql, connection)
        {
            CommandTimeout = DefaultCommandTimeoutSeconds
        };

        command.Parameters.AddWithValue("@id", id);
        command.Parameters.AddWithValue("@now", now);

        var backend = await command.ExecuteScalarAsync(cancellationToken) as string;

        if (backend is not null)
        {
            _metrics.RekorSubmissionStatusTotal.Add(1,
                new("status", "pending"),
                new("backend", backend));

            _logger.LogInformation("Requeued dead letter item {Id} for retry", id);
            return true;
        }

        return false;
    }

    /// <inheritdoc />
    public async Task<int> PurgeSubmittedAsync(
        TimeSpan olderThan,
        CancellationToken cancellationToken = default)
    {
        var cutoff = _timeProvider.GetUtcNow().Add(-olderThan);

        const string sql = """
            DELETE FROM attestor.rekor_submission_queue
            WHERE status = 'submitted' AND updated_at < @cutoff
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(cancellationToken);
        await using var command = new NpgsqlCommand(sql, connection)
        {
            CommandTimeout = DefaultCommandTimeoutSeconds
        };

        command.Parameters.AddWithValue("@cutoff", cutoff);

        var deleted = await command.ExecuteNonQueryAsync(cancellationToken);

        if (deleted > 0)
        {
            _logger.LogInformation("Purged {Count} submitted queue items older than {Cutoff}", deleted, cutoff);
        }

        return deleted;
    }
    private DateTimeOffset CalculateNextRetryTime(DateTimeOffset now, int attemptCount)
    {
        // Exponential backoff: baseDelay * 2^(attemptCount - 1), capped at maxDelay
        var delay = TimeSpan.FromSeconds(
            Math.Min(
                _options.BaseRetryDelaySeconds * Math.Pow(2, attemptCount - 1),
                _options.MaxRetryDelaySeconds));

        return now.Add(delay);
    }
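A worked schedule for the backoff formula above, assuming BaseRetryDelaySeconds = 5 and MaxRetryDelaySeconds = 300; both are configuration values, so these numbers are illustrative only.

    // attempt 1 -> 5 * 2^0 =  5 s
    // attempt 2 -> 5 * 2^1 = 10 s
    // attempt 3 -> 5 * 2^2 = 20 s
    // attempt 4 -> 5 * 2^3 = 40 s
    // attempt 5 -> 5 * 2^4 = 80 s
    // ...capped at 300 s from attempt 7 onward (5 * 2^6 = 320 > 300).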

    private static RekorQueueItem ReadQueueItem(NpgsqlDataReader reader)
    {
        return new RekorQueueItem
        {
            Id = reader.GetGuid(reader.GetOrdinal("id")),
            TenantId = reader.GetString(reader.GetOrdinal("tenant_id")),
            BundleSha256 = reader.GetString(reader.GetOrdinal("bundle_sha256")),
            DssePayload = reader.GetFieldValue<byte[]>(reader.GetOrdinal("dsse_payload")),
            Backend = reader.GetString(reader.GetOrdinal("backend")),
            Status = Enum.Parse<RekorSubmissionStatus>(reader.GetString(reader.GetOrdinal("status")), ignoreCase: true),
            AttemptCount = reader.GetInt32(reader.GetOrdinal("attempt_count")),
            MaxAttempts = reader.GetInt32(reader.GetOrdinal("max_attempts")),
            NextRetryAt = reader.GetDateTime(reader.GetOrdinal("next_retry_at")),
            CreatedAt = reader.GetDateTime(reader.GetOrdinal("created_at")),
            UpdatedAt = reader.GetDateTime(reader.GetOrdinal("updated_at")),
            LastError = reader.IsDBNull(reader.GetOrdinal("last_error"))
                ? null
                : reader.GetString(reader.GetOrdinal("last_error")),
            RekorUuid = reader.IsDBNull(reader.GetOrdinal("rekor_uuid"))
                ? null
                : reader.GetString(reader.GetOrdinal("rekor_uuid")),
            RekorIndex = reader.IsDBNull(reader.GetOrdinal("rekor_index"))
                ? null
                : reader.GetInt64(reader.GetOrdinal("rekor_index"))
        };
    }
}
@@ -29,6 +29,7 @@ internal sealed class AttestorSubmissionService : IAttestorSubmissionService
    private readonly IAttestorArchiveStore _archiveStore;
    private readonly IAttestorAuditSink _auditSink;
    private readonly IAttestorVerificationCache _verificationCache;
    private readonly ITimeSkewValidator _timeSkewValidator;
    private readonly ILogger<AttestorSubmissionService> _logger;
    private readonly TimeProvider _timeProvider;
    private readonly AttestorOptions _options;
@@ -43,6 +44,7 @@ internal sealed class AttestorSubmissionService : IAttestorSubmissionService
        IAttestorArchiveStore archiveStore,
        IAttestorAuditSink auditSink,
        IAttestorVerificationCache verificationCache,
        ITimeSkewValidator timeSkewValidator,
        IOptions<AttestorOptions> options,
        ILogger<AttestorSubmissionService> logger,
        TimeProvider timeProvider,
@@ -56,6 +58,7 @@ internal sealed class AttestorSubmissionService : IAttestorSubmissionService
        _archiveStore = archiveStore;
        _auditSink = auditSink;
        _verificationCache = verificationCache;
        _timeSkewValidator = timeSkewValidator ?? throw new ArgumentNullException(nameof(timeSkewValidator));
        _logger = logger;
        _timeProvider = timeProvider;
        _options = options.Value;
@@ -139,6 +142,20 @@ internal sealed class AttestorSubmissionService : IAttestorSubmissionService
            throw new InvalidOperationException("No Rekor submission outcome was produced.");
        }

        // Validate time skew between Rekor integrated time and local time (SPRINT_3000_0001_0003 T5)
        var timeSkewResult = ValidateSubmissionTimeSkew(canonicalOutcome);
        if (!timeSkewResult.IsValid && _options.TimeSkew.FailOnReject)
        {
            _logger.LogError(
                "Submission rejected due to time skew: BundleSha={BundleSha}, IntegratedTime={IntegratedTime:O}, LocalTime={LocalTime:O}, SkewSeconds={SkewSeconds:F1}, Status={Status}",
                request.Meta.BundleSha256,
                timeSkewResult.IntegratedTime,
                timeSkewResult.LocalTime,
                timeSkewResult.SkewSeconds,
                timeSkewResult.Status);
            throw new TimeSkewValidationException(timeSkewResult);
        }

        var entry = CreateEntry(request, context, canonicalOutcome, mirrorOutcome);
        await _repository.SaveAsync(entry, cancellationToken).ConfigureAwait(false);
        await InvalidateVerificationCacheAsync(cacheSubject, cancellationToken).ConfigureAwait(false);
@@ -490,6 +507,23 @@ internal sealed class AttestorSubmissionService : IAttestorSubmissionService
        }
    }

    /// <summary>
    /// Validates time skew between Rekor integrated time and local time.
    /// Per SPRINT_3000_0001_0003 T5.
    /// </summary>
    private TimeSkewValidationResult ValidateSubmissionTimeSkew(SubmissionOutcome outcome)
    {
        if (outcome.Submission is null)
        {
            return TimeSkewValidationResult.Skipped("No submission response available");
        }

        var integratedTime = outcome.Submission.IntegratedTimeUtc;
        var localTime = _timeProvider.GetUtcNow();

        return _timeSkewValidator.Validate(integratedTime, localTime);
    }

    private async Task ArchiveAsync(
        AttestorEntry entry,
        byte[] canonicalBundle,

@@ -25,6 +25,7 @@ internal sealed class AttestorVerificationService : IAttestorVerificationService
    private readonly IRekorClient _rekorClient;
    private readonly ITransparencyWitnessClient _witnessClient;
    private readonly IAttestorVerificationEngine _engine;
    private readonly ITimeSkewValidator _timeSkewValidator;
    private readonly ILogger<AttestorVerificationService> _logger;
    private readonly AttestorOptions _options;
    private readonly AttestorMetrics _metrics;
@@ -37,6 +38,7 @@ internal sealed class AttestorVerificationService : IAttestorVerificationService
        IRekorClient rekorClient,
        ITransparencyWitnessClient witnessClient,
        IAttestorVerificationEngine engine,
        ITimeSkewValidator timeSkewValidator,
        IOptions<AttestorOptions> options,
        ILogger<AttestorVerificationService> logger,
        AttestorMetrics metrics,
@@ -48,6 +50,7 @@ internal sealed class AttestorVerificationService : IAttestorVerificationService
        _rekorClient = rekorClient ?? throw new ArgumentNullException(nameof(rekorClient));
        _witnessClient = witnessClient ?? throw new ArgumentNullException(nameof(witnessClient));
        _engine = engine ?? throw new ArgumentNullException(nameof(engine));
        _timeSkewValidator = timeSkewValidator ?? throw new ArgumentNullException(nameof(timeSkewValidator));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
        _metrics = metrics ?? throw new ArgumentNullException(nameof(metrics));
        _activitySource = activitySource ?? throw new ArgumentNullException(nameof(activitySource));
@@ -72,13 +75,38 @@ internal sealed class AttestorVerificationService : IAttestorVerificationService
        using var activity = _activitySource.StartVerification(subjectTag, issuerTag, policyId);

        var evaluationTime = _timeProvider.GetUtcNow();

        // Validate time skew between entry's integrated time and evaluation time (SPRINT_3000_0001_0003 T6)
        var timeSkewResult = ValidateVerificationTimeSkew(entry, evaluationTime);
        var additionalIssues = new List<string>();
        if (!timeSkewResult.IsValid)
        {
            var issue = $"time_skew_rejected: {timeSkewResult.Message}";
            _logger.LogWarning(
                "Verification time skew issue for entry {Uuid}: IntegratedTime={IntegratedTime:O}, EvaluationTime={EvaluationTime:O}, SkewSeconds={SkewSeconds:F1}, Status={Status}",
                entry.RekorUuid,
                timeSkewResult.IntegratedTime,
                evaluationTime,
                timeSkewResult.SkewSeconds,
                timeSkewResult.Status);

            if (_options.TimeSkew.FailOnReject)
            {
                additionalIssues.Add(issue);
            }
        }

        var report = await _engine.EvaluateAsync(entry, request.Bundle, evaluationTime, cancellationToken).ConfigureAwait(false);

        var result = report.Succeeded ? "ok" : "failed";
        // Merge any time skew issues with the report
        var allIssues = report.Issues.Concat(additionalIssues).ToArray();
        var succeeded = report.Succeeded && additionalIssues.Count == 0;

        var result = succeeded ? "ok" : "failed";
        activity?.SetTag(AttestorTelemetryTags.Result, result);
        if (!report.Succeeded)
        if (!succeeded)
        {
            activity?.SetStatus(ActivityStatusCode.Error, string.Join(",", report.Issues));
            activity?.SetStatus(ActivityStatusCode.Error, string.Join(",", allIssues));
        }

        _metrics.VerifyTotal.Add(
@@ -98,17 +126,27 @@ internal sealed class AttestorVerificationService : IAttestorVerificationService

        return new AttestorVerificationResult
        {
            Ok = report.Succeeded,
            Ok = succeeded,
            Uuid = entry.RekorUuid,
            Index = entry.Index,
            LogUrl = entry.Log.Url,
            Status = entry.Status,
            Issues = report.Issues,
            Issues = allIssues,
            CheckedAt = evaluationTime,
            Report = report
            Report = report with { Succeeded = succeeded, Issues = allIssues }
        };
    }

    /// <summary>
    /// Validates time skew between entry's integrated time and evaluation time.
    /// Per SPRINT_3000_0001_0003 T6.
    /// </summary>
    private TimeSkewValidationResult ValidateVerificationTimeSkew(AttestorEntry entry, DateTimeOffset evaluationTime)
    {
        var integratedTime = entry.Log.IntegratedTimeUtc;
        return _timeSkewValidator.Validate(integratedTime, evaluationTime);
    }

    public Task<AttestorEntry?> GetEntryAsync(string rekorUuid, bool refreshProof, CancellationToken cancellationToken = default)
    {
        if (string.IsNullOrWhiteSpace(rekorUuid))
@@ -0,0 +1,226 @@
// -----------------------------------------------------------------------------
// RekorRetryWorker.cs
// Sprint: SPRINT_3000_0001_0002_rekor_retry_queue_metrics
// Task: T7
// Description: Background service for processing the Rekor retry queue
// -----------------------------------------------------------------------------

using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Attestor.Core.Observability;
using StellaOps.Attestor.Core.Options;
using StellaOps.Attestor.Core.Queue;
using StellaOps.Attestor.Core.Rekor;
using StellaOps.Attestor.Core.Submission;

namespace StellaOps.Attestor.Infrastructure.Workers;

/// <summary>
/// Background service for processing the Rekor submission retry queue.
/// </summary>
public sealed class RekorRetryWorker : BackgroundService
{
    private readonly IRekorSubmissionQueue _queue;
    private readonly IRekorClient _rekorClient;
    private readonly RekorQueueOptions _options;
    private readonly AttestorOptions _attestorOptions;
    private readonly AttestorMetrics _metrics;
    private readonly TimeProvider _timeProvider;
    private readonly ILogger<RekorRetryWorker> _logger;

    public RekorRetryWorker(
        IRekorSubmissionQueue queue,
        IRekorClient rekorClient,
        IOptions<RekorQueueOptions> queueOptions,
        IOptions<AttestorOptions> attestorOptions,
        AttestorMetrics metrics,
        TimeProvider timeProvider,
        ILogger<RekorRetryWorker> logger)
    {
        _queue = queue ?? throw new ArgumentNullException(nameof(queue));
        _rekorClient = rekorClient ?? throw new ArgumentNullException(nameof(rekorClient));
        _options = queueOptions?.Value ?? throw new ArgumentNullException(nameof(queueOptions));
        _attestorOptions = attestorOptions?.Value ?? throw new ArgumentNullException(nameof(attestorOptions));
        _metrics = metrics ?? throw new ArgumentNullException(nameof(metrics));
        _timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));

        // Register queue depth callback for metrics
        _metrics.RegisterQueueDepthCallback(GetCurrentQueueDepth);
    }

    private int _lastKnownQueueDepth;

    private int GetCurrentQueueDepth() => _lastKnownQueueDepth;

    protected override async Task ExecuteAsync(CancellationToken stoppingToken)
    {
        if (!_options.Enabled)
        {
            _logger.LogInformation("Rekor retry queue is disabled");
            return;
        }

        _logger.LogInformation(
            "Rekor retry worker started with batch size {BatchSize}, poll interval {PollIntervalMs}ms",
            _options.BatchSize, _options.PollIntervalMs);

        while (!stoppingToken.IsCancellationRequested)
        {
            try
            {
                await ProcessBatchAsync(stoppingToken);
            }
            catch (OperationCanceledException) when (stoppingToken.IsCancellationRequested)
            {
                break;
            }
            catch (Exception ex)
            {
                _logger.LogError(ex, "Rekor retry worker error during batch processing");
                _metrics.ErrorTotal.Add(1, new("type", "rekor_retry_worker"));
            }

            try
            {
                await Task.Delay(_options.PollIntervalMs, stoppingToken);
            }
            catch (OperationCanceledException) when (stoppingToken.IsCancellationRequested)
            {
                break;
            }
        }

        _logger.LogInformation("Rekor retry worker stopped");
    }

    private async Task ProcessBatchAsync(CancellationToken stoppingToken)
    {
        // Update queue depth gauge
        var depth = await _queue.GetQueueDepthAsync(stoppingToken);
        _lastKnownQueueDepth = depth.TotalWaiting;

        if (depth.TotalWaiting == 0)
        {
            return;
        }

        _logger.LogDebug(
            "Queue depth: pending={Pending}, submitting={Submitting}, retrying={Retrying}, dead_letter={DeadLetter}",
            depth.Pending, depth.Submitting, depth.Retrying, depth.DeadLetter);

        // Process batch
        var items = await _queue.DequeueAsync(_options.BatchSize, stoppingToken);

        if (items.Count == 0)
        {
            return;
        }

        _logger.LogDebug("Processing {Count} items from Rekor queue", items.Count);

        foreach (var item in items)
        {
            if (stoppingToken.IsCancellationRequested)
                break;

            await ProcessItemAsync(item, stoppingToken);
        }

        // Purge old dead letter items periodically
        if (_options.DeadLetterRetentionDays > 0 && depth.DeadLetter > 0)
        {
            await _queue.PurgeDeadLetterAsync(_options.DeadLetterRetentionDays, stoppingToken);
        }
    }

    private async Task ProcessItemAsync(RekorQueueItem item, CancellationToken ct)
    {
        var attemptNumber = item.AttemptCount + 1;

        _logger.LogDebug(
            "Processing Rekor queue item {Id}, attempt {Attempt}/{MaxAttempts}, backend={Backend}",
            item.Id, attemptNumber, item.MaxAttempts, item.Backend);

        _metrics.RekorRetryAttemptsTotal.Add(1,
            new("backend", item.Backend),
            new("attempt", attemptNumber.ToString()));

        try
        {
            var backend = ResolveBackend(item.Backend);
            var request = BuildSubmissionRequest(item);

            var response = await _rekorClient.SubmitAsync(request, backend, ct);

            await _queue.MarkSubmittedAsync(
                item.Id,
                response.Uuid ?? string.Empty,
                response.Index,
                ct);

            _logger.LogInformation(
                "Rekor queue item {Id} successfully submitted: UUID={RekorUuid}, Index={LogIndex}",
                item.Id, response.Uuid, response.Index);
        }
        catch (Exception ex)
        {
            _logger.LogWarning(ex,
                "Rekor queue item {Id} submission failed on attempt {Attempt}: {Message}",
                item.Id, attemptNumber, ex.Message);

            if (attemptNumber >= item.MaxAttempts)
            {
                await _queue.MarkDeadLetterAsync(item.Id, ex.Message, ct);
                _logger.LogError(
                    "Rekor queue item {Id} exceeded max attempts ({MaxAttempts}), moved to dead letter",
                    item.Id, item.MaxAttempts);
            }
            else
            {
                await _queue.MarkRetryAsync(item.Id, ex.Message, ct);
            }
        }
    }

    private RekorBackend ResolveBackend(string backend)
    {
        return backend.ToLowerInvariant() switch
        {
            "primary" => new RekorBackend(
                _attestorOptions.Rekor.Primary.Url ?? throw new InvalidOperationException("Primary Rekor URL not configured"),
                "primary"),
            "mirror" => new RekorBackend(
                _attestorOptions.Rekor.Mirror.Url ?? throw new InvalidOperationException("Mirror Rekor URL not configured"),
                "mirror"),
            _ => throw new InvalidOperationException($"Unknown Rekor backend: {backend}")
        };
    }

    private static AttestorSubmissionRequest BuildSubmissionRequest(RekorQueueItem item)
    {
        // Reconstruct the submission request from the stored payload
        return new AttestorSubmissionRequest
        {
            TenantId = item.TenantId,
            BundleSha256 = item.BundleSha256,
            DssePayload = item.DssePayload
        };
    }
}

/// <summary>
/// Simple Rekor backend configuration.
/// </summary>
public sealed record RekorBackend(string Url, string Name);

/// <summary>
/// Submission request for the retry worker.
/// </summary>
public sealed class AttestorSubmissionRequest
|
||||
{
|
||||
public string TenantId { get; init; } = string.Empty;
|
||||
public string BundleSha256 { get; init; } = string.Empty;
|
||||
public byte[] DssePayload { get; init; } = Array.Empty<byte>();
|
||||
}
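// Hedged sketch (not part of the commit): the queue contract the worker above
// consumes, inferred from its call sites only. The real IRekorSubmissionQueue
// in StellaOps.Attestor.Core.Queue may declare additional members or different
// parameter names; the "Sketch" suffix marks this as an illustration.
public interface IRekorSubmissionQueueSketch
{
    // Counts per status; drives the attestor.rekor_queue_depth gauge.
    Task<QueueDepthSnapshot> GetQueueDepthAsync(CancellationToken ct);

    // Leases up to batchSize items that are due for (re)submission.
    Task<IReadOnlyList<RekorQueueItem>> DequeueAsync(int batchSize, CancellationToken ct);

    // Terminal success: records the Rekor UUID and log index.
    Task MarkSubmittedAsync(Guid id, string rekorUuid, long? logIndex, CancellationToken ct);

    // Transient failure: reschedules with backoff (see RekorQueueOptions).
    Task MarkRetryAsync(Guid id, string error, CancellationToken ct);

    // Permanent failure after MaxAttempts: parks the item for operator review.
    Task MarkDeadLetterAsync(Guid id, string error, CancellationToken ct);

    // Housekeeping: drops dead-letter items older than the retention window.
    Task PurgeDeadLetterAsync(int retentionDays, CancellationToken ct);
}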
@@ -0,0 +1,228 @@
// =============================================================================
// RekorRetryWorkerTests.cs
// Sprint: SPRINT_3000_0001_0002_rekor_retry_queue_metrics
// Task: T11
// =============================================================================

using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using Moq;
using StellaOps.Attestor.Core.Observability;
using StellaOps.Attestor.Core.Options;
using StellaOps.Attestor.Core.Queue;
using StellaOps.Attestor.Core.Rekor;
using StellaOps.Attestor.Infrastructure.Workers;
using Xunit;

namespace StellaOps.Attestor.Tests;

/// <summary>
/// Unit tests for RekorRetryWorker.
/// </summary>
[Trait("Category", "Unit")]
[Trait("Sprint", "3000_0001_0002")]
public sealed class RekorRetryWorkerTests
{
    private readonly Mock<IRekorSubmissionQueue> _queueMock;
    private readonly Mock<IRekorClient> _rekorClientMock;
    private readonly Mock<TimeProvider> _timeProviderMock;
    private readonly AttestorMetrics _metrics;
    private readonly RekorQueueOptions _queueOptions;
    private readonly AttestorOptions _attestorOptions;

    public RekorRetryWorkerTests()
    {
        _queueMock = new Mock<IRekorSubmissionQueue>();
        _rekorClientMock = new Mock<IRekorClient>();
        _timeProviderMock = new Mock<TimeProvider>();
        _metrics = new AttestorMetrics();

        _queueOptions = new RekorQueueOptions
        {
            Enabled = true,
            BatchSize = 5,
            PollIntervalMs = 100,
            MaxAttempts = 3
        };

        _attestorOptions = new AttestorOptions
        {
            Rekor = new AttestorOptions.RekorOptions
            {
                Primary = new AttestorOptions.RekorBackendOptions
                {
                    Url = "https://rekor.example.com"
                }
            }
        };

        _timeProviderMock
            .Setup(t => t.GetUtcNow())
            .Returns(DateTimeOffset.UtcNow);
    }

    [Fact(DisplayName = "Worker does not process when disabled")]
    public async Task ExecuteAsync_WhenDisabled_DoesNotProcess()
    {
        _queueOptions.Enabled = false;

        var worker = CreateWorker();
        using var cts = new CancellationTokenSource(TimeSpan.FromMilliseconds(200));

        await worker.StartAsync(cts.Token);
        await Task.Delay(50);
        await worker.StopAsync(cts.Token);

        _queueMock.Verify(q => q.DequeueAsync(It.IsAny<int>(), It.IsAny<CancellationToken>()), Times.Never);
    }

    [Fact(DisplayName = "Worker updates queue depth metrics")]
    public async Task ExecuteAsync_UpdatesQueueDepthMetrics()
    {
        _queueMock
            .Setup(q => q.GetQueueDepthAsync(It.IsAny<CancellationToken>()))
            .ReturnsAsync(new QueueDepthSnapshot(5, 2, 3, 1, DateTimeOffset.UtcNow));
        _queueMock
            .Setup(q => q.DequeueAsync(It.IsAny<int>(), It.IsAny<CancellationToken>()))
            .ReturnsAsync([]);

        var worker = CreateWorker();
        using var cts = new CancellationTokenSource(TimeSpan.FromMilliseconds(300));

        await worker.StartAsync(cts.Token);
        await Task.Delay(150);
        await worker.StopAsync(CancellationToken.None);

        _queueMock.Verify(q => q.GetQueueDepthAsync(It.IsAny<CancellationToken>()), Times.AtLeastOnce);
    }

    [Fact(DisplayName = "Worker processes items from queue")]
    public async Task ExecuteAsync_ProcessesItemsFromQueue()
    {
        var item = CreateTestItem();
        var items = new List<RekorQueueItem> { item };

        _queueMock
            .Setup(q => q.GetQueueDepthAsync(It.IsAny<CancellationToken>()))
            .ReturnsAsync(new QueueDepthSnapshot(1, 0, 0, 0, DateTimeOffset.UtcNow));
        _queueMock
            .SetupSequence(q => q.DequeueAsync(It.IsAny<int>(), It.IsAny<CancellationToken>()))
            .ReturnsAsync(items)
            .ReturnsAsync([]);
        _rekorClientMock
            .Setup(r => r.SubmitAsync(It.IsAny<AttestorSubmissionRequest>(), It.IsAny<RekorBackend>(), It.IsAny<CancellationToken>()))
            .ReturnsAsync(new RekorSubmissionResponse { Uuid = "test-uuid", Index = 12345 });

        var worker = CreateWorker();
        using var cts = new CancellationTokenSource(TimeSpan.FromMilliseconds(500));

        await worker.StartAsync(cts.Token);
        await Task.Delay(200);
        await worker.StopAsync(CancellationToken.None);

        _queueMock.Verify(
            q => q.MarkSubmittedAsync(item.Id, "test-uuid", 12345, It.IsAny<CancellationToken>()),
            Times.Once);
    }

    [Fact(DisplayName = "Worker marks item for retry on failure")]
    public async Task ExecuteAsync_MarksRetryOnFailure()
    {
        var item = CreateTestItem();
        var items = new List<RekorQueueItem> { item };

        _queueMock
            .Setup(q => q.GetQueueDepthAsync(It.IsAny<CancellationToken>()))
            .ReturnsAsync(new QueueDepthSnapshot(1, 0, 0, 0, DateTimeOffset.UtcNow));
        _queueMock
            .SetupSequence(q => q.DequeueAsync(It.IsAny<int>(), It.IsAny<CancellationToken>()))
            .ReturnsAsync(items)
            .ReturnsAsync([]);
        _rekorClientMock
            .Setup(r => r.SubmitAsync(It.IsAny<AttestorSubmissionRequest>(), It.IsAny<RekorBackend>(), It.IsAny<CancellationToken>()))
            .ThrowsAsync(new Exception("Connection failed"));

        var worker = CreateWorker();
        using var cts = new CancellationTokenSource(TimeSpan.FromMilliseconds(500));

        await worker.StartAsync(cts.Token);
        await Task.Delay(200);
        await worker.StopAsync(CancellationToken.None);

        _queueMock.Verify(
            q => q.MarkRetryAsync(item.Id, It.IsAny<string>(), It.IsAny<CancellationToken>()),
            Times.Once);
    }

    [Fact(DisplayName = "Worker marks dead letter after max attempts")]
    public async Task ExecuteAsync_MarksDeadLetterAfterMaxAttempts()
    {
        var item = CreateTestItem(attemptCount: 2); // Next attempt will be 3 (max)
        var items = new List<RekorQueueItem> { item };

        _queueMock
            .Setup(q => q.GetQueueDepthAsync(It.IsAny<CancellationToken>()))
            .ReturnsAsync(new QueueDepthSnapshot(0, 0, 1, 0, DateTimeOffset.UtcNow));
        _queueMock
            .SetupSequence(q => q.DequeueAsync(It.IsAny<int>(), It.IsAny<CancellationToken>()))
            .ReturnsAsync(items)
            .ReturnsAsync([]);
        _rekorClientMock
            .Setup(r => r.SubmitAsync(It.IsAny<AttestorSubmissionRequest>(), It.IsAny<RekorBackend>(), It.IsAny<CancellationToken>()))
            .ThrowsAsync(new Exception("Connection failed"));

        var worker = CreateWorker();
        using var cts = new CancellationTokenSource(TimeSpan.FromMilliseconds(500));

        await worker.StartAsync(cts.Token);
        await Task.Delay(200);
        await worker.StopAsync(CancellationToken.None);

        _queueMock.Verify(
            q => q.MarkDeadLetterAsync(item.Id, It.IsAny<string>(), It.IsAny<CancellationToken>()),
            Times.Once);
    }

    private RekorRetryWorker CreateWorker()
    {
        return new RekorRetryWorker(
            _queueMock.Object,
            _rekorClientMock.Object,
            Options.Create(_queueOptions),
            Options.Create(_attestorOptions),
            _metrics,
            _timeProviderMock.Object,
            NullLogger<RekorRetryWorker>.Instance);
    }

    private static RekorQueueItem CreateTestItem(int attemptCount = 0)
    {
        var now = DateTimeOffset.UtcNow;
        return new RekorQueueItem(
            Guid.NewGuid(),
            "test-tenant",
            "sha256:abc123",
            new byte[] { 1, 2, 3 },
            "primary",
            RekorSubmissionStatus.Submitting,
            attemptCount,
            3,
            null,
            null,
            now,
            null,
            null,
            now,
            now);
    }
}

/// <summary>
/// Stub response for tests.
/// </summary>
public sealed class RekorSubmissionResponse
{
    public string? Uuid { get; init; }
    public long? Index { get; init; }
}
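// Hedged sketch (not from the commit): typical host wiring for the worker
// exercised above, assuming standard options binding. The configuration
// section name "attestor:rekorQueue" is an illustrative assumption; the real
// WebService bootstrap may bind options and register services differently.
var builder = Host.CreateApplicationBuilder(args);
builder.Services.Configure<RekorQueueOptions>(builder.Configuration.GetSection("attestor:rekorQueue"));
builder.Services.AddSingleton(TimeProvider.System);
builder.Services.AddHostedService<RekorRetryWorker>();
await builder.Build().RunAsync();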
@@ -0,0 +1,161 @@
// =============================================================================
// RekorSubmissionQueueTests.cs
// Sprint: SPRINT_3000_0001_0002_rekor_retry_queue_metrics
// Task: T13
// =============================================================================

using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using Moq;
using StellaOps.Attestor.Core.Observability;
using StellaOps.Attestor.Core.Options;
using StellaOps.Attestor.Core.Queue;
using StellaOps.Attestor.Infrastructure.Queue;
using Xunit;

namespace StellaOps.Attestor.Tests;

/// <summary>
/// Unit tests for the queue option, snapshot, and item types backing
/// PostgresRekorSubmissionQueue.
/// Note: Full integration tests require PostgreSQL via Testcontainers (Task T14).
/// </summary>
[Trait("Category", "Unit")]
[Trait("Sprint", "3000_0001_0002")]
public sealed class RekorQueueOptionsTests
{
    [Theory(DisplayName = "CalculateRetryDelay applies exponential backoff")]
    [InlineData(0, 1000)]   // First retry: initial delay
    [InlineData(1, 2000)]   // Second retry: 1000 * 2
    [InlineData(2, 4000)]   // Third retry: 1000 * 2^2
    [InlineData(3, 8000)]   // Fourth retry: 1000 * 2^3
    [InlineData(4, 16000)]  // Fifth retry: 1000 * 2^4
    [InlineData(10, 60000)] // Many retries: capped at MaxDelayMs
    public void CalculateRetryDelay_AppliesExponentialBackoff(int attemptCount, int expectedMs)
    {
        var options = new RekorQueueOptions
        {
            InitialDelayMs = 1000,
            MaxDelayMs = 60000,
            BackoffMultiplier = 2.0
        };

        var delay = options.CalculateRetryDelay(attemptCount);

        delay.TotalMilliseconds.Should().Be(expectedMs);
    }

    [Fact(DisplayName = "Default options have sensible values")]
    public void DefaultOptions_HaveSensibleValues()
    {
        var options = new RekorQueueOptions();

        options.Enabled.Should().BeTrue();
        options.MaxAttempts.Should().Be(5);
        options.InitialDelayMs.Should().Be(1000);
        options.MaxDelayMs.Should().Be(60000);
        options.BackoffMultiplier.Should().Be(2.0);
        options.BatchSize.Should().Be(10);
        options.PollIntervalMs.Should().Be(5000);
        options.DeadLetterRetentionDays.Should().Be(30);
    }
}
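// Hedged sketch (not part of the commit): the backoff rule the theory above
// pins down is delay = InitialDelayMs * BackoffMultiplier^attemptCount, capped
// at MaxDelayMs. The real RekorQueueOptions.CalculateRetryDelay may add jitter
// or overflow guards; this helper only mirrors the tested contract.
internal static class RetryDelayMathSketch
{
    public static TimeSpan CalculateRetryDelay(int initialDelayMs, int maxDelayMs, double backoffMultiplier, int attemptCount)
    {
        // Exponential growth, then clamp: 1000, 2000, 4000, ... up to maxDelayMs.
        var delayMs = initialDelayMs * Math.Pow(backoffMultiplier, attemptCount);
        return TimeSpan.FromMilliseconds(Math.Min(delayMs, maxDelayMs));
    }
}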
/// <summary>
/// Tests for QueueDepthSnapshot.
/// </summary>
[Trait("Category", "Unit")]
[Trait("Sprint", "3000_0001_0002")]
public sealed class QueueDepthSnapshotTests
{
    [Fact(DisplayName = "TotalWaiting sums pending and retrying")]
    public void TotalWaiting_SumsPendingAndRetrying()
    {
        var snapshot = new QueueDepthSnapshot(10, 5, 3, 2, DateTimeOffset.UtcNow);

        snapshot.TotalWaiting.Should().Be(13);
    }

    [Fact(DisplayName = "TotalInQueue sums all non-submitted statuses")]
    public void TotalInQueue_SumsAllNonSubmitted()
    {
        var snapshot = new QueueDepthSnapshot(10, 5, 3, 2, DateTimeOffset.UtcNow);

        snapshot.TotalInQueue.Should().Be(20);
    }

    [Fact(DisplayName = "Empty creates zero snapshot")]
    public void Empty_CreatesZeroSnapshot()
    {
        var now = DateTimeOffset.UtcNow;
        var snapshot = QueueDepthSnapshot.Empty(now);

        snapshot.Pending.Should().Be(0);
        snapshot.Submitting.Should().Be(0);
        snapshot.Retrying.Should().Be(0);
        snapshot.DeadLetter.Should().Be(0);
        snapshot.MeasuredAt.Should().Be(now);
    }
}
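// Hedged sketch (not part of the commit): a QueueDepthSnapshot shape consistent
// with the assertions above: positional (Pending, Submitting, Retrying,
// DeadLetter, MeasuredAt), TotalWaiting = Pending + Retrying, and TotalInQueue
// summing every non-submitted status. The real record lives in
// StellaOps.Attestor.Core.Queue and may differ in detail.
public sealed record QueueDepthSnapshotSketch(
    int Pending,
    int Submitting,
    int Retrying,
    int DeadLetter,
    DateTimeOffset MeasuredAt)
{
    // Items the worker still has to pick up: (10, 5, 3, 2) => 13.
    public int TotalWaiting => Pending + Retrying;

    // Everything not yet durably submitted: (10, 5, 3, 2) => 20.
    public int TotalInQueue => Pending + Submitting + Retrying + DeadLetter;

    public static QueueDepthSnapshotSketch Empty(DateTimeOffset measuredAt) =>
        new(0, 0, 0, 0, measuredAt);
}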
/// <summary>
/// Tests for RekorQueueItem.
/// </summary>
[Trait("Category", "Unit")]
[Trait("Sprint", "3000_0001_0002")]
public sealed class RekorQueueItemTests
{
    [Fact(DisplayName = "RekorQueueItem properties are accessible")]
    public void RekorQueueItem_PropertiesAccessible()
    {
        var id = Guid.NewGuid();
        var tenantId = "test-tenant";
        var bundleSha256 = "sha256:abc123";
        var dssePayload = new byte[] { 1, 2, 3 };
        var backend = "primary";
        var now = DateTimeOffset.UtcNow;

        var item = new RekorQueueItem
        {
            Id = id,
            TenantId = tenantId,
            BundleSha256 = bundleSha256,
            DssePayload = dssePayload,
            Backend = backend,
            Status = RekorSubmissionStatus.Pending,
            AttemptCount = 0,
            MaxAttempts = 5,
            NextRetryAt = now,
            CreatedAt = now,
            UpdatedAt = now
        };

        item.Id.Should().Be(id);
        item.TenantId.Should().Be(tenantId);
        item.BundleSha256.Should().Be(bundleSha256);
        item.DssePayload.Should().BeEquivalentTo(dssePayload);
        item.Backend.Should().Be(backend);
        item.Status.Should().Be(RekorSubmissionStatus.Pending);
        item.AttemptCount.Should().Be(0);
        item.MaxAttempts.Should().Be(5);
    }
}
/// <summary>
/// Tests for RekorSubmissionStatus enum.
/// </summary>
[Trait("Category", "Unit")]
[Trait("Sprint", "3000_0001_0002")]
public sealed class RekorSubmissionStatusTests
{
    [Theory(DisplayName = "Status enum has expected values")]
    [InlineData(RekorSubmissionStatus.Pending, 0)]
    [InlineData(RekorSubmissionStatus.Submitting, 1)]
    [InlineData(RekorSubmissionStatus.Submitted, 2)]
    [InlineData(RekorSubmissionStatus.Retrying, 3)]
    [InlineData(RekorSubmissionStatus.DeadLetter, 4)]
    public void Status_HasExpectedValues(RekorSubmissionStatus status, int expectedValue)
    {
        ((int)status).Should().Be(expectedValue);
    }
}
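// Hedged sketch (not part of the commit): the status lifecycle the theory above
// pins to stable integer values, so persisted rows keep meaning across releases.
// The canonical enum lives in StellaOps.Attestor.Core.Queue.
public enum RekorSubmissionStatusSketch
{
    Pending = 0,    // queued, never attempted
    Submitting = 1, // leased by the retry worker
    Submitted = 2,  // accepted by Rekor (terminal success)
    Retrying = 3,   // failed, waiting for its backoff window
    DeadLetter = 4  // exhausted MaxAttempts (terminal failure)
}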
@@ -16,7 +16,7 @@ Own the StellaOps Authority host service: ASP.NET minimal API, OpenIddict flows,
## Key Directories
- `src/Authority/StellaOps.Authority/` — host app
- `src/Authority/StellaOps.Authority/StellaOps.Authority.Tests/` — integration/unit tests
- `src/Authority/StellaOps.Authority/StellaOps.Authority.Storage.Mongo/` — data access helpers
- `src/Authority/__Libraries/StellaOps.Authority.Storage.Postgres/` — data access helpers
- `src/Authority/StellaOps.Authority/StellaOps.Authority.Plugin.Standard/` — default identity provider plugin

## Required Reading

@@ -1,7 +1,7 @@
# Plugin Team Charter

## Mission
Own the Mongo-backed Standard identity provider plug-in and shared Authority plug-in contracts. Deliver secure credential flows, configuration validation, and documentation that help other identity providers integrate cleanly.
Own the PostgreSQL-backed Standard identity provider plug-in and shared Authority plug-in contracts. Deliver secure credential flows, configuration validation, and documentation that help other identity providers integrate cleanly.

## Responsibilities
- Maintain `StellaOps.Authority.Plugin.Standard` and related test projects.
@@ -11,7 +11,7 @@ Own the Mongo-backed Standard identity provider plu

## Key Paths
- `StandardPluginOptions` & registrar wiring
- `StandardUserCredentialStore` (Mongo persistence + lockouts)
- `StandardUserCredentialStore` (PostgreSQL persistence + lockouts)
- `docs/dev/31_AUTHORITY_PLUGIN_DEVELOPER_GUIDE.md`

## Coordination

@@ -1,13 +1,13 @@
# Concelier · AGENTS Charter (Sprint 0112–0114)

## Module Scope & Working Directory
- Working directory: `src/Concelier/**` (WebService, __Libraries, Storage.Mongo, analyzers, tests, seed-data). Do not edit other modules unless explicitly referenced by this sprint.
- Working directory: `src/Concelier/**` (WebService, __Libraries, Storage.Postgres, analyzers, tests, seed-data). Do not edit other modules unless explicitly referenced by this sprint.
- Mission: Link-Not-Merge (LNM) ingestion of advisory observations, correlation into linksets, evidence/export APIs, and deterministic telemetry.

## Roles
- **Backend engineer (ASP.NET Core / Mongo):** connectors, ingestion guards, linkset builder, WebService APIs, storage migrations.
- **Backend engineer (ASP.NET Core / PostgreSQL):** connectors, ingestion guards, linkset builder, WebService APIs, storage migrations.
- **Observability/Platform engineer:** OTEL metrics/logs, health/readiness, distributed locks, scheduler safety.
- **QA automation:** Mongo2Go + WebApplicationFactory tests for handlers/jobs; determinism and guardrail regression harnesses.
- **QA automation:** Testcontainers + WebApplicationFactory tests for handlers/jobs; determinism and guardrail regression harnesses.
- **Docs/Schema steward:** keep LNM schemas, API references, and inline provenance docs aligned with behavior.

## Required Reading (must be treated as read before setting DOING)
@@ -34,16 +34,16 @@

## Coding & Observability Standards
- Target **.NET 10**; prefer latest C# preview features already enabled in repo.
- Mongo driver ≥ 3.x; canonical BSON/JSON mapping lives in Storage.Mongo.
- Npgsql driver for PostgreSQL; canonical JSON mapping in Storage.Postgres.
- Metrics: use `Meter` names under `StellaOps.Concelier.*`; tag `tenant`, `source`, `result` as applicable. Counters/histograms must be documented.
- Logging: structured, no PII; include `tenant`, `source`, `job`, `correlationId` when available.
- Scheduler/locks: one lock per connector/export job; no duplicate runs; honor `CancellationToken`.

## Testing Rules
- Write/maintain tests alongside code:
  - Web/API: `StellaOps.Concelier.WebService.Tests` with WebApplicationFactory + Mongo2Go fixtures.
  - Web/API: `StellaOps.Concelier.WebService.Tests` with WebApplicationFactory + Testcontainers fixtures.
  - Core/Linkset/Guards: `StellaOps.Concelier.Core.Tests`.
  - Storage: `StellaOps.Concelier.Storage.Mongo.Tests` (use in-memory or Mongo2Go; determinism on ordering/hashes).
  - Storage: `StellaOps.Concelier.Storage.Postgres.Tests` (use in-memory or Testcontainers; determinism on ordering/hashes).
  - Observability/analyzers: tests in `__Analyzers` or respective test projects.
- Tests must assert determinism (stable ordering/hashes), tenant guards, AOC invariants, and no derived fields in ingestion.
- Prefer seeded fixtures under `seed-data/` for repeatability; avoid network in tests.
@@ -11,13 +11,13 @@ Bootstrap the ACSC (Australian Cyber Security Centre) advisories connector so th

## Participants
- `Source.Common` for HTTP client creation, fetch service, and DTO persistence helpers.
- `Storage.Mongo` for raw/document/DTO/advisory storage plus cursor management.
- `Storage.Postgres` for raw/document/DTO/advisory storage plus cursor management.
- `Concelier.Models` for canonical advisory structures and provenance utilities.
- `Concelier.Testing` for integration harnesses and snapshot helpers.

## Interfaces & Contracts
- Job kinds should follow the pattern `acsc:fetch`, `acsc:parse`, `acsc:map`.
- Documents persisted to Mongo must include ETag/Last-Modified metadata when the source exposes it.
- Documents persisted to PostgreSQL must include ETag/Last-Modified metadata when the source exposes it.
- Canonical advisories must emit aliases (ACSC ID + CVE IDs) and references (official bulletin + vendor notices).

## In/Out of scope

@@ -11,7 +11,7 @@ Build the CCCS (Canadian Centre for Cyber Security) advisories connector so Conc

## Participants
- `Source.Common` (HTTP clients, fetch service, DTO storage helpers).
- `Storage.Mongo` (raw/document/DTO/advisory stores + source state).
- `Storage.Postgres` (raw/document/DTO/advisory stores + source state).
- `Concelier.Models` (canonical advisory data structures).
- `Concelier.Testing` (integration fixtures and snapshot utilities).

@@ -11,7 +11,7 @@ Deliver a connector for Germany’s CERT-Bund advisories so Concelier can ingest

## Participants
- `Source.Common` (HTTP/fetch utilities, DTO storage).
- `Storage.Mongo` (raw/document/DTO/advisory stores, source state).
- `Storage.Postgres` (raw/document/DTO/advisory stores, source state).
- `Concelier.Models` (canonical data model).
- `Concelier.Testing` (integration harness, snapshot utilities).

@@ -11,7 +11,7 @@ Implement the CERT/CC (Carnegie Mellon CERT Coordination Center) advisory connec

## Participants
- `Source.Common` (HTTP/fetch utilities, DTO storage).
- `Storage.Mongo` (raw/document/DTO/advisory stores and state).
- `Storage.Postgres` (raw/document/DTO/advisory stores and state).
- `Concelier.Models` (canonical structures).
- `Concelier.Testing` (integration tests and snapshots).

@@ -7,7 +7,7 @@ ANSSI CERT-FR advisories connector (avis/alertes) providing national enrichment:
- Maintain watermarks and de-duplication by content hash; idempotent processing.
## Participants
- Source.Common (HTTP, HTML parsing helpers, validators).
- Storage.Mongo (document, dto, advisory, reference, source_state).
- Storage.Postgres (document, dto, advisory, reference, source_state).
- Models (canonical).
- Core/WebService (jobs: source:certfr:fetch|parse|map).
- Merge engine (later) to enrich only.
@@ -23,7 +23,7 @@ Out: OVAL or package-level authority.
- Logs: feed URL(s), item ids/urls, extraction durations; no PII; allowlist hostnames.
## Tests
- Author and review coverage in `../StellaOps.Concelier.Connector.CertFr.Tests`.
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.

## Required Reading

@@ -7,7 +7,7 @@ CERT-In national CERT connector; enrichment advisories for India; maps CVE lists
- Persist raw docs and maintain source_state cursor; idempotent mapping.
## Participants
- Source.Common (HTTP, HTML parsing, normalization, validators).
- Storage.Mongo (document, dto, advisory, alias, reference, source_state).
- Storage.Postgres (document, dto, advisory, alias, reference, source_state).
- Models (canonical).
- Core/WebService (jobs: source:certin:fetch|parse|map).
- Merge engine treats CERT-In as enrichment (no override of PSIRT or OVAL without concrete ranges).
@@ -24,7 +24,7 @@ Out: package range authority; scraping behind auth walls.
- Logs: advisory codes, CVE counts per advisory, timing; allowlist host; redact personal data if present.
## Tests
- Author and review coverage in `../StellaOps.Concelier.Connector.CertIn.Tests`.
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.

## Required Reading

@@ -10,7 +10,7 @@ Shared connector toolkit. Provides HTTP clients, retry/backoff, conditional GET
- HTML sanitization, URL normalization, and PDF-to-text extraction utilities for feeds that require cleanup before validation.
## Participants
- Source.* connectors (NVD, Red Hat, JVN, PSIRTs, CERTs, ICS).
- Storage.Mongo (document/dto repositories using shared shapes).
- Storage.Postgres (document/dto repositories using shared shapes).
- Core (jobs schedule/trigger for connectors).
- QA (canned HTTP server harness, schema fixtures).
## Interfaces & contracts
@@ -27,7 +27,7 @@ Out: connector-specific schemas/mapping rules, merge precedence.
- Distributed tracing hooks and per-connector counters should be wired centrally for consistent observability.
## Tests
- Author and review coverage in `../StellaOps.Concelier.Connector.Common.Tests`.
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.

## Required Reading

@@ -11,7 +11,7 @@ Create a dedicated CVE connector when we need raw CVE stream ingestion outside o

## Participants
- `Source.Common` (HTTP/fetch utilities, DTO storage).
- `Storage.Mongo` (raw/document/DTO/advisory stores & source state).
- `Storage.Postgres` (raw/document/DTO/advisory stores & source state).
- `Concelier.Models` (canonical data model).
- `Concelier.Testing` (integration fixtures, snapshot helpers).

@@ -7,7 +7,7 @@ Red Hat distro connector (Security Data API and OVAL) providing authoritative OS
- Map to canonical advisories with affected Type=rpm/cpe, fixedBy NEVRA, RHSA aliasing; persist provenance indicating oval/package.nevra.
## Participants
- Source.Common (HTTP, throttling, validators).
- Storage.Mongo (document, dto, advisory, alias, affected, reference, source_state).
- Storage.Postgres (document, dto, advisory, alias, affected, reference, source_state).
- Models (canonical Affected with NEVRA).
- Core/WebService (jobs: source:redhat:fetch|parse|map) already registered.
- Merge engine to enforce distro precedence (OVAL or PSIRT greater than NVD).
@@ -23,7 +23,7 @@ Out: building RPM artifacts; cross-distro reconciliation beyond Red Hat.
- Logs: cursor bounds, advisory ids, NEVRA counts; allowlist Red Hat endpoints.
## Tests
- Author and review coverage in `../StellaOps.Concelier.Connector.Distro.RedHat.Tests`.
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.

## Required Reading

@@ -11,7 +11,7 @@ Implement a connector for GitHub Security Advisories (GHSA) when we need to inge

## Participants
- `Source.Common` (HTTP clients, fetch service, DTO storage).
- `Storage.Mongo` (raw/document/DTO/advisory stores and source state).
- `Storage.Postgres` (raw/document/DTO/advisory stores and source state).
- `Concelier.Models` (canonical advisory types).
- `Concelier.Testing` (integration harness, snapshot helpers).

@@ -11,7 +11,7 @@ Implement the CISA ICS advisory connector to ingest US CISA Industrial Control S

## Participants
- `Source.Common` (HTTP/fetch utilities, DTO storage).
- `Storage.Mongo` (raw/document/DTO/advisory stores + source state).
- `Storage.Postgres` (raw/document/DTO/advisory stores + source state).
- `Concelier.Models` (canonical advisory structures).
- `Concelier.Testing` (integration fixtures and snapshots).

@@ -7,7 +7,7 @@ Kaspersky ICS-CERT connector; authoritative for OT/ICS vendor advisories covered
- Persist raw docs with sha256; maintain source_state; idempotent mapping.
## Participants
- Source.Common (HTTP, HTML helpers, validators).
- Storage.Mongo (document, dto, advisory, alias, affected, reference, source_state).
- Storage.Postgres (document, dto, advisory, alias, affected, reference, source_state).
- Models (canonical; affected.platform="ics-vendor", tags for device families).
- Core/WebService (jobs: source:ics-kaspersky:fetch|parse|map).
- Merge engine respects ICS vendor authority for OT impact.
@@ -24,7 +24,7 @@ Out: firmware downloads; reverse-engineering artifacts.
- Logs: slugs, vendor/product counts, timing; allowlist host.
## Tests
- Author and review coverage in `../StellaOps.Concelier.Connector.Ics.Kaspersky.Tests`.
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.

## Required Reading

@@ -7,7 +7,7 @@ Japan JVN/MyJVN connector; national CERT enrichment with strong identifiers (JVN
- Persist raw docs with sha256 and headers; manage source_state cursor; idempotent parse/map.
## Participants
- Source.Common (HTTP, pagination, XML or XSD validators, retries/backoff).
- Storage.Mongo (document, dto, advisory, alias, affected (when concrete), reference, jp_flags, source_state).
- Storage.Postgres (document, dto, advisory, alias, affected (when concrete), reference, jp_flags, source_state).
- Models (canonical Advisory/Affected/Provenance).
- Core/WebService (jobs: source:jvn:fetch|parse|map).
- Merge engine applies enrichment precedence (does not override distro or PSIRT ranges unless JVN gives explicit package truth).
@@ -25,7 +25,7 @@ Out: overriding distro or PSIRT ranges without concrete evidence; scraping unoff
- Logs: window bounds, jvndb ids processed, vendor_status distribution; redact API keys.
## Tests
- Author and review coverage in `../StellaOps.Concelier.Connector.Jvn.Tests`.
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.

## Required Reading
@@ -11,7 +11,7 @@ Implement the CISA Known Exploited Vulnerabilities (KEV) catalogue connector to

## Participants
- `Source.Common` (HTTP client, fetch service, DTO storage).
- `Storage.Mongo` (raw/document/DTO/advisory stores, source state).
- `Storage.Postgres` (raw/document/DTO/advisory stores, source state).
- `Concelier.Models` (advisory + range primitive types).
- `Concelier.Testing` (integration fixtures & snapshots).

@@ -11,7 +11,7 @@ Deliver the KISA (Korea Internet & Security Agency) advisory connector to ingest

## Participants
- `Source.Common` (HTTP/fetch utilities, DTO storage).
- `Storage.Mongo` (raw/document/DTO/advisory stores, source state).
- `Storage.Postgres` (raw/document/DTO/advisory stores, source state).
- `Concelier.Models` (canonical data structures).
- `Concelier.Testing` (integration fixtures and snapshots).

@@ -22,7 +22,7 @@ Out: authoritative distro package ranges; vendor patch states.
- Metrics: SourceDiagnostics publishes `concelier.source.http.*` counters/histograms tagged `concelier.source=nvd`; dashboards slice on the tag to track page counts, schema failures, map throughput, and window advancement. Structured logs include window bounds and etag hits.
## Tests
- Author and review coverage in `../StellaOps.Concelier.Connector.Nvd.Tests`.
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.

## Required Reading

@@ -8,7 +8,7 @@ Connector for OSV.dev across ecosystems; authoritative SemVer/PURL ranges for OS
- Maintain per-ecosystem cursors and deduplicate runs via payload hashes to keep reruns idempotent.
## Participants
- Source.Common supplies HTTP clients, pagination helpers, and validators.
- Storage.Mongo persists documents, DTOs, advisories, and source_state cursors.
- Storage.Postgres persists documents, DTOs, advisories, and source_state cursors.
- Merge engine resolves OSV vs GHSA consistency; prefers SemVer data for libraries; distro OVAL still overrides OS packages.
- Exporters serialize per-ecosystem ranges untouched.
## Interfaces & contracts
@@ -22,7 +22,7 @@ Out: vendor PSIRT and distro OVAL specifics.
- Metrics: SourceDiagnostics exposes the shared `concelier.source.http.*` counters/histograms tagged `concelier.source=osv`; observability dashboards slice on the tag to monitor item volume, schema failures, range counts, and ecosystem coverage. Logs include ecosystem and cursor values.
## Tests
- Author and review coverage in `../StellaOps.Concelier.Connector.Osv.Tests`.
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.

## Required Reading

@@ -11,7 +11,7 @@ Implement the Russian BDU (Vulnerability Database) connector to ingest advisorie

## Participants
- `Source.Common` (HTTP/fetch utilities, DTO storage).
- `Storage.Mongo` (raw/document/DTO/advisory stores + source state).
- `Storage.Postgres` (raw/document/DTO/advisory stores + source state).
- `Concelier.Models` (canonical data structures).
- `Concelier.Testing` (integration harness, snapshot utilities).

@@ -11,7 +11,7 @@ Implement the Russian NKTsKI (formerly NKCKI) advisories connector to ingest NKT

## Participants
- `Source.Common` (HTTP/fetch utilities, DTO storage).
- `Storage.Mongo` (raw/document/DTO/advisory stores, source state).
- `Storage.Postgres` (raw/document/DTO/advisory stores, source state).
- `Concelier.Models` (canonical data structures).
- `Concelier.Testing` (integration fixtures, snapshots).

@@ -7,7 +7,7 @@ Adobe PSIRT connector ingesting APSB/APA advisories; authoritative for Adobe pro
- Persist raw docs with sha256 and headers; maintain source_state cursors; ensure idempotent mapping.
## Participants
- Source.Common (HTTP, HTML parsing, retries/backoff, validators).
- Storage.Mongo (document, dto, advisory, alias, affected, reference, psirt_flags, source_state).
- Storage.Postgres (document, dto, advisory, alias, affected, reference, psirt_flags, source_state).
- Models (canonical Advisory/Affected/Provenance).
- Core/WebService (jobs: source:adobe:fetch|parse|map).
- Merge engine (later) to apply PSIRT override policy for Adobe packages.
@@ -24,7 +24,7 @@ Out: signing, package artifact downloads, non-Adobe product truth.
- Logs: advisory ids, product counts, extraction timings; hosts allowlisted; no secret logging.
## Tests
- Author and review coverage in `../StellaOps.Concelier.Connector.Vndr.Adobe.Tests`.
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.

## Required Reading

@@ -11,7 +11,7 @@ Implement the Apple security advisories connector to ingest Apple HT/HT2 securit

## Participants
- `Source.Common` (HTTP/fetch utilities, DTO storage).
- `Storage.Mongo` (raw/document/DTO/advisory stores, source state).
- `Storage.Postgres` (raw/document/DTO/advisory stores, source state).
- `Concelier.Models` (canonical structures + range primitives).
- `Concelier.Testing` (integration fixtures/snapshots).

@@ -7,7 +7,7 @@ Chromium/Chrome vendor feed connector parsing Stable Channel Update posts; autho
- Persist raw docs and maintain source_state cursor; idempotent mapping.
## Participants
- Source.Common (HTTP, HTML helpers, validators).
- Storage.Mongo (document, dto, advisory, alias, affected, reference, psirt_flags, source_state).
- Storage.Postgres (document, dto, advisory, alias, affected, reference, psirt_flags, source_state).
- Models (canonical; affected ranges by product/version).
- Core/WebService (jobs: source:chromium:fetch|parse|map).
- Merge engine (later) to respect vendor PSIRT precedence for Chrome.
@@ -24,7 +24,7 @@ Out: OS distro packaging semantics; bug bounty details beyond references.
- Logs: post slugs, version extracted, platform coverage, timing; allowlist blog host.
## Tests
- Author and review coverage in `../StellaOps.Concelier.Connector.Vndr.Chromium.Tests`.
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.

## Required Reading

@@ -10,7 +10,7 @@ Implement the Cisco security advisory connector to ingest Cisco PSIRT bulletins
- Provide deterministic fixtures and regression tests.

## Participants
- `Source.Common`, `Storage.Mongo`, `Concelier.Models`, `Concelier.Testing`.
- `Source.Common`, `Storage.Postgres`, `Concelier.Models`, `Concelier.Testing`.

## Interfaces & Contracts
- Job kinds: `cisco:fetch`, `cisco:parse`, `cisco:map`.

@@ -10,7 +10,7 @@ Implement the Microsoft Security Response Center (MSRC) connector to ingest Micr
- Provide deterministic fixtures and regression tests.

## Participants
- `Source.Common`, `Storage.Mongo`, `Concelier.Models`, `Concelier.Testing`.
- `Source.Common`, `Storage.Postgres`, `Concelier.Models`, `Concelier.Testing`.

## Interfaces & Contracts
- Job kinds: `msrc:fetch`, `msrc:parse`, `msrc:map`.

@@ -7,7 +7,7 @@ Oracle PSIRT connector for Critical Patch Updates (CPU) and Security Alerts; aut
- Persist raw documents; maintain source_state across cycles; idempotent mapping.
## Participants
- Source.Common (HTTP, validators).
- Storage.Mongo (document, dto, advisory, alias, affected, reference, psirt_flags, source_state).
- Storage.Postgres (document, dto, advisory, alias, affected, reference, psirt_flags, source_state).
- Models (canonical; affected ranges for vendor products).
- Core/WebService (jobs: source:oracle:fetch|parse|map).
- Merge engine (later) to prefer PSIRT ranges over NVD for Oracle products.
@@ -23,7 +23,7 @@ Out: signing or patch artifact downloads.
- Logs: cycle tags, advisory ids, extraction timings; redact nothing sensitive.
## Tests
- Author and review coverage in `../StellaOps.Concelier.Connector.Vndr.Oracle.Tests`.
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.

## Required Reading

@@ -7,7 +7,7 @@ VMware/Broadcom PSIRT connector ingesting VMSA advisories; authoritative for VMw
- Persist raw docs with sha256; manage source_state; idempotent mapping.
## Participants
- Source.Common (HTTP, cookies/session handling if needed, validators).
- Storage.Mongo (document, dto, advisory, alias, affected, reference, psirt_flags, source_state).
- Storage.Postgres (document, dto, advisory, alias, affected, reference, psirt_flags, source_state).
- Models (canonical).
- Core/WebService (jobs: source:vmware:fetch|parse|map).
- Merge engine (later) to prefer PSIRT ranges for VMware products.
@@ -24,7 +24,7 @@ Out: customer portal authentication flows beyond public advisories; downloading
- Logs: vmsa ids, product counts, extraction timings; handle portal rate limits politely.
## Tests
- Author and review coverage in `../StellaOps.Concelier.Connector.Vndr.Vmware.Tests`.
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.

## Required Reading

@@ -10,7 +10,7 @@ Job orchestration and lifecycle. Registers job definitions, schedules execution,
- Surfacing: enumerate definitions, last run, recent runs, active runs to WebService endpoints.
## Participants
- WebService exposes REST endpoints for definitions, runs, active, and trigger.
- Storage.Mongo persists job definitions metadata, run documents, and leases (locks collection).
- Storage.Postgres persists job definitions metadata, run documents, and leases (locks table).
- Source connectors and Exporters implement IJob and are registered into the scheduler via DI and Plugin routines.
- Models/Merge/Export are invoked indirectly through jobs.
- Plugin host runtime loads dependency injection routines that register job definitions.
@@ -27,7 +27,7 @@ Out: business logic of connectors/exporters, HTTP handlers (owned by WebService)
- Honor CancellationToken early and often.
## Tests
- Author and review coverage in `../StellaOps.Concelier.Core.Tests`.
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.
@@ -8,7 +8,7 @@ Optional exporter producing vuln-list-shaped JSON tree for downstream trivy-db b
|
||||
- Packaging: output directory under exports/json/<timestamp> with reproducible naming; optionally symlink latest.
|
||||
- Optional auxiliary index files (for example severity summaries) may be generated when explicitly requested, but must remain deterministic and avoid altering canonical payloads.
|
||||
## Participants
|
||||
- Storage.Mongo.AdvisoryStore as input; ExportState repository for cursors/digests.
|
||||
- Storage.Postgres.AdvisoryStore as input; ExportState repository for cursors/digests.
|
||||
- Core scheduler runs JsonExportJob; Plugin DI wires JsonExporter + job.
|
||||
- TrivyDb exporter may consume the rendered tree in v0 (builder path) if configured.
|
||||
## Interfaces & contracts
|
||||
@@ -23,7 +23,7 @@ Out: ORAS push and Trivy DB BoltDB writing (owned by Trivy exporter).
|
||||
- Logs: target path, record counts, digest; no sensitive data.
|
||||
## Tests
|
||||
- Author and review coverage in `../StellaOps.Concelier.Exporter.Json.Tests`.
|
||||
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
|
||||
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
|
||||
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.
|
||||
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ Exporter producing a Trivy-compatible database artifact for self-hosting or offl
|
||||
- DI: TrivyExporter + Jobs.TrivyExportJob registered by TrivyExporterDependencyInjectionRoutine.
|
||||
- Export_state recording: capture digests, counts, start/end timestamps for idempotent reruns and incremental packaging.
|
||||
## Participants
|
||||
- Storage.Mongo.AdvisoryStore as input.
|
||||
- Storage.Postgres.AdvisoryStore as input.
|
||||
- Core scheduler runs export job; WebService/Plugins trigger it.
|
||||
- JSON exporter (optional precursor) if choosing the builder path.
|
||||
## Interfaces & contracts
|
||||
@@ -24,7 +24,7 @@ Out: signing (external pipeline), scanner behavior.
|
||||
- Logs: export path, repo/tag, digest; redact credentials; backoff on push errors.
|
||||
## Tests
|
||||
- Author and review coverage in `../StellaOps.Concelier.Exporter.TrivyDb.Tests`.
|
||||
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
|
||||
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
|
||||
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.
|
||||
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ Deterministic merge and reconciliation engine; builds identity graph via aliases
|
||||
- Merge algorithm: stable ordering, pure functions, idempotence; compute beforeHash/afterHash over canonical form; write merge_event.
|
||||
- Conflict reporting: counters and logs for identity conflicts, reference merges, range overrides.
|
||||
## Participants
|
||||
- Storage.Mongo (reads raw mapped advisories, writes merged docs plus merge_event).
|
||||
- Storage.Postgres (reads raw mapped advisories, writes merged docs plus merge_event).
|
||||
- Models (canonical types).
|
||||
- Exporters (consume merged canonical).
|
||||
- Core/WebService (jobs: merge:run, maybe per-kind).
|
||||
@@ -29,7 +29,7 @@ Out: fetching/parsing, exporter packaging, signing.
|
||||
- Logs: decisions (why replaced), keys involved, hashes; avoid dumping large blobs; redact secrets (none expected).
|
||||
## Tests
|
||||
- Author and review coverage in `../StellaOps.Concelier.Merge.Tests`.
|
||||
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
|
||||
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
|
||||
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.
|
||||
|
||||
## Required Reading
|
||||
|
||||
@@ -25,7 +25,7 @@ Out: fetching/parsing external schemas, storage, HTTP.
|
||||
- Emit model version identifiers in logs when canonical structures change; keep adapters for older readers until deprecated.
|
||||
## Tests
|
||||
- Author and review coverage in `../StellaOps.Concelier.Models.Tests`.
|
||||
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
|
||||
- Shared fixtures (e.g., `PostgresIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
|
||||
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.
|
||||
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
- **Adapter engineer:** Trivy DB/Java DB, mirror delta, OCI distribution, encryption/KMS wrapping, pack-run integration.
|
||||
- **Worker/Concurrency engineer:** job leasing, retries/idempotency, retention pruning, scheduler hooks.
|
||||
- **Crypto/Provenance steward:** signing, DSSE/in-toto, age/AES-GCM envelope handling, provenance schemas.
|
||||
- **QA automation:** WebApplicationFactory + Mongo/Mongo2Go fixtures, adapter regression harnesses, determinism checks, offline-kit verification scripts.
|
||||
- **QA automation:** WebApplicationFactory + PostgreSQL/Testcontainers fixtures, adapter regression harnesses, determinism checks, offline-kit verification scripts.
|
||||
- **Docs steward:** keep `docs/modules/export-center/*.md`, sprint Decisions & Risks, and CLI docs aligned with behavior.
|
||||
|
||||
## Required Reading (treat as read before setting DOING)
|
||||
@@ -34,14 +34,14 @@
|
||||
- Cross-module changes (Authority/Orchestrator/CLI) only when sprint explicitly covers them; log in Decisions & Risks.
|
||||
|
||||
## Coding & Observability Standards
|
||||
- Target **.NET 10** with curated `local-nugets/`; MongoDB driver ≥ 3.x; ORAS/OCI client where applicable.
|
||||
- Target **.NET 10** with curated `local-nugets/`; Npgsql driver for PostgreSQL; ORAS/OCI client where applicable.
|
||||
- Metrics under `StellaOps.ExportCenter.*`; tag `tenant`, `profile`, `adapter`, `result`; document new counters/histograms.
|
||||
- Logs structured, no PII; include `runId`, `tenant`, `profile`, `adapter`, `correlationId`; map phases (`plan`, `resolve`, `adapter`, `manifest`, `sign`, `distribute`).
|
||||
- SSE/telemetry events must be deterministic and replay-safe; backpressure aware.
|
||||
- Signing/encryption: default cosign-style KMS signing; age/AES-GCM envelopes with key wrapping; store references in provenance only (no raw keys).
|
||||
|
||||
## Testing Rules
|
||||
- API/worker tests: `StellaOps.ExportCenter.Tests` with WebApplicationFactory + in-memory/Mongo2Go fixtures; assert tenant guards, RBAC, quotas, SSE timelines.
|
||||
- API/worker tests: `StellaOps.ExportCenter.Tests` with WebApplicationFactory + in-memory/Testcontainers fixtures; assert tenant guards, RBAC, quotas, SSE timelines.
|
||||
- Adapter regression: deterministic fixtures for Trivy DB/Java DB, mirror delta/base comparison, OCI manifest generation; no network.
|
||||
- Risk bundle pipeline: tests in `StellaOps.ExportCenter.RiskBundles.Tests` (or add) covering bundle layout, DSSE signatures, checksum publication.
|
||||
- Determinism checks: stable ordering/hashes in manifests, provenance, and distribution descriptors; retry paths must not duplicate outputs.
|
||||
|
||||
@@ -23,7 +23,7 @@ Operate the append-only Findings Ledger and projection pipeline powering the Vul
|
||||
|
||||
## Tooling
|
||||
- .NET 10 preview minimal API/background services.
|
||||
- PostgreSQL (preferred) or Mongo for ledger + projection tables with JSONB support.
|
||||
- PostgreSQL for ledger + projection tables with JSONB support.
|
||||
- Hashing utilities (SHA-256, Merkle tree), KMS integration for evidence bundle signing metadata.
|
||||
|
||||
## Definition of Done
|
||||
|
||||
@@ -7,10 +7,10 @@ This note captures the bootstrap work for Notifications Studio phase 1. The refr
|
||||
## Highlights
|
||||
|
||||
- **Rule evaluation:** Implemented `DefaultNotifyRuleEvaluator` (implements `StellaOps.Notify.Engine.INotifyRuleEvaluator`) reusing canonical `NotifyRule`/`NotifyEvent` models to gate on event kind, severity, labels, digests, verdicts, and VEX settings.
|
||||
- **Storage:** Switched to `StellaOps.Notify.Storage.Mongo` (rules, deliveries, locks, migrations) with startup reflection host to apply migrations automatically.
|
||||
- **Storage:** Switched to `StellaOps.Notify.Storage.Postgres` (rules, deliveries, locks, migrations) with startup reflection host to apply migrations automatically.
- **Idempotency:** Deterministic keys derived from tenant/rule/action/event digest & GUID and persisted via `INotifyLockRepository` TTL locks; delivery metadata now records channel/template hints for later status transitions (see the key-derivation sketch after this list).
- **Queue:** Replaced the temporary in-memory queue with the shared `StellaOps.Notify.Queue` transport (Redis/NATS capable). Health checks surface queue reachability.
- **Worker/WebService:** Worker hosts `NotifierEventWorker` + `NotifierEventProcessor`, wiring queue -> rule evaluation -> PostgreSQL delivery ledger. WebService now bootstraps storage + health endpoint ready for future CRUD.
- **Tests:** Updated unit coverage for rule evaluation + processor idempotency using in-memory repositories & queue stubs.
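A minimal sketch of the deterministic key derivation described in the idempotency highlight above; the exact field order, separator, and hash encoding used by the worker are assumptions here:

using System.Security.Cryptography;
using System.Text;

public static class NotifyIdempotencySketch
{
    // Stable key from tenant/rule/action plus the event digest and GUID,
    // so a replayed event maps to the same TTL lock and is delivered once.
    public static string DeriveKey(string tenantId, string ruleId, string actionId, string eventDigest, Guid eventId)
    {
        var input = string.Join(':', tenantId, ruleId, actionId, eventDigest, eventId.ToString("N"));
        var hash = SHA256.HashData(Encoding.UTF8.GetBytes(input));
        return Convert.ToHexString(hash).ToLowerInvariant();
    }
}
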
@@ -20,4 +20,4 @@ This note captures the bootstrap work for Notifications Studio phase 1. The refr
- Validate queue transport settings against ORCH-SVC-38-101 once the orchestrator contract finalizes (configure Redis/NATS URIs + credentials).
- Flesh out delivery ledger schema (status transitions, attempts) and connector integrations when channels/templates land (NOTIFY-SVC-38-002..004).
- Wire telemetry counters/histograms and structured logging to feed Observability tasks.
- Expand tests with integration harness using Testcontainers + real queue transports after connectors exist; revisit delivery idempotency assertions once `INotifyLockRepository` semantics are wired to production stores.

@@ -5,14 +5,14 @@ Stand up and operate the Policy Registry service defined in Epic 4. We own works

## Scope
- Service source under `src/Policy/StellaOps.Policy.Registry` (REST API, workers, storage schemas).
- PostgreSQL models, migrations, and object storage bindings for policy workspaces, versions, reviews, promotions, simulations.
- Integration with Policy Engine, Scheduler, Authority, Web Gateway, Telemetry.
- Attestation signing pipeline, evidence bundle management, and retention policies.

## Principles
1. **Immutability first** – Published versions are append-only; derive new versions rather than mutate.
2. **Determinism** – Compilation/simulation requests must produce reproducible artifacts and checksums.
3. **Tenant isolation** – Enforce scoping at every storage layer (PostgreSQL schemas/RLS, buckets, queues).
4. **AOC alignment** – Registry stores metadata; it never mutates raw SBOM/advisory/VEX facts.
5. **Auditable** – Every transition emits structured events with actor, scope, digest, attestation IDs.

@@ -23,7 +23,7 @@ Stand up and operate the Policy Registry service defined in Epic 4. We own works

## Tooling
- .NET 10 preview (minimal API + background workers).
- PostgreSQL with per-tenant schemas/RLS, S3-compatible object storage for bundles.
- Background queue (Scheduler job queue or NATS) for batch simulations.
- Signing via Authority-issued OIDC tokens + cosign integration.


@@ -0,0 +1,332 @@
using System.Collections.Immutable;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Routing;
using StellaOps.Scanner.SmartDiff.Detection;
using StellaOps.Scanner.Storage.Postgres;
using StellaOps.Scanner.WebService.Security;

namespace StellaOps.Scanner.WebService.Endpoints;

/// <summary>
/// Smart-Diff API endpoints for material risk changes and VEX candidates.
/// Per Sprint 3500.3 - Smart-Diff Detection Rules.
/// </summary>
internal static class SmartDiffEndpoints
{
    public static void MapSmartDiffEndpoints(this RouteGroupBuilder apiGroup, string prefix = "/smart-diff")
    {
        ArgumentNullException.ThrowIfNull(apiGroup);

        var group = apiGroup.MapGroup(prefix);

        // Material risk changes endpoints
        group.MapGet("/scans/{scanId}/changes", HandleGetScanChangesAsync)
            .WithName("scanner.smartdiff.scan-changes")
            .WithTags("SmartDiff")
            .Produces<MaterialChangesResponse>(StatusCodes.Status200OK)
            .Produces(StatusCodes.Status404NotFound)
            .RequireAuthorization(ScannerPolicies.ScansRead);

        // VEX candidate endpoints
        group.MapGet("/images/{digest}/candidates", HandleGetCandidatesAsync)
            .WithName("scanner.smartdiff.candidates")
            .WithTags("SmartDiff")
            .Produces<VexCandidatesResponse>(StatusCodes.Status200OK)
            .Produces(StatusCodes.Status404NotFound)
            .RequireAuthorization(ScannerPolicies.ScansRead);

        group.MapGet("/candidates/{candidateId}", HandleGetCandidateAsync)
            .WithName("scanner.smartdiff.candidate")
            .WithTags("SmartDiff")
            .Produces<VexCandidateResponse>(StatusCodes.Status200OK)
            .Produces(StatusCodes.Status404NotFound)
            .RequireAuthorization(ScannerPolicies.ScansRead);

        group.MapPost("/candidates/{candidateId}/review", HandleReviewCandidateAsync)
            .WithName("scanner.smartdiff.review")
            .WithTags("SmartDiff")
            .Produces<ReviewResponse>(StatusCodes.Status200OK)
            .Produces(StatusCodes.Status400BadRequest)
            .Produces(StatusCodes.Status404NotFound)
            .RequireAuthorization(ScannerPolicies.ScansWrite);
    }

    /// <summary>
    /// GET /smart-diff/scans/{scanId}/changes - Get material risk changes for a scan.
    /// </summary>
    private static async Task<IResult> HandleGetScanChangesAsync(
        string scanId,
        IMaterialRiskChangeRepository repository,
        double? minPriority = null,
        CancellationToken ct = default)
    {
        var changes = await repository.GetChangesForScanAsync(scanId, ct);

        if (minPriority.HasValue)
        {
            changes = changes.Where(c => c.PriorityScore >= minPriority.Value).ToList();
        }

        var response = new MaterialChangesResponse
        {
            ScanId = scanId,
            TotalChanges = changes.Count,
            Changes = changes.Select(ToChangeDto).ToImmutableArray()
        };

        return Results.Ok(response);
    }

    /// <summary>
    /// GET /smart-diff/images/{digest}/candidates - Get VEX candidates for an image.
    /// </summary>
    private static async Task<IResult> HandleGetCandidatesAsync(
        string digest,
        IVexCandidateStore store,
        double? minConfidence = null,
        bool? pendingOnly = null,
        CancellationToken ct = default)
    {
        var normalizedDigest = NormalizeDigest(digest);
        var candidates = await store.GetCandidatesAsync(normalizedDigest, ct);

        if (minConfidence.HasValue)
        {
            candidates = candidates.Where(c => c.Confidence >= minConfidence.Value).ToList();
        }

        if (pendingOnly == true)
        {
            candidates = candidates.Where(c => c.RequiresReview).ToList();
        }

        var response = new VexCandidatesResponse
        {
            ImageDigest = normalizedDigest,
            TotalCandidates = candidates.Count,
            Candidates = candidates.Select(ToCandidateDto).ToImmutableArray()
        };

        return Results.Ok(response);
    }

    /// <summary>
    /// GET /smart-diff/candidates/{candidateId} - Get a specific VEX candidate.
    /// </summary>
    private static async Task<IResult> HandleGetCandidateAsync(
        string candidateId,
        IVexCandidateStore store,
        CancellationToken ct = default)
    {
        var candidate = await store.GetCandidateAsync(candidateId, ct);

        if (candidate is null)
        {
            return Results.NotFound(new { error = "Candidate not found", candidateId });
        }

        var response = new VexCandidateResponse
        {
            Candidate = ToCandidateDto(candidate)
        };

        return Results.Ok(response);
    }

    /// <summary>
    /// POST /smart-diff/candidates/{candidateId}/review - Review a VEX candidate.
    /// </summary>
    private static async Task<IResult> HandleReviewCandidateAsync(
        string candidateId,
        ReviewRequest request,
        IVexCandidateStore store,
        HttpContext httpContext,
        CancellationToken ct = default)
    {
        if (!Enum.TryParse<VexReviewAction>(request.Action, true, out var action))
        {
            return Results.BadRequest(new { error = "Invalid action", validActions = new[] { "accept", "reject", "defer" } });
        }

        var reviewer = httpContext.User.Identity?.Name ?? "anonymous";
        var review = new VexCandidateReview(
            Action: action,
            Reviewer: reviewer,
            ReviewedAt: DateTimeOffset.UtcNow,
            Comment: request.Comment);

        var success = await store.ReviewCandidateAsync(candidateId, review, ct);

        if (!success)
        {
            return Results.NotFound(new { error = "Candidate not found", candidateId });
        }

        return Results.Ok(new ReviewResponse
        {
            CandidateId = candidateId,
            Action = action.ToString().ToLowerInvariant(),
            ReviewedBy = reviewer,
            ReviewedAt = review.ReviewedAt
        });
    }

    #region Helper Methods

    private static string NormalizeDigest(string digest)
    {
        // Handle URL-encoded colons
        return digest.Replace("%3A", ":", StringComparison.OrdinalIgnoreCase);
    }

    private static MaterialChangeDto ToChangeDto(MaterialRiskChangeResult change)
    {
        return new MaterialChangeDto
        {
            VulnId = change.FindingKey.VulnId,
            Purl = change.FindingKey.Purl,
            HasMaterialChange = change.HasMaterialChange,
            PriorityScore = change.PriorityScore,
            PreviousStateHash = change.PreviousStateHash,
            CurrentStateHash = change.CurrentStateHash,
            Changes = change.Changes.Select(c => new DetectedChangeDto
            {
                Rule = c.Rule.ToString(),
                ChangeType = c.ChangeType.ToString(),
                Direction = c.Direction.ToString().ToLowerInvariant(),
                Reason = c.Reason,
                PreviousValue = c.PreviousValue,
                CurrentValue = c.CurrentValue,
                Weight = c.Weight,
                SubType = c.SubType
            }).ToImmutableArray()
        };
    }

    private static VexCandidateDto ToCandidateDto(VexCandidate candidate)
    {
        return new VexCandidateDto
        {
            CandidateId = candidate.CandidateId,
            VulnId = candidate.FindingKey.VulnId,
            Purl = candidate.FindingKey.Purl,
            ImageDigest = candidate.ImageDigest,
            SuggestedStatus = candidate.SuggestedStatus.ToString().ToLowerInvariant(),
            Justification = MapJustificationToString(candidate.Justification),
            Rationale = candidate.Rationale,
            EvidenceLinks = candidate.EvidenceLinks.Select(e => new EvidenceLinkDto
            {
                Type = e.Type,
                Uri = e.Uri,
                Digest = e.Digest
            }).ToImmutableArray(),
            Confidence = candidate.Confidence,
            GeneratedAt = candidate.GeneratedAt,
            ExpiresAt = candidate.ExpiresAt,
            RequiresReview = candidate.RequiresReview
        };
    }

    private static string MapJustificationToString(VexJustification justification)
    {
        return justification switch
        {
            VexJustification.ComponentNotPresent => "component_not_present",
            VexJustification.VulnerableCodeNotPresent => "vulnerable_code_not_present",
            VexJustification.VulnerableCodeNotInExecutePath => "vulnerable_code_not_in_execute_path",
            VexJustification.VulnerableCodeCannotBeControlledByAdversary => "vulnerable_code_cannot_be_controlled_by_adversary",
            VexJustification.InlineMitigationsAlreadyExist => "inline_mitigations_already_exist",
            _ => "unknown"
        };
    }

    #endregion
}

#region DTOs

/// <summary>Response for GET /scans/{id}/changes</summary>
public sealed class MaterialChangesResponse
{
    public required string ScanId { get; init; }
    public int TotalChanges { get; init; }
    public required ImmutableArray<MaterialChangeDto> Changes { get; init; }
}

public sealed class MaterialChangeDto
{
    public required string VulnId { get; init; }
    public required string Purl { get; init; }
    public bool HasMaterialChange { get; init; }
    public double PriorityScore { get; init; } // double: matches MaterialRiskChangeResult and the NUMERIC(6, 4) priority_score column
    public required string PreviousStateHash { get; init; }
    public required string CurrentStateHash { get; init; }
    public required ImmutableArray<DetectedChangeDto> Changes { get; init; }
}

public sealed class DetectedChangeDto
{
    public required string Rule { get; init; }
    public required string ChangeType { get; init; }
    public required string Direction { get; init; }
    public required string Reason { get; init; }
    public required string PreviousValue { get; init; }
    public required string CurrentValue { get; init; }
    public double Weight { get; init; }
    public string? SubType { get; init; }
}

/// <summary>Response for GET /images/{digest}/candidates</summary>
public sealed class VexCandidatesResponse
{
    public required string ImageDigest { get; init; }
    public int TotalCandidates { get; init; }
    public required ImmutableArray<VexCandidateDto> Candidates { get; init; }
}

/// <summary>Response for GET /candidates/{id}</summary>
public sealed class VexCandidateResponse
{
    public required VexCandidateDto Candidate { get; init; }
}

public sealed class VexCandidateDto
{
    public required string CandidateId { get; init; }
    public required string VulnId { get; init; }
    public required string Purl { get; init; }
    public required string ImageDigest { get; init; }
    public required string SuggestedStatus { get; init; }
    public required string Justification { get; init; }
    public required string Rationale { get; init; }
    public required ImmutableArray<EvidenceLinkDto> EvidenceLinks { get; init; }
    public double Confidence { get; init; }
    public DateTimeOffset GeneratedAt { get; init; }
    public DateTimeOffset ExpiresAt { get; init; }
    public bool RequiresReview { get; init; }
}

public sealed class EvidenceLinkDto
{
    public required string Type { get; init; }
    public required string Uri { get; init; }
    public string? Digest { get; init; }
}

/// <summary>Request for POST /candidates/{id}/review</summary>
public sealed class ReviewRequest
{
    public required string Action { get; init; }
    public string? Comment { get; init; }
}

/// <summary>Response for POST /candidates/{id}/review</summary>
public sealed class ReviewResponse
{
    public required string CandidateId { get; init; }
    public required string Action { get; init; }
    public required string ReviewedBy { get; init; }
    public DateTimeOffset ReviewedAt { get; init; }
}

#endregion
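For reference, a hedged client-side sketch of the review flow defined above. The path is relative to wherever the Smart-Diff group is mounted, and the base address, candidate id, and auth wiring are illustrative:

using System.Net.Http.Json;

var client = new HttpClient { BaseAddress = new Uri("https://scanner.example.internal") };
// Assumes a bearer token satisfying ScannerPolicies.ScansWrite is already attached.
var response = await client.PostAsJsonAsync(
    "/smart-diff/candidates/vexc-0123456789abcdef/review",
    new { action = "accept", comment = "Verified via call-graph diff." });
response.EnsureSuccessStatusCode();
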
@@ -0,0 +1,167 @@
namespace StellaOps.Scanner.SmartDiff.Detection;

/// <summary>
/// Bridges the 7-state reachability lattice to the 3-bit gate model.
/// Per Sprint 3500.3 - Smart-Diff Detection Rules.
/// </summary>
public static class ReachabilityGateBridge
{
    /// <summary>
    /// Converts a lattice state to a 3-bit reachability gate.
    /// </summary>
    public static ReachabilityGate FromLatticeState(
        string latticeState,
        bool? configActivated = null,
        bool? runningUser = null)
    {
        var (reachable, confidence) = MapLatticeToReachable(latticeState);

        return new ReachabilityGate(
            Reachable: reachable,
            ConfigActivated: configActivated,
            RunningUser: runningUser,
            Confidence: confidence,
            LatticeState: latticeState,
            Rationale: GenerateRationale(latticeState, reachable));
    }

    /// <summary>
    /// Maps the 7-state lattice to the reachable boolean with confidence.
    /// </summary>
    /// <returns>Tuple of (reachable, confidence)</returns>
    public static (bool? Reachable, double Confidence) MapLatticeToReachable(string latticeState)
    {
        return latticeState.ToUpperInvariant() switch
        {
            // Confirmed states - highest confidence
            "CR" or "CONFIRMED_REACHABLE" => (true, 1.0),
            "CU" or "CONFIRMED_UNREACHABLE" => (false, 1.0),

            // Static analysis states - high confidence
            "SR" or "STATIC_REACHABLE" => (true, 0.85),
            "SU" or "STATIC_UNREACHABLE" => (false, 0.85),

            // Runtime observation states - medium-high confidence
            "RO" or "RUNTIME_OBSERVED" => (true, 0.90),
            "RU" or "RUNTIME_UNOBSERVED" => (false, 0.70), // Lower because absence != proof

            // Unknown - no confidence
            "U" or "UNKNOWN" => (null, 0.0),

            // Contested - conflicting evidence
            "X" or "CONTESTED" => (null, 0.5),

            // Likely states (for systems with uncertainty quantification)
            "LR" or "LIKELY_REACHABLE" => (true, 0.75),
            "LU" or "LIKELY_UNREACHABLE" => (false, 0.75),

            // Default for unrecognized
            _ => (null, 0.0)
        };
    }

    /// <summary>
    /// Generates human-readable rationale for the gate.
    /// </summary>
    public static string GenerateRationale(string latticeState, bool? reachable)
    {
        var stateDescription = latticeState.ToUpperInvariant() switch
        {
            "CR" or "CONFIRMED_REACHABLE" => "Confirmed reachable via static + runtime evidence",
            "CU" or "CONFIRMED_UNREACHABLE" => "Confirmed unreachable via static + runtime evidence",
            "SR" or "STATIC_REACHABLE" => "Statically reachable (call graph analysis)",
            "SU" or "STATIC_UNREACHABLE" => "Statically unreachable (no path in call graph)",
            "RO" or "RUNTIME_OBSERVED" => "Observed at runtime (instrumentation)",
            "RU" or "RUNTIME_UNOBSERVED" => "Not observed at runtime (no hits)",
            "U" or "UNKNOWN" => "Reachability unknown (insufficient evidence)",
            "X" or "CONTESTED" => "Contested (conflicting evidence)",
            "LR" or "LIKELY_REACHABLE" => "Likely reachable (heuristic analysis)",
            "LU" or "LIKELY_UNREACHABLE" => "Likely unreachable (heuristic analysis)",
            _ => $"Unrecognized lattice state: {latticeState}"
        };

        var reachableStr = reachable switch
        {
            true => "REACHABLE",
            false => "UNREACHABLE",
            null => "UNKNOWN"
        };

        return $"[{reachableStr}] {stateDescription}";
    }

    /// <summary>
    /// Computes the 3-bit class from the gate values.
    /// </summary>
    public static int ComputeClass(ReachabilityGate gate)
    {
        // 3-bit encoding: [reachable][configActivated][runningUser]
        var bit0 = gate.Reachable == true ? 1 : 0;
        var bit1 = gate.ConfigActivated == true ? 1 : 0;
        var bit2 = gate.RunningUser == true ? 1 : 0;

        return (bit2 << 2) | (bit1 << 1) | bit0;
    }

    /// <summary>
    /// Interprets the 3-bit class as a risk level.
    /// </summary>
    public static string InterpretClass(int gateClass)
    {
        // Class meanings:
        // 0 (000) - Not reachable, not activated, not running as user - lowest risk
        // 1 (001) - Reachable but not activated and not running as user
        // 2 (010) - Activated but not reachable and not running as user
        // 3 (011) - Reachable and activated but not running as user
        // 4 (100) - Running as user but not reachable or activated
        // 5 (101) - Reachable and running as user
        // 6 (110) - Activated and running as user
        // 7 (111) - All three true - highest risk

        return gateClass switch
        {
            0 => "LOW - No conditions met",
            1 => "MEDIUM-LOW - Code reachable only",
            2 => "LOW - Config activated but unreachable",
            3 => "MEDIUM - Reachable and config activated",
            4 => "MEDIUM-LOW - Running as user only",
            5 => "MEDIUM-HIGH - Reachable as user",
            6 => "MEDIUM - Config activated as user",
            7 => "HIGH - All conditions met",
            _ => "UNKNOWN"
        };
    }
}

/// <summary>
/// 3-bit reachability gate representation.
/// </summary>
public sealed record ReachabilityGate(
    bool? Reachable,
    bool? ConfigActivated,
    bool? RunningUser,
    double Confidence,
    string? LatticeState,
    string Rationale)
{
    /// <summary>
    /// Computes the 3-bit class.
    /// </summary>
    public int ComputeClass() => ReachabilityGateBridge.ComputeClass(this);

    /// <summary>
    /// Gets the risk interpretation.
    /// </summary>
    public string RiskInterpretation => ReachabilityGateBridge.InterpretClass(ComputeClass());

    /// <summary>
    /// Creates a gate with default null values.
    /// </summary>
    public static ReachabilityGate Unknown { get; } = new(
        Reachable: null,
        ConfigActivated: null,
        RunningUser: null,
        Confidence: 0.0,
        LatticeState: "U",
        Rationale: "[UNKNOWN] Reachability unknown");
}
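A short usage sketch of the bridge above; the values follow directly from the mapping table and the bit encoding shown:

var gate = ReachabilityGateBridge.FromLatticeState("SR", configActivated: true, runningUser: false);
// Reachable == true at confidence 0.85; class = (0 << 2) | (1 << 1) | 1 = 3.
Console.WriteLine(gate.ComputeClass());     // 3
Console.WriteLine(gate.RiskInterpretation); // MEDIUM - Reachable and config activated
Console.WriteLine(gate.Rationale);          // [REACHABLE] Statically reachable (call graph analysis)
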
@@ -0,0 +1,239 @@
using System.Collections.Immutable;

namespace StellaOps.Scanner.SmartDiff.Detection;

/// <summary>
/// Repository interface for risk state snapshots.
/// Per Sprint 3500.3 - Smart-Diff Detection Rules.
/// </summary>
public interface IRiskStateRepository
{
    /// <summary>
    /// Store a risk state snapshot.
    /// </summary>
    Task StoreSnapshotAsync(RiskStateSnapshot snapshot, CancellationToken ct = default);

    /// <summary>
    /// Store multiple risk state snapshots.
    /// </summary>
    Task StoreSnapshotsAsync(IReadOnlyList<RiskStateSnapshot> snapshots, CancellationToken ct = default);

    /// <summary>
    /// Get the latest snapshot for a finding.
    /// </summary>
    Task<RiskStateSnapshot?> GetLatestSnapshotAsync(FindingKey findingKey, CancellationToken ct = default);

    /// <summary>
    /// Get snapshots for a scan.
    /// </summary>
    Task<IReadOnlyList<RiskStateSnapshot>> GetSnapshotsForScanAsync(string scanId, CancellationToken ct = default);

    /// <summary>
    /// Get snapshot history for a finding.
    /// </summary>
    Task<IReadOnlyList<RiskStateSnapshot>> GetSnapshotHistoryAsync(
        FindingKey findingKey,
        int limit = 10,
        CancellationToken ct = default);

    /// <summary>
    /// Get snapshots by state hash.
    /// </summary>
    Task<IReadOnlyList<RiskStateSnapshot>> GetSnapshotsByHashAsync(string stateHash, CancellationToken ct = default);
}

/// <summary>
/// Repository interface for material risk changes.
/// </summary>
public interface IMaterialRiskChangeRepository
{
    /// <summary>
    /// Store a material risk change result.
    /// </summary>
    Task StoreChangeAsync(MaterialRiskChangeResult change, string scanId, CancellationToken ct = default);

    /// <summary>
    /// Store multiple material risk change results.
    /// </summary>
    Task StoreChangesAsync(IReadOnlyList<MaterialRiskChangeResult> changes, string scanId, CancellationToken ct = default);

    /// <summary>
    /// Get material changes for a scan.
    /// </summary>
    Task<IReadOnlyList<MaterialRiskChangeResult>> GetChangesForScanAsync(string scanId, CancellationToken ct = default);

    /// <summary>
    /// Get material changes for a finding.
    /// </summary>
    Task<IReadOnlyList<MaterialRiskChangeResult>> GetChangesForFindingAsync(
        FindingKey findingKey,
        int limit = 10,
        CancellationToken ct = default);

    /// <summary>
    /// Query material changes with filters.
    /// </summary>
    Task<MaterialRiskChangeQueryResult> QueryChangesAsync(
        MaterialRiskChangeQuery query,
        CancellationToken ct = default);
}

/// <summary>
/// Query for material risk changes.
/// </summary>
public sealed record MaterialRiskChangeQuery(
    string? ImageDigest = null,
    DateTimeOffset? Since = null,
    DateTimeOffset? Until = null,
    ImmutableArray<DetectionRule>? Rules = null,
    ImmutableArray<RiskDirection>? Directions = null,
    double? MinPriorityScore = null,
    int Offset = 0,
    int Limit = 100);

/// <summary>
/// Result of material risk change query.
/// </summary>
public sealed record MaterialRiskChangeQueryResult(
    ImmutableArray<MaterialRiskChangeResult> Changes,
    int TotalCount,
    int Offset,
    int Limit);

/// <summary>
/// In-memory implementation for testing.
/// </summary>
public sealed class InMemoryRiskStateRepository : IRiskStateRepository
{
    private readonly List<RiskStateSnapshot> _snapshots = [];
    private readonly object _lock = new();

    public Task StoreSnapshotAsync(RiskStateSnapshot snapshot, CancellationToken ct = default)
    {
        lock (_lock)
        {
            _snapshots.Add(snapshot);
        }
        return Task.CompletedTask;
    }

    public Task StoreSnapshotsAsync(IReadOnlyList<RiskStateSnapshot> snapshots, CancellationToken ct = default)
    {
        lock (_lock)
        {
            _snapshots.AddRange(snapshots);
        }
        return Task.CompletedTask;
    }

    public Task<RiskStateSnapshot?> GetLatestSnapshotAsync(FindingKey findingKey, CancellationToken ct = default)
    {
        lock (_lock)
        {
            var snapshot = _snapshots
                .Where(s => s.FindingKey == findingKey)
                .OrderByDescending(s => s.CapturedAt)
                .FirstOrDefault();
            return Task.FromResult(snapshot);
        }
    }

    public Task<IReadOnlyList<RiskStateSnapshot>> GetSnapshotsForScanAsync(string scanId, CancellationToken ct = default)
    {
        lock (_lock)
        {
            var snapshots = _snapshots
                .Where(s => s.ScanId == scanId)
                .ToList();
            return Task.FromResult<IReadOnlyList<RiskStateSnapshot>>(snapshots);
        }
    }

    public Task<IReadOnlyList<RiskStateSnapshot>> GetSnapshotHistoryAsync(
        FindingKey findingKey,
        int limit = 10,
        CancellationToken ct = default)
    {
        lock (_lock)
        {
            var snapshots = _snapshots
                .Where(s => s.FindingKey == findingKey)
                .OrderByDescending(s => s.CapturedAt)
                .Take(limit)
                .ToList();
            return Task.FromResult<IReadOnlyList<RiskStateSnapshot>>(snapshots);
        }
    }

    public Task<IReadOnlyList<RiskStateSnapshot>> GetSnapshotsByHashAsync(string stateHash, CancellationToken ct = default)
    {
        lock (_lock)
        {
            var snapshots = _snapshots
                .Where(s => s.ComputeStateHash() == stateHash)
                .ToList();
            return Task.FromResult<IReadOnlyList<RiskStateSnapshot>>(snapshots);
        }
    }
}

/// <summary>
/// In-memory implementation for testing.
/// </summary>
public sealed class InMemoryVexCandidateStore : IVexCandidateStore
{
    private readonly Dictionary<string, VexCandidate> _candidates = [];
    private readonly Dictionary<string, VexCandidateReview> _reviews = [];
    private readonly object _lock = new();

    public Task StoreCandidatesAsync(IReadOnlyList<VexCandidate> candidates, CancellationToken ct = default)
    {
        lock (_lock)
        {
            foreach (var candidate in candidates)
            {
                _candidates[candidate.CandidateId] = candidate;
            }
        }
        return Task.CompletedTask;
    }

    public Task<IReadOnlyList<VexCandidate>> GetCandidatesAsync(string imageDigest, CancellationToken ct = default)
    {
        lock (_lock)
        {
            var candidates = _candidates.Values
                .Where(c => c.ImageDigest == imageDigest)
                .ToList();
            return Task.FromResult<IReadOnlyList<VexCandidate>>(candidates);
        }
    }

    public Task<VexCandidate?> GetCandidateAsync(string candidateId, CancellationToken ct = default)
    {
        lock (_lock)
        {
            _candidates.TryGetValue(candidateId, out var candidate);
            return Task.FromResult(candidate);
        }
    }

    public Task<bool> ReviewCandidateAsync(string candidateId, VexCandidateReview review, CancellationToken ct = default)
    {
        lock (_lock)
        {
            if (!_candidates.ContainsKey(candidateId))
                return Task.FromResult(false);

            _reviews[candidateId] = review;

            // Update candidate to mark as reviewed
            if (_candidates.TryGetValue(candidateId, out var candidate))
            {
                _candidates[candidateId] = candidate with { RequiresReview = false };
            }

            return Task.FromResult(true);
        }
    }
}
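A unit-test sketch against the in-memory store above, in the spirit of this commit's SmartDiff test coverage. The FindingKey constructor shape is an assumption; adjust to the real record:

using Xunit;

public class InMemoryVexCandidateStoreTests
{
    [Fact]
    public async Task ReviewClearsRequiresReviewFlag()
    {
        var store = new InMemoryVexCandidateStore();
        var candidate = new VexCandidate(
            CandidateId: "vexc-test",
            FindingKey: new FindingKey("CVE-2025-0001", "pkg:npm/example@1.0.0"), // assumed ctor
            SuggestedStatus: VexStatusType.NotAffected,
            Justification: VexJustification.VulnerableCodeNotPresent,
            Rationale: "test",
            EvidenceLinks: [],
            Confidence: 0.9,
            ImageDigest: "sha256:abc",
            GeneratedAt: DateTimeOffset.UtcNow,
            ExpiresAt: DateTimeOffset.UtcNow.AddDays(30),
            RequiresReview: true);

        await store.StoreCandidatesAsync([candidate]);
        var review = new VexCandidateReview(VexReviewAction.Accept, "qa", null, DateTimeOffset.UtcNow);

        Assert.True(await store.ReviewCandidateAsync("vexc-test", review));
        var updated = await store.GetCandidateAsync("vexc-test");
        Assert.False(updated!.RequiresReview);
    }
}
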
@@ -0,0 +1,194 @@
using System.Collections.Immutable;
using System.Security.Cryptography;
using System.Text;

namespace StellaOps.Scanner.SmartDiff.Detection;

/// <summary>
/// Emits VEX candidates for findings where vulnerable APIs are no longer present.
/// Per Sprint 3500.3 - Smart-Diff Detection Rules.
/// </summary>
public sealed class VexCandidateEmitter
{
    private readonly VexCandidateEmitterOptions _options;
    private readonly IVexCandidateStore? _store;

    public VexCandidateEmitter(VexCandidateEmitterOptions? options = null, IVexCandidateStore? store = null)
    {
        _options = options ?? VexCandidateEmitterOptions.Default;
        _store = store;
    }

    /// <summary>
    /// Evaluate findings and emit VEX candidates for those with absent vulnerable APIs.
    /// </summary>
    public async Task<VexCandidateEmissionResult> EmitCandidatesAsync(
        VexCandidateEmissionContext context,
        CancellationToken ct = default)
    {
        ArgumentNullException.ThrowIfNull(context);

        var candidates = new List<VexCandidate>();

        // Build lookup of current findings
        var currentFindingKeys = new HashSet<FindingKey>(
            context.CurrentFindings.Select(f => f.FindingKey));

        // Evaluate previous findings that are still present
        foreach (var prevFinding in context.PreviousFindings)
        {
            // Skip if finding is no longer present (component removed)
            if (!currentFindingKeys.Contains(prevFinding.FindingKey))
                continue;

            // Skip if already has a VEX status
            if (prevFinding.VexStatus != VexStatusType.Unknown &&
                prevFinding.VexStatus != VexStatusType.Affected)
                continue;

            // Check if vulnerable APIs are now absent
            var apiCheck = CheckVulnerableApisAbsent(
                prevFinding,
                context.PreviousCallGraph,
                context.CurrentCallGraph);

            if (!apiCheck.AllApisAbsent)
                continue;

            // Check confidence threshold
            var confidence = ComputeConfidence(apiCheck);
            if (confidence < _options.MinConfidence)
                continue;

            // Generate VEX candidate
            var candidate = CreateVexCandidate(prevFinding, apiCheck, context, confidence);
            candidates.Add(candidate);

            // Rate limit per image
            if (candidates.Count >= _options.MaxCandidatesPerImage)
                break;
        }

        // Store candidates (if configured)
        if (candidates.Count > 0 && _options.PersistCandidates && _store is not null)
        {
            await _store.StoreCandidatesAsync(candidates, ct);
        }

        return new VexCandidateEmissionResult(
            ImageDigest: context.TargetImageDigest,
            CandidatesEmitted: candidates.Count,
            Candidates: [.. candidates],
            Timestamp: DateTimeOffset.UtcNow);
    }

    /// <summary>
    /// Checks if all vulnerable APIs for a finding are absent in current scan.
    /// </summary>
    private static VulnerableApiCheckResult CheckVulnerableApisAbsent(
        FindingSnapshot finding,
        CallGraphSnapshot? previousGraph,
        CallGraphSnapshot? currentGraph)
    {
        if (previousGraph is null || currentGraph is null)
        {
            return new VulnerableApiCheckResult(
                AllApisAbsent: false,
                AbsentApis: [],
                PresentApis: [],
                Reason: "Call graph not available");
        }

        var vulnerableApis = finding.VulnerableApis;
        if (vulnerableApis.IsDefaultOrEmpty)
        {
            return new VulnerableApiCheckResult(
                AllApisAbsent: false,
                AbsentApis: [],
                PresentApis: [],
                Reason: "No vulnerable APIs tracked");
        }

        var absentApis = new List<string>();
        var presentApis = new List<string>();

        foreach (var api in vulnerableApis)
        {
            var isPresentInCurrent = currentGraph.ContainsSymbol(api);
            if (isPresentInCurrent)
                presentApis.Add(api);
            else
                absentApis.Add(api);
        }

        return new VulnerableApiCheckResult(
            AllApisAbsent: presentApis.Count == 0 && absentApis.Count > 0,
            AbsentApis: [.. absentApis],
            PresentApis: [.. presentApis],
            Reason: presentApis.Count == 0
                ? $"All {absentApis.Count} vulnerable APIs absent"
                : $"{presentApis.Count} vulnerable APIs still present");
    }

    /// <summary>
    /// Creates a VEX candidate from a finding and API check.
    /// </summary>
    private VexCandidate CreateVexCandidate(
        FindingSnapshot finding,
        VulnerableApiCheckResult apiCheck,
        VexCandidateEmissionContext context,
        double confidence)
    {
        var evidenceLinks = new List<EvidenceLink>
        {
            new(
                Type: "callgraph_diff",
                Uri: $"callgraph://{context.PreviousScanId}/{context.CurrentScanId}",
                Digest: context.CurrentCallGraph?.Digest)
        };

        foreach (var api in apiCheck.AbsentApis)
        {
            evidenceLinks.Add(new EvidenceLink(
                Type: "absent_api",
                Uri: $"symbol://{api}"));
        }

        return new VexCandidate(
            CandidateId: GenerateCandidateId(finding, context),
            FindingKey: finding.FindingKey,
            SuggestedStatus: VexStatusType.NotAffected,
            Justification: VexJustification.VulnerableCodeNotPresent,
            Rationale: $"Vulnerable APIs no longer present in image: {string.Join(", ", apiCheck.AbsentApis)}",
            EvidenceLinks: [.. evidenceLinks],
            Confidence: confidence,
            ImageDigest: context.TargetImageDigest,
            GeneratedAt: DateTimeOffset.UtcNow,
            ExpiresAt: DateTimeOffset.UtcNow.Add(_options.CandidateTtl),
            RequiresReview: true);
    }

    private static string GenerateCandidateId(
        FindingSnapshot finding,
        VexCandidateEmissionContext context)
    {
        var input = $"{context.TargetImageDigest}:{finding.FindingKey}:{DateTimeOffset.UtcNow.Ticks}";
        var hash = SHA256.HashData(Encoding.UTF8.GetBytes(input));
        return $"vexc-{Convert.ToHexString(hash).ToLowerInvariant()[..16]}";
    }

    private static double ComputeConfidence(VulnerableApiCheckResult apiCheck)
    {
        if (apiCheck.PresentApis.Length > 0)
            return 0.0;

        // Higher confidence with more absent APIs
        return apiCheck.AbsentApis.Length switch
        {
            >= 3 => 0.95,
            2 => 0.85,
            1 => 0.75,
            _ => 0.5
        };
    }
}
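A hedged usage sketch of the emitter; the FindingKey constructor is assumed, and the digests/symbols are illustrative:

var store = new InMemoryVexCandidateStore();
var emitter = new VexCandidateEmitter(new VexCandidateEmitterOptions { MinConfidence = 0.7 }, store);

var key = new FindingKey("CVE-2025-0001", "pkg:npm/example@1.0.0"); // assumed ctor
var context = new VexCandidateEmissionContext(
    PreviousScanId: "scan-1",
    CurrentScanId: "scan-2",
    TargetImageDigest: "sha256:abc",
    PreviousFindings: [new FindingSnapshot(key, VexStatusType.Affected, ["Example.Vulnerable.Api"])],
    CurrentFindings: [new FindingSnapshot(key, VexStatusType.Affected, ["Example.Vulnerable.Api"])],
    PreviousCallGraph: new CallGraphSnapshot("sha256:g1", ["Example.Vulnerable.Api"]),
    CurrentCallGraph: new CallGraphSnapshot("sha256:g2", [])); // the vulnerable API is gone

var result = await emitter.EmitCandidatesAsync(context);
// One candidate: every tracked vulnerable API is absent, confidence 0.75 >= MinConfidence.
Console.WriteLine(result.CandidatesEmitted); // 1
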
@@ -0,0 +1,180 @@
using System.Collections.Immutable;
using System.Text.Json.Serialization;

namespace StellaOps.Scanner.SmartDiff.Detection;

/// <summary>
/// A VEX candidate generated by Smart-Diff.
/// Per Sprint 3500.3 - Smart-Diff Detection Rules.
/// </summary>
public sealed record VexCandidate(
    [property: JsonPropertyName("candidateId")] string CandidateId,
    [property: JsonPropertyName("findingKey")] FindingKey FindingKey,
    [property: JsonPropertyName("suggestedStatus")] VexStatusType SuggestedStatus,
    [property: JsonPropertyName("justification")] VexJustification Justification,
    [property: JsonPropertyName("rationale")] string Rationale,
    [property: JsonPropertyName("evidenceLinks")] ImmutableArray<EvidenceLink> EvidenceLinks,
    [property: JsonPropertyName("confidence")] double Confidence,
    [property: JsonPropertyName("imageDigest")] string ImageDigest,
    [property: JsonPropertyName("generatedAt")] DateTimeOffset GeneratedAt,
    [property: JsonPropertyName("expiresAt")] DateTimeOffset ExpiresAt,
    [property: JsonPropertyName("requiresReview")] bool RequiresReview);

/// <summary>
/// VEX justification codes per OpenVEX specification.
/// </summary>
[JsonConverter(typeof(JsonStringEnumConverter<VexJustification>))]
public enum VexJustification
{
    [JsonStringEnumMemberName("component_not_present")]
    ComponentNotPresent,

    [JsonStringEnumMemberName("vulnerable_code_not_present")]
    VulnerableCodeNotPresent,

    [JsonStringEnumMemberName("vulnerable_code_not_in_execute_path")]
    VulnerableCodeNotInExecutePath,

    [JsonStringEnumMemberName("vulnerable_code_cannot_be_controlled_by_adversary")]
    VulnerableCodeCannotBeControlledByAdversary,

    [JsonStringEnumMemberName("inline_mitigations_already_exist")]
    InlineMitigationsAlreadyExist
}

/// <summary>
/// Result of vulnerable API presence check.
/// </summary>
public sealed record VulnerableApiCheckResult(
    [property: JsonPropertyName("allApisAbsent")] bool AllApisAbsent,
    [property: JsonPropertyName("absentApis")] ImmutableArray<string> AbsentApis,
    [property: JsonPropertyName("presentApis")] ImmutableArray<string> PresentApis,
    [property: JsonPropertyName("reason")] string Reason);

/// <summary>
/// Result of VEX candidate emission.
/// </summary>
public sealed record VexCandidateEmissionResult(
    [property: JsonPropertyName("imageDigest")] string ImageDigest,
    [property: JsonPropertyName("candidatesEmitted")] int CandidatesEmitted,
    [property: JsonPropertyName("candidates")] ImmutableArray<VexCandidate> Candidates,
    [property: JsonPropertyName("timestamp")] DateTimeOffset Timestamp);

/// <summary>
/// Context for VEX candidate emission.
/// </summary>
public sealed record VexCandidateEmissionContext(
    string PreviousScanId,
    string CurrentScanId,
    string TargetImageDigest,
    IReadOnlyList<FindingSnapshot> PreviousFindings,
    IReadOnlyList<FindingSnapshot> CurrentFindings,
    CallGraphSnapshot? PreviousCallGraph,
    CallGraphSnapshot? CurrentCallGraph);

/// <summary>
/// Snapshot of a finding for VEX evaluation.
/// </summary>
public sealed record FindingSnapshot(
    [property: JsonPropertyName("findingKey")] FindingKey FindingKey,
    [property: JsonPropertyName("vexStatus")] VexStatusType VexStatus,
    [property: JsonPropertyName("vulnerableApis")] ImmutableArray<string> VulnerableApis);

/// <summary>
/// Snapshot of call graph for API presence checking.
/// </summary>
public sealed class CallGraphSnapshot
{
    private readonly HashSet<string> _symbols;

    public string Digest { get; }

    public CallGraphSnapshot(string digest, IEnumerable<string> symbols)
    {
        Digest = digest;
        _symbols = new HashSet<string>(symbols, StringComparer.Ordinal);
    }

    public bool ContainsSymbol(string symbol) => _symbols.Contains(symbol);

    public int SymbolCount => _symbols.Count;
}

/// <summary>
/// Configuration for VEX candidate emission.
/// </summary>
public sealed class VexCandidateEmitterOptions
{
    public static readonly VexCandidateEmitterOptions Default = new();

    /// <summary>
    /// Maximum candidates to emit per image.
    /// </summary>
    public int MaxCandidatesPerImage { get; init; } = 50;

    /// <summary>
    /// Whether to persist candidates to storage.
    /// </summary>
    public bool PersistCandidates { get; init; } = true;

    /// <summary>
    /// TTL for generated candidates.
    /// </summary>
    public TimeSpan CandidateTtl { get; init; } = TimeSpan.FromDays(30);

    /// <summary>
    /// Minimum confidence threshold for emission.
    /// </summary>
    public double MinConfidence { get; init; } = 0.7;
}

/// <summary>
/// Interface for VEX candidate storage.
/// </summary>
public interface IVexCandidateStore
{
    /// <summary>
    /// Store candidates.
    /// </summary>
    Task StoreCandidatesAsync(IReadOnlyList<VexCandidate> candidates, CancellationToken ct = default);

    /// <summary>
    /// Get candidates for an image.
    /// </summary>
    Task<IReadOnlyList<VexCandidate>> GetCandidatesAsync(string imageDigest, CancellationToken ct = default);

    /// <summary>
    /// Get a specific candidate by ID.
    /// </summary>
    Task<VexCandidate?> GetCandidateAsync(string candidateId, CancellationToken ct = default);

    /// <summary>
    /// Mark a candidate as reviewed.
    /// </summary>
    Task<bool> ReviewCandidateAsync(string candidateId, VexCandidateReview review, CancellationToken ct = default);
}

/// <summary>
/// Review action for a VEX candidate.
/// </summary>
public sealed record VexCandidateReview(
    [property: JsonPropertyName("action")] VexReviewAction Action,
    [property: JsonPropertyName("reviewer")] string Reviewer,
    [property: JsonPropertyName("comment")] string? Comment,
    [property: JsonPropertyName("reviewedAt")] DateTimeOffset ReviewedAt);

/// <summary>
/// Review action types.
/// </summary>
[JsonConverter(typeof(JsonStringEnumConverter<VexReviewAction>))]
public enum VexReviewAction
{
    [JsonStringEnumMemberName("accept")]
    Accept,

    [JsonStringEnumMemberName("reject")]
    Reject,

    [JsonStringEnumMemberName("defer")]
    Defer
}
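A quick round-trip sketch confirming the wire names declared above (System.Text.Json; JsonStringEnumMemberName requires .NET 9+):

using System.Text.Json;

var json = JsonSerializer.Serialize(VexJustification.VulnerableCodeNotPresent);
Console.WriteLine(json); // "vulnerable_code_not_present"

var action = JsonSerializer.Deserialize<VexReviewAction>("\"defer\"");
Console.WriteLine(action); // Defer
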
@@ -0,0 +1,370 @@
-- Migration: 005_smart_diff_tables
-- Sprint: SPRINT_3500_0003_0001_smart_diff_detection
-- Task: SDIFF-DET-016
-- Description: Smart-Diff risk state snapshots, material changes, and VEX candidates

-- Ensure scanner schema exists
CREATE SCHEMA IF NOT EXISTS scanner;

-- =============================================================================
-- Enums for Smart-Diff
-- =============================================================================

-- VEX status types
DO $$ BEGIN
    CREATE TYPE scanner.vex_status_type AS ENUM (
        'unknown',
        'affected',
        'not_affected',
        'fixed',
        'under_investigation'
    );
EXCEPTION
    WHEN duplicate_object THEN NULL;
END $$;

-- Policy decision types
DO $$ BEGIN
    CREATE TYPE scanner.policy_decision_type AS ENUM (
        'allow',
        'warn',
        'block'
    );
EXCEPTION
    WHEN duplicate_object THEN NULL;
END $$;

-- Detection rule types
DO $$ BEGIN
    CREATE TYPE scanner.detection_rule AS ENUM (
        'R1_ReachabilityFlip',
        'R2_VexFlip',
        'R3_RangeBoundary',
        'R4_IntelligenceFlip'
    );
EXCEPTION
    WHEN duplicate_object THEN NULL;
END $$;

-- Material change types
DO $$ BEGIN
    CREATE TYPE scanner.material_change_type AS ENUM (
        'reachability_flip',
        'vex_flip',
        'range_boundary',
        'kev_added',
        'kev_removed',
        'epss_threshold',
        'policy_flip'
    );
EXCEPTION
    WHEN duplicate_object THEN NULL;
END $$;

-- Risk direction
DO $$ BEGIN
    CREATE TYPE scanner.risk_direction AS ENUM (
        'increased',
        'decreased',
        'neutral'
    );
EXCEPTION
    WHEN duplicate_object THEN NULL;
END $$;

-- VEX justification codes
DO $$ BEGIN
    CREATE TYPE scanner.vex_justification AS ENUM (
        'component_not_present',
        'vulnerable_code_not_present',
        'vulnerable_code_not_in_execute_path',
        'vulnerable_code_cannot_be_controlled_by_adversary',
        'inline_mitigations_already_exist'
    );
EXCEPTION
    WHEN duplicate_object THEN NULL;
END $$;

-- VEX review actions
DO $$ BEGIN
    CREATE TYPE scanner.vex_review_action AS ENUM (
        'accept',
        'reject',
        'defer'
    );
EXCEPTION
    WHEN duplicate_object THEN NULL;
END $$;

-- =============================================================================
-- Table: scanner.risk_state_snapshots
-- Purpose: Store point-in-time risk state for findings
-- =============================================================================
CREATE TABLE IF NOT EXISTS scanner.risk_state_snapshots (
    -- Identity
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID NOT NULL,

    -- Finding identification (composite key)
    vuln_id TEXT NOT NULL,
    purl TEXT NOT NULL,

    -- Scan context
    scan_id TEXT NOT NULL,
    captured_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Risk state dimensions
    reachable BOOLEAN,
    lattice_state TEXT,
    vex_status scanner.vex_status_type NOT NULL DEFAULT 'unknown',
    in_affected_range BOOLEAN,

    -- Intelligence signals
    kev BOOLEAN NOT NULL DEFAULT FALSE,
    epss_score NUMERIC(5, 4),

    -- Policy state
    policy_flags TEXT[] DEFAULT '{}',
    policy_decision scanner.policy_decision_type,

    -- State hash for change detection (deterministic)
    state_hash TEXT NOT NULL,

    -- Audit
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Constraints
    CONSTRAINT risk_state_unique_per_scan UNIQUE (tenant_id, scan_id, vuln_id, purl)
);

-- Indexes for risk_state_snapshots
CREATE INDEX IF NOT EXISTS idx_risk_state_tenant_finding
    ON scanner.risk_state_snapshots (tenant_id, vuln_id, purl);
CREATE INDEX IF NOT EXISTS idx_risk_state_scan
    ON scanner.risk_state_snapshots (scan_id);
CREATE INDEX IF NOT EXISTS idx_risk_state_captured_at
    ON scanner.risk_state_snapshots USING BRIN (captured_at);
CREATE INDEX IF NOT EXISTS idx_risk_state_hash
    ON scanner.risk_state_snapshots (state_hash);

-- =============================================================================
-- Table: scanner.material_risk_changes
-- Purpose: Store detected material risk changes between scans
-- =============================================================================
CREATE TABLE IF NOT EXISTS scanner.material_risk_changes (
    -- Identity
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID NOT NULL,

    -- Finding identification
    vuln_id TEXT NOT NULL,
    purl TEXT NOT NULL,

    -- Scan context
    scan_id TEXT NOT NULL,

    -- Change summary
    has_material_change BOOLEAN NOT NULL DEFAULT FALSE,
    priority_score NUMERIC(6, 4) NOT NULL DEFAULT 0,

    -- State hashes
    previous_state_hash TEXT NOT NULL,
    current_state_hash TEXT NOT NULL,

    -- Detected changes (JSONB array)
    changes JSONB NOT NULL DEFAULT '[]',

    -- Audit
    detected_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Constraints
    CONSTRAINT material_change_unique_per_scan UNIQUE (tenant_id, scan_id, vuln_id, purl)
);

-- Indexes for material_risk_changes
CREATE INDEX IF NOT EXISTS idx_material_changes_tenant_scan
    ON scanner.material_risk_changes (tenant_id, scan_id);
CREATE INDEX IF NOT EXISTS idx_material_changes_priority
    ON scanner.material_risk_changes (priority_score DESC)
    WHERE has_material_change = TRUE;
CREATE INDEX IF NOT EXISTS idx_material_changes_detected_at
    ON scanner.material_risk_changes USING BRIN (detected_at);

-- GIN index for JSON querying
CREATE INDEX IF NOT EXISTS idx_material_changes_changes_gin
    ON scanner.material_risk_changes USING GIN (changes);

-- =============================================================================
-- Table: scanner.vex_candidates
-- Purpose: Store auto-generated VEX candidates for review
-- =============================================================================
CREATE TABLE IF NOT EXISTS scanner.vex_candidates (
    -- Identity
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    candidate_id TEXT NOT NULL UNIQUE,
    tenant_id UUID NOT NULL,

    -- Finding identification
    vuln_id TEXT NOT NULL,
    purl TEXT NOT NULL,

    -- Image context
    image_digest TEXT NOT NULL,

    -- Suggested VEX assertion
    suggested_status scanner.vex_status_type NOT NULL,
    justification scanner.vex_justification NOT NULL,
    rationale TEXT NOT NULL,

    -- Evidence links (JSONB array)
    evidence_links JSONB NOT NULL DEFAULT '[]',

    -- Confidence and validity
    confidence NUMERIC(4, 3) NOT NULL,
    generated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    expires_at TIMESTAMPTZ NOT NULL,

    -- Review state
    requires_review BOOLEAN NOT NULL DEFAULT TRUE,
    review_action scanner.vex_review_action,
    reviewed_by TEXT,
    reviewed_at TIMESTAMPTZ,
    review_comment TEXT,

    -- Audit
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Indexes for vex_candidates
CREATE INDEX IF NOT EXISTS idx_vex_candidates_tenant_image
    ON scanner.vex_candidates (tenant_id, image_digest);
CREATE INDEX IF NOT EXISTS idx_vex_candidates_pending_review
    ON scanner.vex_candidates (tenant_id, requires_review, confidence DESC)
    WHERE requires_review = TRUE;
CREATE INDEX IF NOT EXISTS idx_vex_candidates_expires
    ON scanner.vex_candidates (expires_at);
CREATE INDEX IF NOT EXISTS idx_vex_candidates_candidate_id
    ON scanner.vex_candidates (candidate_id);

-- GIN index for evidence links
CREATE INDEX IF NOT EXISTS idx_vex_candidates_evidence_gin
    ON scanner.vex_candidates USING GIN (evidence_links);

-- =============================================================================
-- RLS Policies (for multi-tenant isolation)
-- =============================================================================

-- Enable RLS
ALTER TABLE scanner.risk_state_snapshots ENABLE ROW LEVEL SECURITY;
ALTER TABLE scanner.material_risk_changes ENABLE ROW LEVEL SECURITY;
ALTER TABLE scanner.vex_candidates ENABLE ROW LEVEL SECURITY;

-- RLS function for tenant isolation
CREATE OR REPLACE FUNCTION scanner.current_tenant_id()
RETURNS UUID AS $$
BEGIN
    RETURN NULLIF(current_setting('app.current_tenant_id', TRUE), '')::UUID;
END;
$$ LANGUAGE plpgsql STABLE;

-- Policies for risk_state_snapshots
DROP POLICY IF EXISTS risk_state_tenant_isolation ON scanner.risk_state_snapshots;
CREATE POLICY risk_state_tenant_isolation ON scanner.risk_state_snapshots
    USING (tenant_id = scanner.current_tenant_id());

-- Policies for material_risk_changes
DROP POLICY IF EXISTS material_changes_tenant_isolation ON scanner.material_risk_changes;
CREATE POLICY material_changes_tenant_isolation ON scanner.material_risk_changes
    USING (tenant_id = scanner.current_tenant_id());

-- Policies for vex_candidates
DROP POLICY IF EXISTS vex_candidates_tenant_isolation ON scanner.vex_candidates;
CREATE POLICY vex_candidates_tenant_isolation ON scanner.vex_candidates
    USING (tenant_id = scanner.current_tenant_id());

-- =============================================================================
-- Helper Functions
-- =============================================================================

-- Function to get material changes for a scan
CREATE OR REPLACE FUNCTION scanner.get_material_changes_for_scan(
    p_scan_id TEXT,
    p_min_priority NUMERIC DEFAULT NULL
)
RETURNS TABLE (
    vuln_id TEXT,
    purl TEXT,
    priority_score NUMERIC,
    changes JSONB
) AS $$
BEGIN
    RETURN QUERY
    SELECT
        mc.vuln_id,
        mc.purl,
        mc.priority_score,
        mc.changes
    FROM scanner.material_risk_changes mc
    WHERE mc.scan_id = p_scan_id
        AND mc.has_material_change = TRUE
        AND (p_min_priority IS NULL OR mc.priority_score >= p_min_priority)
    ORDER BY mc.priority_score DESC;
END;
$$ LANGUAGE plpgsql STABLE;

-- Function to get pending VEX candidates for review
CREATE OR REPLACE FUNCTION scanner.get_pending_vex_candidates(
    p_image_digest TEXT DEFAULT NULL,
    p_min_confidence NUMERIC DEFAULT 0.7,
    p_limit INT DEFAULT 50
)
RETURNS TABLE (
    candidate_id TEXT,
    vuln_id TEXT,
    purl TEXT,
    image_digest TEXT,
    suggested_status scanner.vex_status_type,
    justification scanner.vex_justification,
    rationale TEXT,
    confidence NUMERIC,
    evidence_links JSONB
) AS $$
BEGIN
    RETURN QUERY
    SELECT
        vc.candidate_id,
        vc.vuln_id,
        vc.purl,
        vc.image_digest,
        vc.suggested_status,
        vc.justification,
        vc.rationale,
        vc.confidence,
        vc.evidence_links
    FROM scanner.vex_candidates vc
    WHERE vc.requires_review = TRUE
        AND vc.expires_at > NOW()
        AND vc.confidence >= p_min_confidence
        AND (p_image_digest IS NULL OR vc.image_digest = p_image_digest)
    ORDER BY vc.confidence DESC
    LIMIT p_limit;
END;
$$ LANGUAGE plpgsql STABLE;

-- =============================================================================
-- Comments
-- =============================================================================

COMMENT ON TABLE scanner.risk_state_snapshots IS
    'Point-in-time risk state snapshots for Smart-Diff change detection';
COMMENT ON TABLE scanner.material_risk_changes IS
    'Detected material risk changes between scans (R1-R4 rules)';
COMMENT ON TABLE scanner.vex_candidates IS
    'Auto-generated VEX candidates based on absent vulnerable APIs';

COMMENT ON COLUMN scanner.risk_state_snapshots.state_hash IS
    'SHA-256 of normalized state for deterministic change detection';
COMMENT ON COLUMN scanner.material_risk_changes.changes IS
    'JSONB array of DetectedChange records';
COMMENT ON COLUMN scanner.vex_candidates.evidence_links IS
    'JSONB array of EvidenceLink records with type, uri, digest';
@@ -0,0 +1,244 @@
using System.Collections.Immutable;
using System.Text.Json;
using Dapper;
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Scanner.SmartDiff.Detection;

namespace StellaOps.Scanner.Storage.Postgres;

/// <summary>
/// PostgreSQL implementation of IMaterialRiskChangeRepository.
/// Per Sprint 3500.3 - Smart-Diff Detection Rules.
/// </summary>
public sealed class PostgresMaterialRiskChangeRepository : IMaterialRiskChangeRepository
{
    private readonly ScannerDataSource _dataSource;
    private readonly ILogger<PostgresMaterialRiskChangeRepository> _logger;
    private static readonly JsonSerializerOptions JsonOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase
    };

    public PostgresMaterialRiskChangeRepository(
        ScannerDataSource dataSource,
        ILogger<PostgresMaterialRiskChangeRepository> logger)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    public async Task StoreChangeAsync(MaterialRiskChangeResult change, string scanId, CancellationToken ct = default)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(ct);
        await InsertChangeAsync(connection, change, scanId, ct);
    }

    public async Task StoreChangesAsync(IReadOnlyList<MaterialRiskChangeResult> changes, string scanId, CancellationToken ct = default)
    {
        if (changes.Count == 0)
            return;

        await using var connection = await _dataSource.OpenConnectionAsync(ct);
        await using var transaction = await connection.BeginTransactionAsync(ct);

        try
        {
            foreach (var change in changes)
            {
                await InsertChangeAsync(connection, change, scanId, ct, transaction);
            }

            await transaction.CommitAsync(ct);
            _logger.LogDebug("Stored {Count} material risk changes for scan {ScanId}", changes.Count, scanId);
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Failed to store material risk changes for scan {ScanId}", scanId);
            await transaction.RollbackAsync(ct);
            throw;
        }
    }

    public async Task<IReadOnlyList<MaterialRiskChangeResult>> GetChangesForScanAsync(string scanId, CancellationToken ct = default)
    {
        const string sql = """
            SELECT
                vuln_id, purl, has_material_change, priority_score,
                previous_state_hash, current_state_hash, changes
            FROM scanner.material_risk_changes
            WHERE scan_id = @ScanId
            ORDER BY priority_score DESC
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(ct);
        var rows = await connection.QueryAsync<MaterialRiskChangeRow>(sql, new { ScanId = scanId });

        return rows.Select(r => r.ToResult()).ToList();
    }

    public async Task<IReadOnlyList<MaterialRiskChangeResult>> GetChangesForFindingAsync(
        FindingKey findingKey,
        int limit = 10,
        CancellationToken ct = default)
    {
        const string sql = """
            SELECT
                vuln_id, purl, has_material_change, priority_score,
                previous_state_hash, current_state_hash, changes
            FROM scanner.material_risk_changes
            WHERE vuln_id = @VulnId AND purl = @Purl
            ORDER BY detected_at DESC
            LIMIT @Limit
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(ct);
        var rows = await connection.QueryAsync<MaterialRiskChangeRow>(sql, new
        {
            VulnId = findingKey.VulnId,
            Purl = findingKey.Purl,
            Limit = limit
        });

        return rows.Select(r => r.ToResult()).ToList();
    }

    public async Task<MaterialRiskChangeQueryResult> QueryChangesAsync(
        MaterialRiskChangeQuery query,
        CancellationToken ct = default)
    {
        var conditions = new List<string> { "has_material_change = TRUE" };
        var parameters = new DynamicParameters();

        if (!string.IsNullOrEmpty(query.ImageDigest))
        {
            // Would need a join with scan metadata for image filtering;
            // for now, this filter is intentionally skipped.
        }

        if (query.Since.HasValue)
        {
            conditions.Add("detected_at >= @Since");
            parameters.Add("Since", query.Since.Value);
        }

        if (query.Until.HasValue)
        {
            conditions.Add("detected_at <= @Until");
            parameters.Add("Until", query.Until.Value);
        }

        if (query.MinPriorityScore.HasValue)
        {
            conditions.Add("priority_score >= @MinPriority");
            parameters.Add("MinPriority", query.MinPriorityScore.Value);
        }

        var whereClause = string.Join(" AND ", conditions);

        // Count query
        var countSql = $"SELECT COUNT(*) FROM scanner.material_risk_changes WHERE {whereClause}";

        // Data query
        var dataSql = $"""
            SELECT
                vuln_id, purl, has_material_change, priority_score,
                previous_state_hash, current_state_hash, changes
            FROM scanner.material_risk_changes
            WHERE {whereClause}
            ORDER BY priority_score DESC
            OFFSET @Offset LIMIT @Limit
            """;

        parameters.Add("Offset", query.Offset);
        parameters.Add("Limit", query.Limit);

        await using var connection = await _dataSource.OpenConnectionAsync(ct);

        var totalCount = await connection.ExecuteScalarAsync<int>(countSql, parameters);
        var rows = await connection.QueryAsync<MaterialRiskChangeRow>(dataSql, parameters);

        var changes = rows.Select(r => r.ToResult()).ToImmutableArray();

        return new MaterialRiskChangeQueryResult(
            Changes: changes,
            TotalCount: totalCount,
            Offset: query.Offset,
            Limit: query.Limit);
    }

    private static async Task InsertChangeAsync(
        NpgsqlConnection connection,
        MaterialRiskChangeResult change,
        string scanId,
        CancellationToken ct,
        NpgsqlTransaction? transaction = null)
    {
        const string sql = """
            INSERT INTO scanner.material_risk_changes (
                tenant_id, vuln_id, purl, scan_id,
                has_material_change, priority_score,
                previous_state_hash, current_state_hash, changes
            ) VALUES (
                @TenantId, @VulnId, @Purl, @ScanId,
                @HasMaterialChange, @PriorityScore,
                @PreviousStateHash, @CurrentStateHash, @Changes::jsonb
            )
            ON CONFLICT (tenant_id, scan_id, vuln_id, purl) DO UPDATE SET
                has_material_change = EXCLUDED.has_material_change,
                priority_score = EXCLUDED.priority_score,
                previous_state_hash = EXCLUDED.previous_state_hash,
                current_state_hash = EXCLUDED.current_state_hash,
                changes = EXCLUDED.changes
            """;

        var tenantId = GetCurrentTenantId();
        var changesJson = JsonSerializer.Serialize(change.Changes, JsonOptions);

        await connection.ExecuteAsync(new CommandDefinition(sql, new
        {
            TenantId = tenantId,
            VulnId = change.FindingKey.VulnId,
            Purl = change.FindingKey.Purl,
            ScanId = scanId,
            HasMaterialChange = change.HasMaterialChange,
            PriorityScore = change.PriorityScore,
            PreviousStateHash = change.PreviousStateHash,
            CurrentStateHash = change.CurrentStateHash,
            Changes = changesJson
        }, transaction: transaction, cancellationToken: ct));
    }

    private static Guid GetCurrentTenantId()
    {
        // In production, this would come from the current tenant context;
        // for now, return the same default tenant as the other repositories.
        return Guid.Parse("00000000-0000-0000-0000-000000000001");
    }

    /// <summary>
    /// Row mapping class for Dapper.
    /// </summary>
    private sealed class MaterialRiskChangeRow
    {
        public string vuln_id { get; set; } = "";
        public string purl { get; set; } = "";
        public bool has_material_change { get; set; }
        public decimal priority_score { get; set; }
        public string previous_state_hash { get; set; } = "";
        public string current_state_hash { get; set; } = "";
        public string changes { get; set; } = "[]";

        public MaterialRiskChangeResult ToResult()
        {
            var detectedChanges = JsonSerializer.Deserialize<List<DetectedChange>>(changes, JsonOptions)
                ?? [];

            return new MaterialRiskChangeResult(
                FindingKey: new FindingKey(vuln_id, purl),
                HasMaterialChange: has_material_change,
                Changes: [.. detectedChanges],
                PriorityScore: (int)priority_score,
                PreviousStateHash: previous_state_hash,
                CurrentStateHash: current_state_hash);
        }
    }
}
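
As a usage sketch for the repository above — `dataSource`, `logger`, `results`, `ct`, and the object-initializer shape of `MaterialRiskChangeQuery` are assumptions for illustration, not taken from the committed code:

// Sketch: persist detector output for one scan, then page material changes back.
var repository = new PostgresMaterialRiskChangeRepository(dataSource, logger);
await repository.StoreChangesAsync(results, scanId: "scan-curr-001", ct);

var page = await repository.QueryChangesAsync(
    new MaterialRiskChangeQuery { MinPriorityScore = 100, Offset = 0, Limit = 50 },
    ct);
Console.WriteLine($"{page.TotalCount} material changes, showing {page.Changes.Length}");
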
@@ -0,0 +1,261 @@
using System.Collections.Immutable;
using System.Data;
using System.Text.Json;
using Dapper;
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Scanner.SmartDiff.Detection;

namespace StellaOps.Scanner.Storage.Postgres;

/// <summary>
/// PostgreSQL implementation of IRiskStateRepository.
/// Per Sprint 3500.3 - Smart-Diff Detection Rules.
/// </summary>
public sealed class PostgresRiskStateRepository : IRiskStateRepository
{
    private readonly ScannerDataSource _dataSource;
    private readonly ILogger<PostgresRiskStateRepository> _logger;

    public PostgresRiskStateRepository(
        ScannerDataSource dataSource,
        ILogger<PostgresRiskStateRepository> logger)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    public async Task StoreSnapshotAsync(RiskStateSnapshot snapshot, CancellationToken ct = default)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(ct);
        await InsertSnapshotAsync(connection, snapshot, ct);
    }

    public async Task StoreSnapshotsAsync(IReadOnlyList<RiskStateSnapshot> snapshots, CancellationToken ct = default)
    {
        if (snapshots.Count == 0)
            return;

        await using var connection = await _dataSource.OpenConnectionAsync(ct);
        await using var transaction = await connection.BeginTransactionAsync(ct);

        try
        {
            foreach (var snapshot in snapshots)
            {
                await InsertSnapshotAsync(connection, snapshot, ct, transaction);
            }

            await transaction.CommitAsync(ct);
        }
        catch
        {
            await transaction.RollbackAsync(ct);
            throw;
        }
    }

    public async Task<RiskStateSnapshot?> GetLatestSnapshotAsync(FindingKey findingKey, CancellationToken ct = default)
    {
        const string sql = """
            SELECT
                vuln_id, purl, scan_id, captured_at,
                reachable, lattice_state, vex_status::TEXT, in_affected_range,
                kev, epss_score, policy_flags, policy_decision::TEXT, state_hash
            FROM scanner.risk_state_snapshots
            WHERE vuln_id = @VulnId AND purl = @Purl
            ORDER BY captured_at DESC
            LIMIT 1
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(ct);
        var row = await connection.QuerySingleOrDefaultAsync<RiskStateRow>(sql, new
        {
            VulnId = findingKey.VulnId,
            Purl = findingKey.Purl
        });

        return row?.ToSnapshot();
    }

    public async Task<IReadOnlyList<RiskStateSnapshot>> GetSnapshotsForScanAsync(string scanId, CancellationToken ct = default)
    {
        const string sql = """
            SELECT
                vuln_id, purl, scan_id, captured_at,
                reachable, lattice_state, vex_status::TEXT, in_affected_range,
                kev, epss_score, policy_flags, policy_decision::TEXT, state_hash
            FROM scanner.risk_state_snapshots
            WHERE scan_id = @ScanId
            ORDER BY vuln_id, purl
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(ct);
        var rows = await connection.QueryAsync<RiskStateRow>(sql, new { ScanId = scanId });

        return rows.Select(r => r.ToSnapshot()).ToList();
    }

    public async Task<IReadOnlyList<RiskStateSnapshot>> GetSnapshotHistoryAsync(
        FindingKey findingKey,
        int limit = 10,
        CancellationToken ct = default)
    {
        const string sql = """
            SELECT
                vuln_id, purl, scan_id, captured_at,
                reachable, lattice_state, vex_status::TEXT, in_affected_range,
                kev, epss_score, policy_flags, policy_decision::TEXT, state_hash
            FROM scanner.risk_state_snapshots
            WHERE vuln_id = @VulnId AND purl = @Purl
            ORDER BY captured_at DESC
            LIMIT @Limit
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(ct);
        var rows = await connection.QueryAsync<RiskStateRow>(sql, new
        {
            VulnId = findingKey.VulnId,
            Purl = findingKey.Purl,
            Limit = limit
        });

        return rows.Select(r => r.ToSnapshot()).ToList();
    }

    public async Task<IReadOnlyList<RiskStateSnapshot>> GetSnapshotsByHashAsync(string stateHash, CancellationToken ct = default)
    {
        const string sql = """
            SELECT
                vuln_id, purl, scan_id, captured_at,
                reachable, lattice_state, vex_status::TEXT, in_affected_range,
                kev, epss_score, policy_flags, policy_decision::TEXT, state_hash
            FROM scanner.risk_state_snapshots
            WHERE state_hash = @StateHash
            ORDER BY captured_at DESC
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(ct);
        var rows = await connection.QueryAsync<RiskStateRow>(sql, new { StateHash = stateHash });

        return rows.Select(r => r.ToSnapshot()).ToList();
    }

    private static async Task InsertSnapshotAsync(
        NpgsqlConnection connection,
        RiskStateSnapshot snapshot,
        CancellationToken ct,
        NpgsqlTransaction? transaction = null)
    {
        const string sql = """
            INSERT INTO scanner.risk_state_snapshots (
                tenant_id, vuln_id, purl, scan_id, captured_at,
                reachable, lattice_state, vex_status, in_affected_range,
                kev, epss_score, policy_flags, policy_decision, state_hash
            ) VALUES (
                @TenantId, @VulnId, @Purl, @ScanId, @CapturedAt,
                @Reachable, @LatticeState, @VexStatus::scanner.vex_status_type, @InAffectedRange,
                @Kev, @EpssScore, @PolicyFlags, @PolicyDecision::scanner.policy_decision_type, @StateHash
            )
            ON CONFLICT (tenant_id, scan_id, vuln_id, purl) DO UPDATE SET
                reachable = EXCLUDED.reachable,
                lattice_state = EXCLUDED.lattice_state,
                vex_status = EXCLUDED.vex_status,
                in_affected_range = EXCLUDED.in_affected_range,
                kev = EXCLUDED.kev,
                epss_score = EXCLUDED.epss_score,
                policy_flags = EXCLUDED.policy_flags,
                policy_decision = EXCLUDED.policy_decision,
                state_hash = EXCLUDED.state_hash
            """;

        var tenantId = GetCurrentTenantId();

        await connection.ExecuteAsync(new CommandDefinition(sql, new
        {
            TenantId = tenantId,
            VulnId = snapshot.FindingKey.VulnId,
            Purl = snapshot.FindingKey.Purl,
            ScanId = snapshot.ScanId,
            CapturedAt = snapshot.CapturedAt,
            Reachable = snapshot.Reachable,
            LatticeState = snapshot.LatticeState,
            VexStatus = snapshot.VexStatus.ToString().ToLowerInvariant(),
            InAffectedRange = snapshot.InAffectedRange,
            Kev = snapshot.Kev,
            EpssScore = snapshot.EpssScore,
            PolicyFlags = snapshot.PolicyFlags.ToArray(),
            PolicyDecision = snapshot.PolicyDecision?.ToString().ToLowerInvariant(),
            StateHash = snapshot.ComputeStateHash()
        }, transaction: transaction, cancellationToken: ct));
    }

    private static Guid GetCurrentTenantId()
    {
        // In production, this would come from the current context
        // For now, return a default tenant ID
        return Guid.Parse("00000000-0000-0000-0000-000000000001");
    }

    /// <summary>
    /// Row mapping class for Dapper.
    /// </summary>
    private sealed class RiskStateRow
    {
        public string vuln_id { get; set; } = "";
        public string purl { get; set; } = "";
        public string scan_id { get; set; } = "";
        public DateTimeOffset captured_at { get; set; }
        public bool? reachable { get; set; }
        public string? lattice_state { get; set; }
        public string vex_status { get; set; } = "unknown";
        public bool? in_affected_range { get; set; }
        public bool kev { get; set; }
        public decimal? epss_score { get; set; }
        public string[]? policy_flags { get; set; }
        public string? policy_decision { get; set; }
        public string state_hash { get; set; } = "";

        public RiskStateSnapshot ToSnapshot()
        {
            return new RiskStateSnapshot(
                FindingKey: new FindingKey(vuln_id, purl),
                ScanId: scan_id,
                CapturedAt: captured_at,
                Reachable: reachable,
                LatticeState: lattice_state,
                VexStatus: ParseVexStatus(vex_status),
                InAffectedRange: in_affected_range,
                Kev: kev,
                EpssScore: epss_score.HasValue ? (double)epss_score.Value : null,
                PolicyFlags: policy_flags?.ToImmutableArray() ?? [],
                PolicyDecision: ParsePolicyDecision(policy_decision));
        }

        private static VexStatusType ParseVexStatus(string value)
        {
            return value.ToLowerInvariant() switch
            {
                "affected" => VexStatusType.Affected,
                "not_affected" => VexStatusType.NotAffected,
                "fixed" => VexStatusType.Fixed,
                "under_investigation" => VexStatusType.UnderInvestigation,
                _ => VexStatusType.Unknown
            };
        }

        private static PolicyDecisionType? ParsePolicyDecision(string? value)
        {
            if (string.IsNullOrEmpty(value))
                return null;

            return value.ToLowerInvariant() switch
            {
                "allow" => PolicyDecisionType.Allow,
                "warn" => PolicyDecisionType.Warn,
                "block" => PolicyDecisionType.Block,
                _ => null
            };
        }
    }
}
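
A rough consumer sketch under the same assumptions (ambient `dataSource`, `logger`, a `MaterialRiskChangeDetector`, and the current scan's snapshots): compare against the latest stored snapshot before writing the new ones, so the diff sees the previous state.

// Sketch: diff each finding against its last stored state, then persist.
var store = new PostgresRiskStateRepository(dataSource, logger);
foreach (var current in currentSnapshots)
{
    var previous = await store.GetLatestSnapshotAsync(current.FindingKey, ct);
    if (previous is not null)
    {
        var diff = detector.Compare(previous, current);
        // ... forward diff to IMaterialRiskChangeRepository when it is material
    }
}
await store.StoreSnapshotsAsync(currentSnapshots, ct);
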
@@ -0,0 +1,268 @@
using System.Collections.Immutable;
using System.Text.Json;
using Dapper;
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Scanner.SmartDiff.Detection;

namespace StellaOps.Scanner.Storage.Postgres;

/// <summary>
/// PostgreSQL implementation of IVexCandidateStore.
/// Per Sprint 3500.3 - Smart-Diff Detection Rules.
/// </summary>
public sealed class PostgresVexCandidateStore : IVexCandidateStore
{
    private readonly ScannerDataSource _dataSource;
    private readonly ILogger<PostgresVexCandidateStore> _logger;
    private static readonly JsonSerializerOptions JsonOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase
    };

    public PostgresVexCandidateStore(
        ScannerDataSource dataSource,
        ILogger<PostgresVexCandidateStore> logger)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    public async Task StoreCandidatesAsync(IReadOnlyList<VexCandidate> candidates, CancellationToken ct = default)
    {
        if (candidates.Count == 0)
            return;

        await using var connection = await _dataSource.OpenConnectionAsync(ct);
        await using var transaction = await connection.BeginTransactionAsync(ct);

        try
        {
            foreach (var candidate in candidates)
            {
                await InsertCandidateAsync(connection, candidate, ct, transaction);
            }

            await transaction.CommitAsync(ct);
            _logger.LogDebug("Stored {Count} VEX candidates", candidates.Count);
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Failed to store VEX candidates");
            await transaction.RollbackAsync(ct);
            throw;
        }
    }

    public async Task<IReadOnlyList<VexCandidate>> GetCandidatesAsync(string imageDigest, CancellationToken ct = default)
    {
        const string sql = """
            SELECT
                candidate_id, vuln_id, purl, image_digest,
                suggested_status::TEXT, justification::TEXT, rationale,
                evidence_links, confidence, generated_at, expires_at,
                requires_review, review_action::TEXT, reviewed_by, reviewed_at, review_comment
            FROM scanner.vex_candidates
            WHERE image_digest = @ImageDigest
            ORDER BY confidence DESC
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(ct);
        var rows = await connection.QueryAsync<VexCandidateRow>(sql, new { ImageDigest = imageDigest });

        return rows.Select(r => r.ToCandidate()).ToList();
    }

    public async Task<VexCandidate?> GetCandidateAsync(string candidateId, CancellationToken ct = default)
    {
        const string sql = """
            SELECT
                candidate_id, vuln_id, purl, image_digest,
                suggested_status::TEXT, justification::TEXT, rationale,
                evidence_links, confidence, generated_at, expires_at,
                requires_review, review_action::TEXT, reviewed_by, reviewed_at, review_comment
            FROM scanner.vex_candidates
            WHERE candidate_id = @CandidateId
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(ct);
        var row = await connection.QuerySingleOrDefaultAsync<VexCandidateRow>(sql, new { CandidateId = candidateId });

        return row?.ToCandidate();
    }

    public async Task<bool> ReviewCandidateAsync(string candidateId, VexCandidateReview review, CancellationToken ct = default)
    {
        const string sql = """
            UPDATE scanner.vex_candidates SET
                requires_review = FALSE,
                review_action = @ReviewAction::scanner.vex_review_action,
                reviewed_by = @ReviewedBy,
                reviewed_at = @ReviewedAt,
                review_comment = @ReviewComment
            WHERE candidate_id = @CandidateId
            """;

        await using var connection = await _dataSource.OpenConnectionAsync(ct);
        var affected = await connection.ExecuteAsync(sql, new
        {
            CandidateId = candidateId,
            ReviewAction = review.Action.ToString().ToLowerInvariant(),
            ReviewedBy = review.Reviewer,
            ReviewedAt = review.ReviewedAt,
            ReviewComment = review.Comment
        });

        if (affected > 0)
        {
            _logger.LogInformation("Reviewed VEX candidate {CandidateId} with action {Action}",
                candidateId, review.Action);
        }

        return affected > 0;
    }

    private static async Task InsertCandidateAsync(
        NpgsqlConnection connection,
        VexCandidate candidate,
        CancellationToken ct,
        NpgsqlTransaction? transaction = null)
    {
        const string sql = """
            INSERT INTO scanner.vex_candidates (
                tenant_id, candidate_id, vuln_id, purl, image_digest,
                suggested_status, justification, rationale,
                evidence_links, confidence, generated_at, expires_at, requires_review
            ) VALUES (
                @TenantId, @CandidateId, @VulnId, @Purl, @ImageDigest,
                @SuggestedStatus::scanner.vex_status_type, @Justification::scanner.vex_justification, @Rationale,
                @EvidenceLinks::jsonb, @Confidence, @GeneratedAt, @ExpiresAt, @RequiresReview
            )
            ON CONFLICT (candidate_id) DO UPDATE SET
                suggested_status = EXCLUDED.suggested_status,
                justification = EXCLUDED.justification,
                rationale = EXCLUDED.rationale,
                evidence_links = EXCLUDED.evidence_links,
                confidence = EXCLUDED.confidence,
                expires_at = EXCLUDED.expires_at
            """;

        var tenantId = GetCurrentTenantId();
        var evidenceLinksJson = JsonSerializer.Serialize(candidate.EvidenceLinks, JsonOptions);

        await connection.ExecuteAsync(new CommandDefinition(sql, new
        {
            TenantId = tenantId,
            CandidateId = candidate.CandidateId,
            VulnId = candidate.FindingKey.VulnId,
            Purl = candidate.FindingKey.Purl,
            ImageDigest = candidate.ImageDigest,
            SuggestedStatus = MapVexStatus(candidate.SuggestedStatus),
            Justification = MapJustification(candidate.Justification),
            Rationale = candidate.Rationale,
            EvidenceLinks = evidenceLinksJson,
            Confidence = candidate.Confidence,
            GeneratedAt = candidate.GeneratedAt,
            ExpiresAt = candidate.ExpiresAt,
            RequiresReview = candidate.RequiresReview
        }, transaction: transaction, cancellationToken: ct));
    }

    private static string MapVexStatus(VexStatusType status)
    {
        return status switch
        {
            VexStatusType.Affected => "affected",
            VexStatusType.NotAffected => "not_affected",
            VexStatusType.Fixed => "fixed",
            VexStatusType.UnderInvestigation => "under_investigation",
            _ => "unknown"
        };
    }

    private static string MapJustification(VexJustification justification)
    {
        return justification switch
        {
            VexJustification.ComponentNotPresent => "component_not_present",
            VexJustification.VulnerableCodeNotPresent => "vulnerable_code_not_present",
            VexJustification.VulnerableCodeNotInExecutePath => "vulnerable_code_not_in_execute_path",
            VexJustification.VulnerableCodeCannotBeControlledByAdversary => "vulnerable_code_cannot_be_controlled_by_adversary",
            VexJustification.InlineMitigationsAlreadyExist => "inline_mitigations_already_exist",
            _ => "vulnerable_code_not_present"
        };
    }

    private static Guid GetCurrentTenantId()
    {
        // In production, this would come from the current context
        return Guid.Parse("00000000-0000-0000-0000-000000000001");
    }

    /// <summary>
    /// Row mapping class for Dapper.
    /// </summary>
    private sealed class VexCandidateRow
    {
        public string candidate_id { get; set; } = "";
        public string vuln_id { get; set; } = "";
        public string purl { get; set; } = "";
        public string image_digest { get; set; } = "";
        public string suggested_status { get; set; } = "not_affected";
        public string justification { get; set; } = "vulnerable_code_not_present";
        public string rationale { get; set; } = "";
        public string evidence_links { get; set; } = "[]";
        public decimal confidence { get; set; }
        public DateTimeOffset generated_at { get; set; }
        public DateTimeOffset expires_at { get; set; }
        public bool requires_review { get; set; }
        public string? review_action { get; set; }
        public string? reviewed_by { get; set; }
        public DateTimeOffset? reviewed_at { get; set; }
        public string? review_comment { get; set; }

        public VexCandidate ToCandidate()
        {
            var links = JsonSerializer.Deserialize<List<EvidenceLink>>(evidence_links, JsonOptions)
                ?? [];

            return new VexCandidate(
                CandidateId: candidate_id,
                FindingKey: new FindingKey(vuln_id, purl),
                SuggestedStatus: ParseVexStatus(suggested_status),
                Justification: ParseJustification(justification),
                Rationale: rationale,
                EvidenceLinks: [.. links],
                Confidence: (double)confidence,
                ImageDigest: image_digest,
                GeneratedAt: generated_at,
                ExpiresAt: expires_at,
                RequiresReview: requires_review);
        }

        private static VexStatusType ParseVexStatus(string value)
        {
            return value.ToLowerInvariant() switch
            {
                "affected" => VexStatusType.Affected,
                "not_affected" => VexStatusType.NotAffected,
                "fixed" => VexStatusType.Fixed,
                "under_investigation" => VexStatusType.UnderInvestigation,
                _ => VexStatusType.Unknown
            };
        }

        private static VexJustification ParseJustification(string value)
        {
            return value.ToLowerInvariant() switch
            {
                "component_not_present" => VexJustification.ComponentNotPresent,
                "vulnerable_code_not_present" => VexJustification.VulnerableCodeNotPresent,
                "vulnerable_code_not_in_execute_path" => VexJustification.VulnerableCodeNotInExecutePath,
                "vulnerable_code_cannot_be_controlled_by_adversary" => VexJustification.VulnerableCodeCannotBeControlledByAdversary,
                "inline_mitigations_already_exist" => VexJustification.InlineMitigationsAlreadyExist,
                _ => VexJustification.VulnerableCodeNotPresent
            };
        }
    }
}
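
A sketch of the review round-trip against the store above — the `VexReviewAction.Approve` enum value, reviewer string, and the positional shape of `VexCandidateReview` are guesses inferred from the update statement, not confirmed API:

// Sketch: approve the top pending candidate for an image digest.
var store = new PostgresVexCandidateStore(dataSource, logger);
var candidates = await store.GetCandidatesAsync("sha256:...", ct);
var top = candidates.FirstOrDefault(c => c.RequiresReview);
if (top is not null)
{
    await store.ReviewCandidateAsync(
        top.CandidateId,
        new VexCandidateReview(
            Action: VexReviewAction.Approve,
            Reviewer: "analyst@example.com",
            ReviewedAt: DateTimeOffset.UtcNow,
            Comment: "Vulnerable API confirmed absent"),
        ct);
}
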
@@ -0,0 +1,472 @@
{
  "$schema": "https://stellaops.io/schemas/smart-diff/v1/state-comparison.json",
  "version": "1.0.0",
  "description": "Golden fixtures for Smart-Diff state comparison determinism testing",
  "testCases": [
    {
      "id": "R1-001",
      "name": "Reachability flip: unreachable to reachable",
      "rule": "R1_ReachabilityFlip",
      "previous": {
        "findingKey": {
          "vulnId": "CVE-2024-1234",
          "purl": "pkg:npm/lodash@4.17.20"
        },
        "scanId": "scan-prev-001",
        "capturedAt": "2024-12-01T10:00:00Z",
        "reachable": false,
        "latticeState": "SU",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.05,
        "policyFlags": [],
        "policyDecision": null
      },
      "current": {
        "findingKey": {
          "vulnId": "CVE-2024-1234",
          "purl": "pkg:npm/lodash@4.17.20"
        },
        "scanId": "scan-curr-001",
        "capturedAt": "2024-12-15T10:00:00Z",
        "reachable": true,
        "latticeState": "CR",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.05,
        "policyFlags": [],
        "policyDecision": null
      },
      "expected": {
        "hasMaterialChange": true,
        "direction": "increased",
        "changeType": "reachability_flip",
        "priorityScoreContribution": 500
      }
    },
    {
      "id": "R1-002",
      "name": "Reachability flip: reachable to unreachable",
      "rule": "R1_ReachabilityFlip",
      "previous": {
        "findingKey": {
          "vulnId": "CVE-2024-5678",
          "purl": "pkg:pypi/requests@2.28.0"
        },
        "scanId": "scan-prev-002",
        "capturedAt": "2024-12-01T10:00:00Z",
        "reachable": true,
        "latticeState": "CR",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.10,
        "policyFlags": [],
        "policyDecision": null
      },
      "current": {
        "findingKey": {
          "vulnId": "CVE-2024-5678",
          "purl": "pkg:pypi/requests@2.28.0"
        },
        "scanId": "scan-curr-002",
        "capturedAt": "2024-12-15T10:00:00Z",
        "reachable": false,
        "latticeState": "CU",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.10,
        "policyFlags": [],
        "policyDecision": null
      },
      "expected": {
        "hasMaterialChange": true,
        "direction": "decreased",
        "changeType": "reachability_flip",
        "priorityScoreContribution": 500
      }
    },
    {
      "id": "R2-001",
      "name": "VEX flip: affected to not_affected",
      "rule": "R2_VexFlip",
      "previous": {
        "findingKey": {
          "vulnId": "CVE-2024-9999",
          "purl": "pkg:maven/org.example/core@1.0.0"
        },
        "scanId": "scan-prev-003",
        "capturedAt": "2024-12-01T10:00:00Z",
        "reachable": true,
        "latticeState": "SR",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.02,
        "policyFlags": [],
        "policyDecision": null
      },
      "current": {
        "findingKey": {
          "vulnId": "CVE-2024-9999",
          "purl": "pkg:maven/org.example/core@1.0.0"
        },
        "scanId": "scan-curr-003",
        "capturedAt": "2024-12-15T10:00:00Z",
        "reachable": true,
        "latticeState": "SR",
        "vexStatus": "not_affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.02,
        "policyFlags": [],
        "policyDecision": null
      },
      "expected": {
        "hasMaterialChange": true,
        "direction": "decreased",
        "changeType": "vex_flip",
        "priorityScoreContribution": 150
      }
    },
    {
      "id": "R2-002",
      "name": "VEX flip: not_affected to affected",
      "rule": "R2_VexFlip",
      "previous": {
        "findingKey": {
          "vulnId": "CVE-2024-8888",
          "purl": "pkg:golang/github.com/example/pkg@v1.2.3"
        },
        "scanId": "scan-prev-004",
        "capturedAt": "2024-12-01T10:00:00Z",
        "reachable": true,
        "latticeState": "SR",
        "vexStatus": "not_affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.03,
        "policyFlags": [],
        "policyDecision": null
      },
      "current": {
        "findingKey": {
          "vulnId": "CVE-2024-8888",
          "purl": "pkg:golang/github.com/example/pkg@v1.2.3"
        },
        "scanId": "scan-curr-004",
        "capturedAt": "2024-12-15T10:00:00Z",
        "reachable": true,
        "latticeState": "SR",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.03,
        "policyFlags": [],
        "policyDecision": null
      },
      "expected": {
        "hasMaterialChange": true,
        "direction": "increased",
        "changeType": "vex_flip",
        "priorityScoreContribution": 150
      }
    },
    {
      "id": "R3-001",
      "name": "Range boundary: exits affected range",
      "rule": "R3_RangeBoundary",
      "previous": {
        "findingKey": {
          "vulnId": "CVE-2024-7777",
          "purl": "pkg:npm/express@4.17.0"
        },
        "scanId": "scan-prev-005",
        "capturedAt": "2024-12-01T10:00:00Z",
        "reachable": true,
        "latticeState": "SR",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.04,
        "policyFlags": [],
        "policyDecision": null
      },
      "current": {
        "findingKey": {
          "vulnId": "CVE-2024-7777",
          "purl": "pkg:npm/express@4.18.0"
        },
        "scanId": "scan-curr-005",
        "capturedAt": "2024-12-15T10:00:00Z",
        "reachable": true,
        "latticeState": "SR",
        "vexStatus": "affected",
        "inAffectedRange": false,
        "kev": false,
        "epssScore": 0.04,
        "policyFlags": [],
        "policyDecision": null
      },
      "expected": {
        "hasMaterialChange": true,
        "direction": "decreased",
        "changeType": "range_boundary",
        "priorityScoreContribution": 200
      }
    },
    {
      "id": "R4-001",
      "name": "KEV added",
      "rule": "R4_IntelligenceFlip",
      "previous": {
        "findingKey": {
          "vulnId": "CVE-2024-6666",
          "purl": "pkg:npm/axios@0.21.0"
        },
        "scanId": "scan-prev-006",
        "capturedAt": "2024-12-01T10:00:00Z",
        "reachable": true,
        "latticeState": "SR",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.08,
        "policyFlags": [],
        "policyDecision": null
      },
      "current": {
        "findingKey": {
          "vulnId": "CVE-2024-6666",
          "purl": "pkg:npm/axios@0.21.0"
        },
        "scanId": "scan-curr-006",
        "capturedAt": "2024-12-15T10:00:00Z",
        "reachable": true,
        "latticeState": "SR",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": true,
        "epssScore": 0.45,
        "policyFlags": [],
        "policyDecision": null
      },
      "expected": {
        "hasMaterialChange": true,
        "direction": "increased",
        "changeType": "kev_added",
        "priorityScoreContribution": 1000
      }
    },
    {
      "id": "R4-002",
      "name": "EPSS crosses threshold (0.1)",
      "rule": "R4_IntelligenceFlip",
      "previous": {
        "findingKey": {
          "vulnId": "CVE-2024-5555",
          "purl": "pkg:pypi/django@3.2.0"
        },
        "scanId": "scan-prev-007",
        "capturedAt": "2024-12-01T10:00:00Z",
        "reachable": true,
        "latticeState": "SR",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.05,
        "policyFlags": [],
        "policyDecision": null
      },
      "current": {
        "findingKey": {
          "vulnId": "CVE-2024-5555",
          "purl": "pkg:pypi/django@3.2.0"
        },
        "scanId": "scan-curr-007",
        "capturedAt": "2024-12-15T10:00:00Z",
        "reachable": true,
        "latticeState": "SR",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.15,
        "policyFlags": [],
        "policyDecision": null
      },
      "expected": {
        "hasMaterialChange": true,
        "direction": "increased",
        "changeType": "epss_threshold",
        "priorityScoreContribution": 0
      }
    },
    {
      "id": "R4-003",
      "name": "Policy flip: allow to block",
      "rule": "R4_IntelligenceFlip",
      "previous": {
        "findingKey": {
          "vulnId": "CVE-2024-4444",
          "purl": "pkg:npm/moment@2.29.0"
        },
        "scanId": "scan-prev-008",
        "capturedAt": "2024-12-01T10:00:00Z",
        "reachable": true,
        "latticeState": "SR",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.06,
        "policyFlags": [],
        "policyDecision": "allow"
      },
      "current": {
        "findingKey": {
          "vulnId": "CVE-2024-4444",
          "purl": "pkg:npm/moment@2.29.0"
        },
        "scanId": "scan-curr-008",
        "capturedAt": "2024-12-15T10:00:00Z",
        "reachable": true,
        "latticeState": "SR",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.06,
        "policyFlags": ["HIGH_SEVERITY"],
        "policyDecision": "block"
      },
      "expected": {
        "hasMaterialChange": true,
        "direction": "increased",
        "changeType": "policy_flip",
        "priorityScoreContribution": 300
      }
    },
    {
      "id": "MULTI-001",
      "name": "Multiple changes: KEV + reachability flip",
      "rule": "Multiple",
      "previous": {
        "findingKey": {
          "vulnId": "CVE-2024-3333",
          "purl": "pkg:npm/jquery@3.5.0"
        },
        "scanId": "scan-prev-009",
        "capturedAt": "2024-12-01T10:00:00Z",
        "reachable": false,
        "latticeState": "SU",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.07,
        "policyFlags": [],
        "policyDecision": null
      },
      "current": {
        "findingKey": {
          "vulnId": "CVE-2024-3333",
          "purl": "pkg:npm/jquery@3.5.0"
        },
        "scanId": "scan-curr-009",
        "capturedAt": "2024-12-15T10:00:00Z",
        "reachable": true,
        "latticeState": "CR",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": true,
        "epssScore": 0.35,
        "policyFlags": [],
        "policyDecision": null
      },
      "expected": {
        "hasMaterialChange": true,
        "direction": "increased",
        "changeCount": 2,
        "totalPriorityScore": 1500
      }
    },
    {
      "id": "NO-CHANGE-001",
      "name": "No material change - identical states",
      "rule": "None",
      "previous": {
        "findingKey": {
          "vulnId": "CVE-2024-2222",
          "purl": "pkg:npm/underscore@1.13.0"
        },
        "scanId": "scan-prev-010",
        "capturedAt": "2024-12-01T10:00:00Z",
        "reachable": true,
        "latticeState": "SR",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.02,
        "policyFlags": [],
        "policyDecision": null
      },
      "current": {
        "findingKey": {
          "vulnId": "CVE-2024-2222",
          "purl": "pkg:npm/underscore@1.13.0"
        },
        "scanId": "scan-curr-010",
        "capturedAt": "2024-12-15T10:00:00Z",
        "reachable": true,
        "latticeState": "SR",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.02,
        "policyFlags": [],
        "policyDecision": null
      },
      "expected": {
        "hasMaterialChange": false,
        "changeCount": 0,
        "totalPriorityScore": 0
      }
    }
  ],
  "stateHashTestCases": [
    {
      "id": "HASH-001",
      "name": "State hash determinism - same input produces same hash",
      "state": {
        "findingKey": {
          "vulnId": "CVE-2024-1111",
          "purl": "pkg:npm/test@1.0.0"
        },
        "scanId": "scan-hash-001",
        "capturedAt": "2024-12-15T10:00:00Z",
        "reachable": true,
        "latticeState": "CR",
        "vexStatus": "affected",
        "inAffectedRange": true,
        "kev": false,
        "epssScore": 0.05,
        "policyFlags": ["FLAG_A", "FLAG_B"],
        "policyDecision": "warn"
      },
      "expectedHashPrefix": "sha256:"
    },
    {
      "id": "HASH-002",
      "name": "State hash differs with reachability change",
      "state1": {
        "reachable": true,
        "vexStatus": "affected"
      },
      "state2": {
        "reachable": false,
        "vexStatus": "affected"
      },
      "expectDifferentHash": true
    }
  ]
}
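
A determinism test might replay these fixtures roughly as follows; the fixture path and property handling here are illustrative, not the committed harness:

// Sketch: iterate the golden cases and check the detector against "expected".
using System.Text.Json;

var json = File.ReadAllText("Fixtures/state-comparison.json");
using var doc = JsonDocument.Parse(json);
foreach (var testCase in doc.RootElement.GetProperty("testCases").EnumerateArray())
{
    var id = testCase.GetProperty("id").GetString();
    var expectChange = testCase.GetProperty("expected")
        .GetProperty("hasMaterialChange").GetBoolean();
    // Deserialize "previous"/"current" into RiskStateSnapshot, run
    // MaterialRiskChangeDetector.Compare, and assert HasMaterialChange == expectChange.
}
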
@@ -0,0 +1,447 @@
using System.Collections.Immutable;
using StellaOps.Scanner.SmartDiff.Detection;
using Xunit;

namespace StellaOps.Scanner.SmartDiff.Tests;

public class MaterialRiskChangeDetectorTests
{
    private readonly MaterialRiskChangeDetector _detector = new();

    private static RiskStateSnapshot CreateSnapshot(
        string vulnId = "CVE-2024-1234",
        string purl = "pkg:npm/example@1.0.0",
        string scanId = "scan-1",
        bool? reachable = null,
        VexStatusType vexStatus = VexStatusType.Unknown,
        bool? inAffectedRange = null,
        bool kev = false,
        double? epssScore = null,
        PolicyDecisionType? policyDecision = null)
    {
        return new RiskStateSnapshot(
            FindingKey: new FindingKey(vulnId, purl),
            ScanId: scanId,
            CapturedAt: DateTimeOffset.UtcNow,
            Reachable: reachable,
            LatticeState: null,
            VexStatus: vexStatus,
            InAffectedRange: inAffectedRange,
            Kev: kev,
            EpssScore: epssScore,
            PolicyFlags: [],
            PolicyDecision: policyDecision);
    }

    #region R1: Reachability Flip Tests

    [Fact]
    public void R1_Detects_ReachabilityFlip_FalseToTrue()
    {
        // Arrange
        var prev = CreateSnapshot(reachable: false);
        var curr = CreateSnapshot(reachable: true);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.HasMaterialChange);
        Assert.Single(result.Changes);
        Assert.Equal(DetectionRule.R1_ReachabilityFlip, result.Changes[0].Rule);
        Assert.Equal(RiskDirection.Increased, result.Changes[0].Direction);
    }

    [Fact]
    public void R1_Detects_ReachabilityFlip_TrueToFalse()
    {
        // Arrange
        var prev = CreateSnapshot(reachable: true);
        var curr = CreateSnapshot(reachable: false);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.HasMaterialChange);
        Assert.Single(result.Changes);
        Assert.Equal(DetectionRule.R1_ReachabilityFlip, result.Changes[0].Rule);
        Assert.Equal(RiskDirection.Decreased, result.Changes[0].Direction);
    }

    [Fact]
    public void R1_Ignores_NullToValue()
    {
        // Arrange
        var prev = CreateSnapshot(reachable: null);
        var curr = CreateSnapshot(reachable: true);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.False(result.HasMaterialChange);
        Assert.Empty(result.Changes);
    }

    [Fact]
    public void R1_Ignores_NoChange()
    {
        // Arrange
        var prev = CreateSnapshot(reachable: true);
        var curr = CreateSnapshot(reachable: true);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.False(result.HasMaterialChange);
        Assert.Empty(result.Changes);
    }

    #endregion

    #region R2: VEX Status Flip Tests

    [Fact]
    public void R2_Detects_VexFlip_NotAffectedToAffected()
    {
        // Arrange
        var prev = CreateSnapshot(vexStatus: VexStatusType.NotAffected);
        var curr = CreateSnapshot(vexStatus: VexStatusType.Affected);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.HasMaterialChange);
        Assert.Single(result.Changes);
        Assert.Equal(DetectionRule.R2_VexFlip, result.Changes[0].Rule);
        Assert.Equal(RiskDirection.Increased, result.Changes[0].Direction);
    }

    [Fact]
    public void R2_Detects_VexFlip_AffectedToFixed()
    {
        // Arrange
        var prev = CreateSnapshot(vexStatus: VexStatusType.Affected);
        var curr = CreateSnapshot(vexStatus: VexStatusType.Fixed);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.HasMaterialChange);
        Assert.Single(result.Changes);
        Assert.Equal(DetectionRule.R2_VexFlip, result.Changes[0].Rule);
        Assert.Equal(RiskDirection.Decreased, result.Changes[0].Direction);
    }

    [Fact]
    public void R2_Detects_VexFlip_UnknownToAffected()
    {
        // Arrange
        var prev = CreateSnapshot(vexStatus: VexStatusType.Unknown);
        var curr = CreateSnapshot(vexStatus: VexStatusType.Affected);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.HasMaterialChange);
        Assert.Equal(RiskDirection.Increased, result.Changes[0].Direction);
    }

    [Fact]
    public void R2_Ignores_NonMeaningfulTransition()
    {
        // Arrange - Fixed to NotAffected isn't meaningful (both safe states)
        var prev = CreateSnapshot(vexStatus: VexStatusType.Fixed);
        var curr = CreateSnapshot(vexStatus: VexStatusType.NotAffected);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.False(result.HasMaterialChange);
    }

    #endregion

    #region R3: Affected Range Boundary Tests

    [Fact]
    public void R3_Detects_RangeEntry()
    {
        // Arrange
        var prev = CreateSnapshot(inAffectedRange: false);
        var curr = CreateSnapshot(inAffectedRange: true);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.HasMaterialChange);
        Assert.Single(result.Changes);
        Assert.Equal(DetectionRule.R3_RangeBoundary, result.Changes[0].Rule);
        Assert.Equal(RiskDirection.Increased, result.Changes[0].Direction);
    }

    [Fact]
    public void R3_Detects_RangeExit()
    {
        // Arrange
        var prev = CreateSnapshot(inAffectedRange: true);
        var curr = CreateSnapshot(inAffectedRange: false);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.HasMaterialChange);
        Assert.Single(result.Changes);
        Assert.Equal(DetectionRule.R3_RangeBoundary, result.Changes[0].Rule);
        Assert.Equal(RiskDirection.Decreased, result.Changes[0].Direction);
    }

    [Fact]
    public void R3_Ignores_NullTransition()
    {
        // Arrange
        var prev = CreateSnapshot(inAffectedRange: null);
        var curr = CreateSnapshot(inAffectedRange: true);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.False(result.HasMaterialChange);
    }

    #endregion

    #region R4: Intelligence/Policy Flip Tests

    [Fact]
    public void R4_Detects_KevAdded()
    {
        // Arrange
        var prev = CreateSnapshot(kev: false);
        var curr = CreateSnapshot(kev: true);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.HasMaterialChange);
        Assert.Single(result.Changes);
        Assert.Equal(DetectionRule.R4_IntelligenceFlip, result.Changes[0].Rule);
        Assert.Equal(MaterialChangeType.KevAdded, result.Changes[0].ChangeType);
        Assert.Equal(RiskDirection.Increased, result.Changes[0].Direction);
    }

    [Fact]
    public void R4_Detects_KevRemoved()
    {
        // Arrange
        var prev = CreateSnapshot(kev: true);
        var curr = CreateSnapshot(kev: false);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.HasMaterialChange);
        Assert.Equal(MaterialChangeType.KevRemoved, result.Changes[0].ChangeType);
        Assert.Equal(RiskDirection.Decreased, result.Changes[0].Direction);
    }

    [Fact]
    public void R4_Detects_EpssThresholdCrossing_Up()
    {
        // Arrange - EPSS crossing above 0.5 threshold
        var prev = CreateSnapshot(epssScore: 0.3);
        var curr = CreateSnapshot(epssScore: 0.7);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.HasMaterialChange);
        Assert.Single(result.Changes);
        Assert.Equal(MaterialChangeType.EpssThreshold, result.Changes[0].ChangeType);
        Assert.Equal(RiskDirection.Increased, result.Changes[0].Direction);
    }

    [Fact]
    public void R4_Detects_EpssThresholdCrossing_Down()
    {
        // Arrange
        var prev = CreateSnapshot(epssScore: 0.7);
        var curr = CreateSnapshot(epssScore: 0.3);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.HasMaterialChange);
        Assert.Equal(MaterialChangeType.EpssThreshold, result.Changes[0].ChangeType);
        Assert.Equal(RiskDirection.Decreased, result.Changes[0].Direction);
    }

    [Fact]
    public void R4_Ignores_EpssWithinThreshold()
    {
        // Arrange - Both below threshold
        var prev = CreateSnapshot(epssScore: 0.2);
        var curr = CreateSnapshot(epssScore: 0.4);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.False(result.HasMaterialChange);
    }

    [Fact]
    public void R4_Detects_PolicyFlip_AllowToBlock()
    {
        // Arrange
        var prev = CreateSnapshot(policyDecision: PolicyDecisionType.Allow);
        var curr = CreateSnapshot(policyDecision: PolicyDecisionType.Block);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.HasMaterialChange);
        Assert.Equal(MaterialChangeType.PolicyFlip, result.Changes[0].ChangeType);
        Assert.Equal(RiskDirection.Increased, result.Changes[0].Direction);
    }

    [Fact]
    public void R4_Detects_PolicyFlip_BlockToAllow()
    {
        // Arrange
        var prev = CreateSnapshot(policyDecision: PolicyDecisionType.Block);
        var curr = CreateSnapshot(policyDecision: PolicyDecisionType.Allow);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.HasMaterialChange);
        Assert.Equal(MaterialChangeType.PolicyFlip, result.Changes[0].ChangeType);
        Assert.Equal(RiskDirection.Decreased, result.Changes[0].Direction);
    }

    #endregion

    #region Multiple Changes Tests

    [Fact]
    public void Detects_MultipleChanges()
    {
        // Arrange - Multiple rule violations
        var prev = CreateSnapshot(reachable: false, kev: false);
        var curr = CreateSnapshot(reachable: true, kev: true);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.HasMaterialChange);
        Assert.Equal(2, result.Changes.Length);
        Assert.Contains(result.Changes, c => c.Rule == DetectionRule.R1_ReachabilityFlip);
        Assert.Contains(result.Changes, c => c.ChangeType == MaterialChangeType.KevAdded);
    }

    #endregion

    #region Priority Score Tests

    [Fact]
    public void ComputesPriorityScore_ForRiskIncrease()
    {
        // Arrange
        var prev = CreateSnapshot(reachable: false, epssScore: 0.8);
        var curr = CreateSnapshot(reachable: true, epssScore: 0.8);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.PriorityScore > 0);
    }

    [Fact]
    public void ComputesPriorityScore_ForRiskDecrease()
    {
        // Arrange
        var prev = CreateSnapshot(reachable: true, epssScore: 0.8);
        var curr = CreateSnapshot(reachable: false, epssScore: 0.8);

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.True(result.PriorityScore < 0);
    }

    [Fact]
    public void PriorityScore_ZeroWhenNoChanges()
    {
        // Arrange
        var prev = CreateSnapshot();
        var curr = CreateSnapshot();

        // Act
        var result = _detector.Compare(prev, curr);

        // Assert
        Assert.Equal(0, result.PriorityScore);
    }

    #endregion

    #region State Hash Tests

    [Fact]
    public void StateHash_DifferentForDifferentStates()
    {
        // Arrange
        var snap1 = CreateSnapshot(reachable: true);
        var snap2 = CreateSnapshot(reachable: false);

        // Act & Assert
        Assert.NotEqual(snap1.ComputeStateHash(), snap2.ComputeStateHash());
    }

    [Fact]
    public void StateHash_SameForSameState()
    {
        // Arrange
        var snap1 = CreateSnapshot(reachable: true, kev: true);
        var snap2 = CreateSnapshot(reachable: true, kev: true);

        // Act & Assert
        Assert.Equal(snap1.ComputeStateHash(), snap2.ComputeStateHash());
    }

    #endregion

    #region Error Handling Tests

    [Fact]
    public void ThrowsOnFindingKeyMismatch()
    {
        // Arrange
        var prev = CreateSnapshot(vulnId: "CVE-2024-1111");
        var curr = CreateSnapshot(vulnId: "CVE-2024-2222");

        // Act & Assert
        Assert.Throws<ArgumentException>(() => _detector.Compare(prev, curr));
    }

    #endregion
}
@@ -0,0 +1,298 @@
using StellaOps.Scanner.SmartDiff.Detection;
using Xunit;

namespace StellaOps.Scanner.SmartDiff.Tests;

public class ReachabilityGateBridgeTests
{
    #region Lattice State Mapping Tests

    [Theory]
    [InlineData("CR", true, 1.0)]
    [InlineData("CONFIRMED_REACHABLE", true, 1.0)]
    [InlineData("CU", false, 1.0)]
    [InlineData("CONFIRMED_UNREACHABLE", false, 1.0)]
    public void MapLatticeToReachable_ConfirmedStates_HighestConfidence(
        string latticeState, bool expectedReachable, double expectedConfidence)
    {
        // Act
        var (reachable, confidence) = ReachabilityGateBridge.MapLatticeToReachable(latticeState);

        // Assert
        Assert.Equal(expectedReachable, reachable);
        Assert.Equal(expectedConfidence, confidence);
    }

    [Theory]
    [InlineData("SR", true, 0.85)]
    [InlineData("STATIC_REACHABLE", true, 0.85)]
    [InlineData("SU", false, 0.85)]
    [InlineData("STATIC_UNREACHABLE", false, 0.85)]
    public void MapLatticeToReachable_StaticStates_HighConfidence(
        string latticeState, bool expectedReachable, double expectedConfidence)
    {
        // Act
        var (reachable, confidence) = ReachabilityGateBridge.MapLatticeToReachable(latticeState);

        // Assert
        Assert.Equal(expectedReachable, reachable);
        Assert.Equal(expectedConfidence, confidence);
    }

    [Theory]
    [InlineData("RO", true, 0.90)]
    [InlineData("RUNTIME_OBSERVED", true, 0.90)]
    [InlineData("RU", false, 0.70)]
    [InlineData("RUNTIME_UNOBSERVED", false, 0.70)]
    public void MapLatticeToReachable_RuntimeStates_CorrectConfidence(
        string latticeState, bool expectedReachable, double expectedConfidence)
    {
        // Act
        var (reachable, confidence) = ReachabilityGateBridge.MapLatticeToReachable(latticeState);

        // Assert
        Assert.Equal(expectedReachable, reachable);
        Assert.Equal(expectedConfidence, confidence);
    }

    [Theory]
    [InlineData("U")]
    [InlineData("UNKNOWN")]
    public void MapLatticeToReachable_UnknownState_NullWithZeroConfidence(string latticeState)
    {
        // Act
        var (reachable, confidence) = ReachabilityGateBridge.MapLatticeToReachable(latticeState);

        // Assert
        Assert.Null(reachable);
        Assert.Equal(0.0, confidence);
    }

    [Theory]
    [InlineData("X")]
    [InlineData("CONTESTED")]
    public void MapLatticeToReachable_ContestedState_NullWithMediumConfidence(string latticeState)
    {
        // Act
        var (reachable, confidence) = ReachabilityGateBridge.MapLatticeToReachable(latticeState);

        // Assert
        Assert.Null(reachable);
        Assert.Equal(0.5, confidence);
    }

    [Fact]
    public void MapLatticeToReachable_UnrecognizedState_NullWithZeroConfidence()
    {
        // Act
        var (reachable, confidence) = ReachabilityGateBridge.MapLatticeToReachable("INVALID_STATE");

        // Assert
        Assert.Null(reachable);
        Assert.Equal(0.0, confidence);
    }

    #endregion

    #region FromLatticeState Tests

    [Fact]
    public void FromLatticeState_CreatesGateWithCorrectValues()
    {
        // Act
        var gate = ReachabilityGateBridge.FromLatticeState("CR", configActivated: true, runningUser: false);

        // Assert
        Assert.True(gate.Reachable);
        Assert.True(gate.ConfigActivated);
        Assert.False(gate.RunningUser);
        Assert.Equal(1.0, gate.Confidence);
        Assert.Equal("CR", gate.LatticeState);
        Assert.Contains("REACHABLE", gate.Rationale);
    }

    [Fact]
    public void FromLatticeState_UnknownState_CreatesGateWithNulls()
    {
        // Act
        var gate = ReachabilityGateBridge.FromLatticeState("U");

        // Assert
        Assert.Null(gate.Reachable);
        Assert.Equal(0.0, gate.Confidence);
        Assert.Contains("UNKNOWN", gate.Rationale);
    }

    #endregion

    #region ComputeClass Tests

    [Fact]
    public void ComputeClass_AllFalse_ReturnsZero()
    {
        // Arrange
        var gate = new ReachabilityGate(
            Reachable: false,
            ConfigActivated: false,
            RunningUser: false,
            Confidence: 1.0,
            LatticeState: "CU",
            Rationale: "test");

        // Act
        var gateClass = gate.ComputeClass();

        // Assert
        Assert.Equal(0, gateClass);
    }

    [Fact]
    public void ComputeClass_OnlyReachable_ReturnsOne()
    {
        // Arrange
        var gate = new ReachabilityGate(
            Reachable: true,
            ConfigActivated: false,
            RunningUser: false,
            Confidence: 1.0,
            LatticeState: "CR",
            Rationale: "test");

        // Act
        var gateClass = gate.ComputeClass();

        // Assert
        Assert.Equal(1, gateClass);
    }

    [Fact]
    public void ComputeClass_ReachableAndActivated_ReturnsThree()
    {
        // Arrange
        var gate = new ReachabilityGate(
            Reachable: true,
            ConfigActivated: true,
            RunningUser: false,
            Confidence: 1.0,
            LatticeState: "CR",
            Rationale: "test");

        // Act
        var gateClass = gate.ComputeClass();

        // Assert
        Assert.Equal(3, gateClass);
    }

    [Fact]
    public void ComputeClass_AllTrue_ReturnsSeven()
    {
        // Arrange
        var gate = new ReachabilityGate(
            Reachable: true,
            ConfigActivated: true,
            RunningUser: true,
            Confidence: 1.0,
            LatticeState: "CR",
            Rationale: "test");

        // Act
        var gateClass = gate.ComputeClass();

        // Assert
        Assert.Equal(7, gateClass);
    }

    [Fact]
    public void ComputeClass_NullsAsZero()
    {
        // Arrange - nulls should be treated as false (0)
        var gate = new ReachabilityGate(
            Reachable: null,
            ConfigActivated: null,
            RunningUser: null,
            Confidence: 0.0,
            LatticeState: "U",
            Rationale: "test");

        // Act
        var gateClass = gate.ComputeClass();

        // Assert
        Assert.Equal(0, gateClass);
    }

    #endregion

    #region InterpretClass Tests

    [Theory]
    [InlineData(0, "LOW")]
    [InlineData(7, "HIGH")]
    public void InterpretClass_ExtremeCases_CorrectRiskLevel(int gateClass, string expectedRiskContains)
    {
        // Act
        var interpretation = ReachabilityGateBridge.InterpretClass(gateClass);

        // Assert
        Assert.Contains(expectedRiskContains, interpretation);
    }

    [Fact]
    public void RiskInterpretation_Property_ReturnsCorrectValue()
    {
        // Arrange
        var gate = new ReachabilityGate(
            Reachable: true,
            ConfigActivated: true,
            RunningUser: true,
            Confidence: 1.0,
            LatticeState: "CR",
            Rationale: "test");

        // Act
        var interpretation = gate.RiskInterpretation;

        // Assert
        Assert.Contains("HIGH", interpretation);
    }

    #endregion

    #region Static Unknown Gate Tests

    [Fact]
    public void Unknown_HasExpectedValues()
    {
        // Act
        var gate = ReachabilityGate.Unknown;

        // Assert
        Assert.Null(gate.Reachable);
        Assert.Null(gate.ConfigActivated);
        Assert.Null(gate.RunningUser);
        Assert.Equal(0.0, gate.Confidence);
        Assert.Equal("U", gate.LatticeState);
    }

    #endregion

    #region Rationale Generation Tests

    [Theory]
    [InlineData("CR", "Confirmed reachable")]
    [InlineData("SR", "Statically reachable")]
    [InlineData("RO", "Observed at runtime")]
    [InlineData("U", "unknown")]
    [InlineData("X", "Contested")]
    public void GenerateRationale_IncludesStateDescription(string latticeState, string expectedContains)
    {
        // Act
        var rationale = ReachabilityGateBridge.GenerateRationale(latticeState, true);

        // Assert
        Assert.Contains(expectedContains, rationale, StringComparison.OrdinalIgnoreCase);
    }

    #endregion
}
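The `ComputeClass` cases above (0, 1, 3, 7) are consistent with a three-bit encoding of the gate inputs. A hypothetical sketch of that mapping, not necessarily the shipped code:

```csharp
// Hypothetical three-bit gate class consistent with the cases above:
// bit 0 = Reachable, bit 1 = ConfigActivated, bit 2 = RunningUser;
// null inputs count as 0, matching ComputeClass_NullsAsZero.
static int ComputeClassSketch(bool? reachable, bool? configActivated, bool? runningUser) =>
    (reachable == true ? 1 : 0)
    | (configActivated == true ? 2 : 0)
    | (runningUser == true ? 4 : 0);
```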
@@ -0,0 +1,374 @@
using System.Collections.Immutable;
using System.Text.Json;
using StellaOps.Scanner.SmartDiff.Detection;
using Xunit;

namespace StellaOps.Scanner.SmartDiff.Tests;

/// <summary>
/// Golden fixture tests for Smart-Diff state comparison determinism.
/// Per Sprint 3500.3 - ensures stable, reproducible change detection.
/// </summary>
public class StateComparisonGoldenTests
{
    private static readonly string FixturePath = Path.Combine(
        AppContext.BaseDirectory,
        "Fixtures",
        "state-comparison.v1.json");

    private static readonly JsonSerializerOptions JsonOptions = new()
    {
        PropertyNameCaseInsensitive = true
    };

    private readonly MaterialRiskChangeDetector _detector;

    public StateComparisonGoldenTests()
    {
        _detector = new MaterialRiskChangeDetector();
    }

    [Fact]
    public void GoldenFixture_Exists()
    {
        Assert.True(File.Exists(FixturePath), $"Fixture file not found: {FixturePath}");
    }

    [Theory]
    [MemberData(nameof(GetTestCases))]
    public void DetectChanges_MatchesGoldenFixture(GoldenTestCase testCase)
    {
        // Arrange
        var previous = ParseSnapshot(testCase.Previous);
        var current = ParseSnapshot(testCase.Current);

        // Act
        var result = _detector.DetectChanges(previous, current);

        // Assert
        Assert.Equal(testCase.Expected.HasMaterialChange, result.HasMaterialChange);

        if (testCase.Expected.ChangeCount.HasValue)
        {
            Assert.Equal(testCase.Expected.ChangeCount.Value, result.Changes.Length);
        }

        if (testCase.Expected.TotalPriorityScore.HasValue)
        {
            Assert.Equal(testCase.Expected.TotalPriorityScore.Value, result.PriorityScore);
        }

        if (testCase.Expected.ChangeType is not null && result.Changes.Length > 0)
        {
            var expectedType = ParseChangeType(testCase.Expected.ChangeType);
            Assert.Contains(result.Changes, c => c.ChangeType == expectedType);
        }

        if (testCase.Expected.Direction is not null && result.Changes.Length > 0)
        {
            var expectedDirection = ParseDirection(testCase.Expected.Direction);
            Assert.Contains(result.Changes, c => c.Direction == expectedDirection);
        }
    }

    [Fact]
    public void StateHash_IsDeterministic()
    {
        // Arrange
        var snapshot = new RiskStateSnapshot(
            FindingKey: new FindingKey("CVE-2024-1111", "pkg:npm/test@1.0.0"),
            ScanId: "scan-hash-001",
            CapturedAt: DateTimeOffset.Parse("2024-12-15T10:00:00Z"),
            Reachable: true,
            LatticeState: "CR",
            VexStatus: VexStatusType.Affected,
            InAffectedRange: true,
            Kev: false,
            EpssScore: 0.05,
            PolicyFlags: ["FLAG_A", "FLAG_B"],
            PolicyDecision: PolicyDecisionType.Warn);

        // Act - compute hash multiple times
        var hash1 = snapshot.ComputeStateHash();
        var hash2 = snapshot.ComputeStateHash();
        var hash3 = snapshot.ComputeStateHash();

        // Assert - all hashes must be identical
        Assert.Equal(hash1, hash2);
        Assert.Equal(hash2, hash3);
        Assert.StartsWith("sha256:", hash1);
    }

    [Fact]
    public void StateHash_DiffersWithReachabilityChange()
    {
        // Arrange
        var baseSnapshot = new RiskStateSnapshot(
            FindingKey: new FindingKey("CVE-2024-1111", "pkg:npm/test@1.0.0"),
            ScanId: "scan-hash-001",
            CapturedAt: DateTimeOffset.Parse("2024-12-15T10:00:00Z"),
            Reachable: true,
            LatticeState: "CR",
            VexStatus: VexStatusType.Affected,
            InAffectedRange: true,
            Kev: false,
            EpssScore: 0.05,
            PolicyFlags: [],
            PolicyDecision: null);

        var modifiedSnapshot = baseSnapshot with { Reachable = false };

        // Act
        var hash1 = baseSnapshot.ComputeStateHash();
        var hash2 = modifiedSnapshot.ComputeStateHash();

        // Assert - hashes must differ
        Assert.NotEqual(hash1, hash2);
    }

    [Fact]
    public void StateHash_DiffersWithVexStatusChange()
    {
        // Arrange
        var baseSnapshot = new RiskStateSnapshot(
            FindingKey: new FindingKey("CVE-2024-1111", "pkg:npm/test@1.0.0"),
            ScanId: "scan-hash-001",
            CapturedAt: DateTimeOffset.Parse("2024-12-15T10:00:00Z"),
            Reachable: true,
            LatticeState: "CR",
            VexStatus: VexStatusType.Affected,
            InAffectedRange: true,
            Kev: false,
            EpssScore: 0.05,
            PolicyFlags: [],
            PolicyDecision: null);

        var modifiedSnapshot = baseSnapshot with { VexStatus = VexStatusType.NotAffected };

        // Act
        var hash1 = baseSnapshot.ComputeStateHash();
        var hash2 = modifiedSnapshot.ComputeStateHash();

        // Assert - hashes must differ
        Assert.NotEqual(hash1, hash2);
    }

    [Fact]
    public void StateHash_SameForEquivalentStates()
    {
        // Arrange - two snapshots with same risk-relevant fields but different scan IDs
        var snapshot1 = new RiskStateSnapshot(
            FindingKey: new FindingKey("CVE-2024-1111", "pkg:npm/test@1.0.0"),
            ScanId: "scan-001",
            CapturedAt: DateTimeOffset.Parse("2024-12-15T10:00:00Z"),
            Reachable: true,
            LatticeState: "CR",
            VexStatus: VexStatusType.Affected,
            InAffectedRange: true,
            Kev: false,
            EpssScore: 0.05,
            PolicyFlags: [],
            PolicyDecision: null);

        var snapshot2 = new RiskStateSnapshot(
            FindingKey: new FindingKey("CVE-2024-1111", "pkg:npm/test@1.0.0"),
            ScanId: "scan-002", // Different scan ID
            CapturedAt: DateTimeOffset.Parse("2024-12-16T10:00:00Z"), // Different timestamp
            Reachable: true,
            LatticeState: "CR",
            VexStatus: VexStatusType.Affected,
            InAffectedRange: true,
            Kev: false,
            EpssScore: 0.05,
            PolicyFlags: [],
            PolicyDecision: null);

        // Act
        var hash1 = snapshot1.ComputeStateHash();
        var hash2 = snapshot2.ComputeStateHash();

        // Assert - hashes should be the same (scan ID and timestamp are not part of state hash)
        Assert.Equal(hash1, hash2);
    }

    [Fact]
    public void PriorityScore_IsConsistent()
    {
        // Arrange - KEV flip should always produce same priority
        var previous = new RiskStateSnapshot(
            FindingKey: new FindingKey("CVE-2024-6666", "pkg:npm/axios@0.21.0"),
            ScanId: "scan-prev",
            CapturedAt: DateTimeOffset.Parse("2024-12-01T10:00:00Z"),
            Reachable: true,
            LatticeState: "SR",
            VexStatus: VexStatusType.Affected,
            InAffectedRange: true,
            Kev: false,
            EpssScore: 0.08,
            PolicyFlags: [],
            PolicyDecision: null);

        var current = previous with
        {
            ScanId = "scan-curr",
            CapturedAt = DateTimeOffset.Parse("2024-12-15T10:00:00Z"),
            Kev = true
        };

        // Act - detect multiple times
        var result1 = _detector.DetectChanges(previous, current);
        var result2 = _detector.DetectChanges(previous, current);
        var result3 = _detector.DetectChanges(previous, current);

        // Assert - priority score should be deterministic
        Assert.Equal(result1.PriorityScore, result2.PriorityScore);
        Assert.Equal(result2.PriorityScore, result3.PriorityScore);
    }

    #region Data Loading

    public static IEnumerable<object[]> GetTestCases()
    {
        if (!File.Exists(FixturePath))
        {
            yield break;
        }

        var json = File.ReadAllText(FixturePath);
        var fixture = JsonSerializer.Deserialize<GoldenFixture>(json, JsonOptions);

        if (fixture?.TestCases is null)
        {
            yield break;
        }

        foreach (var testCase in fixture.TestCases)
        {
            yield return new object[] { testCase };
        }
    }

    private static RiskStateSnapshot ParseSnapshot(SnapshotData data)
    {
        return new RiskStateSnapshot(
            FindingKey: new FindingKey(data.FindingKey.VulnId, data.FindingKey.Purl),
            ScanId: data.ScanId,
            CapturedAt: DateTimeOffset.Parse(data.CapturedAt),
            Reachable: data.Reachable,
            LatticeState: data.LatticeState,
            VexStatus: ParseVexStatus(data.VexStatus),
            InAffectedRange: data.InAffectedRange,
            Kev: data.Kev,
            EpssScore: data.EpssScore,
            PolicyFlags: data.PolicyFlags?.ToImmutableArray() ?? [],
            PolicyDecision: ParsePolicyDecision(data.PolicyDecision));
    }

    private static VexStatusType ParseVexStatus(string value)
    {
        return value.ToLowerInvariant() switch
        {
            "affected" => VexStatusType.Affected,
            "not_affected" => VexStatusType.NotAffected,
            "fixed" => VexStatusType.Fixed,
            "under_investigation" => VexStatusType.UnderInvestigation,
            _ => VexStatusType.Unknown
        };
    }

    private static PolicyDecisionType? ParsePolicyDecision(string? value)
    {
        if (string.IsNullOrEmpty(value))
        {
            return null;
        }

        return value.ToLowerInvariant() switch
        {
            "allow" => PolicyDecisionType.Allow,
            "warn" => PolicyDecisionType.Warn,
            "block" => PolicyDecisionType.Block,
            _ => null
        };
    }

    private static MaterialChangeType ParseChangeType(string value)
    {
        return value.ToLowerInvariant() switch
        {
            "reachability_flip" => MaterialChangeType.ReachabilityFlip,
            "vex_flip" => MaterialChangeType.VexFlip,
            "range_boundary" => MaterialChangeType.RangeBoundary,
            "kev_added" => MaterialChangeType.KevAdded,
            "kev_removed" => MaterialChangeType.KevRemoved,
            "epss_threshold" => MaterialChangeType.EpssThreshold,
            "policy_flip" => MaterialChangeType.PolicyFlip,
            _ => throw new ArgumentException($"Unknown change type: {value}")
        };
    }

    private static RiskDirection ParseDirection(string value)
    {
        return value.ToLowerInvariant() switch
        {
            "increased" => RiskDirection.Increased,
            "decreased" => RiskDirection.Decreased,
            "neutral" => RiskDirection.Neutral,
            _ => throw new ArgumentException($"Unknown direction: {value}")
        };
    }

    #endregion
}

#region Fixture DTOs

public class GoldenFixture
{
    public string? Version { get; set; }
    public string? Description { get; set; }
    public List<GoldenTestCase>? TestCases { get; set; }
}

public class GoldenTestCase
{
    public string Id { get; set; } = "";
    public string Name { get; set; } = "";
    public string? Rule { get; set; }
    public SnapshotData Previous { get; set; } = new();
    public SnapshotData Current { get; set; } = new();
    public ExpectedResult Expected { get; set; } = new();

    public override string ToString() => $"{Id}: {Name}";
}

public class SnapshotData
{
    public FindingKeyData FindingKey { get; set; } = new();
    public string ScanId { get; set; } = "";
    public string CapturedAt { get; set; } = "";
    public bool? Reachable { get; set; }
    public string? LatticeState { get; set; }
    public string VexStatus { get; set; } = "unknown";
    public bool? InAffectedRange { get; set; }
    public bool Kev { get; set; }
    public double? EpssScore { get; set; }
    public List<string>? PolicyFlags { get; set; }
    public string? PolicyDecision { get; set; }
}

public class FindingKeyData
{
    public string VulnId { get; set; } = "";
    public string Purl { get; set; } = "";
}

public class ExpectedResult
{
    public bool HasMaterialChange { get; set; }
    public string? Direction { get; set; }
    public string? ChangeType { get; set; }
    public int? ChangeCount { get; set; }
    public int? TotalPriorityScore { get; set; }
    public int? PriorityScoreContribution { get; set; }
}

#endregion
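The assertions above fix the hash prefix and which fields participate (scan ID and capture timestamp are excluded). A minimal sketch consistent with those assertions; illustrative only, not the shipped implementation:

```csharp
using System.Security.Cryptography;
using System.Text;

// Hypothetical sketch: hash only the risk-relevant fields in a fixed order,
// so equivalent states collide and scan metadata never affects the digest.
static string ComputeStateHashSketch(RiskStateSnapshot s)
{
    var canonical = string.Join('|',
        s.FindingKey.VulnId,
        s.FindingKey.Purl,
        s.Reachable?.ToString() ?? "null",
        s.LatticeState ?? "null",
        s.VexStatus.ToString(),
        s.InAffectedRange?.ToString() ?? "null",
        s.Kev.ToString(),
        s.EpssScore?.ToString("0.####") ?? "null",
        string.Join(',', s.PolicyFlags),
        s.PolicyDecision?.ToString() ?? "null");

    var digest = SHA256.HashData(Encoding.UTF8.GetBytes(canonical));
    return "sha256:" + Convert.ToHexString(digest).ToLowerInvariant();
}
```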
@@ -0,0 +1,386 @@
using System.Collections.Immutable;
using StellaOps.Scanner.SmartDiff.Detection;
using Xunit;

namespace StellaOps.Scanner.SmartDiff.Tests;

public class VexCandidateEmitterTests
{
    private readonly InMemoryVexCandidateStore _store = new();

    #region Basic Emission Tests

    [Fact]
    public async Task EmitCandidates_WithAbsentApis_EmitsCandidate()
    {
        // Arrange
        var emitter = new VexCandidateEmitter(store: _store);

        var prevCallGraph = new CallGraphSnapshot("prev-digest", ["vuln_api_1", "vuln_api_2", "safe_api"]);
        var currCallGraph = new CallGraphSnapshot("curr-digest", ["safe_api"]); // vuln APIs removed

        var context = new VexCandidateEmissionContext(
            PreviousScanId: "scan-001",
            CurrentScanId: "scan-002",
            TargetImageDigest: "sha256:abc123",
            PreviousFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_api_1", "vuln_api_2"])],
            CurrentFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_api_1", "vuln_api_2"])],
            PreviousCallGraph: prevCallGraph,
            CurrentCallGraph: currCallGraph);

        // Act
        var result = await emitter.EmitCandidatesAsync(context);

        // Assert
        Assert.Equal(1, result.CandidatesEmitted);
        Assert.Single(result.Candidates);
        Assert.Equal(VexStatusType.NotAffected, result.Candidates[0].SuggestedStatus);
        Assert.Equal(VexJustification.VulnerableCodeNotPresent, result.Candidates[0].Justification);
    }

    [Fact]
    public async Task EmitCandidates_WithPresentApis_DoesNotEmit()
    {
        // Arrange
        var emitter = new VexCandidateEmitter(store: _store);

        var prevCallGraph = new CallGraphSnapshot("prev-digest", ["vuln_api_1", "safe_api"]);
        var currCallGraph = new CallGraphSnapshot("curr-digest", ["vuln_api_1", "safe_api"]); // vuln API still present

        var context = new VexCandidateEmissionContext(
            PreviousScanId: "scan-001",
            CurrentScanId: "scan-002",
            TargetImageDigest: "sha256:abc123",
            PreviousFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_api_1"])],
            CurrentFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_api_1"])],
            PreviousCallGraph: prevCallGraph,
            CurrentCallGraph: currCallGraph);

        // Act
        var result = await emitter.EmitCandidatesAsync(context);

        // Assert
        Assert.Equal(0, result.CandidatesEmitted);
        Assert.Empty(result.Candidates);
    }

    [Fact]
    public async Task EmitCandidates_FindingAlreadyNotAffected_DoesNotEmit()
    {
        // Arrange
        var emitter = new VexCandidateEmitter(store: _store);

        var prevCallGraph = new CallGraphSnapshot("prev-digest", ["vuln_api_1"]);
        var currCallGraph = new CallGraphSnapshot("curr-digest", []); // API removed

        var context = new VexCandidateEmissionContext(
            PreviousScanId: "scan-001",
            CurrentScanId: "scan-002",
            TargetImageDigest: "sha256:abc123",
            PreviousFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.NotAffected, // Already not affected
                VulnerableApis: ["vuln_api_1"])],
            CurrentFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.NotAffected,
                VulnerableApis: ["vuln_api_1"])],
            PreviousCallGraph: prevCallGraph,
            CurrentCallGraph: currCallGraph);

        // Act
        var result = await emitter.EmitCandidatesAsync(context);

        // Assert
        Assert.Equal(0, result.CandidatesEmitted);
    }

    #endregion

    #region Call Graph Tests

    [Fact]
    public async Task EmitCandidates_NoCallGraph_DoesNotEmit()
    {
        // Arrange
        var emitter = new VexCandidateEmitter(store: _store);

        var context = new VexCandidateEmissionContext(
            PreviousScanId: "scan-001",
            CurrentScanId: "scan-002",
            TargetImageDigest: "sha256:abc123",
            PreviousFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_api_1"])],
            CurrentFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_api_1"])],
            PreviousCallGraph: null,
            CurrentCallGraph: null);

        // Act
        var result = await emitter.EmitCandidatesAsync(context);

        // Assert
        Assert.Equal(0, result.CandidatesEmitted);
    }

    [Fact]
    public async Task EmitCandidates_NoVulnerableApis_DoesNotEmit()
    {
        // Arrange
        var emitter = new VexCandidateEmitter(store: _store);

        var prevCallGraph = new CallGraphSnapshot("prev-digest", ["api_1"]);
        var currCallGraph = new CallGraphSnapshot("curr-digest", []);

        var context = new VexCandidateEmissionContext(
            PreviousScanId: "scan-001",
            CurrentScanId: "scan-002",
            TargetImageDigest: "sha256:abc123",
            PreviousFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: [])], // No vulnerable APIs tracked
            CurrentFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: [])],
            PreviousCallGraph: prevCallGraph,
            CurrentCallGraph: currCallGraph);

        // Act
        var result = await emitter.EmitCandidatesAsync(context);

        // Assert
        Assert.Equal(0, result.CandidatesEmitted);
    }

    #endregion

    #region Confidence Tests

    [Fact]
    public async Task EmitCandidates_MultipleAbsentApis_HigherConfidence()
    {
        // Arrange
        var emitter = new VexCandidateEmitter(store: _store);

        var prevCallGraph = new CallGraphSnapshot("prev-digest", ["vuln_1", "vuln_2", "vuln_3"]);
        var currCallGraph = new CallGraphSnapshot("curr-digest", []); // All removed

        var context = new VexCandidateEmissionContext(
            PreviousScanId: "scan-001",
            CurrentScanId: "scan-002",
            TargetImageDigest: "sha256:abc123",
            PreviousFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_1", "vuln_2", "vuln_3"])],
            CurrentFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_1", "vuln_2", "vuln_3"])],
            PreviousCallGraph: prevCallGraph,
            CurrentCallGraph: currCallGraph);

        // Act
        var result = await emitter.EmitCandidatesAsync(context);

        // Assert
        Assert.Single(result.Candidates);
        Assert.Equal(0.95, result.Candidates[0].Confidence); // 3+ APIs = 0.95
    }

    [Fact]
    public async Task EmitCandidates_BelowConfidenceThreshold_DoesNotEmit()
    {
        // Arrange - Set high threshold
        var options = new VexCandidateEmitterOptions { MinConfidence = 0.99 };
        var emitter = new VexCandidateEmitter(options: options, store: _store);

        var prevCallGraph = new CallGraphSnapshot("prev-digest", ["vuln_1"]);
        var currCallGraph = new CallGraphSnapshot("curr-digest", []);

        var context = new VexCandidateEmissionContext(
            PreviousScanId: "scan-001",
            CurrentScanId: "scan-002",
            TargetImageDigest: "sha256:abc123",
            PreviousFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_1"])],
            CurrentFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_1"])],
            PreviousCallGraph: prevCallGraph,
            CurrentCallGraph: currCallGraph);

        // Act
        var result = await emitter.EmitCandidatesAsync(context);

        // Assert - Single API = 0.75 confidence, below 0.99 threshold
        Assert.Equal(0, result.CandidatesEmitted);
    }

    #endregion

    #region Rate Limiting Tests

    [Fact]
    public async Task EmitCandidates_RespectsMaxCandidatesLimit()
    {
        // Arrange
        var options = new VexCandidateEmitterOptions { MaxCandidatesPerImage = 2 };
        var emitter = new VexCandidateEmitter(options: options, store: _store);

        var prevCallGraph = new CallGraphSnapshot("prev-digest", ["vuln_1", "vuln_2", "vuln_3"]);
        var currCallGraph = new CallGraphSnapshot("curr-digest", []);

        var findings = Enumerable.Range(1, 5).Select(i => new FindingSnapshot(
            FindingKey: new FindingKey($"CVE-2024-{i}", $"pkg:npm/example{i}@1.0.0"),
            VexStatus: VexStatusType.Affected,
            VulnerableApis: [$"vuln_{i}"])).ToList();

        var context = new VexCandidateEmissionContext(
            PreviousScanId: "scan-001",
            CurrentScanId: "scan-002",
            TargetImageDigest: "sha256:abc123",
            PreviousFindings: findings,
            CurrentFindings: findings,
            PreviousCallGraph: prevCallGraph,
            CurrentCallGraph: currCallGraph);

        // Act
        var result = await emitter.EmitCandidatesAsync(context);

        // Assert
        Assert.Equal(2, result.CandidatesEmitted);
    }

    #endregion

    #region Storage Tests

    [Fact]
    public async Task EmitCandidates_StoresCandidates()
    {
        // Arrange
        var options = new VexCandidateEmitterOptions { PersistCandidates = true };
        var emitter = new VexCandidateEmitter(options: options, store: _store);

        var prevCallGraph = new CallGraphSnapshot("prev-digest", ["vuln_api"]);
        var currCallGraph = new CallGraphSnapshot("curr-digest", []);

        var context = new VexCandidateEmissionContext(
            PreviousScanId: "scan-001",
            CurrentScanId: "scan-002",
            TargetImageDigest: "sha256:abc123",
            PreviousFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_api"])],
            CurrentFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_api"])],
            PreviousCallGraph: prevCallGraph,
            CurrentCallGraph: currCallGraph);

        // Act
        await emitter.EmitCandidatesAsync(context);

        // Assert
        var stored = await _store.GetCandidatesAsync("sha256:abc123");
        Assert.Single(stored);
    }

    [Fact]
    public async Task EmitCandidates_NoPersist_DoesNotStore()
    {
        // Arrange
        var options = new VexCandidateEmitterOptions { PersistCandidates = false };
        var emitter = new VexCandidateEmitter(options: options, store: _store);

        var prevCallGraph = new CallGraphSnapshot("prev-digest", ["vuln_api"]);
        var currCallGraph = new CallGraphSnapshot("curr-digest", []);

        var context = new VexCandidateEmissionContext(
            PreviousScanId: "scan-001",
            CurrentScanId: "scan-002",
            TargetImageDigest: "sha256:abc123",
            PreviousFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_api"])],
            CurrentFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_api"])],
            PreviousCallGraph: prevCallGraph,
            CurrentCallGraph: currCallGraph);

        // Act
        var result = await emitter.EmitCandidatesAsync(context);

        // Assert - Candidate emitted but not stored
        Assert.Equal(1, result.CandidatesEmitted);
        var stored = await _store.GetCandidatesAsync("sha256:abc123");
        Assert.Empty(stored);
    }

    #endregion

    #region Evidence Link Tests

    [Fact]
    public async Task EmitCandidates_IncludesEvidenceLinks()
    {
        // Arrange
        var emitter = new VexCandidateEmitter(store: _store);

        var prevCallGraph = new CallGraphSnapshot("prev-digest", ["vuln_api_1", "vuln_api_2"]);
        var currCallGraph = new CallGraphSnapshot("curr-digest", []);

        var context = new VexCandidateEmissionContext(
            PreviousScanId: "scan-001",
            CurrentScanId: "scan-002",
            TargetImageDigest: "sha256:abc123",
            PreviousFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_api_1", "vuln_api_2"])],
            CurrentFindings: [new FindingSnapshot(
                FindingKey: new FindingKey("CVE-2024-1234", "pkg:npm/example@1.0.0"),
                VexStatus: VexStatusType.Affected,
                VulnerableApis: ["vuln_api_1", "vuln_api_2"])],
            PreviousCallGraph: prevCallGraph,
            CurrentCallGraph: currCallGraph);

        // Act
        var result = await emitter.EmitCandidatesAsync(context);

        // Assert
        var candidate = result.Candidates[0];
        Assert.Contains(candidate.EvidenceLinks, e => e.Type == "callgraph_diff");
        Assert.Contains(candidate.EvidenceLinks, e => e.Type == "absent_api" && e.Uri.Contains("vuln_api_1"));
        Assert.Contains(candidate.EvidenceLinks, e => e.Type == "absent_api" && e.Uri.Contains("vuln_api_2"));
    }

    #endregion
}
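The confidence assertions above (0.75 for a single absent API, 0.95 for three or more) imply a tiered mapping from absent-API count to confidence. One plausible sketch, purely illustrative; the intermediate tier is assumed and not pinned by any test:

```csharp
// Hypothetical tiering consistent with the tests above; the real
// emitter may weigh additional signals beyond the raw count.
static double ConfidenceForAbsentApis(int absentApiCount) => absentApiCount switch
{
    <= 0 => 0.0,
    1 => 0.75,
    2 => 0.85, // assumed intermediate tier - not asserted by any test
    _ => 0.95
};
```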
@@ -0,0 +1,368 @@
using System.Collections.Immutable;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using StellaOps.Scanner.SmartDiff.Detection;
using StellaOps.Scanner.Storage.Postgres;
using Xunit;

namespace StellaOps.Scanner.Storage.Tests;

/// <summary>
/// Integration tests for Smart-Diff PostgreSQL repositories.
/// Per Sprint 3500.3 - SDIFF-DET-026.
/// </summary>
[Collection("scanner-postgres")]
public class SmartDiffRepositoryIntegrationTests : IAsyncLifetime
{
    private readonly ScannerPostgresFixture _fixture;
    private PostgresRiskStateRepository _riskStateRepo = null!;
    private PostgresVexCandidateStore _vexCandidateStore = null!;
    private PostgresMaterialRiskChangeRepository _changeRepo = null!;

    public SmartDiffRepositoryIntegrationTests(ScannerPostgresFixture fixture)
    {
        _fixture = fixture;
    }

    public async Task InitializeAsync()
    {
        await _fixture.TruncateAllTablesAsync();

        var dataSource = CreateDataSource();
        var logger = NullLoggerFactory.Instance;

        _riskStateRepo = new PostgresRiskStateRepository(
            dataSource,
            logger.CreateLogger<PostgresRiskStateRepository>());

        _vexCandidateStore = new PostgresVexCandidateStore(
            dataSource,
            logger.CreateLogger<PostgresVexCandidateStore>());

        _changeRepo = new PostgresMaterialRiskChangeRepository(
            dataSource,
            logger.CreateLogger<PostgresMaterialRiskChangeRepository>());
    }

    public Task DisposeAsync() => Task.CompletedTask;

    private ScannerDataSource CreateDataSource()
    {
        var options = new ScannerStorageOptions
        {
            Postgres = new StellaOps.Infrastructure.Postgres.Options.PostgresOptions
            {
                ConnectionString = _fixture.ConnectionString,
                SchemaName = _fixture.SchemaName
            }
        };

        return new ScannerDataSource(
            Microsoft.Extensions.Options.Options.Create(options),
            NullLoggerFactory.Instance.CreateLogger<ScannerDataSource>());
    }

    #region RiskStateSnapshot Tests

    [Fact]
    public async Task StoreSnapshot_ThenRetrieve_ReturnsCorrectData()
    {
        // Arrange
        var snapshot = CreateTestSnapshot("CVE-2024-1234", "pkg:npm/lodash@4.17.21", "scan-001");

        // Act
        await _riskStateRepo.StoreSnapshotAsync(snapshot);
        var retrieved = await _riskStateRepo.GetLatestSnapshotAsync(snapshot.FindingKey);

        // Assert
        Assert.NotNull(retrieved);
        Assert.Equal(snapshot.FindingKey.VulnId, retrieved.FindingKey.VulnId);
        Assert.Equal(snapshot.FindingKey.Purl, retrieved.FindingKey.Purl);
        Assert.Equal(snapshot.Reachable, retrieved.Reachable);
        Assert.Equal(snapshot.VexStatus, retrieved.VexStatus);
        Assert.Equal(snapshot.Kev, retrieved.Kev);
    }

    [Fact]
    public async Task StoreMultipleSnapshots_GetHistory_ReturnsInOrder()
    {
        // Arrange
        var findingKey = new FindingKey("CVE-2024-5678", "pkg:pypi/requests@2.28.0");

        var snapshot1 = CreateTestSnapshot(findingKey.VulnId, findingKey.Purl, "scan-001",
            capturedAt: DateTimeOffset.UtcNow.AddHours(-2));
        var snapshot2 = CreateTestSnapshot(findingKey.VulnId, findingKey.Purl, "scan-002",
            capturedAt: DateTimeOffset.UtcNow.AddHours(-1));
        var snapshot3 = CreateTestSnapshot(findingKey.VulnId, findingKey.Purl, "scan-003",
            capturedAt: DateTimeOffset.UtcNow);

        // Act
        await _riskStateRepo.StoreSnapshotsAsync([snapshot1, snapshot2, snapshot3]);
        var history = await _riskStateRepo.GetSnapshotHistoryAsync(findingKey, limit: 10);

        // Assert
        Assert.Equal(3, history.Count);
        Assert.Equal("scan-003", history[0].ScanId); // Most recent first
        Assert.Equal("scan-002", history[1].ScanId);
        Assert.Equal("scan-001", history[2].ScanId);
    }

    [Fact]
    public async Task GetSnapshotsForScan_ReturnsAllForScan()
    {
        // Arrange
        var scanId = "scan-bulk-001";
        var snapshot1 = CreateTestSnapshot("CVE-2024-1111", "pkg:npm/a@1.0.0", scanId);
        var snapshot2 = CreateTestSnapshot("CVE-2024-2222", "pkg:npm/b@2.0.0", scanId);
        var snapshot3 = CreateTestSnapshot("CVE-2024-3333", "pkg:npm/c@3.0.0", "other-scan");

        await _riskStateRepo.StoreSnapshotsAsync([snapshot1, snapshot2, snapshot3]);

        // Act
        var results = await _riskStateRepo.GetSnapshotsForScanAsync(scanId);

        // Assert
        Assert.Equal(2, results.Count);
        Assert.All(results, r => Assert.Equal(scanId, r.ScanId));
    }

    [Fact]
    public async Task StateHash_IsDeterministic()
    {
        // Arrange
        var snapshot = CreateTestSnapshot("CVE-2024-HASH", "pkg:npm/hash-test@1.0.0", "scan-hash");

        // Act
        await _riskStateRepo.StoreSnapshotAsync(snapshot);
        var hash1 = snapshot.ComputeStateHash();

        var retrieved = await _riskStateRepo.GetLatestSnapshotAsync(snapshot.FindingKey);
        var hash2 = retrieved!.ComputeStateHash();

        // Assert
        Assert.Equal(hash1, hash2);
    }

    #endregion

    #region VexCandidate Tests

    [Fact]
    public async Task StoreCandidates_ThenRetrieve_ReturnsCorrectData()
    {
        // Arrange
        var candidate = CreateTestCandidate("CVE-2024-VEX1", "pkg:npm/vex-test@1.0.0", "sha256:abc123");

        // Act
        await _vexCandidateStore.StoreCandidatesAsync([candidate]);
        var retrieved = await _vexCandidateStore.GetCandidateAsync(candidate.CandidateId);

        // Assert
        Assert.NotNull(retrieved);
        Assert.Equal(candidate.CandidateId, retrieved.CandidateId);
        Assert.Equal(candidate.SuggestedStatus, retrieved.SuggestedStatus);
        Assert.Equal(candidate.Justification, retrieved.Justification);
        Assert.Equal(candidate.Confidence, retrieved.Confidence, precision: 2);
    }

    [Fact]
    public async Task GetCandidatesForImage_ReturnsFilteredResults()
    {
        // Arrange
        var imageDigest = "sha256:image123";
        var candidate1 = CreateTestCandidate("CVE-2024-A", "pkg:npm/a@1.0.0", imageDigest);
        var candidate2 = CreateTestCandidate("CVE-2024-B", "pkg:npm/b@1.0.0", imageDigest);
        var candidate3 = CreateTestCandidate("CVE-2024-C", "pkg:npm/c@1.0.0", "sha256:other");

        await _vexCandidateStore.StoreCandidatesAsync([candidate1, candidate2, candidate3]);

        // Act
        var results = await _vexCandidateStore.GetCandidatesAsync(imageDigest);

        // Assert
        Assert.Equal(2, results.Count);
        Assert.All(results, r => Assert.Equal(imageDigest, r.ImageDigest));
    }

    [Fact]
    public async Task ReviewCandidate_UpdatesReviewStatus()
    {
        // Arrange
        var candidate = CreateTestCandidate("CVE-2024-REVIEW", "pkg:npm/review@1.0.0", "sha256:review");
        await _vexCandidateStore.StoreCandidatesAsync([candidate]);

        var review = new VexCandidateReview(
            Action: VexReviewAction.Accept,
            Reviewer: "test-user@example.com",
            ReviewedAt: DateTimeOffset.UtcNow,
            Comment: "Verified via manual code review");

        // Act
        var success = await _vexCandidateStore.ReviewCandidateAsync(candidate.CandidateId, review);
        var retrieved = await _vexCandidateStore.GetCandidateAsync(candidate.CandidateId);

        // Assert
        Assert.True(success);
        Assert.NotNull(retrieved);
        Assert.False(retrieved.RequiresReview);
    }

    [Fact]
    public async Task ReviewCandidate_NonExistent_ReturnsFalse()
    {
        // Arrange
        var review = new VexCandidateReview(
            Action: VexReviewAction.Reject,
            Reviewer: "test@example.com",
            ReviewedAt: DateTimeOffset.UtcNow,
            Comment: "Test");

        // Act
        var success = await _vexCandidateStore.ReviewCandidateAsync("non-existent-id", review);

        // Assert
        Assert.False(success);
    }

    #endregion

    #region MaterialRiskChange Tests

    [Fact]
    public async Task StoreChange_ThenRetrieve_ReturnsCorrectData()
    {
        // Arrange
        var change = CreateTestChange("CVE-2024-CHG1", "pkg:npm/change@1.0.0", hasMaterialChange: true);
        var scanId = "scan-change-001";

        // Act
        await _changeRepo.StoreChangeAsync(change, scanId);
        var results = await _changeRepo.GetChangesForScanAsync(scanId);

        // Assert
        Assert.Single(results);
        Assert.Equal(change.FindingKey.VulnId, results[0].FindingKey.VulnId);
        Assert.Equal(change.HasMaterialChange, results[0].HasMaterialChange);
        Assert.Equal(change.PriorityScore, results[0].PriorityScore);
    }

    [Fact]
    public async Task StoreMultipleChanges_QueryByFinding_ReturnsHistory()
    {
        // Arrange
        var findingKey = new FindingKey("CVE-2024-HIST", "pkg:npm/history@1.0.0");
        var change1 = CreateTestChange(findingKey.VulnId, findingKey.Purl, hasMaterialChange: true, priority: 100);
        var change2 = CreateTestChange(findingKey.VulnId, findingKey.Purl, hasMaterialChange: true, priority: 200);

        await _changeRepo.StoreChangeAsync(change1, "scan-h1");
        await _changeRepo.StoreChangeAsync(change2, "scan-h2");

        // Act
        var history = await _changeRepo.GetChangesForFindingAsync(findingKey, limit: 10);

        // Assert
        Assert.Equal(2, history.Count);
    }

    [Fact]
    public async Task QueryChanges_WithMinPriority_FiltersCorrectly()
    {
        // Arrange
        var change1 = CreateTestChange("CVE-2024-P1", "pkg:npm/p1@1.0.0", hasMaterialChange: true, priority: 50);
        var change2 = CreateTestChange("CVE-2024-P2", "pkg:npm/p2@1.0.0", hasMaterialChange: true, priority: 150);
        var change3 = CreateTestChange("CVE-2024-P3", "pkg:npm/p3@1.0.0", hasMaterialChange: true, priority: 250);

        await _changeRepo.StoreChangesAsync([change1, change2, change3], "scan-priority");

        var query = new MaterialRiskChangeQuery
        {
            MinPriorityScore = 100,
            Offset = 0,
            Limit = 100
        };

        // Act
        var result = await _changeRepo.QueryChangesAsync(query);

        // Assert
        Assert.Equal(2, result.Changes.Length);
        Assert.All(result.Changes, c => Assert.True(c.PriorityScore >= 100));
    }

    #endregion

    #region Test Data Factories

    private static RiskStateSnapshot CreateTestSnapshot(
        string vulnId,
        string purl,
        string scanId,
        DateTimeOffset? capturedAt = null)
    {
        return new RiskStateSnapshot(
            FindingKey: new FindingKey(vulnId, purl),
            ScanId: scanId,
            CapturedAt: capturedAt ?? DateTimeOffset.UtcNow,
            Reachable: true,
            LatticeState: "CR",
            VexStatus: VexStatusType.Affected,
            InAffectedRange: true,
            Kev: false,
            EpssScore: 0.05,
            PolicyFlags: ["TEST_FLAG"],
            PolicyDecision: PolicyDecisionType.Warn);
    }

    private static VexCandidate CreateTestCandidate(
        string vulnId,
        string purl,
        string imageDigest)
    {
        return new VexCandidate(
            CandidateId: $"cand-{Guid.NewGuid():N}",
            FindingKey: new FindingKey(vulnId, purl),
            SuggestedStatus: VexStatusType.NotAffected,
            Justification: VexJustification.VulnerableCodeNotInExecutePath,
            Rationale: "Test rationale - vulnerable code path not executed",
            EvidenceLinks:
            [
                new EvidenceLink("call_graph", "stellaops://graph/test", "sha256:evidence123")
            ],
            Confidence: 0.85,
            ImageDigest: imageDigest,
            GeneratedAt: DateTimeOffset.UtcNow,
            ExpiresAt: DateTimeOffset.UtcNow.AddDays(30),
            RequiresReview: true);
    }

    private static MaterialRiskChangeResult CreateTestChange(
        string vulnId,
        string purl,
        bool hasMaterialChange,
        int priority = 100)
    {
        // Explicit target type: a collection expression inside a conditional
        // has no natural type, so `var` would not compile here.
        ImmutableArray<DetectedChange> changes = hasMaterialChange
            ?
            [
                new DetectedChange(
                    Rule: DetectionRule.R1_ReachabilityFlip,
                    ChangeType: MaterialChangeType.ReachabilityFlip,
                    Direction: RiskDirection.Increased,
                    Reason: "Test reachability flip",
                    PreviousValue: "false",
                    CurrentValue: "true",
                    Weight: 1.0)
            ]
            : [];

        return new MaterialRiskChangeResult(
            FindingKey: new FindingKey(vulnId, purl),
            HasMaterialChange: hasMaterialChange,
            Changes: changes,
            PriorityScore: priority,
            PreviousStateHash: "sha256:prev",
            CurrentStateHash: "sha256:curr");
    }

    #endregion
}
@@ -2,7 +2,7 @@

## Roles
- **Scheduler Worker/WebService Engineer**: .NET 10 (preview) across workers, web service, and shared libraries; keep jobs/metrics deterministic and tenant-safe.
- **QA / Reliability**: Adds/maintains unit + integration tests in `__Tests`, covers determinism, job orchestration, and metrics; validates Mongo/Redis/NATS contracts without live cloud deps.
- **QA / Reliability**: Adds/maintains unit + integration tests in `__Tests`, covers determinism, job orchestration, and metrics; validates PostgreSQL/Redis/NATS contracts without live cloud deps.
- **Docs/Runbook Touches**: Update `docs/modules/scheduler/**` and `operations/` assets when contracts or operational characteristics change.

## Required Reading

@@ -6,21 +6,21 @@

| Method | Path | Description | Scopes |
| ------ | ---- | ----------- | ------ |
| `GET` | `/api/v1/scheduler/runs` | List runs for the current tenant (filter by schedule, state, createdAfter, cursor). | `scheduler.runs.read` |
| `GET` | `/api/v1/scheduler/runs/{runId}` | Retrieve run details. | `scheduler.runs.read` |
| `GET` | `/api/v1/scheduler/runs/{runId}/deltas` | Fetch deterministic delta metadata for the specified run. | `scheduler.runs.read` |
| `GET` | `/api/v1/scheduler/runs/queue/lag` | Snapshot queue depth per transport/queue for console dashboards. | `scheduler.runs.read` |
| `GET` | `/api/v1/scheduler/runs/{runId}/stream` | Server-sent events (SSE) stream for live progress, queue lag, and heartbeats. | `scheduler.runs.read` |
| `POST` | `/api/v1/scheduler/runs` | Create an ad-hoc run bound to an existing schedule. | `scheduler.runs.write` |
| `POST` | `/api/v1/scheduler/runs/{runId}/cancel` | Transition a run to `cancelled` when still in a non-terminal state. | `scheduler.runs.manage` |
| `POST` | `/api/v1/scheduler/runs/{runId}/retry` | Clone a terminal run into a new manual retry, preserving provenance. | `scheduler.runs.manage` |
| `POST` | `/api/v1/scheduler/runs/preview` | Resolve impacted images using the ImpactIndex without enqueuing work. | `scheduler.runs.preview` |
| `GET` | `/api/v1/scheduler/policies/simulations` | List policy simulations for the current tenant (filters: policyId, status, since, limit). | `policy:simulate` |
| `GET` | `/api/v1/scheduler/policies/simulations/{simulationId}` | Retrieve simulation status snapshot. | `policy:simulate` |
| `GET` | `/api/v1/scheduler/policies/simulations/{simulationId}/stream` | SSE stream emitting simulation status, queue lag, and heartbeats. | `policy:simulate` |
| `POST` | `/api/v1/scheduler/policies/simulations` | Enqueue a policy simulation (mode=`simulate`) with optional SBOM inputs and metadata. | `policy:simulate` |
| `POST` | `/api/v1/scheduler/policies/simulations/{simulationId}/cancel` | Request cancellation for an in-flight simulation. | `policy:simulate` |
| `POST` | `/api/v1/scheduler/policies/simulations/{simulationId}/retry` | Clone a terminal simulation into a new run preserving inputs/metadata. | `policy:simulate` |
All endpoints require a tenant context (`X-Tenant-Id`) and the appropriate scheduler scopes. Development mode allows header-based auth; production deployments must rely on Authority-issued tokens (OpTok + DPoP).
@@ -80,12 +80,12 @@ GET /api/v1/scheduler/runs?scheduleId=sch_4f2c7d9e0a2b4c64a0e7b5f9d65c1234&state
```

```json
{
  "runs": [
    {
      "schemaVersion": "scheduler.run@1",
      "id": "run_c7b4e9d2f6a04f8784a40476d8a2f771",
      "tenantId": "tenant-alpha",
      "scheduleId": "sch_4f2c7d9e0a2b4c64a0e7b5f9d65c1234",
      "trigger": "manual",
      "state": "planning",
@@ -103,13 +103,13 @@ GET /api/v1/scheduler/runs?scheduleId=sch_4f2c7d9e0a2b4c64a0e7b5f9d65c1234&state
      "reason": {
        "manualReason": "Nightly backfill"
      },
      "createdAt": "2025-10-26T03:12:45Z"
    }
  ]
}
```

When additional pages are available the response includes `"nextCursor": "<base64>"`. Clients pass this cursor via `?cursor=` to fetch the next deterministic slice (ordering = `createdAt desc, id desc`).
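A minimal client-side pagination loop against that contract; the `RunPage`/`SchedulerRun` types are assumptions for illustration, not a published SDK, and the caller is assumed to have set `X-Tenant-Id` and `Authorization` on the `HttpClient`:

```csharp
using System.Net.Http.Json;

// Hypothetical sketch: drain all pages by following nextCursor until absent.
async Task<List<SchedulerRun>> ListAllRunsAsync(HttpClient client, string scheduleId)
{
    var runs = new List<SchedulerRun>();
    string? cursor = null;
    do
    {
        var uri = $"/api/v1/scheduler/runs?scheduleId={scheduleId}"
                  + (cursor is null ? "" : $"&cursor={Uri.EscapeDataString(cursor)}");
        var page = await client.GetFromJsonAsync<RunPage>(uri)
                   ?? throw new InvalidOperationException("empty response");
        runs.AddRange(page.Runs);
        cursor = page.NextCursor; // absent on the final page
    }
    while (cursor is not null);
    return runs;
}

record RunPage(List<SchedulerRun> Runs, string? NextCursor);
record SchedulerRun(string Id, string TenantId, string State);
```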
## Cancel Run

@@ -148,33 +148,33 @@ POST /api/v1/scheduler/runs/run_c7b4e9d2f6a04f8784a40476d8a2f771/cancel

## Impact Preview

`/api/v1/scheduler/runs/preview` resolves impacted images via the ImpactIndex without mutating state. When `scheduleId` is provided the schedule selector is reused; callers may alternatively supply an explicit selector.

## Retry Run

`POST /api/v1/scheduler/runs/{runId}/retry` clones a terminal run into a new manual run with `retryOf` pointing to the original identifier. Retry is scope-gated with `scheduler.runs.manage`; the new run’s `reason.manualReason` gains a `retry-of:<runId>` suffix for provenance.

## Run deltas

`GET /api/v1/scheduler/runs/{runId}/deltas` returns an immutable, deterministically sorted array of delta summaries (`[imageDigest, severity slices, KEV hits, attestations]`).

## Queue lag snapshot

`GET /api/v1/scheduler/runs/queue/lag` exposes queue depth summaries for planner/runner transports. The payload includes `capturedAt`, `totalDepth`, `maxDepth`, and ordered queue entries (transport + queue + depth). Console uses this for backlog dashboards and alert thresholds.
## Live stream (SSE)
|
||||
|
||||
`GET /api/v1/scheduler/runs/{runId}/stream` emits server-sent events for:
|
||||
|
||||
- `initial` — full run snapshot
|
||||
- `stateChanged` — state/started/finished transitions
|
||||
- `segmentProgress` — stats updates
|
||||
- `deltaSummary` — deltas available
|
||||
- `queueLag` — periodic queue snapshots
|
||||
- `heartbeat` — uptime keep-alive (default 5s)
|
||||
- `completed` — terminal summary
|
||||
|
||||
The stream is tolerant to clients reconnecting (idempotent payloads, deterministic ordering) and honours tenant scope plus cancellation tokens.
|
||||
`/api/v1/scheduler/runs/preview` resolves impacted images via the ImpactIndex without mutating state. When `scheduleId` is provided the schedule selector is reused; callers may alternatively supply an explicit selector.
|
||||
|
||||
## Retry Run
|
||||
|
||||
`POST /api/v1/scheduler/runs/{runId}/retry` clones a terminal run into a new manual run with `retryOf` pointing to the original identifier. Retry is scope-gated with `scheduler.runs.manage`; the new run’s `reason.manualReason` gains a `retry-of:<runId>` suffix for provenance.
|
||||
|
||||
## Run deltas
|
||||
|
||||
`GET /api/v1/scheduler/runs/{runId}/deltas` returns an immutable, deterministically sorted array of delta summaries (`[imageDigest, severity slices, KEV hits, attestations]`).
|
||||
|
||||
## Queue lag snapshot
|
||||
|
||||
`GET /api/v1/scheduler/runs/queue/lag` exposes queue depth summaries for planner/runner transports. The payload includes `capturedAt`, `totalDepth`, `maxDepth`, and ordered queue entries (transport + queue + depth). Console uses this for backlog dashboards and alert thresholds.
|
||||
|
||||
## Live stream (SSE)
|
||||
|
||||
`GET /api/v1/scheduler/runs/{runId}/stream` emits server-sent events for:
|
||||
|
||||
- `initial` — full run snapshot
|
||||
- `stateChanged` — state/started/finished transitions
|
||||
- `segmentProgress` — stats updates
|
||||
- `deltaSummary` — deltas available
|
||||
- `queueLag` — periodic queue snapshots
|
||||
- `heartbeat` — uptime keep-alive (default 5s)
|
||||
- `completed` — terminal summary
|
||||
|
||||
The stream is tolerant to clients reconnecting (idempotent payloads, deterministic ordering) and honours tenant scope plus cancellation tokens.
|
||||
|
||||
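A bare-bones consumer of this stream could be sketched as follows. The endpoint path and event names come from the list above; the reading loop, reconnection handling (omitted here), and everything else are assumptions.

```csharp
using System;
using System.IO;
using System.Net.Http;
using System.Threading;
using System.Threading.Tasks;

// Minimal SSE reader: prints event names and data payload lines as they arrive.
static async Task StreamRunEventsAsync(HttpClient client, string runId, CancellationToken ct)
{
    using var request = new HttpRequestMessage(HttpMethod.Get, $"/api/v1/scheduler/runs/{runId}/stream");
    request.Headers.Accept.ParseAdd("text/event-stream");
    using var response = await client.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, ct);
    response.EnsureSuccessStatusCode();
    using var stream = await response.Content.ReadAsStreamAsync(ct);
    using var reader = new StreamReader(stream);
    while (!ct.IsCancellationRequested)
    {
        var line = await reader.ReadLineAsync();
        if (line is null) break; // server closed the stream
        if (line.StartsWith("event:") || line.StartsWith("data:"))
            Console.WriteLine(line); // e.g. "event: heartbeat" followed by its data line
    }
}
```

Since payloads are idempotent and deterministically ordered, a reconnecting client can simply replay from the fresh `initial` snapshot without reconciling missed events.
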
```http
POST /api/v1/scheduler/runs/preview
```

@@ -216,106 +216,106 @@ POST /api/v1/scheduler/runs/preview

### Integration notes

* Run creation and cancellation produce audit entries under category `scheduler.run` with correlation metadata when provided.
* The preview endpoint relies on the ImpactIndex stub in development. Production deployments must register the concrete index implementation before use.
* Planner/worker orchestration tasks will wire run creation to queueing in SCHED-WORKER-16-201/202.

## Policy simulations

The policy simulation APIs mirror the run endpoints but operate on policy-mode jobs (`mode=simulate`) scoped by tenant and RBAC (`policy:simulate`).

### Create simulation

```http
POST /api/v1/scheduler/policies/simulations
X-Tenant-Id: tenant-alpha
Authorization: Bearer <OpTok>
```

```json
{
  "policyId": "P-7",
  "policyVersion": 4,
  "priority": "normal",
  "metadata": {
    "source": "console.review"
  },
  "inputs": {
    "sbomSet": ["sbom:S-318", "sbom:S-42"],
    "captureExplain": true
  }
}
```

```http
HTTP/1.1 201 Created
Location: /api/v1/scheduler/policies/simulations/run:P-7:20251103T153000Z:e4d1a9b2

{
  "simulation": {
    "schemaVersion": "scheduler.policy-run-status@1",
    "runId": "run:P-7:20251103T153000Z:e4d1a9b2",
    "tenantId": "tenant-alpha",
    "policyId": "P-7",
    "policyVersion": 4,
    "mode": "simulate",
    "status": "queued",
    "priority": "normal",
    "queuedAt": "2025-11-03T15:30:00Z",
    "stats": {
      "components": 0,
      "rulesFired": 0,
      "findingsWritten": 0,
      "vexOverrides": 0
    },
    "inputs": {
      "sbomSet": ["sbom:S-318", "sbom:S-42"],
      "captureExplain": true
    }
  }
}
```

Canonical payload lives in `samples/api/scheduler/policy-simulation-status.json`.

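The request above can be reproduced from any .NET client; a sketch follows. The body mirrors the sample exactly, while the tenant header value and token acquisition are assumptions.

```csharp
using System.Net.Http;
using System.Net.Http.Json;
using System.Threading.Tasks;

// Enqueue a policy simulation; returns the Location header pointing at the new run.
static async Task<string?> CreateSimulationAsync(HttpClient client)
{
    using var request = new HttpRequestMessage(HttpMethod.Post, "/api/v1/scheduler/policies/simulations");
    request.Headers.Add("X-Tenant-Id", "tenant-alpha"); // tenant value assumed
    request.Content = JsonContent.Create(new
    {
        policyId = "P-7",
        policyVersion = 4,
        priority = "normal",
        metadata = new { source = "console.review" },
        inputs = new { sbomSet = new[] { "sbom:S-318", "sbom:S-42" }, captureExplain = true }
    });
    using var response = await client.SendAsync(request);
    response.EnsureSuccessStatusCode(); // expect 201 Created
    return response.Headers.Location?.ToString();
}
```
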
### List and fetch simulations

- `GET /api/v1/scheduler/policies/simulations?policyId=P-7&status=queued&limit=25`
- `GET /api/v1/scheduler/policies/simulations/{simulationId}`

The response envelope mirrors `policy-run-status` but uses `simulations` / `simulation` wrappers. All metadata keys are lower-case; retries append `retry-of=<priorRunId>` for provenance.

### Cancel and retry

- `POST /api/v1/scheduler/policies/simulations/{simulationId}/cancel`
  - Marks the job as `cancellationRequested` and surfaces the reason. Worker execution honours this flag before leasing.
- `POST /api/v1/scheduler/policies/simulations/{simulationId}/retry`
  - Clones a terminal simulation, preserving inputs/metadata and adding `metadata.retry-of` pointing to the original run ID. Returns `409 Conflict` when the simulation is not terminal.

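Because retry returns `409 Conflict` for non-terminal simulations, callers should treat that status as "still running" rather than as an error. A hedged sketch:

```csharp
using System.Net;
using System.Net.Http;
using System.Threading.Tasks;

// Retry a terminal simulation; a 409 means the original run has not finished yet.
static async Task<bool> TryRetrySimulationAsync(HttpClient client, string simulationId)
{
    using var response = await client.PostAsync(
        $"/api/v1/scheduler/policies/simulations/{simulationId}/retry", content: null);
    if (response.StatusCode == HttpStatusCode.Conflict)
        return false; // not terminal yet; poll status and retry later
    response.EnsureSuccessStatusCode();
    return true;
}
```
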
### Live stream (SSE)

`GET /api/v1/scheduler/policies/simulations/{simulationId}/stream` emits:

- `retry` — reconnection hint (milliseconds) emitted before events.
- `initial` — current simulation snapshot.
- `status` — status/attempt/stat updates.
- `queueLag` — periodic queue depth summary (shares payload with run streams).
- `heartbeat` — keep-alive ping (default 5s; configurable under `Scheduler:RunStream`).
- `completed` — terminal summary (`succeeded`, `failed`, or `cancelled`).
- `notFound` — emitted if the run record disappears while streaming.

Heartbeats, queue lag summaries, and the reconnection directive are sent immediately after connection so Console clients receive deterministic telemetry when loading a simulation workspace.

### Metrics

```http
GET /api/v1/scheduler/policies/simulations/metrics
X-Tenant-Id: tenant-alpha
Authorization: Bearer <OpTok>
```

Returns queue depth and latency summaries tailored for simulation dashboards and alerting. Response properties align with the metric names exposed via OTEL (`policy_simulation_queue_depth`, `policy_simulation_latency_seconds`). Canonical payload lives at `samples/api/scheduler/policy-simulation-metrics.json`.

- `policy_simulation_queue_depth.total` — pending simulation jobs (aggregate of `pending`, `dispatching`, `submitted`).
- `policy_simulation_latency.*` — latency percentiles (seconds) computed from the most recent terminal simulations.

> **Note:** When Mongo storage is not configured the metrics provider is disabled and the endpoint responds with `501 Not Implemented`.
> **Note:** When PostgreSQL storage is not configured the metrics provider is disabled and the endpoint responds with `501 Not Implemented`.
@@ -8,7 +8,7 @@

- `GET /api/v1/scheduler/policies/simulations/metrics` (scope: `policy:simulate`)
  - Returns queue depth grouped by status plus latency percentiles derived from the most recent sample window (default 200 terminal runs).
  - Surface area is unchanged from the implementation in Sprint 27 week 1; consumers should continue to rely on the contract in `samples/api/scheduler/policy-simulation-metrics.json`.
  - When backing storage is not Mongo the endpoint responds `501 Not Implemented`.
  - When backing storage is not PostgreSQL the endpoint responds `501 Not Implemented`.

## 2. Completion webhooks

@@ -2,7 +2,7 @@

## Goals

- Track schema revisions for `Schedule` and `Run` documents so storage upgrades are deterministic across air-gapped installs.
- Provide reusable upgrade helpers that normalize Mongo snapshots (raw BSON → JSON) into the latest DTOs without mutating inputs.
- Provide reusable upgrade helpers that normalize PostgreSQL snapshots (raw JSONB → JSON) into the latest DTOs without mutating inputs.
- Formalize the allowed `RunState` graph and surface guard-rail helpers (timestamps, stats monotonicity) for planners/runners.

## Non-goals

@@ -17,7 +17,7 @@

- `scheduler.impact-set@1` (shared envelope used by planners).
- Expose `EnsureSchedule`, `EnsureRun`, `EnsureImpactSet` helpers mirroring the Notify model pattern to normalize missing/whitespace values.
- Extend `Schedule`, `Run`, and `ImpactSet` records with an optional `schemaVersion` constructor parameter defaulting through the `Ensure*` helpers. The canonical JSON serializer will list `schemaVersion` first so documents round-trip deterministically.
- Persisted Mongo documents will now always include `schemaVersion`; exporters/backups can rely on this when bundling Offline Kit snapshots.
- Persisted PostgreSQL documents will now always include `schemaVersion`; exporters/backups can rely on this when bundling Offline Kit snapshots.

## Migration Helper Shape

- Add `SchedulerSchemaMigration` static class with:

@@ -55,8 +55,8 @@

- Expose small helper to tag `RunReason.ImpactWindowFrom/To` automatically when set by planners (using normalized ISO-8601).

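A hypothetical read-path caller might look like this, assuming `UpgradeSchedule` accepts the stored JSON and returns a result object exposing the upgraded DTO plus `Warnings` (only the helper, type, and `MigrationResult.Warnings` names come from this plan; the exact signature is an assumption):

```csharp
using Microsoft.Extensions.Logging;

// Hypothetical read path: normalise a stored snapshot before returning it to clients.
static Schedule LoadSchedule(string storedJson, ILogger logger)
{
    var result = SchedulerSchemaMigration.UpgradeSchedule(storedJson); // signature assumed
    foreach (var warning in result.Warnings)
    {
        // Surfaces migration warnings per the telemetry proposal in Open Questions below.
        logger.LogWarning("Schedule migration: {Warning}", warning);
    }
    return result.Schedule; // property name assumed
}
```
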
## Interaction Points

- **WebService**: call `SchedulerSchemaMigration.UpgradeSchedule` when returning schedules from Mongo, so clients always see the newest DTO regardless of stored version.
- **Storage.Mongo**: wrap DTO round-trips; the migration helper acts during read, and the state machine ensures updates respect transition rules before writing.
- **WebService**: call `SchedulerSchemaMigration.UpgradeSchedule` when returning schedules from PostgreSQL, so clients always see the newest DTO regardless of stored version.
- **Storage.Postgres**: wrap DTO round-trips; the migration helper acts during read, and the state machine ensures updates respect transition rules before writing.
- **Queue/Worker**: use `RunStateMachine.EnsureTransition` to guard planner/runner state updates (replace ad-hoc `with run` clones).
- **Offline Kit**: embed `schemaVersion` in exported JSON/Trivy artifacts; migrations ensure air-gapped upgrades flow without manual scripts.

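For the Queue/Worker point, a hedged sketch of the intended guard (type and method names come from this plan; the parameter shape is an assumption):

```csharp
using System;

// Hypothetical worker-side update: validate the edge before persisting.
static Run MarkPlanning(Run run, DateTimeOffset now)
{
    // EnsureTransition is assumed to reject illegal edges and timestamp regressions,
    // replacing the ad-hoc `with run` clones mentioned above.
    return RunStateMachine.EnsureTransition(run, RunState.Planning, now);
}
```
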
@@ -67,20 +67,20 @@

4. Update modules (Storage, WebService, Worker) to use new helpers; add logging around migrations/transitions.

## Test Strategy

- **Migration happy-path**: load sample Mongo fixtures for `schedule@1` and `run@1`, assert `schemaVersion` normalization, deduplicated subscribers, limits defaults. Include snapshots without the version field to exercise defaulting logic.
- **Migration happy-path**: load sample PostgreSQL fixtures for `schedule@1` and `run@1`, assert `schemaVersion` normalization, deduplicated subscribers, limits defaults. Include snapshots without the version field to exercise defaulting logic.
- **Legacy upgrade cases**: craft synthetic `schedule@0` / `run@0` JSON fragments (missing new fields, using old enum names) and verify version-specific fixups produce the latest DTO while populating `MigrationResult.Warnings`.
- **Strict mode behavior**: attempt to upgrade documents with unexpected properties and ensure warnings/throws align with configuration.
- **Run state transitions**: unit-test `RunStateMachine` for every allowed edge, invalid transitions, and timestamp/error invariants (e.g., `FinishedAt` only set on terminal states). Provide parameterized tests to confirm stats monotonicity enforcement.
- **Serialization determinism**: round-trip upgraded DTOs via `CanonicalJsonSerializer` to confirm property order includes `schemaVersion` first and produces stable hashes.
- **Documentation snippets**: extend module README or API docs with example migrations/run-state usage; verify via doc samples test (if available) or include as part of CI doc linting.

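The run-state bullet could translate into a parameterized test along these lines (a sketch: the fixture helper, the specific edges, and the exception type are assumptions beyond the names used in this plan):

```csharp
using System;
using Xunit;

public class RunStateMachineTests
{
    [Theory]
    [InlineData(RunState.Planning, RunState.Queued, true)]      // assumed allowed edge
    [InlineData(RunState.Completed, RunState.Planning, false)]  // terminal states must not regress
    public void EnsureTransition_EnforcesAllowedEdges(RunState from, RunState to, bool allowed)
    {
        var run = TestRuns.InState(from); // hypothetical fixture helper
        if (allowed)
            Assert.Equal(to, RunStateMachine.EnsureTransition(run, to, DateTimeOffset.UtcNow).State);
        else
            Assert.ThrowsAny<InvalidOperationException>( // thrown exception type assumed
                () => _ = RunStateMachine.EnsureTransition(run, to, DateTimeOffset.UtcNow));
    }
}
```
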
## Open Questions

- Do we need downgrade (`ToVersion`) helpers for Offline Kit exports? (Assumed no for now. Add backlog item if required.)
- Should `ImpactSet` migrations live here or in ImpactIndex module? (Lean towards here because DTO defined in Models; coordinate with ImpactIndex guild if they need specialized upgrades.)
- How do we surface migration warnings to telemetry? Proposal: caller logs `warning` with `MigrationResult.Warnings` immediately after calling helper.

## Status — 2025-10-20

- `SchedulerSchemaMigration` now upgrades legacy `@0` schedule/run/impact-set documents to the `@1` schema, defaulting missing counters/arrays and normalizing booleans & severities. Each backfill emits a warning so storage/web callers can log the mutation.
- `RunStateMachine.EnsureTransition` guards timestamp ordering and stats monotonicity; builders and extension helpers are wired into the scheduler worker/web service plans.
- Tests exercising legacy upgrades live in `StellaOps.Scheduler.Models.Tests/SchedulerSchemaMigrationTests.cs`; add new fixtures there when introducing additional schema versions.

@@ -15,7 +15,7 @@ surface) so we can operate across tenants without bespoke cursors.

- Delegates resolution to `PlannerExecutionService` which:
  - Pulls the owning `Schedule` and normalises its selector to the run tenant.
  - Invokes `IImpactTargetingService` to resolve impacted digests.
  - Emits canonical `ImpactSet` snapshots to Mongo for reuse/debugging.
  - Emits canonical `ImpactSet` snapshots to PostgreSQL for reuse/debugging.
  - Updates run stats/state and projects summaries via `IRunSummaryService`.
  - Enqueues a deterministic `PlannerQueueMessage` to the planner queue when
    impacted images exist; otherwise the run completes immediately.

@@ -49,6 +49,6 @@ exponential backoff.

- `AddSchedulerWorker(configuration)` registers impact targeting, planner
  dispatch, runner execution, and the three hosted services. Call it after
  `AddSchedulerQueues` and `AddSchedulerMongoStorage` when bootstrapping the
  `AddSchedulerQueues` and `AddSchedulerPostgresStorage` when bootstrapping the
  worker host.
- Extend execution metrics (Sprint 16-205) before exposing Prometheus counters.

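Following that registration order, a worker host bootstrap might look like this (a sketch; only the three extension method names come from these notes, and the parameter shape of the queue/storage registrations is assumed):

```csharp
using Microsoft.Extensions.Hosting;

var builder = Host.CreateApplicationBuilder(args);

// Order matters: queue and storage registrations precede the worker that consumes them.
builder.Services.AddSchedulerQueues(builder.Configuration);
builder.Services.AddSchedulerPostgresStorage(builder.Configuration); // parameter shape assumed
builder.Services.AddSchedulerWorker(builder.Configuration);

await builder.Build().RunAsync();
```
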
@@ -3,7 +3,7 @@

_Sprint 20 · Scheduler Worker Guild_

This milestone introduces the worker-side plumbing required to trigger Policy Engine
runs from scheduler-managed jobs. The worker now leases policy run jobs from Mongo,
runs from scheduler-managed jobs. The worker now leases policy run jobs from PostgreSQL,
submits them to the Policy Engine REST API, and tracks submission state deterministically.

## Highlights

@@ -11,8 +11,8 @@ submits them to the Policy Engine REST API, and tracks submission state determin

- New `PolicyRunJob` DTO (stored in `policy_jobs`) captures run metadata, attempts,
  lease ownership, and cancellation markers. Schema version `scheduler.policy-run-job@1`
  added to `SchedulerSchemaVersions` with canonical serializer coverage.
- Mongo storage gains `policy_jobs` collection with indexes on `{tenantId, status, availableAt}`
  and `runId` uniqueness for idempotency. Repository `IPolicyRunJobRepository` exposes
- PostgreSQL storage gains `policy_jobs` table with indexes on `(tenant_id, status, available_at)`
  and `run_id` uniqueness for idempotency. Repository `IPolicyRunJobRepository` exposes
  leasing and replace semantics guarded by lease owner checks.
- Worker options now include `Policy` dispatch/API subsections covering lease cadence,
  retry backoff, idempotency headers, and base URL validation.

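On PostgreSQL, lease semantics of this kind are commonly implemented with `FOR UPDATE SKIP LOCKED`; a hypothetical sketch of the claim query follows. Only the table and column names come from the bullet above; the `pending`/`leased` status values, owner handling, and overall flow are assumptions.

```csharp
using System;
using System.Threading.Tasks;
using Npgsql;

// Hypothetical lease claim: pick the oldest available job without blocking other workers.
static async Task<string?> LeaseNextJobAsync(NpgsqlDataSource dataSource, string tenantId)
{
    const string sql = """
        UPDATE policy_jobs
           SET status = 'leased', lease_owner = @owner
         WHERE run_id = (
               SELECT run_id FROM policy_jobs
                WHERE tenant_id = @tenant AND status = 'pending' AND available_at <= now()
                ORDER BY available_at
                FOR UPDATE SKIP LOCKED
                LIMIT 1)
        RETURNING run_id;
        """;
    await using var cmd = dataSource.CreateCommand(sql);
    cmd.Parameters.AddWithValue("tenant", tenantId);
    cmd.Parameters.AddWithValue("owner", Environment.MachineName); // owner identity assumed
    return await cmd.ExecuteScalarAsync() as string; // null when no job is available
}
```

`SKIP LOCKED` lets concurrent workers claim disjoint rows without lock contention, which pairs naturally with the lease-owner checks the repository enforces on replace.
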
@@ -2,7 +2,7 @@

_Sprint 21 · Scheduler Worker Guild_

The graph build worker leases pending `GraphBuildJob` records from Mongo, invokes
The graph build worker leases pending `GraphBuildJob` records from PostgreSQL, invokes
Cartographer to construct graph snapshots, and records terminal status via the
Scheduler WebService webhook so downstream systems observe completion events.

@@ -22,7 +22,7 @@ Expose policy-aware vulnerability listing, detail, simulation, workflow, and exp

## Tooling

- .NET 10 preview minimal API with async streaming for exports.
- PostgreSQL/Mongo projections from Findings Ledger; Redis for query caching as needed.
- PostgreSQL projections from Findings Ledger; Redis for query caching as needed.
- Integration with Policy Engine batch eval and simulation endpoints.

## Definition of Done

@@ -6,7 +6,7 @@ Team 8 owns the end-to-end security posture for StellaOps Authority and its cons

## Operational Boundaries

- Primary workspace: `src/__Libraries/StellaOps.Cryptography`, `src/Authority/StellaOps.Authority/StellaOps.Authority.Plugin.Standard`, `src/Authority/StellaOps.Authority/StellaOps.Authority.Storage.Mongo`, and Authority host (`src/Authority/StellaOps.Authority/StellaOps.Authority`).
- Primary workspace: `src/__Libraries/StellaOps.Cryptography`, `src/Authority/StellaOps.Authority/StellaOps.Authority.Plugin.Standard`, `src/Authority/StellaOps.Authority/StellaOps.Authority.Storage.Postgres`, and Authority host (`src/Authority/StellaOps.Authority/StellaOps.Authority`).
- Coordinate cross-module changes via TASKS.md updates and PR descriptions.
- Never bypass deterministic behaviour (sorted keys, stable timestamps).
- Tests live alongside owning projects (`*.Tests`). Extend goldens instead of rewriting.