sprints work
@@ -1,19 +1,50 @@
namespace StellaOps.Audit.ReplayToken;

/// <summary>
/// Result of token verification including expiration check.
/// </summary>
public enum ReplayTokenVerificationResult
{
    /// <summary>Token is valid and not expired.</summary>
    Valid = 0,

    /// <summary>Token hash does not match the inputs (tampered or different inputs).</summary>
    Invalid = 1,

    /// <summary>Token has expired.</summary>
    Expired = 2
}

/// <summary>
/// Generates deterministic replay tokens for audit and reproducibility.
/// </summary>
public interface IReplayTokenGenerator
{
    /// <summary>
-    /// Generates a replay token from the given inputs.
+    /// Generates a replay token from the given inputs without expiration (v1.0).
    /// </summary>
    /// <param name="request">The inputs to hash.</param>
    /// <returns>A deterministic replay token.</returns>
    ReplayToken Generate(ReplayTokenRequest request);

    /// <summary>
-    /// Verifies that inputs match a previously generated token.
+    /// Generates a replay token from the given inputs with expiration (v2.0).
    /// </summary>
    /// <param name="request">The inputs to hash.</param>
    /// <param name="expiration">How long the token is valid. If null, uses ReplayToken.DefaultExpiration.</param>
    /// <returns>A deterministic replay token with expiration.</returns>
    ReplayToken GenerateWithExpiration(ReplayTokenRequest request, TimeSpan? expiration = null);

    /// <summary>
    /// Verifies that inputs match a previously generated token (does not check expiration).
    /// </summary>
    bool Verify(ReplayToken token, ReplayTokenRequest request);

    /// <summary>
    /// Verifies that inputs match a previously generated token and checks expiration.
    /// </summary>
    /// <param name="token">The token to verify.</param>
    /// <param name="request">The inputs to verify against.</param>
    /// <returns>The verification result including expiration check.</returns>
    ReplayTokenVerificationResult VerifyWithExpiration(ReplayToken token, ReplayTokenRequest request);
}
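
For reference, a minimal usage sketch of the new interface surface. The Sha256ReplayTokenGenerator constructor taking a TimeProvider is inferred from its _timeProvider field later in this commit, and BuildRequest is a hypothetical helper; neither is confirmed by the diff:

    IReplayTokenGenerator generator = new Sha256ReplayTokenGenerator(TimeProvider.System);
    ReplayTokenRequest request = BuildRequest();  // hypothetical helper producing the inputs to hash

    // v2.0 path: the token carries an expiry (ReplayToken.DefaultExpiration = 1 hour if omitted).
    ReplayToken token = generator.GenerateWithExpiration(request, TimeSpan.FromMinutes(30));

    // One call covers both the hash comparison and the expiry check.
    ReplayTokenVerificationResult result = generator.VerifyWithExpiration(token, request);
    // result is Valid, Invalid (hash mismatch), or Expired.
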
@@ -1,7 +1,7 @@
namespace StellaOps.Audit.ReplayToken;

/// <summary>
-/// A deterministic, content-addressable replay token.
+/// A deterministic, content-addressable replay token with optional expiration.
/// </summary>
public sealed class ReplayToken : IEquatable<ReplayToken>
{
@@ -9,6 +9,16 @@ public sealed class ReplayToken : IEquatable<ReplayToken>
    public const string DefaultAlgorithm = "SHA-256";
    public const string DefaultVersion = "1.0";

    /// <summary>
    /// Version 2.0 includes expiration support.
    /// </summary>
    public const string VersionWithExpiration = "2.0";

    /// <summary>
    /// Default expiration duration (1 hour).
    /// </summary>
    public static readonly TimeSpan DefaultExpiration = TimeSpan.FromHours(1);

    /// <summary>
    /// The token value (SHA-256 hash in hex).
    /// </summary>
@@ -30,11 +40,30 @@ public sealed class ReplayToken : IEquatable<ReplayToken>
    public DateTimeOffset GeneratedAt { get; }

    /// <summary>
-    /// Canonical representation for storage.
+    /// Timestamp when token expires. Null means no expiration (v1.0 behavior).
    /// </summary>
-    public string Canonical => $"{Scheme}:v{Version}:{Algorithm}:{Value}";
+    public DateTimeOffset? ExpiresAt { get; }

    /// <summary>
    /// Canonical representation for storage.
    /// For v2.0+, includes expiration timestamp.
    /// </summary>
    public string Canonical => ExpiresAt.HasValue
        ? $"{Scheme}:v{Version}:{Algorithm}:{Value}:{ExpiresAt.Value.ToUnixTimeSeconds()}"
        : $"{Scheme}:v{Version}:{Algorithm}:{Value}";

    /// <summary>
    /// Creates a replay token without expiration (v1.0 compatibility).
    /// </summary>
    public ReplayToken(string value, DateTimeOffset generatedAt, string? algorithm = null, string? version = null)
        : this(value, generatedAt, null, algorithm, version)
    {
    }

    /// <summary>
    /// Creates a replay token with optional expiration.
    /// </summary>
    public ReplayToken(string value, DateTimeOffset generatedAt, DateTimeOffset? expiresAt, string? algorithm = null, string? version = null)
    {
        if (string.IsNullOrWhiteSpace(value))
        {
@@ -43,12 +72,56 @@ public sealed class ReplayToken : IEquatable<ReplayToken>

        Value = value.Trim();
        GeneratedAt = generatedAt;
        ExpiresAt = expiresAt;
        Algorithm = string.IsNullOrWhiteSpace(algorithm) ? DefaultAlgorithm : algorithm.Trim();
-        Version = string.IsNullOrWhiteSpace(version) ? DefaultVersion : version.Trim();

+        // Default to v2.0 if expiration is set, otherwise use provided or default
+        if (expiresAt.HasValue && string.IsNullOrWhiteSpace(version))
+        {
+            Version = VersionWithExpiration;
+        }
+        else
+        {
+            Version = string.IsNullOrWhiteSpace(version) ? DefaultVersion : version.Trim();
+        }
    }

    /// <summary>
    /// Checks if the token has expired.
    /// </summary>
    /// <param name="currentTime">The current time to check against. If null, uses DateTimeOffset.UtcNow.</param>
    /// <returns>True if the token has expired; false if not expired or has no expiration.</returns>
    public bool IsExpired(DateTimeOffset? currentTime = null)
    {
        if (!ExpiresAt.HasValue)
        {
            return false; // v1.0 tokens without expiration never expire
        }

        var now = currentTime ?? DateTimeOffset.UtcNow;
        return now >= ExpiresAt.Value;
    }

    /// <summary>
    /// Gets the remaining time until expiration.
    /// </summary>
    /// <param name="currentTime">The current time to check against. If null, uses DateTimeOffset.UtcNow.</param>
    /// <returns>The remaining time, or null if no expiration or already expired.</returns>
    public TimeSpan? GetTimeToExpiration(DateTimeOffset? currentTime = null)
    {
        if (!ExpiresAt.HasValue)
        {
            return null;
        }

        var now = currentTime ?? DateTimeOffset.UtcNow;
        var remaining = ExpiresAt.Value - now;
        return remaining > TimeSpan.Zero ? remaining : null;
    }

    /// <summary>
    /// Parse a canonical token string.
    /// Supports both v1.0 format (4 parts) and v2.0 format with expiration (5 parts).
    /// </summary>
    public static ReplayToken Parse(string canonical)
    {
@@ -58,7 +131,10 @@ public sealed class ReplayToken : IEquatable<ReplayToken>
        }

        var parts = canonical.Split(':', StringSplitOptions.TrimEntries | StringSplitOptions.RemoveEmptyEntries);
-        if (parts.Length != 4 || !string.Equals(parts[0], Scheme, StringComparison.Ordinal))
+        // v1.0 format: replay:v1.0:SHA-256:hash (4 parts)
+        // v2.0 format: replay:v2.0:SHA-256:hash:expiry_unix_seconds (5 parts)
+        if (parts.Length < 4 || parts.Length > 5 || !string.Equals(parts[0], Scheme, StringComparison.Ordinal))
        {
            throw new FormatException($"Invalid replay token format: {canonical}");
        }
@@ -73,7 +149,35 @@ public sealed class ReplayToken : IEquatable<ReplayToken>
        var algorithm = parts[2];
        var value = parts[3];

-        return new ReplayToken(value, DateTimeOffset.UnixEpoch, algorithm, version);
+        DateTimeOffset? expiresAt = null;
+        if (parts.Length == 5)
+        {
+            if (!long.TryParse(parts[4], out var unixSeconds))
+            {
+                throw new FormatException($"Invalid expiration timestamp in replay token: {canonical}");
+            }
+            expiresAt = DateTimeOffset.FromUnixTimeSeconds(unixSeconds);
+        }

+        return new ReplayToken(value, DateTimeOffset.UnixEpoch, expiresAt, algorithm, version);
    }

    /// <summary>
    /// Try to parse a canonical token string.
    /// </summary>
    /// <returns>True if parsing succeeded; false otherwise.</returns>
    public static bool TryParse(string canonical, out ReplayToken? token)
    {
        try
        {
            token = Parse(canonical);
            return true;
        }
        catch
        {
            token = null;
            return false;
        }
    }

    public override string ToString() => Canonical;
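
A quick round-trip of the two canonical formats documented above (illustrative; assumes Scheme is "replay" as the format comments state, that a 64-char hex value passes any value validation elided by the hunk, and that 1735689600 is 2025-01-01T00:00:00Z):

    // v1.0 (4 parts):  replay:v1.0:SHA-256:<hash>                 -- never expires
    // v2.0 (5 parts):  replay:v2.0:SHA-256:<hash>:<unix_seconds>  -- expires at that instant
    var canonical = $"replay:v2.0:SHA-256:{new string('a', 64)}:1735689600";
    var token = ReplayToken.Parse(canonical);

    Console.WriteLine(token.ExpiresAt);                           // expected: 2025-01-01 00:00:00 +00:00
    Console.WriteLine(token.IsExpired(DateTimeOffset.UnixEpoch)); // expected: False (well before expiry)
    Console.WriteLine(token.Canonical);                           // expected: round-trips to the input string
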
@@ -35,6 +35,19 @@ public sealed class Sha256ReplayTokenGenerator : IReplayTokenGenerator
        return new ReplayToken(hashHex, _timeProvider.GetUtcNow());
    }

    public ReplayToken GenerateWithExpiration(ReplayTokenRequest request, TimeSpan? expiration = null)
    {
        ArgumentNullException.ThrowIfNull(request);

        var canonical = Canonicalize(request);
        var hashHex = ComputeHash(canonical);

        var now = _timeProvider.GetUtcNow();
        var expiresAt = now + (expiration ?? ReplayToken.DefaultExpiration);

        return new ReplayToken(hashHex, now, expiresAt, ReplayToken.DefaultAlgorithm, ReplayToken.VersionWithExpiration);
    }

    public bool Verify(ReplayToken token, ReplayTokenRequest request)
    {
        ArgumentNullException.ThrowIfNull(token);
@@ -44,6 +57,27 @@ public sealed class Sha256ReplayTokenGenerator : IReplayTokenGenerator
        return string.Equals(token.Value, computed.Value, StringComparison.OrdinalIgnoreCase);
    }

    public ReplayTokenVerificationResult VerifyWithExpiration(ReplayToken token, ReplayTokenRequest request)
    {
        ArgumentNullException.ThrowIfNull(token);
        ArgumentNullException.ThrowIfNull(request);

        // Check hash first
        var computed = Generate(request);
        if (!string.Equals(token.Value, computed.Value, StringComparison.OrdinalIgnoreCase))
        {
            return ReplayTokenVerificationResult.Invalid;
        }

        // Check expiration
        if (token.IsExpired(_timeProvider.GetUtcNow()))
        {
            return ReplayTokenVerificationResult.Expired;
        }

        return ReplayTokenVerificationResult.Valid;
    }

    private string ComputeHash(string input)
    {
        var bytes = Encoding.UTF8.GetBytes(input);
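
Because the generator resolves "now" through its injected TimeProvider, expiry behavior can be pinned in tests with a fixed clock. A sketch, reusing the request from the earlier sketch (the TimeProvider-accepting constructor is inferred from the _timeProvider field above, not shown in this hunk):

    // TimeProvider (.NET 8+) exposes a virtual GetUtcNow that a fixed clock can override.
    sealed class FixedTimeProvider(DateTimeOffset now) : TimeProvider
    {
        public override DateTimeOffset GetUtcNow() => now;
    }

    var clock = new FixedTimeProvider(DateTimeOffset.Parse("2025-01-01T00:00:00Z"));
    var generator = new Sha256ReplayTokenGenerator(clock);
    var token = generator.GenerateWithExpiration(request, TimeSpan.FromHours(1));

    Console.WriteLine(token.IsExpired(clock.GetUtcNow()));              // False: within the hour
    Console.WriteLine(token.IsExpired(clock.GetUtcNow().AddHours(1)));  // True: now >= ExpiresAt is inclusive
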
@@ -0,0 +1,39 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Infrastructure.Postgres.Connections;
using StellaOps.Infrastructure.Postgres.Options;

namespace StellaOps.Evidence.Storage.Postgres;

/// <summary>
/// PostgreSQL data source for the Evidence module.
/// Manages connections with tenant context for evidence storage.
/// </summary>
public sealed class EvidenceDataSource : DataSourceBase
{
    /// <summary>
    /// Default schema name for Evidence tables.
    /// </summary>
    public const string DefaultSchemaName = "evidence";

    /// <summary>
    /// Creates a new Evidence data source.
    /// </summary>
    public EvidenceDataSource(IOptions<PostgresOptions> options, ILogger<EvidenceDataSource> logger)
        : base(CreateOptions(options.Value), logger)
    {
    }

    /// <inheritdoc />
    protected override string ModuleName => "Evidence";

    private static PostgresOptions CreateOptions(PostgresOptions baseOptions)
    {
        // Use default schema if not specified
        if (string.IsNullOrWhiteSpace(baseOptions.SchemaName))
        {
            baseOptions.SchemaName = DefaultSchemaName;
        }
        return baseOptions;
    }
}
@@ -0,0 +1,58 @@
-- -----------------------------------------------------------------------------
-- 001_initial_schema.sql
-- Sprint: 8100.0012.0002 - Unified Evidence Model
-- Description: Creates the evidence schema and records table for unified evidence storage
-- -----------------------------------------------------------------------------

-- Create schema
CREATE SCHEMA IF NOT EXISTS evidence;

-- Evidence records table
CREATE TABLE IF NOT EXISTS evidence.records (
    evidence_id         TEXT PRIMARY KEY,
    subject_node_id     TEXT NOT NULL,
    evidence_type       SMALLINT NOT NULL,
    payload             BYTEA NOT NULL,
    payload_schema_ver  TEXT NOT NULL,
    external_cid        TEXT,
    provenance          JSONB NOT NULL,
    signatures          JSONB NOT NULL DEFAULT '[]',
    created_at          TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    tenant_id           UUID NOT NULL
);

-- Index for subject-based queries (most common access pattern)
CREATE INDEX IF NOT EXISTS idx_evidence_subject
    ON evidence.records (subject_node_id, evidence_type);

-- Index for type-based queries with recency ordering
CREATE INDEX IF NOT EXISTS idx_evidence_type
    ON evidence.records (evidence_type, created_at DESC);

-- Index for tenant-based queries with recency ordering
CREATE INDEX IF NOT EXISTS idx_evidence_tenant
    ON evidence.records (tenant_id, created_at DESC);

-- Index for external CID lookups (when payload stored externally)
CREATE INDEX IF NOT EXISTS idx_evidence_external_cid
    ON evidence.records (external_cid)
    WHERE external_cid IS NOT NULL;

-- Enable Row-Level Security
ALTER TABLE evidence.records ENABLE ROW LEVEL SECURITY;

-- RLS policy: tenant isolation
CREATE POLICY evidence_tenant_isolation ON evidence.records
    USING (tenant_id = current_setting('app.tenant_id', true)::uuid);

-- Comment on table
COMMENT ON TABLE evidence.records IS 'Unified evidence storage for content-addressed proof records';
COMMENT ON COLUMN evidence.records.evidence_id IS 'Content-addressed identifier (sha256:{hex})';
COMMENT ON COLUMN evidence.records.subject_node_id IS 'Content-addressed subject identifier this evidence applies to';
COMMENT ON COLUMN evidence.records.evidence_type IS 'Type discriminator (1=Reachability, 2=Scan, 3=Policy, etc.)';
COMMENT ON COLUMN evidence.records.payload IS 'Canonical JSON payload bytes';
COMMENT ON COLUMN evidence.records.payload_schema_ver IS 'Schema version for payload format';
COMMENT ON COLUMN evidence.records.external_cid IS 'Optional CID for large payloads stored externally';
COMMENT ON COLUMN evidence.records.provenance IS 'Generation provenance (generator, timestamp, etc.)';
COMMENT ON COLUMN evidence.records.signatures IS 'Cryptographic signatures attesting to this evidence';
COMMENT ON COLUMN evidence.records.tenant_id IS 'Tenant identifier for multi-tenant isolation';
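
The RLS policy only admits rows whose tenant_id matches the app.tenant_id connection setting, so every session must set that GUC before touching evidence.records. A C# sketch of what the tenant-scoped OpenConnectionAsync in DataSourceBase presumably does internally (illustrative only; uses standard Npgsql and PostgreSQL's set_config; connectionString, tenantId, and ct are assumed to be in scope):

    // using Npgsql;
    await using var conn = new NpgsqlConnection(connectionString);
    await conn.OpenAsync(ct);
    await using var tx = await conn.BeginTransactionAsync(ct);

    // is_local = true scopes the setting to this transaction only.
    await using (var set = new NpgsqlCommand("SELECT set_config('app.tenant_id', @tid, true)", conn, tx))
    {
        set.Parameters.AddWithValue("tid", tenantId);  // tenant GUID as string
        await set.ExecuteNonQueryAsync(ct);
    }

    // Statements inside this transaction now see only rows where
    // tenant_id = current_setting('app.tenant_id')::uuid.
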
@@ -0,0 +1,309 @@
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Npgsql;
using NpgsqlTypes;
using StellaOps.Evidence.Core;
using StellaOps.Infrastructure.Postgres.Repositories;

namespace StellaOps.Evidence.Storage.Postgres;

/// <summary>
/// PostgreSQL implementation of <see cref="IEvidenceStore"/>.
/// Stores evidence records with content-addressed IDs and tenant isolation via RLS.
/// </summary>
public sealed class PostgresEvidenceStore : RepositoryBase<EvidenceDataSource>, IEvidenceStore
{
    private readonly string _tenantId;
    private static readonly JsonSerializerOptions JsonOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase
    };

    /// <summary>
    /// Creates a new PostgreSQL evidence store for the specified tenant.
    /// </summary>
    /// <param name="dataSource">Evidence data source.</param>
    /// <param name="tenantId">Tenant identifier for RLS.</param>
    /// <param name="logger">Logger instance.</param>
    public PostgresEvidenceStore(
        EvidenceDataSource dataSource,
        string tenantId,
        ILogger<PostgresEvidenceStore> logger)
        : base(dataSource, logger)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);
        _tenantId = tenantId;
    }

    /// <inheritdoc />
    public async Task<string> StoreAsync(IEvidence evidence, CancellationToken ct = default)
    {
        ArgumentNullException.ThrowIfNull(evidence);

        const string sql = """
            INSERT INTO evidence.records (
                evidence_id, subject_node_id, evidence_type, payload,
                payload_schema_ver, external_cid, provenance, signatures, tenant_id
            ) VALUES (
                @evidenceId, @subjectNodeId, @evidenceType, @payload,
                @payloadSchemaVer, @externalCid, @provenance, @signatures, @tenantId
            )
            ON CONFLICT (evidence_id) DO NOTHING
            RETURNING evidence_id
            """;

        await using var connection = await DataSource.OpenConnectionAsync(_tenantId, "writer", ct)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddEvidenceParameters(command, evidence);

        var result = await command.ExecuteScalarAsync(ct).ConfigureAwait(false);

        // If result is null, row already existed (idempotent)
        return evidence.EvidenceId;
    }

    /// <inheritdoc />
    public async Task<int> StoreBatchAsync(IEnumerable<IEvidence> evidenceRecords, CancellationToken ct = default)
    {
        ArgumentNullException.ThrowIfNull(evidenceRecords);

        var records = evidenceRecords.ToList();
        if (records.Count == 0)
        {
            return 0;
        }

        await using var connection = await DataSource.OpenConnectionAsync(_tenantId, "writer", ct)
            .ConfigureAwait(false);
        await using var transaction = await connection.BeginTransactionAsync(ct).ConfigureAwait(false);

        var storedCount = 0;

        foreach (var evidence in records)
        {
            const string sql = """
                INSERT INTO evidence.records (
                    evidence_id, subject_node_id, evidence_type, payload,
                    payload_schema_ver, external_cid, provenance, signatures, tenant_id
                ) VALUES (
                    @evidenceId, @subjectNodeId, @evidenceType, @payload,
                    @payloadSchemaVer, @externalCid, @provenance, @signatures, @tenantId
                )
                ON CONFLICT (evidence_id) DO NOTHING
                """;

            await using var command = new NpgsqlCommand(sql, connection, transaction)
            {
                CommandTimeout = CommandTimeoutSeconds
            };

            AddEvidenceParameters(command, evidence);

            var affected = await command.ExecuteNonQueryAsync(ct).ConfigureAwait(false);
            if (affected > 0)
            {
                storedCount++;
            }
        }

        await transaction.CommitAsync(ct).ConfigureAwait(false);
        return storedCount;
    }

    /// <inheritdoc />
    public async Task<IEvidence?> GetByIdAsync(string evidenceId, CancellationToken ct = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(evidenceId);

        const string sql = """
            SELECT evidence_id, subject_node_id, evidence_type, payload,
                   payload_schema_ver, external_cid, provenance, signatures
            FROM evidence.records
            WHERE evidence_id = @evidenceId
            """;

        return await QuerySingleOrDefaultAsync<IEvidence>(
            _tenantId,
            sql,
            cmd => AddParameter(cmd, "@evidenceId", evidenceId),
            MapEvidence,
            ct).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<IEvidence>> GetBySubjectAsync(
        string subjectNodeId,
        EvidenceType? typeFilter = null,
        CancellationToken ct = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(subjectNodeId);

        var sql = """
            SELECT evidence_id, subject_node_id, evidence_type, payload,
                   payload_schema_ver, external_cid, provenance, signatures
            FROM evidence.records
            WHERE subject_node_id = @subjectNodeId
            """;

        if (typeFilter.HasValue)
        {
            sql += " AND evidence_type = @evidenceType";
        }

        sql += " ORDER BY created_at DESC";

        return await QueryAsync<IEvidence>(
            _tenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "@subjectNodeId", subjectNodeId);
                if (typeFilter.HasValue)
                {
                    AddParameter(cmd, "@evidenceType", (short)typeFilter.Value);
                }
            },
            MapEvidence,
            ct).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<IEvidence>> GetByTypeAsync(
        EvidenceType evidenceType,
        int limit = 100,
        CancellationToken ct = default)
    {
        const string sql = """
            SELECT evidence_id, subject_node_id, evidence_type, payload,
                   payload_schema_ver, external_cid, provenance, signatures
            FROM evidence.records
            WHERE evidence_type = @evidenceType
            ORDER BY created_at DESC
            LIMIT @limit
            """;

        return await QueryAsync<IEvidence>(
            _tenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "@evidenceType", (short)evidenceType);
                AddParameter(cmd, "@limit", limit);
            },
            MapEvidence,
            ct).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<bool> ExistsAsync(string subjectNodeId, EvidenceType type, CancellationToken ct = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(subjectNodeId);

        const string sql = """
            SELECT EXISTS(
                SELECT 1 FROM evidence.records
                WHERE subject_node_id = @subjectNodeId
                  AND evidence_type = @evidenceType
            )
            """;

        var result = await ExecuteScalarAsync<bool>(
            _tenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "@subjectNodeId", subjectNodeId);
                AddParameter(cmd, "@evidenceType", (short)type);
            },
            ct).ConfigureAwait(false);

        return result;
    }

    /// <inheritdoc />
    public async Task<bool> DeleteAsync(string evidenceId, CancellationToken ct = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(evidenceId);

        const string sql = """
            DELETE FROM evidence.records
            WHERE evidence_id = @evidenceId
            """;

        var affected = await ExecuteAsync(
            _tenantId,
            sql,
            cmd => AddParameter(cmd, "@evidenceId", evidenceId),
            ct).ConfigureAwait(false);

        return affected > 0;
    }

    /// <inheritdoc />
    public async Task<int> CountBySubjectAsync(string subjectNodeId, CancellationToken ct = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(subjectNodeId);

        const string sql = """
            SELECT COUNT(*)
            FROM evidence.records
            WHERE subject_node_id = @subjectNodeId
            """;

        var result = await ExecuteScalarAsync<long>(
            _tenantId,
            sql,
            cmd => AddParameter(cmd, "@subjectNodeId", subjectNodeId),
            ct).ConfigureAwait(false);

        return (int)result;
    }

    private void AddEvidenceParameters(NpgsqlCommand command, IEvidence evidence)
    {
        AddParameter(command, "@evidenceId", evidence.EvidenceId);
        AddParameter(command, "@subjectNodeId", evidence.SubjectNodeId);
        AddParameter(command, "@evidenceType", (short)evidence.EvidenceType);
        command.Parameters.Add(new NpgsqlParameter<byte[]>("@payload", NpgsqlDbType.Bytea)
        {
            TypedValue = evidence.Payload.ToArray()
        });
        AddParameter(command, "@payloadSchemaVer", evidence.PayloadSchemaVersion);
        AddParameter(command, "@externalCid", evidence.ExternalPayloadCid);
        AddJsonbParameter(command, "@provenance", JsonSerializer.Serialize(evidence.Provenance, JsonOptions));
        AddJsonbParameter(command, "@signatures", JsonSerializer.Serialize(evidence.Signatures, JsonOptions));
        AddParameter(command, "@tenantId", Guid.Parse(_tenantId));
    }

    private static IEvidence MapEvidence(NpgsqlDataReader reader)
    {
        var evidenceId = reader.GetString(0);
        var subjectNodeId = reader.GetString(1);
        var evidenceType = (EvidenceType)reader.GetInt16(2);
        var payload = reader.GetFieldValue<byte[]>(3);
        var payloadSchemaVer = reader.GetString(4);
        var externalCid = GetNullableString(reader, 5);
        var provenanceJson = reader.GetString(6);
        var signaturesJson = reader.GetString(7);

        var provenance = JsonSerializer.Deserialize<EvidenceProvenance>(provenanceJson, JsonOptions)
            ?? throw new InvalidOperationException($"Failed to deserialize provenance for evidence {evidenceId}");

        var signatures = JsonSerializer.Deserialize<List<EvidenceSignature>>(signaturesJson, JsonOptions)
            ?? [];

        return new EvidenceRecord
        {
            EvidenceId = evidenceId,
            SubjectNodeId = subjectNodeId,
            EvidenceType = evidenceType,
            Payload = payload,
            PayloadSchemaVersion = payloadSchemaVer,
            ExternalPayloadCid = externalCid,
            Provenance = provenance,
            Signatures = signatures
        };
    }
}
@@ -0,0 +1,41 @@
using Microsoft.Extensions.Logging;
using StellaOps.Evidence.Core;

namespace StellaOps.Evidence.Storage.Postgres;

/// <summary>
/// Factory for creating tenant-scoped PostgreSQL evidence stores.
/// </summary>
public sealed class PostgresEvidenceStoreFactory
{
    private readonly EvidenceDataSource _dataSource;
    private readonly ILoggerFactory _loggerFactory;

    /// <summary>
    /// Creates a new evidence store factory.
    /// </summary>
    /// <param name="dataSource">Evidence data source.</param>
    /// <param name="loggerFactory">Logger factory.</param>
    public PostgresEvidenceStoreFactory(
        EvidenceDataSource dataSource,
        ILoggerFactory loggerFactory)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
        _loggerFactory = loggerFactory ?? throw new ArgumentNullException(nameof(loggerFactory));
    }

    /// <summary>
    /// Creates an evidence store for the specified tenant.
    /// </summary>
    /// <param name="tenantId">Tenant identifier (GUID as string).</param>
    /// <returns>Evidence store scoped to the tenant.</returns>
    public IEvidenceStore Create(string tenantId)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);

        return new PostgresEvidenceStore(
            _dataSource,
            tenantId,
            _loggerFactory.CreateLogger<PostgresEvidenceStore>());
    }
}
@@ -0,0 +1,55 @@
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using StellaOps.Infrastructure.Postgres.Options;

namespace StellaOps.Evidence.Storage.Postgres;

/// <summary>
/// Service collection extensions for Evidence PostgreSQL storage.
/// </summary>
public static class ServiceCollectionExtensions
{
    /// <summary>
    /// Adds Evidence PostgreSQL storage services.
    /// </summary>
    /// <param name="services">Service collection.</param>
    /// <param name="configureOptions">Optional: configure PostgreSQL options.</param>
    /// <returns>Service collection for chaining.</returns>
    public static IServiceCollection AddEvidencePostgresStorage(
        this IServiceCollection services,
        Action<PostgresOptions>? configureOptions = null)
    {
        // Register PostgreSQL options if not already registered
        if (configureOptions is not null)
        {
            services.Configure(configureOptions);
        }

        // Register data source as singleton
        services.TryAddSingleton<EvidenceDataSource>();

        // Register factory for creating tenant-scoped stores
        services.TryAddSingleton<PostgresEvidenceStoreFactory>();

        return services;
    }

    /// <summary>
    /// Adds Evidence PostgreSQL storage services with a connection string.
    /// </summary>
    /// <param name="services">Service collection.</param>
    /// <param name="connectionString">PostgreSQL connection string.</param>
    /// <returns>Service collection for chaining.</returns>
    public static IServiceCollection AddEvidencePostgresStorage(
        this IServiceCollection services,
        string connectionString)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(connectionString);

        return services.AddEvidencePostgresStorage(options =>
        {
            options.ConnectionString = connectionString;
            options.SchemaName = EvidenceDataSource.DefaultSchemaName;
        });
    }
}
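
End-to-end wiring sketch for the registration above (connection string and tenant GUID are placeholders; AddLogging comes from Microsoft.Extensions.Logging):

    var services = new ServiceCollection();
    services.AddLogging();
    services.AddEvidencePostgresStorage("Host=localhost;Database=stellaops;Username=app;Password=secret");

    await using var provider = services.BuildServiceProvider();
    var factory = provider.GetRequiredService<PostgresEvidenceStoreFactory>();

    // One store per tenant; the RLS policy keeps each store's reads and writes inside its tenant.
    IEvidenceStore store = factory.Create("0b5f2f8e-0000-0000-0000-000000000001");
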
@@ -0,0 +1,22 @@
<?xml version="1.0" ?>
<Project Sdk="Microsoft.NET.Sdk">

  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
    <LangVersion>preview</LangVersion>
    <TreatWarningsAsErrors>false</TreatWarningsAsErrors>
    <RootNamespace>StellaOps.Evidence.Storage.Postgres</RootNamespace>
  </PropertyGroup>

  <ItemGroup>
    <EmbeddedResource Include="Migrations\**\*.sql" LogicalName="%(RecursiveDir)%(Filename)%(Extension)" />
  </ItemGroup>

  <ItemGroup>
    <ProjectReference Include="..\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
    <ProjectReference Include="..\StellaOps.Evidence.Core\StellaOps.Evidence.Core.csproj" />
  </ItemGroup>

</Project>

src/__Libraries/StellaOps.Provcache.Api/ApiModels.cs (new file, 191 lines)
@@ -0,0 +1,191 @@
namespace StellaOps.Provcache.Api;

/// <summary>
/// Response model for GET /v1/provcache/{veriKey}.
/// </summary>
public sealed class ProvcacheGetResponse
{
    /// <summary>
    /// The VeriKey that was looked up.
    /// </summary>
    public required string VeriKey { get; init; }

    /// <summary>
    /// The cache entry if found.
    /// </summary>
    public ProvcacheEntry? Entry { get; init; }

    /// <summary>
    /// The source of the cache hit (valkey, postgres, etc.).
    /// </summary>
    public string? Source { get; init; }

    /// <summary>
    /// Time taken for the lookup in milliseconds.
    /// </summary>
    public double ElapsedMs { get; init; }

    /// <summary>
    /// Status: "hit", "miss", "bypassed", "expired".
    /// </summary>
    public required string Status { get; init; }
}

/// <summary>
/// Request model for POST /v1/provcache.
/// </summary>
public sealed class ProvcacheCreateRequest
{
    /// <summary>
    /// The cache entry to store.
    /// </summary>
    public ProvcacheEntry? Entry { get; init; }
}

/// <summary>
/// Response model for POST /v1/provcache.
/// </summary>
public sealed class ProvcacheCreateResponse
{
    /// <summary>
    /// The VeriKey that was stored.
    /// </summary>
    public required string VeriKey { get; init; }

    /// <summary>
    /// Whether the store operation succeeded.
    /// </summary>
    public required bool Success { get; init; }

    /// <summary>
    /// When the entry expires.
    /// </summary>
    public DateTimeOffset ExpiresAt { get; init; }
}

/// <summary>
/// Request model for POST /v1/provcache/invalidate.
/// </summary>
public sealed class ProvcacheInvalidateRequest
{
    /// <summary>
    /// The invalidation type. If null, defaults to exact VeriKey match.
    /// </summary>
    public InvalidationType? Type { get; init; }

    /// <summary>
    /// The value to match for invalidation.
    /// For VeriKey type: exact VeriKey.
    /// For PolicyHash type: policy hash to match.
    /// For Pattern type: glob pattern.
    /// </summary>
    public required string Value { get; init; }

    /// <summary>
    /// Reason for invalidation (for audit log).
    /// </summary>
    public string? Reason { get; init; }

    /// <summary>
    /// Actor performing the invalidation (for audit log).
    /// </summary>
    public string? Actor { get; init; }
}

/// <summary>
/// Response model for POST /v1/provcache/invalidate.
/// </summary>
public sealed class ProvcacheInvalidateResponse
{
    /// <summary>
    /// Number of entries affected by the invalidation.
    /// </summary>
    public long EntriesAffected { get; init; }

    /// <summary>
    /// The invalidation type that was used.
    /// </summary>
    public required string Type { get; init; }

    /// <summary>
    /// The value that was matched.
    /// </summary>
    public required string Value { get; init; }

    /// <summary>
    /// Reason for invalidation if provided.
    /// </summary>
    public string? Reason { get; init; }
}

/// <summary>
/// Response model for GET /v1/provcache/metrics.
/// </summary>
public sealed class ProvcacheMetricsResponse
{
    /// <summary>
    /// Total number of cache requests.
    /// </summary>
    public long TotalRequests { get; init; }

    /// <summary>
    /// Total number of cache hits.
    /// </summary>
    public long TotalHits { get; init; }

    /// <summary>
    /// Total number of cache misses.
    /// </summary>
    public long TotalMisses { get; init; }

    /// <summary>
    /// Total number of invalidations performed.
    /// </summary>
    public long TotalInvalidations { get; init; }

    /// <summary>
    /// Cache hit rate (0.0 - 1.0).
    /// </summary>
    public double HitRate { get; init; }

    /// <summary>
    /// Current number of entries in the cache.
    /// </summary>
    public long CurrentEntryCount { get; init; }

    /// <summary>
    /// Average lookup latency in milliseconds.
    /// </summary>
    public double AvgLatencyMs { get; init; }

    /// <summary>
    /// 99th percentile lookup latency in milliseconds.
    /// </summary>
    public double P99LatencyMs { get; init; }

    /// <summary>
    /// Whether the Valkey cache layer is healthy.
    /// </summary>
    public bool ValkeyCacheHealthy { get; init; }

    /// <summary>
    /// Whether the Postgres repository layer is healthy.
    /// </summary>
    public bool PostgresRepositoryHealthy { get; init; }

    /// <summary>
    /// When these metrics were collected.
    /// </summary>
    public DateTimeOffset CollectedAt { get; init; }
}

/// <summary>
/// Additional invalidation type for direct VeriKey invalidation via API.
/// </summary>
internal static class InvalidationTypeExtensions
{
    /// <summary>
    /// Direct VeriKey invalidation type constant.
    /// </summary>
    public const string VeriKey = "VeriKey";
}
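
A client-side sketch against these models (the base address and the veriKey variable are hypothetical; requires using System.Net and System.Net.Http.Json):

    using var http = new HttpClient { BaseAddress = new Uri("https://stellaops.example/") };
    var response = await http.GetAsync($"v1/provcache/{Uri.EscapeDataString(veriKey)}");

    if (response.StatusCode == HttpStatusCode.NoContent)
    {
        // miss: nothing cached under this VeriKey
    }
    else if (response.StatusCode == HttpStatusCode.Gone)
    {
        // an entry existed but has expired (410 per the endpoint contract)
    }
    else
    {
        var body = await response.Content.ReadFromJsonAsync<ProvcacheGetResponse>();
        // body.Status is "hit" or "bypassed"; body.Entry carries the decision on a hit.
    }
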
@@ -0,0 +1,280 @@
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Routing;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;

namespace StellaOps.Provcache.Api;

/// <summary>
/// Marker class for logging in Provcache API endpoints.
/// </summary>
public sealed class ProvcacheApiEndpoints;

/// <summary>
/// Extension methods for mapping Provcache API endpoints.
/// </summary>
public static class ProvcacheEndpointExtensions
{
    /// <summary>
    /// Maps Provcache API endpoints to the specified route builder.
    /// </summary>
    /// <param name="endpoints">The endpoint route builder.</param>
    /// <param name="prefix">The route prefix (default: "/v1/provcache").</param>
    /// <returns>A route group builder for further customization.</returns>
    public static RouteGroupBuilder MapProvcacheEndpoints(
        this IEndpointRouteBuilder endpoints,
        string prefix = "/v1/provcache")
    {
        var group = endpoints.MapGroup(prefix)
            .WithTags("Provcache")
            .WithOpenApi();

        // GET /v1/provcache/{veriKey}
        group.MapGet("/{veriKey}", GetByVeriKey)
            .WithName("GetProvcacheEntry")
            .WithSummary("Get a cached decision by VeriKey")
            .WithDescription("Retrieves a cached evaluation decision by its VeriKey. Returns 200 if found, 204 if not cached, 410 if expired.")
            .Produces<ProvcacheGetResponse>(StatusCodes.Status200OK)
            .Produces(StatusCodes.Status204NoContent)
            .Produces(StatusCodes.Status410Gone)
            .Produces<ProblemDetails>(StatusCodes.Status500InternalServerError);

        // POST /v1/provcache
        group.MapPost("/", CreateOrUpdate)
            .WithName("CreateOrUpdateProvcacheEntry")
            .WithSummary("Store a decision in the cache (idempotent)")
            .WithDescription("Stores or updates a cached evaluation decision. This operation is idempotent - storing the same VeriKey multiple times is safe.")
            .Accepts<ProvcacheCreateRequest>("application/json")
            .Produces<ProvcacheCreateResponse>(StatusCodes.Status201Created)
            .Produces<ProvcacheCreateResponse>(StatusCodes.Status200OK)
            .Produces<ProblemDetails>(StatusCodes.Status400BadRequest)
            .Produces<ProblemDetails>(StatusCodes.Status500InternalServerError);

        // POST /v1/provcache/invalidate
        group.MapPost("/invalidate", Invalidate)
            .WithName("InvalidateProvcacheEntries")
            .WithSummary("Invalidate cache entries by key or pattern")
            .WithDescription("Invalidates one or more cache entries. Can invalidate by exact VeriKey, policy hash, signer set hash, feed epoch, or pattern.")
            .Accepts<ProvcacheInvalidateRequest>("application/json")
            .Produces<ProvcacheInvalidateResponse>(StatusCodes.Status200OK)
            .Produces<ProblemDetails>(StatusCodes.Status400BadRequest)
            .Produces<ProblemDetails>(StatusCodes.Status500InternalServerError);

        // GET /v1/provcache/metrics
        group.MapGet("/metrics", GetMetrics)
            .WithName("GetProvcacheMetrics")
            .WithSummary("Get cache performance metrics")
            .WithDescription("Returns current cache metrics including hit rate, miss rate, latency percentiles, and entry counts.")
            .Produces<ProvcacheMetricsResponse>(StatusCodes.Status200OK)
            .Produces<ProblemDetails>(StatusCodes.Status500InternalServerError);

        return group;
    }

    /// <summary>
    /// GET /v1/provcache/{veriKey}
    /// </summary>
    private static async Task<IResult> GetByVeriKey(
        string veriKey,
        bool? bypassCache,
        IProvcacheService provcacheService,
        ILogger<ProvcacheApiEndpoints> logger,
        CancellationToken cancellationToken)
    {
        logger.LogDebug("GET /v1/provcache/{VeriKey}", veriKey);

        try
        {
            var result = await provcacheService.GetAsync(veriKey, bypassCache ?? false, cancellationToken);

            return result.Status switch
            {
                ProvcacheResultStatus.CacheHit => Results.Ok(new ProvcacheGetResponse
                {
                    VeriKey = result.Entry!.VeriKey,
                    Entry = result.Entry,
                    Source = result.Source,
                    ElapsedMs = result.ElapsedMs,
                    Status = "hit"
                }),
                ProvcacheResultStatus.Bypassed => Results.Ok(new ProvcacheGetResponse
                {
                    VeriKey = veriKey,
                    Entry = null,
                    Source = null,
                    ElapsedMs = result.ElapsedMs,
                    Status = "bypassed"
                }),
                ProvcacheResultStatus.Expired => Results.StatusCode(StatusCodes.Status410Gone),
                _ => Results.NoContent()
            };
        }
        catch (Exception ex)
        {
            logger.LogError(ex, "Error getting cache entry for VeriKey {VeriKey}", veriKey);
            return Results.Problem(
                detail: ex.Message,
                statusCode: StatusCodes.Status500InternalServerError,
                title: "Cache lookup failed");
        }
    }

    /// <summary>
    /// POST /v1/provcache
    /// </summary>
    private static async Task<IResult> CreateOrUpdate(
        ProvcacheCreateRequest request,
        IProvcacheService provcacheService,
        ILogger<ProvcacheApiEndpoints> logger,
        CancellationToken cancellationToken)
    {
        logger.LogDebug("POST /v1/provcache for VeriKey {VeriKey}", request.Entry?.VeriKey);

        if (request.Entry is null)
        {
            return Results.Problem(
                detail: "Request body must contain a valid entry",
                statusCode: StatusCodes.Status400BadRequest,
                title: "Invalid request");
        }

        try
        {
            var success = await provcacheService.SetAsync(request.Entry, cancellationToken);

            if (!success)
            {
                return Results.Problem(
                    detail: "Failed to store cache entry",
                    statusCode: StatusCodes.Status500InternalServerError,
                    title: "Cache write failed");
            }

            return Results.Created($"/v1/provcache/{request.Entry.VeriKey}", new ProvcacheCreateResponse
            {
                VeriKey = request.Entry.VeriKey,
                Success = true,
                ExpiresAt = request.Entry.ExpiresAt
            });
        }
        catch (Exception ex)
        {
            logger.LogError(ex, "Error storing cache entry for VeriKey {VeriKey}", request.Entry?.VeriKey);
            return Results.Problem(
                detail: ex.Message,
                statusCode: StatusCodes.Status500InternalServerError,
                title: "Cache write failed");
        }
    }

    /// <summary>
    /// POST /v1/provcache/invalidate
    /// </summary>
    private static async Task<IResult> Invalidate(
        ProvcacheInvalidateRequest request,
        IProvcacheService provcacheService,
        ILogger<ProvcacheApiEndpoints> logger,
        CancellationToken cancellationToken)
    {
        logger.LogDebug("POST /v1/provcache/invalidate type={Type} value={Value}", request.Type, request.Value);

        try
        {
            // If single VeriKey invalidation (Type is null = single VeriKey mode)
            if (request.Type is null)
            {
                var success = await provcacheService.InvalidateAsync(request.Value, request.Reason, cancellationToken);
                return Results.Ok(new ProvcacheInvalidateResponse
                {
                    EntriesAffected = success ? 1 : 0,
                    Type = "verikey",
                    Value = request.Value,
                    Reason = request.Reason
                });
            }

            // Bulk invalidation
            var invalidationRequest = new InvalidationRequest
            {
                Type = request.Type ?? InvalidationType.Pattern,
                Value = request.Value,
                Reason = request.Reason,
                Actor = request.Actor
            };

            var result = await provcacheService.InvalidateByAsync(invalidationRequest, cancellationToken);

            return Results.Ok(new ProvcacheInvalidateResponse
            {
                EntriesAffected = result.EntriesAffected,
                Type = request.Type?.ToString() ?? "pattern",
                Value = request.Value,
                Reason = request.Reason
            });
        }
        catch (Exception ex)
        {
            logger.LogError(ex, "Error invalidating cache entries");
            return Results.Problem(
                detail: ex.Message,
                statusCode: StatusCodes.Status500InternalServerError,
                title: "Cache invalidation failed");
        }
    }

    /// <summary>
    /// GET /v1/provcache/metrics
    /// </summary>
    private static async Task<IResult> GetMetrics(
        IProvcacheService provcacheService,
        ILogger<ProvcacheApiEndpoints> logger,
        CancellationToken cancellationToken)
    {
        logger.LogDebug("GET /v1/provcache/metrics");

        try
        {
            var metrics = await provcacheService.GetMetricsAsync(cancellationToken);

            var hitRate = metrics.TotalRequests > 0
                ? (double)metrics.TotalHits / metrics.TotalRequests
                : 0;

            return Results.Ok(new ProvcacheMetricsResponse
            {
                TotalRequests = metrics.TotalRequests,
                TotalHits = metrics.TotalHits,
                TotalMisses = metrics.TotalMisses,
                TotalInvalidations = metrics.TotalInvalidations,
                HitRate = hitRate,
                CurrentEntryCount = metrics.CurrentEntryCount,
                AvgLatencyMs = metrics.AvgLatencyMs,
                P99LatencyMs = metrics.P99LatencyMs,
                ValkeyCacheHealthy = metrics.ValkeyCacheHealthy,
                PostgresRepositoryHealthy = metrics.PostgresRepositoryHealthy,
                CollectedAt = metrics.CollectedAt
            });
        }
        catch (Exception ex)
        {
            logger.LogError(ex, "Error getting cache metrics");
            return Results.Problem(
                detail: ex.Message,
                statusCode: StatusCodes.Status500InternalServerError,
                title: "Metrics retrieval failed");
        }
    }
}

/// <summary>
/// Placeholder for problem details when ASP.NET Core's ProblemDetails isn't available.
/// </summary>
internal sealed class ProblemDetails
{
    public string? Type { get; set; }
    public string? Title { get; set; }
    public int? Status { get; set; }
    public string? Detail { get; set; }
    public string? Instance { get; set; }
}
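
Hosting wire-up sketch for the extension above (only MapProvcacheEndpoints comes from this file; the Provcache service registration call is hypothetical and left as a comment):

    var builder = WebApplication.CreateBuilder(args);
    // builder.Services.AddProvcache(...);  // hypothetical: IProvcacheService must be registered somewhere

    var app = builder.Build();
    app.MapProvcacheEndpoints();  // GET /{veriKey}, POST /, POST /invalidate, GET /metrics under /v1/provcache
    app.Run();
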
@@ -0,0 +1,29 @@
<?xml version="1.0" encoding="utf-8"?>
<Project Sdk="Microsoft.NET.Sdk">

  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
    <LangVersion>preview</LangVersion>
    <TreatWarningsAsErrors>false</TreatWarningsAsErrors>
    <RootNamespace>StellaOps.Provcache.Api</RootNamespace>
    <AssemblyName>StellaOps.Provcache.Api</AssemblyName>
    <Description>API endpoints for Provcache - Provenance Cache for StellaOps</Description>
  </PropertyGroup>

  <ItemGroup>
    <FrameworkReference Include="Microsoft.AspNetCore.App" />
  </ItemGroup>

  <ItemGroup>
    <PackageReference Include="Microsoft.AspNetCore.OpenApi" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" />
  </ItemGroup>

  <ItemGroup>
    <ProjectReference Include="../StellaOps.Provcache/StellaOps.Provcache.csproj" />
  </ItemGroup>

</Project>
@@ -0,0 +1,336 @@
|
||||
using System.Text.Json;
|
||||
using Microsoft.EntityFrameworkCore;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using StellaOps.Provcache.Entities;
|
||||
|
||||
namespace StellaOps.Provcache.Postgres;
|
||||
|
||||
/// <summary>
|
||||
/// PostgreSQL implementation of <see cref="IProvcacheRepository"/>.
|
||||
/// </summary>
|
||||
public sealed class PostgresProvcacheRepository : IProvcacheRepository
|
||||
{
|
||||
private readonly ProvcacheDbContext _context;
|
||||
private readonly ILogger<PostgresProvcacheRepository> _logger;
|
||||
private readonly JsonSerializerOptions _jsonOptions;
|
||||
|
||||
public PostgresProvcacheRepository(
|
||||
ProvcacheDbContext context,
|
||||
ILogger<PostgresProvcacheRepository> logger)
|
||||
{
|
||||
_context = context ?? throw new ArgumentNullException(nameof(context));
|
||||
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
|
||||
_jsonOptions = new JsonSerializerOptions
|
||||
{
|
||||
PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
|
||||
WriteIndented = false
|
||||
};
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task<ProvcacheEntry?> GetAsync(string veriKey, CancellationToken cancellationToken = default)
|
||||
{
|
||||
var entity = await _context.ProvcacheItems
|
||||
.AsNoTracking()
|
||||
.FirstOrDefaultAsync(e => e.VeriKey == veriKey, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
return entity is null ? null : MapToEntry(entity);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task<IReadOnlyDictionary<string, ProvcacheEntry>> GetManyAsync(
|
||||
IEnumerable<string> veriKeys,
|
||||
CancellationToken cancellationToken = default)
|
||||
{
|
||||
var keyList = veriKeys.ToList();
|
||||
if (keyList.Count == 0)
|
||||
return new Dictionary<string, ProvcacheEntry>();
|
||||
|
||||
var entities = await _context.ProvcacheItems
|
||||
.AsNoTracking()
|
||||
.Where(e => keyList.Contains(e.VeriKey))
|
||||
.ToListAsync(cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
return entities.ToDictionary(e => e.VeriKey, MapToEntry);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task UpsertAsync(ProvcacheEntry entry, CancellationToken cancellationToken = default)
|
||||
{
|
||||
var entity = MapToEntity(entry);
|
||||
|
||||
var existing = await _context.ProvcacheItems
|
||||
.FirstOrDefaultAsync(e => e.VeriKey == entry.VeriKey, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
if (existing is null)
|
||||
{
|
||||
_context.ProvcacheItems.Add(entity);
|
||||
}
|
||||
else
|
||||
{
|
||||
_context.Entry(existing).CurrentValues.SetValues(entity);
|
||||
}
|
||||
|
||||
await _context.SaveChangesAsync(cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task UpsertManyAsync(IEnumerable<ProvcacheEntry> entries, CancellationToken cancellationToken = default)
|
||||
{
|
||||
var entryList = entries.ToList();
|
||||
if (entryList.Count == 0)
|
||||
return;
|
||||
|
||||
var veriKeys = entryList.Select(e => e.VeriKey).ToList();
|
||||
var existing = await _context.ProvcacheItems
|
||||
.Where(e => veriKeys.Contains(e.VeriKey))
|
||||
.ToDictionaryAsync(e => e.VeriKey, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
foreach (var entry in entryList)
|
||||
{
|
||||
var entity = MapToEntity(entry);
|
||||
|
||||
if (existing.TryGetValue(entry.VeriKey, out var existingEntity))
|
||||
{
|
||||
_context.Entry(existingEntity).CurrentValues.SetValues(entity);
|
||||
}
|
||||
else
|
||||
{
|
||||
_context.ProvcacheItems.Add(entity);
|
||||
}
|
||||
}
|
||||
|
||||
await _context.SaveChangesAsync(cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task<bool> DeleteAsync(string veriKey, CancellationToken cancellationToken = default)
|
||||
{
|
||||
var entity = await _context.ProvcacheItems
|
||||
.FirstOrDefaultAsync(e => e.VeriKey == veriKey, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
if (entity is null)
|
||||
return false;
|
||||
|
||||
_context.ProvcacheItems.Remove(entity);
|
||||
await _context.SaveChangesAsync(cancellationToken).ConfigureAwait(false);
|
||||
return true;
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task<long> DeleteByPolicyHashAsync(string policyHash, CancellationToken cancellationToken = default)
|
||||
{
|
||||
var deleted = await _context.ProvcacheItems
|
||||
.Where(e => e.PolicyHash == policyHash)
|
||||
.ExecuteDeleteAsync(cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
if (deleted > 0)
|
||||
{
|
||||
await LogRevocationAsync("policy", policyHash, "policy-update", deleted, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
|
||||
return deleted;
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task<long> DeleteBySignerSetHashAsync(string signerSetHash, CancellationToken cancellationToken = default)
|
||||
{
|
||||
var deleted = await _context.ProvcacheItems
|
||||
.Where(e => e.SignerSetHash == signerSetHash)
|
||||
.ExecuteDeleteAsync(cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
if (deleted > 0)
|
||||
{
|
||||
await LogRevocationAsync("signer", signerSetHash, "signer-revocation", deleted, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
|
||||
return deleted;
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
    public async Task<long> DeleteByFeedEpochOlderThanAsync(string feedEpoch, CancellationToken cancellationToken = default)
    {
        var deleted = await _context.ProvcacheItems
            .Where(e => string.Compare(e.FeedEpoch, feedEpoch, StringComparison.Ordinal) < 0)
            .ExecuteDeleteAsync(cancellationToken)
            .ConfigureAwait(false);

        if (deleted > 0)
        {
            await LogRevocationAsync("feed", feedEpoch, "feed-update", deleted, cancellationToken)
                .ConfigureAwait(false);
        }

        return deleted;
    }

    /// <inheritdoc />
    public async Task<long> DeleteExpiredAsync(DateTimeOffset asOf, CancellationToken cancellationToken = default)
    {
        var deleted = await _context.ProvcacheItems
            .Where(e => e.ExpiresAt <= asOf)
            .ExecuteDeleteAsync(cancellationToken)
            .ConfigureAwait(false);

        if (deleted > 0)
        {
            await LogRevocationAsync("expired", asOf.ToString("O"), "ttl-expiry", deleted, cancellationToken)
                .ConfigureAwait(false);
        }

        return deleted;
    }

    /// <inheritdoc />
    public async Task IncrementHitCountAsync(string veriKey, CancellationToken cancellationToken = default)
    {
        await _context.ProvcacheItems
            .Where(e => e.VeriKey == veriKey)
            .ExecuteUpdateAsync(
                setters => setters
                    .SetProperty(e => e.HitCount, e => e.HitCount + 1)
                    .SetProperty(e => e.LastAccessedAt, DateTimeOffset.UtcNow),
                cancellationToken)
            .ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<ProvcacheStatistics> GetStatisticsAsync(CancellationToken cancellationToken = default)
    {
        var now = DateTimeOffset.UtcNow;
        var hourFromNow = now.AddHours(1);

        var totalEntries = await _context.ProvcacheItems
            .LongCountAsync(cancellationToken)
            .ConfigureAwait(false);

        var totalHits = await _context.ProvcacheItems
            .SumAsync(e => e.HitCount, cancellationToken)
            .ConfigureAwait(false);

        var expiringWithinHour = await _context.ProvcacheItems
            .LongCountAsync(e => e.ExpiresAt <= hourFromNow, cancellationToken)
            .ConfigureAwait(false);

        var uniquePolicies = await _context.ProvcacheItems
            .Select(e => e.PolicyHash)
            .Distinct()
            .CountAsync(cancellationToken)
            .ConfigureAwait(false);

        var uniqueSignerSets = await _context.ProvcacheItems
            .Select(e => e.SignerSetHash)
            .Distinct()
            .CountAsync(cancellationToken)
            .ConfigureAwait(false);

        var oldest = await _context.ProvcacheItems
            .OrderBy(e => e.CreatedAt)
            .Select(e => (DateTimeOffset?)e.CreatedAt)
            .FirstOrDefaultAsync(cancellationToken)
            .ConfigureAwait(false);

        var newest = await _context.ProvcacheItems
            .OrderByDescending(e => e.CreatedAt)
            .Select(e => (DateTimeOffset?)e.CreatedAt)
            .FirstOrDefaultAsync(cancellationToken)
            .ConfigureAwait(false);

        return new ProvcacheStatistics
        {
            TotalEntries = totalEntries,
            TotalHits = totalHits,
            ExpiringWithinHour = expiringWithinHour,
            UniquePolicies = uniquePolicies,
            UniqueSignerSets = uniqueSignerSets,
            OldestEntry = oldest,
            NewestEntry = newest
        };
    }

    private async Task LogRevocationAsync(
        string type,
        string targetHash,
        string reason,
        long entriesAffected,
        CancellationToken cancellationToken)
    {
        var revocation = new ProvcacheRevocationEntity
        {
            RevocationId = Guid.NewGuid(),
            RevocationType = type,
            TargetHash = targetHash,
            Reason = reason,
            EntriesAffected = entriesAffected,
            CreatedAt = DateTimeOffset.UtcNow
        };

        _context.Revocations.Add(revocation);
        await _context.SaveChangesAsync(cancellationToken).ConfigureAwait(false);

        _logger.LogInformation(
            "Logged revocation: type={Type}, target={TargetHash}, affected={EntriesAffected}",
            type,
            targetHash,
            entriesAffected);
    }

    private ProvcacheEntry MapToEntry(ProvcacheItemEntity entity)
    {
        var replaySeed = JsonSerializer.Deserialize<ReplaySeed>(entity.ReplaySeed, _jsonOptions)
            ?? new ReplaySeed { FeedIds = [], RuleIds = [] };

        return new ProvcacheEntry
        {
            VeriKey = entity.VeriKey,
            Decision = new DecisionDigest
            {
                DigestVersion = entity.DigestVersion,
                VeriKey = entity.VeriKey,
                VerdictHash = entity.VerdictHash,
                ProofRoot = entity.ProofRoot,
                ReplaySeed = replaySeed,
                CreatedAt = entity.CreatedAt,
                ExpiresAt = entity.ExpiresAt,
                TrustScore = entity.TrustScore
            },
            PolicyHash = entity.PolicyHash,
            SignerSetHash = entity.SignerSetHash,
            FeedEpoch = entity.FeedEpoch,
            CreatedAt = entity.CreatedAt,
            ExpiresAt = entity.ExpiresAt,
            HitCount = entity.HitCount,
            LastAccessedAt = entity.LastAccessedAt
        };
    }

    private ProvcacheItemEntity MapToEntity(ProvcacheEntry entry)
    {
        return new ProvcacheItemEntity
        {
            VeriKey = entry.VeriKey,
            DigestVersion = entry.Decision.DigestVersion,
            VerdictHash = entry.Decision.VerdictHash,
            ProofRoot = entry.Decision.ProofRoot,
            ReplaySeed = JsonSerializer.Serialize(entry.Decision.ReplaySeed, _jsonOptions),
            PolicyHash = entry.PolicyHash,
            SignerSetHash = entry.SignerSetHash,
            FeedEpoch = entry.FeedEpoch,
            TrustScore = entry.Decision.TrustScore,
            HitCount = entry.HitCount,
            CreatedAt = entry.CreatedAt,
            ExpiresAt = entry.ExpiresAt,
            UpdatedAt = DateTimeOffset.UtcNow,
            LastAccessedAt = entry.LastAccessedAt
        };
    }
}
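A minimal sketch of how DeleteExpiredAsync could be driven from a periodic sweep. ProvcachePruneJob, the 10-minute cadence, and the hosting setup are illustrative assumptions, not part of this commit:

using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;

public sealed class ProvcachePruneJob : BackgroundService
{
    private readonly IProvcacheRepository _repository;
    private readonly ILogger<ProvcachePruneJob> _logger;

    public ProvcachePruneJob(IProvcacheRepository repository, ILogger<ProvcachePruneJob> logger)
    {
        _repository = repository;
        _logger = logger;
    }

    protected override async Task ExecuteAsync(CancellationToken stoppingToken)
    {
        using var timer = new PeriodicTimer(TimeSpan.FromMinutes(10));
        while (await timer.WaitForNextTickAsync(stoppingToken).ConfigureAwait(false))
        {
            // Removes rows with ExpiresAt <= now; the repository audit-logs the sweep itself.
            var pruned = await _repository.DeleteExpiredAsync(DateTimeOffset.UtcNow, stoppingToken).ConfigureAwait(false);
            if (pruned > 0)
            {
                _logger.LogInformation("Pruned {Count} expired provcache entries", pruned);
            }
        }
    }
}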
@@ -0,0 +1,58 @@
using Microsoft.EntityFrameworkCore;
using StellaOps.Provcache.Entities;

namespace StellaOps.Provcache.Postgres;

/// <summary>
/// EF Core DbContext for Provcache storage.
/// </summary>
public class ProvcacheDbContext : DbContext
{
    public ProvcacheDbContext(DbContextOptions<ProvcacheDbContext> options) : base(options)
    {
    }

    public DbSet<ProvcacheItemEntity> ProvcacheItems => Set<ProvcacheItemEntity>();
    public DbSet<ProvcacheEvidenceChunkEntity> EvidenceChunks => Set<ProvcacheEvidenceChunkEntity>();
    public DbSet<ProvcacheRevocationEntity> Revocations => Set<ProvcacheRevocationEntity>();

    protected override void OnModelCreating(ModelBuilder modelBuilder)
    {
        base.OnModelCreating(modelBuilder);

        modelBuilder.HasDefaultSchema("provcache");

        // ProvcacheItemEntity configuration
        modelBuilder.Entity<ProvcacheItemEntity>(entity =>
        {
            entity.HasKey(e => e.VeriKey);

            entity.HasIndex(e => e.PolicyHash);
            entity.HasIndex(e => e.SignerSetHash);
            entity.HasIndex(e => e.FeedEpoch);
            entity.HasIndex(e => e.ExpiresAt);
            entity.HasIndex(e => e.CreatedAt);

            entity.Property(e => e.ReplaySeed)
                .HasColumnType("jsonb");
        });

        // ProvcacheEvidenceChunkEntity configuration
        modelBuilder.Entity<ProvcacheEvidenceChunkEntity>(entity =>
        {
            entity.HasKey(e => e.ChunkId);

            entity.HasIndex(e => e.ProofRoot);
            entity.HasIndex(e => new { e.ProofRoot, e.ChunkIndex }).IsUnique();
        });

        // ProvcacheRevocationEntity configuration
        modelBuilder.Entity<ProvcacheRevocationEntity>(entity =>
        {
            entity.HasKey(e => e.RevocationId);

            entity.HasIndex(e => e.CreatedAt);
            entity.HasIndex(e => e.TargetHash);
        });
    }
}
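A wiring sketch for the context above. The connection-string name ("provcache") and the migrations-history table placement are assumptions for illustration, not part of this commit:

using Microsoft.EntityFrameworkCore;
using Microsoft.Extensions.DependencyInjection;

// In the host's composition root ("services"/"configuration" are the usual host builder locals):
services.AddDbContext<ProvcacheDbContext>(options =>
    options.UseNpgsql(
        configuration.GetConnectionString("provcache"),
        npgsql => npgsql.MigrationsHistoryTable("__efmigrations_history", "provcache")));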
@@ -0,0 +1,27 @@
<?xml version="1.0" encoding="utf-8"?>
<Project Sdk="Microsoft.NET.Sdk">

  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
    <LangVersion>preview</LangVersion>
    <TreatWarningsAsErrors>false</TreatWarningsAsErrors>
    <RootNamespace>StellaOps.Provcache.Postgres</RootNamespace>
    <AssemblyName>StellaOps.Provcache.Postgres</AssemblyName>
    <Description>PostgreSQL storage implementation for StellaOps Provcache</Description>
  </PropertyGroup>

  <ItemGroup>
    <PackageReference Include="Microsoft.EntityFrameworkCore" Version="10.0.0" />
    <PackageReference Include="Npgsql.EntityFrameworkCore.PostgreSQL" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.Options" Version="10.0.0" />
  </ItemGroup>

  <ItemGroup>
    <ProjectReference Include="../StellaOps.Provcache/StellaOps.Provcache.csproj" />
  </ItemGroup>

</Project>
@@ -0,0 +1,27 @@
<?xml version="1.0" encoding="utf-8"?>
<Project Sdk="Microsoft.NET.Sdk">

  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
    <LangVersion>preview</LangVersion>
    <TreatWarningsAsErrors>false</TreatWarningsAsErrors>
    <RootNamespace>StellaOps.Provcache.Valkey</RootNamespace>
    <AssemblyName>StellaOps.Provcache.Valkey</AssemblyName>
    <Description>Valkey/Redis cache store implementation for StellaOps Provcache</Description>
  </PropertyGroup>

  <ItemGroup>
    <PackageReference Include="StackExchange.Redis" Version="2.8.37" />
    <PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.Options" Version="10.0.0" />
  </ItemGroup>

  <ItemGroup>
    <ProjectReference Include="../StellaOps.Provcache/StellaOps.Provcache.csproj" />
    <ProjectReference Include="../StellaOps.Messaging.Transport.Valkey/StellaOps.Messaging.Transport.Valkey.csproj" />
  </ItemGroup>

</Project>
@@ -0,0 +1,327 @@
using System.Diagnostics;
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StackExchange.Redis;

namespace StellaOps.Provcache.Valkey;

/// <summary>
/// Valkey/Redis implementation of <see cref="IProvcacheStore"/> with read-through caching.
/// </summary>
public sealed class ValkeyProvcacheStore : IProvcacheStore, IAsyncDisposable
{
    private readonly IConnectionMultiplexer _connectionMultiplexer;
    private readonly ProvcacheOptions _options;
    private readonly ILogger<ValkeyProvcacheStore> _logger;
    private readonly JsonSerializerOptions _jsonOptions;
    private readonly SemaphoreSlim _connectionLock = new(1, 1);
    private IDatabase? _database;

    /// <inheritdoc />
    public string ProviderName => "valkey";

    public ValkeyProvcacheStore(
        IConnectionMultiplexer connectionMultiplexer,
        IOptions<ProvcacheOptions> options,
        ILogger<ValkeyProvcacheStore> logger)
    {
        _connectionMultiplexer = connectionMultiplexer ?? throw new ArgumentNullException(nameof(connectionMultiplexer));
        _options = options?.Value ?? throw new ArgumentNullException(nameof(options));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
        _jsonOptions = new JsonSerializerOptions
        {
            PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
            WriteIndented = false
        };
    }

    /// <inheritdoc />
    public async ValueTask<ProvcacheLookupResult> GetAsync(string veriKey, CancellationToken cancellationToken = default)
    {
        var sw = Stopwatch.StartNew();

        try
        {
            var db = await GetDatabaseAsync().ConfigureAwait(false);
            var redisKey = BuildKey(veriKey);

            var value = await db.StringGetAsync(redisKey).ConfigureAwait(false);
            sw.Stop();

            if (value.IsNullOrEmpty)
            {
                _logger.LogDebug("Cache miss for VeriKey {VeriKey} in {ElapsedMs}ms", veriKey, sw.Elapsed.TotalMilliseconds);
                return ProvcacheLookupResult.Miss(sw.Elapsed.TotalMilliseconds);
            }

            var entry = JsonSerializer.Deserialize<ProvcacheEntry>((string)value!, _jsonOptions);
            if (entry is null)
            {
                _logger.LogWarning("Failed to deserialize cache entry for VeriKey {VeriKey}", veriKey);
                return ProvcacheLookupResult.Miss(sw.Elapsed.TotalMilliseconds);
            }

            // Optionally refresh TTL on read (sliding expiration)
            if (_options.SlidingExpiration)
            {
                var ttl = entry.ExpiresAt - DateTimeOffset.UtcNow;
                if (ttl > TimeSpan.Zero)
                {
                    await db.KeyExpireAsync(redisKey, ttl).ConfigureAwait(false);
                }
            }

            _logger.LogDebug("Cache hit for VeriKey {VeriKey} in {ElapsedMs}ms", veriKey, sw.Elapsed.TotalMilliseconds);
            return ProvcacheLookupResult.Hit(entry, ProviderName, sw.Elapsed.TotalMilliseconds);
        }
        catch (Exception ex)
        {
            sw.Stop();
            _logger.LogError(ex, "Error getting cache entry for VeriKey {VeriKey}", veriKey);
            return ProvcacheLookupResult.Miss(sw.Elapsed.TotalMilliseconds);
        }
    }

    /// <inheritdoc />
    public async ValueTask<ProvcacheBatchLookupResult> GetManyAsync(
        IEnumerable<string> veriKeys,
        CancellationToken cancellationToken = default)
    {
        var sw = Stopwatch.StartNew();
        var keyList = veriKeys.ToList();

        if (keyList.Count == 0)
        {
            return new ProvcacheBatchLookupResult
            {
                Hits = new Dictionary<string, ProvcacheEntry>(),
                Misses = [],
                ElapsedMs = 0
            };
        }

        try
        {
            var db = await GetDatabaseAsync().ConfigureAwait(false);
            var redisKeys = keyList.Select(k => (RedisKey)BuildKey(k)).ToArray();

            var values = await db.StringGetAsync(redisKeys).ConfigureAwait(false);
            sw.Stop();

            var hits = new Dictionary<string, ProvcacheEntry>();
            var misses = new List<string>();

            for (int i = 0; i < keyList.Count; i++)
            {
                var veriKey = keyList[i];
                var value = values[i];

                if (value.IsNullOrEmpty)
                {
                    misses.Add(veriKey);
                    continue;
                }

                var entry = JsonSerializer.Deserialize<ProvcacheEntry>((string)value!, _jsonOptions);
                if (entry is not null)
                {
                    hits[veriKey] = entry;
                }
                else
                {
                    misses.Add(veriKey);
                }
            }

            _logger.LogDebug(
                "Batch lookup: {Hits} hits, {Misses} misses in {ElapsedMs}ms",
                hits.Count,
                misses.Count,
                sw.Elapsed.TotalMilliseconds);

            return new ProvcacheBatchLookupResult
            {
                Hits = hits,
                Misses = misses,
                ElapsedMs = sw.Elapsed.TotalMilliseconds
            };
        }
        catch (Exception ex)
        {
            sw.Stop();
            _logger.LogError(ex, "Error in batch cache lookup");
            return new ProvcacheBatchLookupResult
            {
                Hits = new Dictionary<string, ProvcacheEntry>(),
                Misses = keyList,
                ElapsedMs = sw.Elapsed.TotalMilliseconds
            };
        }
    }

    /// <inheritdoc />
    public async ValueTask SetAsync(ProvcacheEntry entry, CancellationToken cancellationToken = default)
    {
        try
        {
            var db = await GetDatabaseAsync().ConfigureAwait(false);
            var redisKey = BuildKey(entry.VeriKey);
            var value = JsonSerializer.Serialize(entry, _jsonOptions);

            var ttl = entry.ExpiresAt - DateTimeOffset.UtcNow;
            if (ttl <= TimeSpan.Zero)
            {
                _logger.LogDebug("Skipping expired entry for VeriKey {VeriKey}", entry.VeriKey);
                return;
            }

            // Cap TTL at MaxTtl
            if (ttl > _options.MaxTtl)
            {
                ttl = _options.MaxTtl;
            }

            await db.StringSetAsync(redisKey, value, ttl).ConfigureAwait(false);

            _logger.LogDebug("Stored cache entry for VeriKey {VeriKey} with TTL {Ttl}", entry.VeriKey, ttl);
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error storing cache entry for VeriKey {VeriKey}", entry.VeriKey);
            throw;
        }
    }

    /// <inheritdoc />
    public async ValueTask SetManyAsync(IEnumerable<ProvcacheEntry> entries, CancellationToken cancellationToken = default)
    {
        var entryList = entries.ToList();
        if (entryList.Count == 0)
            return;

        try
        {
            var db = await GetDatabaseAsync().ConfigureAwait(false);
            var batch = db.CreateBatch();
            var tasks = new List<Task>();

            foreach (var entry in entryList)
            {
                var redisKey = BuildKey(entry.VeriKey);
                var value = JsonSerializer.Serialize(entry, _jsonOptions);

                var ttl = entry.ExpiresAt - DateTimeOffset.UtcNow;
                if (ttl <= TimeSpan.Zero)
                    continue;

                if (ttl > _options.MaxTtl)
                    ttl = _options.MaxTtl;

                tasks.Add(batch.StringSetAsync(redisKey, value, ttl));
            }

            batch.Execute();
            await Task.WhenAll(tasks).ConfigureAwait(false);

            _logger.LogDebug("Batch stored {Count} cache entries", entryList.Count);
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error in batch cache store");
            throw;
        }
    }

    /// <inheritdoc />
    public async ValueTask<bool> InvalidateAsync(string veriKey, CancellationToken cancellationToken = default)
    {
        try
        {
            var db = await GetDatabaseAsync().ConfigureAwait(false);
            var redisKey = BuildKey(veriKey);

            var deleted = await db.KeyDeleteAsync(redisKey).ConfigureAwait(false);

            _logger.LogDebug("Invalidated cache entry for VeriKey {VeriKey}: {Deleted}", veriKey, deleted);
            return deleted;
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error invalidating cache entry for VeriKey {VeriKey}", veriKey);
            return false;
        }
    }

    /// <inheritdoc />
    public async ValueTask<long> InvalidateByPatternAsync(string pattern, CancellationToken cancellationToken = default)
    {
        try
        {
            var db = await GetDatabaseAsync().ConfigureAwait(false);
            var server = _connectionMultiplexer.GetServer(_connectionMultiplexer.GetEndPoints().First());

            var fullPattern = $"{_options.ValkeyKeyPrefix}{pattern}";
            var keys = server.Keys(pattern: fullPattern).ToArray();

            if (keys.Length == 0)
                return 0;

            var deleted = await db.KeyDeleteAsync(keys).ConfigureAwait(false);

            _logger.LogInformation("Invalidated {Count} cache entries matching pattern {Pattern}", deleted, pattern);
            return deleted;
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error invalidating cache entries by pattern {Pattern}", pattern);
            return 0;
        }
    }

    /// <inheritdoc />
    public async ValueTask<ProvcacheEntry> GetOrSetAsync(
        string veriKey,
        Func<CancellationToken, ValueTask<ProvcacheEntry>> factory,
        CancellationToken cancellationToken = default)
    {
        var result = await GetAsync(veriKey, cancellationToken).ConfigureAwait(false);
        if (result.IsHit && result.Entry is not null)
        {
            return result.Entry;
        }

        var entry = await factory(cancellationToken).ConfigureAwait(false);
        await SetAsync(entry, cancellationToken).ConfigureAwait(false);

        return entry;
    }

    private string BuildKey(string veriKey) => $"{_options.ValkeyKeyPrefix}{veriKey}";

    private async Task<IDatabase> GetDatabaseAsync()
    {
        if (_database is not null)
            return _database;

        await _connectionLock.WaitAsync().ConfigureAwait(false);
        try
        {
            _database ??= _connectionMultiplexer.GetDatabase();
            return _database;
        }
        finally
        {
            _connectionLock.Release();
        }
    }

    public async ValueTask DisposeAsync()
    {
        _connectionLock.Dispose();

        // Note: Don't dispose the connection multiplexer if it's shared (injected via DI)
        // The DI container will handle its lifetime
        await Task.CompletedTask;
    }
}
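A registration and call-site sketch for the store above. The endpoint string, singleton lifetimes, and ComputeDecisionAsync (which must return ValueTask<ProvcacheEntry>) are illustrative assumptions:

using Microsoft.Extensions.DependencyInjection;
using StackExchange.Redis;

// In the host's composition root:
services.AddSingleton<IConnectionMultiplexer>(_ => ConnectionMultiplexer.Connect("valkey:6379"));
services.AddSingleton<IProvcacheStore, ValkeyProvcacheStore>();

// At a call site, read-through semantics in one call:
var entry = await store.GetOrSetAsync(veriKey, ct => ComputeDecisionAsync(ct), cancellationToken);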
src/__Libraries/StellaOps.Provcache/DecisionDigestBuilder.cs (new file, 325 lines)
@@ -0,0 +1,325 @@
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;

namespace StellaOps.Provcache;

/// <summary>
/// Builder for constructing <see cref="DecisionDigest"/> from evaluation results.
/// Ensures deterministic digest computation for cache consistency.
/// </summary>
public sealed class DecisionDigestBuilder
{
    private string? _veriKey;
    private string? _verdictHash;
    private string? _proofRoot;
    private ReplaySeed? _replaySeed;
    private DateTimeOffset? _createdAt;
    private DateTimeOffset? _expiresAt;
    private int? _trustScore;
    private readonly ProvcacheOptions _options;
    private readonly TimeProvider _timeProvider;

    /// <summary>
    /// Creates a new DecisionDigestBuilder with default options.
    /// </summary>
    public DecisionDigestBuilder() : this(new ProvcacheOptions(), TimeProvider.System)
    {
    }

    /// <summary>
    /// Creates a new DecisionDigestBuilder with the specified options.
    /// </summary>
    /// <param name="options">Provcache configuration options.</param>
    /// <param name="timeProvider">Time provider for timestamps.</param>
    public DecisionDigestBuilder(ProvcacheOptions options, TimeProvider? timeProvider = null)
    {
        _options = options ?? throw new ArgumentNullException(nameof(options));
        _timeProvider = timeProvider ?? TimeProvider.System;
    }

    /// <summary>
    /// Sets the VeriKey for this digest.
    /// </summary>
    public DecisionDigestBuilder WithVeriKey(string veriKey)
    {
        _veriKey = veriKey ?? throw new ArgumentNullException(nameof(veriKey));
        return this;
    }

    /// <summary>
    /// Sets the VeriKey from a builder.
    /// </summary>
    public DecisionDigestBuilder WithVeriKey(VeriKeyBuilder builder)
    {
        ArgumentNullException.ThrowIfNull(builder);
        _veriKey = builder.Build();
        return this;
    }

    /// <summary>
    /// Sets the verdict hash directly.
    /// </summary>
    public DecisionDigestBuilder WithVerdictHash(string verdictHash)
    {
        _verdictHash = verdictHash ?? throw new ArgumentNullException(nameof(verdictHash));
        return this;
    }

    /// <summary>
    /// Computes verdict hash from sorted dispositions.
    /// Dispositions are sorted by key for deterministic hashing.
    /// </summary>
    /// <param name="dispositions">Dictionary of finding ID to disposition.</param>
    public DecisionDigestBuilder WithDispositions(IReadOnlyDictionary<string, string> dispositions)
    {
        ArgumentNullException.ThrowIfNull(dispositions);

        // Sort by key for deterministic hash
        var sorted = dispositions
            .OrderBy(kvp => kvp.Key, StringComparer.Ordinal)
            .ToList();

        if (sorted.Count == 0)
        {
            _verdictHash = ComputeHash(Encoding.UTF8.GetBytes("empty-verdict"));
            return this;
        }

        // Build deterministic string: key1=value1|key2=value2|...
        var sb = new StringBuilder();
        foreach (var (key, value) in sorted)
        {
            if (sb.Length > 0) sb.Append('|');
            sb.Append(key);
            sb.Append('=');
            sb.Append(value);
        }

        _verdictHash = ComputeHash(Encoding.UTF8.GetBytes(sb.ToString()));
        return this;
    }

    /// <summary>
    /// Sets the proof root (Merkle root of evidence) directly.
    /// </summary>
    public DecisionDigestBuilder WithProofRoot(string proofRoot)
    {
        _proofRoot = proofRoot ?? throw new ArgumentNullException(nameof(proofRoot));
        return this;
    }

    /// <summary>
    /// Computes proof root from a list of evidence chunk hashes.
    /// Builds a simple binary Merkle tree for verification.
    /// </summary>
    /// <param name="evidenceChunkHashes">Ordered list of evidence chunk hashes.</param>
    public DecisionDigestBuilder WithEvidenceChunks(IReadOnlyList<string> evidenceChunkHashes)
    {
        ArgumentNullException.ThrowIfNull(evidenceChunkHashes);

        if (evidenceChunkHashes.Count == 0)
        {
            _proofRoot = ComputeHash(Encoding.UTF8.GetBytes("empty-proof"));
            return this;
        }

        // Simple Merkle tree: recursively pair and hash until single root
        var currentLevel = evidenceChunkHashes
            .Select(h => Convert.FromHexString(StripPrefix(h)))
            .ToList();

        while (currentLevel.Count > 1)
        {
            var nextLevel = new List<byte[]>();

            for (int i = 0; i < currentLevel.Count; i += 2)
            {
                if (i + 1 < currentLevel.Count)
                {
                    // Hash pair
                    var combined = new byte[currentLevel[i].Length + currentLevel[i + 1].Length];
                    currentLevel[i].CopyTo(combined, 0);
                    currentLevel[i + 1].CopyTo(combined, currentLevel[i].Length);
                    nextLevel.Add(SHA256.HashData(combined));
                }
                else
                {
                    // Odd element: promote to next level
                    nextLevel.Add(currentLevel[i]);
                }
            }

            currentLevel = nextLevel;
        }

        _proofRoot = $"sha256:{Convert.ToHexStringLower(currentLevel[0])}";
        return this;
    }
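    // Worked example for the reduction above (illustrative values, not from this commit):
    //   leaves:  [A, B, C]
    //   pass 1:  [SHA256(A || B), C]            // C is odd, promoted unchanged
    //   pass 2:  [SHA256(SHA256(A || B) || C)]  // single root remains
    // Promoting the odd node (rather than duplicating it, as some Merkle variants do)
    // keeps the root deterministic for any chunk count without padding.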
    /// <summary>
    /// Sets the replay seed directly.
    /// </summary>
    public DecisionDigestBuilder WithReplaySeed(ReplaySeed replaySeed)
    {
        _replaySeed = replaySeed ?? throw new ArgumentNullException(nameof(replaySeed));
        return this;
    }

    /// <summary>
    /// Builds replay seed from feed and rule identifiers.
    /// </summary>
    public DecisionDigestBuilder WithReplaySeed(
        IEnumerable<string> feedIds,
        IEnumerable<string> ruleIds,
        DateTimeOffset? frozenEpoch = null)
    {
        _replaySeed = new ReplaySeed
        {
            FeedIds = feedIds?.ToList() ?? [],
            RuleIds = ruleIds?.ToList() ?? [],
            FrozenEpoch = frozenEpoch
        };
        return this;
    }

    /// <summary>
    /// Sets explicit timestamps for created and expires.
    /// </summary>
    public DecisionDigestBuilder WithTimestamps(DateTimeOffset createdAt, DateTimeOffset expiresAt)
    {
        _createdAt = createdAt;
        _expiresAt = expiresAt;
        return this;
    }

    /// <summary>
    /// Sets timestamps using the default TTL from options.
    /// </summary>
    public DecisionDigestBuilder WithDefaultTimestamps()
    {
        _createdAt = _timeProvider.GetUtcNow();
        _expiresAt = _createdAt.Value.Add(_options.DefaultTtl);
        return this;
    }

    /// <summary>
    /// Sets the trust score directly.
    /// </summary>
    /// <param name="trustScore">Trust score (0-100).</param>
    public DecisionDigestBuilder WithTrustScore(int trustScore)
    {
        if (trustScore < 0 || trustScore > 100)
            throw new ArgumentOutOfRangeException(nameof(trustScore), "Trust score must be between 0 and 100.");

        _trustScore = trustScore;
        return this;
    }

    /// <summary>
    /// Computes trust score from component scores using weighted formula.
    /// </summary>
    /// <param name="reachabilityScore">Reachability analysis coverage (0-100).</param>
    /// <param name="sbomCompletenessScore">SBOM completeness (0-100).</param>
    /// <param name="vexCoverageScore">VEX statement coverage (0-100).</param>
    /// <param name="policyFreshnessScore">Policy freshness (0-100).</param>
    /// <param name="signerTrustScore">Signer trust level (0-100).</param>
    public DecisionDigestBuilder WithTrustScore(
        int reachabilityScore,
        int sbomCompletenessScore,
        int vexCoverageScore,
        int policyFreshnessScore,
        int signerTrustScore)
    {
        // Weights from documentation:
        // Reachability: 25%, SBOM: 20%, VEX: 20%, Policy: 15%, Signer: 20%
        _trustScore = (int)Math.Round(
            reachabilityScore * 0.25 +
            sbomCompletenessScore * 0.20 +
            vexCoverageScore * 0.20 +
            policyFreshnessScore * 0.15 +
            signerTrustScore * 0.20);

        // Clamp to valid range
        _trustScore = Math.Clamp(_trustScore.Value, 0, 100);
        return this;
    }
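    // Worked example for the weighted formula above (illustrative scores):
    //   reachability 80, SBOM 90, VEX 70, policy 100, signer 60
    //   80*0.25 + 90*0.20 + 70*0.20 + 100*0.15 + 60*0.20
    //   = 20 + 18 + 14 + 15 + 12 = 79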
    /// <summary>
    /// Builds the final DecisionDigest.
    /// </summary>
    /// <returns>The constructed DecisionDigest.</returns>
    /// <exception cref="InvalidOperationException">If required components are missing.</exception>
    public DecisionDigest Build()
    {
        ValidateRequiredComponents();

        return new DecisionDigest
        {
            DigestVersion = _options.DigestVersion,
            VeriKey = _veriKey!,
            VerdictHash = _verdictHash!,
            ProofRoot = _proofRoot!,
            ReplaySeed = _replaySeed!,
            CreatedAt = _createdAt!.Value,
            ExpiresAt = _expiresAt!.Value,
            TrustScore = _trustScore!.Value
        };
    }

    /// <summary>
    /// Resets the builder to its initial state.
    /// </summary>
    public DecisionDigestBuilder Reset()
    {
        _veriKey = null;
        _verdictHash = null;
        _proofRoot = null;
        _replaySeed = null;
        _createdAt = null;
        _expiresAt = null;
        _trustScore = null;
        return this;
    }

    private void ValidateRequiredComponents()
    {
        var missing = new List<string>();

        if (string.IsNullOrWhiteSpace(_veriKey))
            missing.Add("VeriKey");
        if (string.IsNullOrWhiteSpace(_verdictHash))
            missing.Add("VerdictHash");
        if (string.IsNullOrWhiteSpace(_proofRoot))
            missing.Add("ProofRoot");
        if (_replaySeed is null)
            missing.Add("ReplaySeed");
        if (!_createdAt.HasValue)
            missing.Add("CreatedAt");
        if (!_expiresAt.HasValue)
            missing.Add("ExpiresAt");
        if (!_trustScore.HasValue)
            missing.Add("TrustScore");

        if (missing.Count > 0)
        {
            throw new InvalidOperationException(
                $"Cannot build DecisionDigest: missing required components: {string.Join(", ", missing)}. " +
                "Use the With* methods to set all required components before calling Build().");
        }
    }

    private static string ComputeHash(ReadOnlySpan<byte> data)
    {
        Span<byte> hash = stackalloc byte[32];
        SHA256.HashData(data, hash);
        return $"sha256:{Convert.ToHexStringLower(hash)}";
    }

    private static string StripPrefix(string hash)
    {
        if (hash.StartsWith("sha256:", StringComparison.OrdinalIgnoreCase))
            return hash[7..];
        return hash;
    }
}
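A usage sketch of the fluent surface above; every input variable is a placeholder, and the trust-score arguments reuse the worked example:

var digest = new DecisionDigestBuilder(provcacheOptions)
    .WithVeriKey(veriKey)
    .WithDispositions(dispositionsByFindingId)
    .WithEvidenceChunks(evidenceChunkHashes)
    .WithReplaySeed(feedIds, ruleIds, frozenEpoch)
    .WithDefaultTimestamps()
    .WithTrustScore(80, 90, 70, 100, 60)  // -> 79, per the worked example
    .Build();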
@@ -0,0 +1,217 @@
using System.ComponentModel.DataAnnotations;
using System.ComponentModel.DataAnnotations.Schema;

namespace StellaOps.Provcache.Entities;

/// <summary>
/// EF Core entity for provcache.provcache_items table.
/// </summary>
[Table("provcache_items", Schema = "provcache")]
public sealed class ProvcacheItemEntity
{
    /// <summary>
    /// Composite cache key (VeriKey).
    /// </summary>
    [Key]
    [Column("verikey")]
    [MaxLength(512)]
    public required string VeriKey { get; set; }

    /// <summary>
    /// Schema version of the digest format.
    /// </summary>
    [Column("digest_version")]
    [MaxLength(16)]
    public required string DigestVersion { get; set; }

    /// <summary>
    /// Hash of sorted dispositions.
    /// </summary>
    [Column("verdict_hash")]
    [MaxLength(128)]
    public required string VerdictHash { get; set; }

    /// <summary>
    /// Merkle root of evidence.
    /// </summary>
    [Column("proof_root")]
    [MaxLength(128)]
    public required string ProofRoot { get; set; }

    /// <summary>
    /// Replay seed as JSON (feed IDs, rule IDs, frozen epoch).
    /// </summary>
    [Column("replay_seed", TypeName = "jsonb")]
    public required string ReplaySeed { get; set; }

    /// <summary>
    /// Hash of the policy bundle.
    /// </summary>
    [Column("policy_hash")]
    [MaxLength(128)]
    public required string PolicyHash { get; set; }

    /// <summary>
    /// Hash of the signer certificate set.
    /// </summary>
    [Column("signer_set_hash")]
    [MaxLength(128)]
    public required string SignerSetHash { get; set; }

    /// <summary>
    /// Advisory feed epoch identifier.
    /// </summary>
    [Column("feed_epoch")]
    [MaxLength(64)]
    public required string FeedEpoch { get; set; }

    /// <summary>
    /// Trust score (0-100).
    /// </summary>
    [Column("trust_score")]
    [Range(0, 100)]
    public int TrustScore { get; set; }

    /// <summary>
    /// Number of cache hits.
    /// </summary>
    [Column("hit_count")]
    public long HitCount { get; set; }

    /// <summary>
    /// UTC timestamp when entry was created.
    /// </summary>
    [Column("created_at")]
    public DateTimeOffset CreatedAt { get; set; }

    /// <summary>
    /// UTC timestamp when entry expires.
    /// </summary>
    [Column("expires_at")]
    public DateTimeOffset ExpiresAt { get; set; }

    /// <summary>
    /// UTC timestamp when entry was last updated.
    /// </summary>
    [Column("updated_at")]
    public DateTimeOffset UpdatedAt { get; set; }

    /// <summary>
    /// UTC timestamp when entry was last accessed.
    /// </summary>
    [Column("last_accessed_at")]
    public DateTimeOffset? LastAccessedAt { get; set; }
}

/// <summary>
/// EF Core entity for provcache.prov_evidence_chunks table.
/// </summary>
[Table("prov_evidence_chunks", Schema = "provcache")]
public sealed class ProvcacheEvidenceChunkEntity
{
    /// <summary>
    /// Unique chunk identifier.
    /// </summary>
    [Key]
    [Column("chunk_id")]
    public Guid ChunkId { get; set; }

    /// <summary>
    /// Proof root this chunk belongs to.
    /// </summary>
    [Column("proof_root")]
    [MaxLength(128)]
    public required string ProofRoot { get; set; }

    /// <summary>
    /// Index of this chunk in the Merkle tree.
    /// </summary>
    [Column("chunk_index")]
    public int ChunkIndex { get; set; }

    /// <summary>
    /// Hash of the chunk content.
    /// </summary>
    [Column("chunk_hash")]
    [MaxLength(128)]
    public required string ChunkHash { get; set; }

    /// <summary>
    /// Chunk content.
    /// </summary>
    [Column("blob")]
    public required byte[] Blob { get; set; }

    /// <summary>
    /// Size of the blob in bytes.
    /// </summary>
    [Column("blob_size")]
    public int BlobSize { get; set; }

    /// <summary>
    /// MIME type of the content.
    /// </summary>
    [Column("content_type")]
    [MaxLength(128)]
    public required string ContentType { get; set; }

    /// <summary>
    /// UTC timestamp when chunk was created.
    /// </summary>
    [Column("created_at")]
    public DateTimeOffset CreatedAt { get; set; }
}

/// <summary>
/// EF Core entity for provcache.prov_revocations table.
/// </summary>
[Table("prov_revocations", Schema = "provcache")]
public sealed class ProvcacheRevocationEntity
{
    /// <summary>
    /// Unique revocation identifier.
    /// </summary>
    [Key]
    [Column("revocation_id")]
    public Guid RevocationId { get; set; }

    /// <summary>
    /// Type of revocation (policy, signer, feed, pattern).
    /// </summary>
    [Column("revocation_type")]
    [MaxLength(64)]
    public required string RevocationType { get; set; }

    /// <summary>
    /// Target hash that was revoked.
    /// </summary>
    [Column("target_hash")]
    [MaxLength(256)]
    public required string TargetHash { get; set; }

    /// <summary>
    /// Reason for revocation.
    /// </summary>
    [Column("reason")]
    [MaxLength(512)]
    public string? Reason { get; set; }

    /// <summary>
    /// Actor who initiated the revocation.
    /// </summary>
    [Column("actor")]
    [MaxLength(256)]
    public string? Actor { get; set; }

    /// <summary>
    /// Number of entries affected by the revocation.
    /// </summary>
    [Column("entries_affected")]
    public long EntriesAffected { get; set; }

    /// <summary>
    /// UTC timestamp when revocation occurred.
    /// </summary>
    [Column("created_at")]
    public DateTimeOffset CreatedAt { get; set; }
}
src/__Libraries/StellaOps.Provcache/IProvcacheRepository.cs (new file, 137 lines)
@@ -0,0 +1,137 @@
namespace StellaOps.Provcache;

/// <summary>
/// Repository interface for Provcache entries in persistent storage (Postgres).
/// </summary>
public interface IProvcacheRepository
{
    /// <summary>
    /// Gets a cache entry by VeriKey.
    /// </summary>
    /// <param name="veriKey">The cache key.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The cache entry if found, null otherwise.</returns>
    Task<ProvcacheEntry?> GetAsync(string veriKey, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets multiple cache entries by VeriKeys.
    /// </summary>
    /// <param name="veriKeys">The cache keys.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Dictionary of found entries.</returns>
    Task<IReadOnlyDictionary<string, ProvcacheEntry>> GetManyAsync(
        IEnumerable<string> veriKeys,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Inserts or updates a cache entry.
    /// </summary>
    /// <param name="entry">The cache entry to upsert.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task UpsertAsync(ProvcacheEntry entry, CancellationToken cancellationToken = default);

    /// <summary>
    /// Inserts or updates multiple cache entries in a batch.
    /// </summary>
    /// <param name="entries">The cache entries to upsert.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task UpsertManyAsync(IEnumerable<ProvcacheEntry> entries, CancellationToken cancellationToken = default);

    /// <summary>
    /// Deletes a cache entry by VeriKey.
    /// </summary>
    /// <param name="veriKey">The cache key.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>True if the entry existed and was deleted.</returns>
    Task<bool> DeleteAsync(string veriKey, CancellationToken cancellationToken = default);

    /// <summary>
    /// Deletes entries matching a policy hash.
    /// Used when policy is updated.
    /// </summary>
    /// <param name="policyHash">The policy hash to match.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Number of entries deleted.</returns>
    Task<long> DeleteByPolicyHashAsync(string policyHash, CancellationToken cancellationToken = default);

    /// <summary>
    /// Deletes entries matching a signer set hash.
    /// Used when a signer is revoked.
    /// </summary>
    /// <param name="signerSetHash">The signer set hash to match.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Number of entries deleted.</returns>
    Task<long> DeleteBySignerSetHashAsync(string signerSetHash, CancellationToken cancellationToken = default);

    /// <summary>
    /// Deletes entries older than a feed epoch.
    /// Used when feeds are updated.
    /// </summary>
    /// <param name="feedEpoch">The minimum feed epoch to keep.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Number of entries deleted.</returns>
    Task<long> DeleteByFeedEpochOlderThanAsync(string feedEpoch, CancellationToken cancellationToken = default);

    /// <summary>
    /// Deletes expired entries.
    /// </summary>
    /// <param name="asOf">The reference timestamp for expiry check.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Number of entries deleted.</returns>
    Task<long> DeleteExpiredAsync(DateTimeOffset asOf, CancellationToken cancellationToken = default);

    /// <summary>
    /// Increments the hit count for an entry.
    /// </summary>
    /// <param name="veriKey">The cache key.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task IncrementHitCountAsync(string veriKey, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets cache statistics.
    /// </summary>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Cache statistics.</returns>
    Task<ProvcacheStatistics> GetStatisticsAsync(CancellationToken cancellationToken = default);
}

/// <summary>
/// Cache statistics for monitoring and diagnostics.
/// </summary>
public sealed record ProvcacheStatistics
{
    /// <summary>
    /// Total number of entries in the cache.
    /// </summary>
    public long TotalEntries { get; init; }

    /// <summary>
    /// Total number of cache hits.
    /// </summary>
    public long TotalHits { get; init; }

    /// <summary>
    /// Number of entries expiring within the next hour.
    /// </summary>
    public long ExpiringWithinHour { get; init; }

    /// <summary>
    /// Number of unique policy hashes.
    /// </summary>
    public int UniquePolicies { get; init; }

    /// <summary>
    /// Number of unique signer set hashes.
    /// </summary>
    public int UniqueSignerSets { get; init; }

    /// <summary>
    /// Oldest entry timestamp.
    /// </summary>
    public DateTimeOffset? OldestEntry { get; init; }

    /// <summary>
    /// Newest entry timestamp.
    /// </summary>
    public DateTimeOffset? NewestEntry { get; init; }
}
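A consumption sketch for the statistics surface above; the logging hook is a hypothetical monitoring endpoint:

var stats = await repository.GetStatisticsAsync(cancellationToken);
logger.LogInformation(
    "provcache: {Entries} entries, {Hits} hits, {Expiring} expiring within 1h, {Policies} unique policies",
    stats.TotalEntries, stats.TotalHits, stats.ExpiringWithinHour, stats.UniquePolicies);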
src/__Libraries/StellaOps.Provcache/IProvcacheService.cs (new file, 363 lines)
@@ -0,0 +1,363 @@
namespace StellaOps.Provcache;

/// <summary>
/// High-level service interface for Provcache operations.
/// Orchestrates cache store and repository with metrics and invalidation logic.
/// </summary>
public interface IProvcacheService
{
    /// <summary>
    /// Gets a cached decision by VeriKey.
    /// </summary>
    /// <param name="veriKey">The cache key.</param>
    /// <param name="bypassCache">If true, skip cache and force re-evaluation.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The cache result with decision if found.</returns>
    Task<ProvcacheServiceResult> GetAsync(
        string veriKey,
        bool bypassCache = false,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Stores a decision in the cache.
    /// </summary>
    /// <param name="entry">The cache entry to store.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>True if the entry was stored successfully.</returns>
    Task<bool> SetAsync(ProvcacheEntry entry, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets or computes a decision using a factory function for cache misses.
    /// </summary>
    /// <param name="veriKey">The cache key.</param>
    /// <param name="factory">Factory function to create the entry on cache miss.</param>
    /// <param name="bypassCache">If true, skip cache and force re-computation.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The cached or newly computed entry.</returns>
    Task<ProvcacheEntry> GetOrComputeAsync(
        string veriKey,
        Func<CancellationToken, Task<ProvcacheEntry>> factory,
        bool bypassCache = false,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Invalidates a cache entry by VeriKey.
    /// </summary>
    /// <param name="veriKey">The cache key.</param>
    /// <param name="reason">Reason for invalidation (for audit log).</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>True if the entry existed and was invalidated.</returns>
    Task<bool> InvalidateAsync(
        string veriKey,
        string? reason = null,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Invalidates entries by invalidation criteria.
    /// </summary>
    /// <param name="request">The invalidation request.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Invalidation result with count of affected entries.</returns>
    Task<InvalidationResult> InvalidateByAsync(
        InvalidationRequest request,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets cache metrics for monitoring.
    /// </summary>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Cache metrics.</returns>
    Task<ProvcacheMetrics> GetMetricsAsync(CancellationToken cancellationToken = default);

    /// <summary>
    /// Prunes expired entries from the cache.
    /// </summary>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Number of entries pruned.</returns>
    Task<long> PruneExpiredAsync(CancellationToken cancellationToken = default);
}

/// <summary>
/// Result of a cache service lookup.
/// </summary>
public sealed record ProvcacheServiceResult
{
    /// <summary>
    /// The cache result status.
    /// </summary>
    public required ProvcacheResultStatus Status { get; init; }

    /// <summary>
    /// The cache entry if found.
    /// </summary>
    public ProvcacheEntry? Entry { get; init; }

    /// <summary>
    /// Whether the result came from cache (true) or needs computation (false).
    /// </summary>
    public bool WasCached => Status == ProvcacheResultStatus.CacheHit;

    /// <summary>
    /// Source of the cache hit for diagnostics.
    /// </summary>
    public string? Source { get; init; }

    /// <summary>
    /// Time taken for the lookup in milliseconds.
    /// </summary>
    public double ElapsedMs { get; init; }

    /// <summary>
    /// Creates a cache hit result.
    /// </summary>
    public static ProvcacheServiceResult Hit(ProvcacheEntry entry, string source, double elapsedMs) => new()
    {
        Status = ProvcacheResultStatus.CacheHit,
        Entry = entry,
        Source = source,
        ElapsedMs = elapsedMs
    };

    /// <summary>
    /// Creates a cache miss result.
    /// </summary>
    public static ProvcacheServiceResult Miss(double elapsedMs) => new()
    {
        Status = ProvcacheResultStatus.CacheMiss,
        Entry = null,
        Source = null,
        ElapsedMs = elapsedMs
    };

    /// <summary>
    /// Creates a bypassed result (cache was skipped).
    /// </summary>
    public static ProvcacheServiceResult Bypassed() => new()
    {
        Status = ProvcacheResultStatus.Bypassed,
        Entry = null,
        Source = null,
        ElapsedMs = 0
    };

    /// <summary>
    /// Creates an expired result.
    /// </summary>
    public static ProvcacheServiceResult Expired(ProvcacheEntry entry, double elapsedMs) => new()
    {
        Status = ProvcacheResultStatus.Expired,
        Entry = entry,
        Source = "expired",
        ElapsedMs = elapsedMs
    };
}

/// <summary>
/// Cache result status.
/// </summary>
public enum ProvcacheResultStatus
{
    /// <summary>
    /// Entry was found in cache and is valid.
    /// </summary>
    CacheHit,

    /// <summary>
    /// Entry was not found in cache.
    /// </summary>
    CacheMiss,

    /// <summary>
    /// Cache was bypassed (force re-computation).
    /// </summary>
    Bypassed,

    /// <summary>
    /// Entry was found but has expired.
    /// </summary>
    Expired
}

/// <summary>
/// Request for cache invalidation by criteria.
/// </summary>
public sealed record InvalidationRequest
{
    /// <summary>
    /// The invalidation type.
    /// </summary>
    public required InvalidationType Type { get; init; }

    /// <summary>
    /// The value to match for invalidation.
    /// </summary>
    public required string Value { get; init; }

    /// <summary>
    /// Reason for invalidation (for audit log).
    /// </summary>
    public string? Reason { get; init; }

    /// <summary>
    /// Actor who initiated the invalidation.
    /// </summary>
    public string? Actor { get; init; }

    /// <summary>
    /// Creates an invalidation request by policy hash.
    /// </summary>
    public static InvalidationRequest ByPolicyHash(string policyHash, string? reason = null) => new()
    {
        Type = InvalidationType.PolicyHash,
        Value = policyHash,
        Reason = reason ?? "policy-update"
    };

    /// <summary>
    /// Creates an invalidation request by signer set hash.
    /// </summary>
    public static InvalidationRequest BySignerSetHash(string signerSetHash, string? reason = null) => new()
    {
        Type = InvalidationType.SignerSetHash,
        Value = signerSetHash,
        Reason = reason ?? "signer-revocation"
    };

    /// <summary>
    /// Creates an invalidation request by feed epoch.
    /// </summary>
    public static InvalidationRequest ByFeedEpochOlderThan(string feedEpoch, string? reason = null) => new()
    {
        Type = InvalidationType.FeedEpochOlderThan,
        Value = feedEpoch,
        Reason = reason ?? "feed-update"
    };

    /// <summary>
    /// Creates an invalidation request by key pattern.
    /// </summary>
    public static InvalidationRequest ByPattern(string pattern, string? reason = null) => new()
    {
        Type = InvalidationType.Pattern,
        Value = pattern,
        Reason = reason ?? "pattern-invalidation"
    };
}

/// <summary>
/// Type of invalidation criteria.
/// </summary>
public enum InvalidationType
{
    /// <summary>
    /// Invalidate by policy hash.
    /// </summary>
    PolicyHash,

    /// <summary>
    /// Invalidate by signer set hash.
    /// </summary>
    SignerSetHash,

    /// <summary>
    /// Invalidate entries with feed epoch older than specified.
    /// </summary>
    FeedEpochOlderThan,

    /// <summary>
    /// Invalidate by key pattern.
    /// </summary>
    Pattern,

    /// <summary>
    /// Invalidate expired entries.
    /// </summary>
    Expired
}

/// <summary>
/// Result of an invalidation operation.
/// </summary>
public sealed record InvalidationResult
{
    /// <summary>
    /// Number of entries invalidated.
    /// </summary>
    public required long EntriesAffected { get; init; }

    /// <summary>
    /// The invalidation request that was executed.
    /// </summary>
    public required InvalidationRequest Request { get; init; }

    /// <summary>
    /// Timestamp of the invalidation.
    /// </summary>
    public required DateTimeOffset Timestamp { get; init; }

    /// <summary>
    /// Whether the invalidation was logged for audit.
    /// </summary>
    public bool WasLogged { get; init; }
}

/// <summary>
/// Cache metrics for monitoring and observability.
/// </summary>
public sealed record ProvcacheMetrics
{
    /// <summary>
    /// Total cache requests since startup.
    /// </summary>
    public long TotalRequests { get; init; }

    /// <summary>
    /// Total cache hits since startup.
    /// </summary>
    public long TotalHits { get; init; }

    /// <summary>
    /// Total cache misses since startup.
    /// </summary>
    public long TotalMisses { get; init; }

    /// <summary>
    /// Cache hit rate (0.0 - 1.0).
    /// </summary>
    public double HitRate => TotalRequests == 0 ? 0.0 : (double)TotalHits / TotalRequests;

    /// <summary>
    /// Average lookup latency in milliseconds.
    /// </summary>
    public double AvgLatencyMs { get; init; }

    /// <summary>
    /// P99 lookup latency in milliseconds.
    /// </summary>
    public double P99LatencyMs { get; init; }

    /// <summary>
    /// Current number of entries in cache.
    /// </summary>
    public long CurrentEntryCount { get; init; }

    /// <summary>
    /// Total invalidations since startup.
    /// </summary>
    public long TotalInvalidations { get; init; }

    /// <summary>
    /// Valkey cache health status.
    /// </summary>
    public bool ValkeyCacheHealthy { get; init; }

    /// <summary>
    /// Postgres repository health status.
    /// </summary>
    public bool PostgresRepositoryHealthy { get; init; }

    /// <summary>
    /// Timestamp when metrics were collected.
    /// </summary>
    public DateTimeOffset CollectedAt { get; init; }
}
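A call-site sketch for the miss-handling contract above. EvaluateAndDigestAsync, request, and forceReevaluate are placeholders for the caller's evaluation pipeline:

var entry = await provcacheService.GetOrComputeAsync(
    veriKey,
    ct => EvaluateAndDigestAsync(request, ct),
    bypassCache: forceReevaluate,
    cancellationToken);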
src/__Libraries/StellaOps.Provcache/IProvcacheStore.cs (new file, 150 lines)
@@ -0,0 +1,150 @@
|
||||
namespace StellaOps.Provcache;
|
||||
|
||||
/// <summary>
|
||||
/// Cache store interface for Provcache with read-through semantics.
|
||||
/// Abstracts the caching layer (Valkey, in-memory, etc.).
|
||||
/// </summary>
|
||||
public interface IProvcacheStore
|
||||
{
|
||||
/// <summary>
|
||||
/// Gets the store provider name for diagnostics.
|
||||
/// </summary>
|
||||
string ProviderName { get; }
|
||||
|
||||
/// <summary>
|
||||
/// Gets a cache entry by VeriKey.
|
||||
/// </summary>
|
||||
/// <param name="veriKey">The cache key.</param>
|
||||
/// <param name="cancellationToken">Cancellation token.</param>
|
||||
/// <returns>Cache result indicating hit/miss with the entry if found.</returns>
|
||||
ValueTask<ProvcacheLookupResult> GetAsync(string veriKey, CancellationToken cancellationToken = default);
|
||||
|
||||
/// <summary>
|
||||
/// Gets multiple cache entries by VeriKeys.
|
||||
/// </summary>
|
||||
/// <param name="veriKeys">The cache keys.</param>
|
||||
/// <param name="cancellationToken">Cancellation token.</param>
|
||||
/// <returns>Dictionary of found entries and list of misses.</returns>
|
||||
ValueTask<ProvcacheBatchLookupResult> GetManyAsync(
|
||||
IEnumerable<string> veriKeys,
|
||||
CancellationToken cancellationToken = default);
|
||||
|
||||
/// <summary>
|
||||
/// Sets a cache entry.
|
||||
/// </summary>
|
||||
/// <param name="entry">The cache entry to set.</param>
|
||||
/// <param name="cancellationToken">Cancellation token.</param>
|
||||
ValueTask SetAsync(ProvcacheEntry entry, CancellationToken cancellationToken = default);
|
||||
|
||||
/// <summary>
|
||||
/// Sets multiple cache entries in a batch.
|
||||
/// </summary>
|
||||
/// <param name="entries">The cache entries to set.</param>
|
||||
/// <param name="cancellationToken">Cancellation token.</param>
|
||||
ValueTask SetManyAsync(IEnumerable<ProvcacheEntry> entries, CancellationToken cancellationToken = default);
|
||||
|
||||
/// <summary>
|
||||
/// Invalidates a cache entry by VeriKey.
|
||||
/// </summary>
|
||||
/// <param name="veriKey">The cache key.</param>
|
||||
/// <param name="cancellationToken">Cancellation token.</param>
|
||||
/// <returns>True if the entry existed and was invalidated.</returns>
|
||||
ValueTask<bool> InvalidateAsync(string veriKey, CancellationToken cancellationToken = default);
|
||||
|
||||
/// <summary>
|
||||
/// Invalidates entries matching a key pattern.
|
||||
/// </summary>
|
||||
/// <param name="pattern">The key pattern (supports wildcards).</param>
|
||||
/// <param name="cancellationToken">Cancellation token.</param>
|
||||
/// <returns>Number of entries invalidated.</returns>
|
||||
ValueTask<long> InvalidateByPatternAsync(string pattern, CancellationToken cancellationToken = default);
|
||||
|
||||
/// <summary>
|
||||
/// Gets or sets a cache entry using a factory function for cache misses.
|
||||
/// </summary>
|
||||
/// <param name="veriKey">The cache key.</param>
|
||||
/// <param name="factory">Factory function to create the entry on cache miss.</param>
|
||||
/// <param name="cancellationToken">Cancellation token.</param>
|
||||
/// <returns>The cached or newly created entry.</returns>
|
||||
ValueTask<ProvcacheEntry> GetOrSetAsync(
|
||||
string veriKey,
|
||||
Func<CancellationToken, ValueTask<ProvcacheEntry>> factory,
|
||||
CancellationToken cancellationToken = default);
|
||||
}

/// <summary>
/// Result of a single cache lookup.
/// </summary>
public sealed record ProvcacheLookupResult
{
    /// <summary>
    /// Whether the entry was found in cache.
    /// </summary>
    public required bool IsHit { get; init; }

    /// <summary>
    /// The cache entry if found.
    /// </summary>
    public ProvcacheEntry? Entry { get; init; }

    /// <summary>
    /// Source of the cache hit (e.g., "valkey", "postgres").
    /// Null for cache misses.
    /// </summary>
    public string? Source { get; init; }

    /// <summary>
    /// Time taken for the lookup in milliseconds.
    /// </summary>
    public double ElapsedMs { get; init; }

    /// <summary>
    /// Creates a cache hit result.
    /// </summary>
    public static ProvcacheLookupResult Hit(ProvcacheEntry entry, string source, double elapsedMs) => new()
    {
        IsHit = true,
        Entry = entry,
        Source = source,
        ElapsedMs = elapsedMs
    };

    /// <summary>
    /// Creates a cache miss result.
    /// </summary>
    public static ProvcacheLookupResult Miss(double elapsedMs) => new()
    {
        IsHit = false,
        Entry = null,
        Source = null,
        ElapsedMs = elapsedMs
    };
}

/// <summary>
/// Result of a batch cache lookup.
/// </summary>
public sealed record ProvcacheBatchLookupResult
{
    /// <summary>
    /// Entries found in cache, keyed by VeriKey.
    /// </summary>
    public required IReadOnlyDictionary<string, ProvcacheEntry> Hits { get; init; }

    /// <summary>
    /// VeriKeys that were not found in cache.
    /// </summary>
    public required IReadOnlyList<string> Misses { get; init; }

    /// <summary>
    /// Time taken for the lookup in milliseconds.
    /// </summary>
    public double ElapsedMs { get; init; }

    /// <summary>
    /// Cache hit rate for this batch (0.0 - 1.0).
    /// </summary>
    public double HitRate => Hits.Count == 0 && Misses.Count == 0
        ? 0.0
        : (double)Hits.Count / (Hits.Count + Misses.Count);
}
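
// Example (illustrative sketch): a batch lookup returns hits and misses together,
// so callers can re-evaluate only the misses. With 3 hits and 1 miss, HitRate is
// 3 / (3 + 1) = 0.75. The `store` and `keys` names are hypothetical.
//
//     var batch = await store.GetManyAsync(keys, cancellationToken);
//     Console.WriteLine($"hit rate: {batch.HitRate:P0}");   // e.g. "75 %"
//     foreach (var missedKey in batch.Misses) { /* re-evaluate and SetManyAsync */ }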

79 src/__Libraries/StellaOps.Provcache/Models/DecisionDigest.cs Normal file
@@ -0,0 +1,79 @@
namespace StellaOps.Provcache;

/// <summary>
/// Canonicalized representation of an evaluation result that can be cached
/// and verified for deterministic replay.
/// </summary>
public sealed record DecisionDigest
{
    /// <summary>
    /// Schema version of this digest format.
    /// </summary>
    public required string DigestVersion { get; init; } = "v1";

    /// <summary>
    /// Composite cache key that uniquely identifies the provenance decision context.
    /// Format: sha256:<hex>
    /// </summary>
    public required string VeriKey { get; init; }

    /// <summary>
    /// Hash of sorted dispositions from the evaluation result.
    /// Used for quick comparison without full evaluation replay.
    /// Format: sha256:<hex>
    /// </summary>
    public required string VerdictHash { get; init; }

    /// <summary>
    /// Merkle root of all evidence chunks used in this decision.
    /// Enables incremental verification without fetching full evidence.
    /// Format: sha256:<hex>
    /// </summary>
    public required string ProofRoot { get; init; }

    /// <summary>
    /// Identifiers needed to replay the evaluation with the same inputs.
    /// Contains feed IDs and rule IDs.
    /// </summary>
    public required ReplaySeed ReplaySeed { get; init; }

    /// <summary>
    /// UTC timestamp when this digest was created.
    /// </summary>
    public required DateTimeOffset CreatedAt { get; init; }

    /// <summary>
    /// UTC timestamp when this digest expires and should be re-evaluated.
    /// </summary>
    public required DateTimeOffset ExpiresAt { get; init; }

    /// <summary>
    /// Composite trust score (0-100) indicating decision confidence.
    /// Based on reachability, SBOM completeness, VEX coverage, policy freshness, and signer trust.
    /// </summary>
    public required int TrustScore { get; init; }
}

/// <summary>
/// Identifiers needed to replay an evaluation with the same inputs.
/// </summary>
public sealed record ReplaySeed
{
    /// <summary>
    /// Advisory feed identifiers used in evaluation.
    /// Example: ["cve-2024", "ghsa-2024", "oval-debian"]
    /// </summary>
    public required IReadOnlyList<string> FeedIds { get; init; } = [];

    /// <summary>
    /// Policy rule identifiers used in evaluation.
    /// Example: ["default-policy-v2", "org-exceptions"]
    /// </summary>
    public required IReadOnlyList<string> RuleIds { get; init; } = [];

    /// <summary>
    /// Optional: Frozen epoch timestamp for deterministic replay.
    /// If set, evaluation should use this timestamp instead of the current time.
    /// </summary>
    public DateTimeOffset? FrozenEpoch { get; init; }
}
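
// Example (illustrative sketch): constructing a digest with a replay seed. All
// hash values below are placeholders; FrozenEpoch pins the evaluation clock so a
// later replay sees the same inputs.
//
//     var digest = new DecisionDigest
//     {
//         DigestVersion = "v1",
//         VeriKey = "sha256:aaaa...",
//         VerdictHash = "sha256:bbbb...",
//         ProofRoot = "sha256:cccc...",
//         ReplaySeed = new ReplaySeed
//         {
//             FeedIds = ["cve-2024", "ghsa-2024"],
//             RuleIds = ["default-policy-v2"],
//             FrozenEpoch = DateTimeOffset.Parse("2024-12-24T12:00:00Z")
//         },
//         CreatedAt = DateTimeOffset.UtcNow,
//         ExpiresAt = DateTimeOffset.UtcNow.AddHours(24),
//         TrustScore = 87
//     };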

69 src/__Libraries/StellaOps.Provcache/Models/ProvcacheEntry.cs Normal file
@@ -0,0 +1,69 @@
namespace StellaOps.Provcache;

/// <summary>
/// Cache entry wrapping a <see cref="DecisionDigest"/> with storage metadata.
/// </summary>
public sealed record ProvcacheEntry
{
    /// <summary>
    /// Composite cache key (VeriKey) that uniquely identifies this entry.
    /// Format: sha256:<hex>
    /// </summary>
    public required string VeriKey { get; init; }

    /// <summary>
    /// The cached decision digest.
    /// </summary>
    public required DecisionDigest Decision { get; init; }

    /// <summary>
    /// Hash of the policy bundle used for this decision.
    /// Used for invalidation when policy changes.
    /// Format: sha256:<hex>
    /// </summary>
    public required string PolicyHash { get; init; }

    /// <summary>
    /// Hash of the signer certificate set used for this decision.
    /// Used for invalidation on key rotation or revocation.
    /// Format: sha256:<hex>
    /// </summary>
    public required string SignerSetHash { get; init; }

    /// <summary>
    /// Advisory feed epoch identifier (e.g., "2024-12-24" or "2024-W52").
    /// Used for invalidation when feeds update.
    /// </summary>
    public required string FeedEpoch { get; init; }

    /// <summary>
    /// UTC timestamp when this entry was created.
    /// </summary>
    public required DateTimeOffset CreatedAt { get; init; }

    /// <summary>
    /// UTC timestamp when this entry expires.
    /// After expiry, the entry should be re-evaluated.
    /// </summary>
    public required DateTimeOffset ExpiresAt { get; init; }

    /// <summary>
    /// Number of cache hits for this entry.
    /// Used for cache analytics and LRU decisions.
    /// </summary>
    public long HitCount { get; init; }

    /// <summary>
    /// UTC timestamp when this entry was last accessed.
    /// </summary>
    public DateTimeOffset? LastAccessedAt { get; init; }

    /// <summary>
    /// Creates a copy with incremented hit count and updated access time.
    /// </summary>
    public ProvcacheEntry WithHit(DateTimeOffset accessTime) => this with
    {
        HitCount = HitCount + 1,
        LastAccessedAt = accessTime
    };
}
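
// Example (illustrative sketch): WithHit is non-destructive; it returns a copy,
// which suits immutable records and avoids races on shared instances.
//
//     var touched = entry.WithHit(timeProvider.GetUtcNow());
//     // touched.HitCount == entry.HitCount + 1; `entry` itself is unchanged.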

149 src/__Libraries/StellaOps.Provcache/ProvcacheOptions.cs Normal file
@@ -0,0 +1,149 @@
using System.ComponentModel.DataAnnotations;

namespace StellaOps.Provcache;

/// <summary>
/// Configuration options for the Provcache service.
/// </summary>
public sealed class ProvcacheOptions
{
    /// <summary>
    /// Configuration section name.
    /// </summary>
    public const string SectionName = "Provcache";

    /// <summary>
    /// Default time-to-live for cache entries.
    /// Default: 24 hours.
    /// </summary>
    [Range(typeof(TimeSpan), "00:01:00", "7.00:00:00", ErrorMessage = "DefaultTtl must be between 1 minute and 7 days")]
    public TimeSpan DefaultTtl { get; set; } = TimeSpan.FromHours(24);

    /// <summary>
    /// Maximum time-to-live for cache entries.
    /// Entries cannot be cached longer than this regardless of request.
    /// Default: 7 days.
    /// </summary>
    [Range(typeof(TimeSpan), "00:01:00", "30.00:00:00", ErrorMessage = "MaxTtl must be between 1 minute and 30 days")]
    public TimeSpan MaxTtl { get; set; } = TimeSpan.FromDays(7);

    /// <summary>
    /// Time window bucket size for VeriKey computation.
    /// Entries within the same time window share cache keys.
    /// Smaller buckets = more granular invalidation but lower hit rate.
    /// Default: 1 hour.
    /// </summary>
    [Range(typeof(TimeSpan), "00:01:00", "1.00:00:00", ErrorMessage = "TimeWindowBucket must be between 1 minute and 24 hours")]
    public TimeSpan TimeWindowBucket { get; set; } = TimeSpan.FromHours(1);

    /// <summary>
    /// Key prefix for Valkey storage.
    /// </summary>
    public string ValkeyKeyPrefix { get; set; } = "stellaops:prov:";

    /// <summary>
    /// Enable write-behind to Postgres for persistence.
    /// When true, cache writes are immediately persisted to Valkey and
    /// asynchronously written to Postgres.
    /// Default: true.
    /// </summary>
    public bool EnableWriteBehind { get; set; } = true;

    /// <summary>
    /// Interval for flushing the write-behind queue to Postgres.
    /// Default: 5 seconds.
    /// </summary>
    [Range(typeof(TimeSpan), "00:00:01", "00:05:00", ErrorMessage = "WriteBehindFlushInterval must be between 1 second and 5 minutes")]
    public TimeSpan WriteBehindFlushInterval { get; set; } = TimeSpan.FromSeconds(5);

    /// <summary>
    /// Maximum batch size for write-behind flush.
    /// Larger batches are more efficient but increase memory usage.
    /// Default: 100.
    /// </summary>
    [Range(1, 10000, ErrorMessage = "WriteBehindMaxBatchSize must be between 1 and 10000")]
    public int WriteBehindMaxBatchSize { get; set; } = 100;

    /// <summary>
    /// Write-behind batch size (alias for WriteBehindMaxBatchSize).
    /// </summary>
    public int WriteBehindBatchSize => WriteBehindMaxBatchSize;

    /// <summary>
    /// Write-behind flush interval in milliseconds.
    /// </summary>
    public int WriteBehindFlushIntervalMs => (int)WriteBehindFlushInterval.TotalMilliseconds;

    /// <summary>
    /// Maximum capacity of the write-behind queue.
    /// When full, writes will block until space is available.
    /// Default: 10000.
    /// </summary>
    [Range(100, 1000000, ErrorMessage = "WriteBehindQueueCapacity must be between 100 and 1000000")]
    public int WriteBehindQueueCapacity { get; set; } = 10000;

    /// <summary>
    /// Maximum retry attempts for failed write-behind operations.
    /// Default: 3.
    /// </summary>
    [Range(0, 10, ErrorMessage = "WriteBehindMaxRetries must be between 0 and 10")]
    public int WriteBehindMaxRetries { get; set; } = 3;

    /// <summary>
    /// Chunk size for evidence storage in bytes.
    /// Default: 64 KB.
    /// </summary>
    [Range(1024, 1048576, ErrorMessage = "ChunkSize must be between 1 KB and 1 MB")]
    public int ChunkSize { get; set; } = 65536;

    /// <summary>
    /// Maximum chunks per cache entry.
    /// Limits evidence size for a single entry.
    /// Default: 1000.
    /// </summary>
    [Range(1, 100000, ErrorMessage = "MaxChunksPerEntry must be between 1 and 100000")]
    public int MaxChunksPerEntry { get; set; } = 1000;

    /// <summary>
    /// Allow clients to bypass the cache and force re-evaluation.
    /// Default: true.
    /// </summary>
    public bool AllowCacheBypass { get; set; } = true;

    /// <summary>
    /// Current digest version for serialization.
    /// </summary>
    public string DigestVersion { get; set; } = "v1";

    /// <summary>
    /// Hash algorithm for VeriKey and digest computation.
    /// Default: SHA256.
    /// </summary>
    public string HashAlgorithm { get; set; } = "SHA256";

    /// <summary>
    /// Enable the Valkey cache layer.
    /// When false, only Postgres is used (slower but simpler).
    /// Default: true.
    /// </summary>
    public bool EnableValkeyCache { get; set; } = true;

    /// <summary>
    /// Enable sliding expiration for cached entries.
    /// When true, TTL is refreshed on each cache hit.
    /// Default: false.
    /// </summary>
    public bool SlidingExpiration { get; set; } = false;

    /// <summary>
    /// Computes the time window epoch for a timestamp based on the configured bucket size.
    /// </summary>
    /// <param name="timestamp">The timestamp to compute the epoch for.</param>
    /// <returns>The epoch identifier string.</returns>
    public string ComputeTimeWindow(DateTimeOffset timestamp)
    {
        // Truncate the timestamp down to the start of its bucket via integer division.
        var bucketTicks = TimeWindowBucket.Ticks;
        var epoch = timestamp.UtcTicks / bucketTicks * bucketTicks;
        return new DateTimeOffset(epoch, TimeSpan.Zero).ToString("yyyy-MM-ddTHH:mm:ssZ");
    }
}
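
// Example (illustrative sketch): with the default 1-hour bucket, any timestamp
// inside the same hour truncates to the same epoch string, so those lookups
// share one VeriKey.
//
//     var options = new ProvcacheOptions();   // TimeWindowBucket = 1 hour
//     options.ComputeTimeWindow(DateTimeOffset.Parse("2024-12-24T12:34:56Z"));
//     // => "2024-12-24T12:00:00Z" (12:00:01 through 12:59:59 map here too)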

392 src/__Libraries/StellaOps.Provcache/ProvcacheService.cs Normal file
@@ -0,0 +1,392 @@
using System.Diagnostics;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;

namespace StellaOps.Provcache;

/// <summary>
/// Default implementation of <see cref="IProvcacheService"/>.
/// Orchestrates cache store and repository with metrics and invalidation logic.
/// </summary>
public sealed class ProvcacheService : IProvcacheService
{
    private readonly IProvcacheStore _store;
    private readonly IProvcacheRepository _repository;
    private readonly IWriteBehindQueue? _writeBehindQueue;
    private readonly ProvcacheOptions _options;
    private readonly ILogger<ProvcacheService> _logger;
    private readonly TimeProvider _timeProvider;

    // Metrics counters
    private long _totalRequests;
    private long _totalHits;
    private long _totalMisses;
    private long _totalInvalidations;
    private readonly object _metricsLock = new();
    private readonly List<double> _latencies = new(1000);

    public ProvcacheService(
        IProvcacheStore store,
        IProvcacheRepository repository,
        IOptions<ProvcacheOptions> options,
        ILogger<ProvcacheService> logger,
        IWriteBehindQueue? writeBehindQueue = null,
        TimeProvider? timeProvider = null)
    {
        _store = store ?? throw new ArgumentNullException(nameof(store));
        _repository = repository ?? throw new ArgumentNullException(nameof(repository));
        _options = options?.Value ?? throw new ArgumentNullException(nameof(options));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
        _writeBehindQueue = writeBehindQueue;
        _timeProvider = timeProvider ?? TimeProvider.System;
    }

    /// <inheritdoc />
    public async Task<ProvcacheServiceResult> GetAsync(
        string veriKey,
        bool bypassCache = false,
        CancellationToken cancellationToken = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(veriKey);

        if (bypassCache && _options.AllowCacheBypass)
        {
            _logger.LogDebug("Cache bypass requested for VeriKey {VeriKey}", veriKey);
            return ProvcacheServiceResult.Bypassed();
        }

        var sw = Stopwatch.StartNew();
        using var activity = ProvcacheTelemetry.StartGetActivity(veriKey);

        try
        {
            var result = await _store.GetAsync(veriKey, cancellationToken).ConfigureAwait(false);
            sw.Stop();

            RecordMetrics(result.IsHit, sw.Elapsed.TotalMilliseconds);

            if (result.IsHit && result.Entry is not null)
            {
                // Check if expired
                if (result.Entry.ExpiresAt <= _timeProvider.GetUtcNow())
                {
                    _logger.LogDebug("Cache entry for VeriKey {VeriKey} is expired", veriKey);
                    ProvcacheTelemetry.RecordRequest("get", "expired");
                    return ProvcacheServiceResult.Expired(result.Entry, sw.Elapsed.TotalMilliseconds);
                }

                _logger.LogDebug(
                    "Cache hit for VeriKey {VeriKey} from {Source} in {ElapsedMs}ms",
                    veriKey,
                    result.Source,
                    sw.Elapsed.TotalMilliseconds);

                ProvcacheTelemetry.MarkCacheHit(activity, result.Source ?? "valkey");
                ProvcacheTelemetry.RecordHit(result.Source ?? "valkey");
                ProvcacheTelemetry.RecordRequest("get", ProvcacheTelemetry.ResultHit);
                ProvcacheTelemetry.RecordLatency("get", sw.Elapsed);

                return ProvcacheServiceResult.Hit(result.Entry, result.Source!, sw.Elapsed.TotalMilliseconds);
            }

            // Cache miss - try the repository
            var dbEntry = await _repository.GetAsync(veriKey, cancellationToken).ConfigureAwait(false);
            sw.Stop();

            if (dbEntry is not null)
            {
                // Check if expired
                if (dbEntry.ExpiresAt <= _timeProvider.GetUtcNow())
                {
                    _logger.LogDebug("Database entry for VeriKey {VeriKey} is expired", veriKey);
                    ProvcacheTelemetry.RecordRequest("get", "expired");
                    return ProvcacheServiceResult.Expired(dbEntry, sw.Elapsed.TotalMilliseconds);
                }

                // Backfill the cache
                await _store.SetAsync(dbEntry, cancellationToken).ConfigureAwait(false);

                _logger.LogDebug(
                    "Cache backfill for VeriKey {VeriKey} from postgres in {ElapsedMs}ms",
                    veriKey,
                    sw.Elapsed.TotalMilliseconds);

                ProvcacheTelemetry.MarkCacheHit(activity, "postgres");
                ProvcacheTelemetry.RecordHit("postgres");
                ProvcacheTelemetry.RecordRequest("get", ProvcacheTelemetry.ResultHit);
                ProvcacheTelemetry.RecordLatency("get", sw.Elapsed);

                return ProvcacheServiceResult.Hit(dbEntry, "postgres", sw.Elapsed.TotalMilliseconds);
            }

            ProvcacheTelemetry.MarkCacheMiss(activity);
            ProvcacheTelemetry.RecordMiss();
            ProvcacheTelemetry.RecordRequest("get", ProvcacheTelemetry.ResultMiss);
            ProvcacheTelemetry.RecordLatency("get", sw.Elapsed);

            _logger.LogDebug("Cache miss for VeriKey {VeriKey} in {ElapsedMs}ms", veriKey, sw.Elapsed.TotalMilliseconds);
            return ProvcacheServiceResult.Miss(sw.Elapsed.TotalMilliseconds);
        }
        catch (Exception ex)
        {
            ProvcacheTelemetry.MarkError(activity, ex.Message);
            ProvcacheTelemetry.RecordRequest("get", ProvcacheTelemetry.ResultError);
            _logger.LogError(ex, "Error getting cache entry for VeriKey {VeriKey}", veriKey);
            return ProvcacheServiceResult.Miss(sw.Elapsed.TotalMilliseconds);
        }
    }

    /// <inheritdoc />
    public async Task<bool> SetAsync(ProvcacheEntry entry, CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(entry);

        var sw = Stopwatch.StartNew();
        using var activity = ProvcacheTelemetry.StartSetActivity(entry.VeriKey, entry.Decision.TrustScore);

        try
        {
            // Store in cache
            await _store.SetAsync(entry, cancellationToken).ConfigureAwait(false);

            // Persist to repository (write-behind if enabled)
            if (_options.EnableWriteBehind && _writeBehindQueue is not null)
            {
                // Enqueue for batched async persistence
                await _writeBehindQueue.EnqueueAsync(entry, cancellationToken).ConfigureAwait(false);
            }
            else
            {
                // Synchronous write to repository
                await _repository.UpsertAsync(entry, cancellationToken).ConfigureAwait(false);
            }

            sw.Stop();
            ProvcacheTelemetry.RecordRequest("set", ProvcacheTelemetry.ResultCreated);
            ProvcacheTelemetry.RecordLatency("set", sw.Elapsed);

            _logger.LogDebug("Stored cache entry for VeriKey {VeriKey}", entry.VeriKey);
            return true;
        }
        catch (Exception ex)
        {
            ProvcacheTelemetry.MarkError(activity, ex.Message);
            ProvcacheTelemetry.RecordRequest("set", ProvcacheTelemetry.ResultError);
            _logger.LogError(ex, "Error storing cache entry for VeriKey {VeriKey}", entry.VeriKey);
            return false;
        }
    }

    /// <inheritdoc />
    public async Task<ProvcacheEntry> GetOrComputeAsync(
        string veriKey,
        Func<CancellationToken, Task<ProvcacheEntry>> factory,
        bool bypassCache = false,
        CancellationToken cancellationToken = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(veriKey);
        ArgumentNullException.ThrowIfNull(factory);

        if (!bypassCache || !_options.AllowCacheBypass)
        {
            var result = await GetAsync(veriKey, bypassCache: false, cancellationToken).ConfigureAwait(false);
            if (result.WasCached && result.Entry is not null)
            {
                return result.Entry;
            }
        }

        // Compute a new entry
        var entry = await factory(cancellationToken).ConfigureAwait(false);

        // Store it
        await SetAsync(entry, cancellationToken).ConfigureAwait(false);

        return entry;
    }

    /// <inheritdoc />
    public async Task<bool> InvalidateAsync(
        string veriKey,
        string? reason = null,
        CancellationToken cancellationToken = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(veriKey);

        using var activity = ProvcacheTelemetry.StartInvalidateActivity("verikey", veriKey);

        try
        {
            var invalidated = await _store.InvalidateAsync(veriKey, cancellationToken).ConfigureAwait(false);
            await _repository.DeleteAsync(veriKey, cancellationToken).ConfigureAwait(false);

            Interlocked.Increment(ref _totalInvalidations);
            ProvcacheTelemetry.RecordInvalidation("verikey");
            ProvcacheTelemetry.RecordRequest("invalidate", ProvcacheTelemetry.ResultInvalidated);

            _logger.LogInformation(
                "Invalidated cache entry for VeriKey {VeriKey}, reason: {Reason}",
                veriKey,
                reason ?? "unspecified");

            return invalidated;
        }
        catch (Exception ex)
        {
            ProvcacheTelemetry.MarkError(activity, ex.Message);
            ProvcacheTelemetry.RecordRequest("invalidate", ProvcacheTelemetry.ResultError);
            _logger.LogError(ex, "Error invalidating cache entry for VeriKey {VeriKey}", veriKey);
            return false;
        }
    }

    /// <inheritdoc />
    public async Task<InvalidationResult> InvalidateByAsync(
        InvalidationRequest request,
        CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(request);

        var invalidationType = request.Type?.ToString().ToLowerInvariant() ?? "unknown";
        using var activity = ProvcacheTelemetry.StartInvalidateActivity(invalidationType, request.Value);

        try
        {
            long affected = 0;

            switch (request.Type)
            {
                case InvalidationType.PolicyHash:
                    affected = await _repository.DeleteByPolicyHashAsync(request.Value, cancellationToken)
                        .ConfigureAwait(false);
                    await _store.InvalidateByPatternAsync($"*policy:{request.Value}*", cancellationToken)
                        .ConfigureAwait(false);
                    break;

                case InvalidationType.SignerSetHash:
                    affected = await _repository.DeleteBySignerSetHashAsync(request.Value, cancellationToken)
                        .ConfigureAwait(false);
                    await _store.InvalidateByPatternAsync($"*signer:{request.Value}*", cancellationToken)
                        .ConfigureAwait(false);
                    break;

                case InvalidationType.FeedEpochOlderThan:
                    affected = await _repository.DeleteByFeedEpochOlderThanAsync(request.Value, cancellationToken)
                        .ConfigureAwait(false);
                    break;

                case InvalidationType.Pattern:
                    affected = await _store.InvalidateByPatternAsync(request.Value, cancellationToken)
                        .ConfigureAwait(false);
                    break;

                case InvalidationType.Expired:
                    affected = await _repository.DeleteExpiredAsync(_timeProvider.GetUtcNow(), cancellationToken)
                        .ConfigureAwait(false);
                    break;
            }

            Interlocked.Add(ref _totalInvalidations, affected);
            ProvcacheTelemetry.RecordInvalidation(invalidationType, affected);
            ProvcacheTelemetry.RecordRequest("invalidate", ProvcacheTelemetry.ResultInvalidated);

            _logger.LogInformation(
                "Invalidated {Count} cache entries by {Type}={Value}, reason: {Reason}, actor: {Actor}",
                affected,
                request.Type,
                request.Value,
                request.Reason,
                request.Actor);

            return new InvalidationResult
            {
                EntriesAffected = affected,
                Request = request,
                Timestamp = _timeProvider.GetUtcNow(),
                WasLogged = true
            };
        }
        catch (Exception ex)
        {
            ProvcacheTelemetry.MarkError(activity, ex.Message);
            ProvcacheTelemetry.RecordRequest("invalidate", ProvcacheTelemetry.ResultError);
            _logger.LogError(
                ex,
                "Error invalidating cache entries by {Type}={Value}",
                request.Type,
                request.Value);

            return new InvalidationResult
            {
                EntriesAffected = 0,
                Request = request,
                Timestamp = _timeProvider.GetUtcNow(),
                WasLogged = false
            };
        }
    }

    /// <inheritdoc />
    public async Task<ProvcacheMetrics> GetMetricsAsync(CancellationToken cancellationToken = default)
    {
        var stats = await _repository.GetStatisticsAsync(cancellationToken).ConfigureAwait(false);

        double avgLatency, p99Latency;
        lock (_metricsLock)
        {
            avgLatency = _latencies.Count > 0 ? _latencies.Average() : 0;
            p99Latency = _latencies.Count > 0
                ? _latencies.OrderBy(x => x).ElementAt((int)(_latencies.Count * 0.99))
                : 0;
        }

        return new ProvcacheMetrics
        {
            TotalRequests = Interlocked.Read(ref _totalRequests),
            TotalHits = Interlocked.Read(ref _totalHits),
            TotalMisses = Interlocked.Read(ref _totalMisses),
            TotalInvalidations = Interlocked.Read(ref _totalInvalidations),
            CurrentEntryCount = stats.TotalEntries,
            AvgLatencyMs = avgLatency,
            P99LatencyMs = p99Latency,
            ValkeyCacheHealthy = true, // TODO: Add health check
            PostgresRepositoryHealthy = true, // TODO: Add health check
            CollectedAt = _timeProvider.GetUtcNow()
        };
    }

    /// <inheritdoc />
    public async Task<long> PruneExpiredAsync(CancellationToken cancellationToken = default)
    {
        var pruned = await _repository.DeleteExpiredAsync(_timeProvider.GetUtcNow(), cancellationToken)
            .ConfigureAwait(false);

        _logger.LogInformation("Pruned {Count} expired cache entries", pruned);
        return pruned;
    }

    private void RecordMetrics(bool isHit, double latencyMs)
    {
        Interlocked.Increment(ref _totalRequests);

        if (isHit)
        {
            Interlocked.Increment(ref _totalHits);
        }
        else
        {
            Interlocked.Increment(ref _totalMisses);
        }

        lock (_metricsLock)
        {
            _latencies.Add(latencyMs);

            // Keep only the last 1000 latencies for percentile calculation
            if (_latencies.Count > 1000)
            {
                _latencies.RemoveAt(0);
            }
        }
    }
}
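
// Example (illustrative sketch): typical caller flow against IProvcacheService.
// GetOrComputeAsync hides the hit/miss/backfill logic above; `service` and
// `EvaluateAsync` are hypothetical, and the InvalidationRequest shape is inferred
// from how InvalidateByAsync consumes it.
//
//     var entry = await service.GetOrComputeAsync(
//         veriKey,
//         ct => EvaluateAsync(ct),   // runs only when nothing cached is usable
//         bypassCache: false,
//         cancellationToken);
//
//     // On a policy change, drop everything derived from the old bundle:
//     // await service.InvalidateByAsync(
//     //     new InvalidationRequest { Type = InvalidationType.PolicyHash, Value = oldPolicyHash, ... },
//     //     cancellationToken);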

@@ -0,0 +1,94 @@
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;

namespace StellaOps.Provcache;

/// <summary>
/// Extension methods for registering Provcache services.
/// </summary>
public static class ProvcacheServiceCollectionExtensions
{
    /// <summary>
    /// Adds Provcache services to the service collection.
    /// </summary>
    /// <param name="services">The service collection.</param>
    /// <param name="configuration">The application configuration; options are bound from the "Provcache" section.</param>
    /// <returns>The service collection for chaining.</returns>
    public static IServiceCollection AddProvcache(
        this IServiceCollection services,
        IConfiguration configuration)
    {
        ArgumentNullException.ThrowIfNull(services);
        ArgumentNullException.ThrowIfNull(configuration);

        // Register options
        services.Configure<ProvcacheOptions>(configuration.GetSection(ProvcacheOptions.SectionName));

        // Register core services
        services.AddSingleton<IProvcacheService, ProvcacheService>();

        // Register the write-behind queue as a hosted service
        services.AddSingleton<WriteBehindQueue>();
        services.AddSingleton<IWriteBehindQueue>(sp => sp.GetRequiredService<WriteBehindQueue>());
        services.AddHostedService(sp => sp.GetRequiredService<WriteBehindQueue>());

        return services;
    }

    /// <summary>
    /// Adds Provcache services with custom options.
    /// </summary>
    /// <param name="services">The service collection.</param>
    /// <param name="configure">Action to configure options.</param>
    /// <returns>The service collection for chaining.</returns>
    public static IServiceCollection AddProvcache(
        this IServiceCollection services,
        Action<ProvcacheOptions> configure)
    {
        ArgumentNullException.ThrowIfNull(services);
        ArgumentNullException.ThrowIfNull(configure);

        // Register options
        services.Configure(configure);

        // Register core services
        services.AddSingleton<IProvcacheService, ProvcacheService>();

        // Register the write-behind queue as a hosted service
        services.AddSingleton<WriteBehindQueue>();
        services.AddSingleton<IWriteBehindQueue>(sp => sp.GetRequiredService<WriteBehindQueue>());
        services.AddHostedService(sp => sp.GetRequiredService<WriteBehindQueue>());

        return services;
    }

    /// <summary>
    /// Adds the Valkey-backed cache store implementation.
    /// </summary>
    /// <param name="services">The service collection.</param>
    /// <returns>The service collection for chaining.</returns>
    public static IServiceCollection AddProvcacheValkeyStore(this IServiceCollection services)
    {
        ArgumentNullException.ThrowIfNull(services);

        // Note: Actual Valkey store implementation would be registered here
        // services.AddSingleton<IProvcacheStore, ValkeyProvcacheStore>();

        return services;
    }

    /// <summary>
    /// Adds the Postgres repository implementation.
    /// </summary>
    /// <param name="services">The service collection.</param>
    /// <returns>The service collection for chaining.</returns>
    public static IServiceCollection AddProvcachePostgresRepository(this IServiceCollection services)
    {
        ArgumentNullException.ThrowIfNull(services);

        // Note: Actual Postgres repository implementation would be registered here
        // services.AddSingleton<IProvcacheRepository, PostgresProvcacheRepository>();

        return services;
    }
}
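
// Example (illustrative sketch): wiring Provcache in a host. Assumes a standard
// ASP.NET Core `builder` with an appsettings.json "Provcache" section; the lambda
// overload is shown as the alternative.
//
//     builder.Services
//         .AddProvcache(builder.Configuration)     // binds the "Provcache" section
//         .AddProvcacheValkeyStore()               // no-op until a store is registered
//         .AddProvcachePostgresRepository();
//
//     // or: builder.Services.AddProvcache(o => o.DefaultTtl = TimeSpan.FromHours(6));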

369 src/__Libraries/StellaOps.Provcache/ProvcacheTelemetry.cs Normal file
@@ -0,0 +1,369 @@
using System.Diagnostics;
using System.Diagnostics.Metrics;

namespace StellaOps.Provcache;

/// <summary>
/// Centralized telemetry for Provcache operations using OpenTelemetry conventions.
/// Provides activity tracing for cache operations and metrics for monitoring.
/// </summary>
public static class ProvcacheTelemetry
{
    /// <summary>
    /// Activity source name for OpenTelemetry tracing.
    /// </summary>
    public const string ActivitySourceName = "StellaOps.Provcache";

    /// <summary>
    /// Meter name for Prometheus metrics.
    /// </summary>
    public const string MeterName = "StellaOps.Provcache";

    #region Cache Result Constants

    public const string ResultHit = "hit";
    public const string ResultMiss = "miss";
    public const string ResultError = "error";
    public const string ResultCreated = "created";
    public const string ResultUpdated = "updated";
    public const string ResultInvalidated = "invalidated";

    #endregion

    #region Metric Names

    private const string RequestsTotalMetric = "provcache_requests_total";
    private const string HitsTotalMetric = "provcache_hits_total";
    private const string MissesTotalMetric = "provcache_misses_total";
    private const string InvalidationsTotalMetric = "provcache_invalidations_total";
    private const string WriteBehindTotalMetric = "provcache_writebehind_total";
    private const string WriteBehindQueueSizeMetric = "provcache_writebehind_queue_size";
    private const string LatencySecondsMetric = "provcache_latency_seconds";
    private const string EntriesSizeMetric = "provcache_entry_bytes";

    #endregion

    private static readonly ActivitySource ActivitySource = new(ActivitySourceName);
    private static readonly Meter Meter = new(MeterName);

    #region Counters

    private static readonly Counter<long> RequestsCounter = Meter.CreateCounter<long>(
        RequestsTotalMetric,
        unit: "count",
        description: "Total cache requests by operation and result.");

    private static readonly Counter<long> HitsCounter = Meter.CreateCounter<long>(
        HitsTotalMetric,
        unit: "count",
        description: "Total cache hits by source (valkey, postgres).");

    private static readonly Counter<long> MissesCounter = Meter.CreateCounter<long>(
        MissesTotalMetric,
        unit: "count",
        description: "Total cache misses.");

    private static readonly Counter<long> InvalidationsCounter = Meter.CreateCounter<long>(
        InvalidationsTotalMetric,
        unit: "count",
        description: "Total cache invalidations by type.");

    private static readonly Counter<long> WriteBehindCounter = Meter.CreateCounter<long>(
        WriteBehindTotalMetric,
        unit: "count",
        description: "Write-behind operations by result.");

    #endregion

    #region Histograms

    private static readonly Histogram<double> LatencyHistogram = Meter.CreateHistogram<double>(
        LatencySecondsMetric,
        unit: "s",
        description: "Cache operation latency in seconds.");

    private static readonly Histogram<long> EntrySizeHistogram = Meter.CreateHistogram<long>(
        EntriesSizeMetric,
        unit: "bytes",
        description: "Size of cache entries in bytes.");

    #endregion

    #region Gauges

    private static int _writeBehindQueueSize;

    /// <summary>
    /// Observable gauge for write-behind queue size.
    /// </summary>
    public static readonly ObservableGauge<int> WriteBehindQueueGauge = Meter.CreateObservableGauge(
        WriteBehindQueueSizeMetric,
        () => _writeBehindQueueSize,
        unit: "items",
        description: "Current write-behind queue size.");

    /// <summary>
    /// Update the write-behind queue size gauge.
    /// </summary>
    /// <param name="size">Current queue size.</param>
    public static void SetWriteBehindQueueSize(int size)
    {
        _writeBehindQueueSize = size;
    }

    #endregion

    #region Activity Tracing

    /// <summary>
    /// Start a cache lookup activity.
    /// </summary>
    /// <param name="veriKey">The VeriKey being looked up.</param>
    /// <returns>The activity or null if tracing is not enabled.</returns>
    public static Activity? StartGetActivity(string veriKey)
    {
        var activity = ActivitySource.StartActivity("provcache.get", ActivityKind.Internal);
        if (activity is null) return null;

        activity.SetTag("provcache.verikey", TruncateVeriKey(veriKey));
        return activity;
    }

    /// <summary>
    /// Start a cache store activity.
    /// </summary>
    /// <param name="veriKey">The VeriKey being stored.</param>
    /// <param name="trustScore">The trust score of the entry.</param>
    /// <returns>The activity or null if tracing is not enabled.</returns>
    public static Activity? StartSetActivity(string veriKey, int trustScore)
    {
        var activity = ActivitySource.StartActivity("provcache.set", ActivityKind.Internal);
        if (activity is null) return null;

        activity.SetTag("provcache.verikey", TruncateVeriKey(veriKey));
        activity.SetTag("provcache.trust_score", trustScore);
        return activity;
    }

    /// <summary>
    /// Start an invalidation activity.
    /// </summary>
    /// <param name="invalidationType">The type of invalidation (verikey, policy_hash, signer_set_hash, feed_epoch).</param>
    /// <param name="targetValue">The target value for invalidation.</param>
    /// <returns>The activity or null if tracing is not enabled.</returns>
    public static Activity? StartInvalidateActivity(string invalidationType, string? targetValue)
    {
        var activity = ActivitySource.StartActivity("provcache.invalidate", ActivityKind.Internal);
        if (activity is null) return null;

        activity.SetTag("provcache.invalidation_type", invalidationType);
        if (!string.IsNullOrWhiteSpace(targetValue))
        {
            activity.SetTag("provcache.target", TruncateVeriKey(targetValue));
        }

        return activity;
    }

    /// <summary>
    /// Start a write-behind flush activity.
    /// </summary>
    /// <param name="batchSize">Number of entries in the batch.</param>
    /// <returns>The activity or null if tracing is not enabled.</returns>
    public static Activity? StartWriteBehindFlushActivity(int batchSize)
    {
        var activity = ActivitySource.StartActivity("provcache.writebehind.flush", ActivityKind.Internal);
        if (activity is null) return null;

        activity.SetTag("provcache.batch_size", batchSize);
        return activity;
    }

    /// <summary>
    /// Start a VeriKey construction activity.
    /// </summary>
    /// <returns>The activity or null if tracing is not enabled.</returns>
    public static Activity? StartVeriKeyBuildActivity()
    {
        return ActivitySource.StartActivity("provcache.verikey.build", ActivityKind.Internal);
    }

    /// <summary>
    /// Start a DecisionDigest construction activity.
    /// </summary>
    /// <returns>The activity or null if tracing is not enabled.</returns>
    public static Activity? StartDecisionDigestBuildActivity()
    {
        return ActivitySource.StartActivity("provcache.digest.build", ActivityKind.Internal);
    }

    /// <summary>
    /// Mark an activity as a cache hit.
    /// </summary>
    /// <param name="activity">The activity to mark.</param>
    /// <param name="source">The source of the hit (valkey, postgres).</param>
    public static void MarkCacheHit(Activity? activity, string source)
    {
        activity?.SetTag("provcache.result", ResultHit);
        activity?.SetTag("provcache.source", source);
    }

    /// <summary>
    /// Mark an activity as a cache miss.
    /// </summary>
    /// <param name="activity">The activity to mark.</param>
    public static void MarkCacheMiss(Activity? activity)
    {
        activity?.SetTag("provcache.result", ResultMiss);
    }

    /// <summary>
    /// Mark an activity as having an error.
    /// </summary>
    /// <param name="activity">The activity to mark.</param>
    /// <param name="errorMessage">The error message.</param>
    public static void MarkError(Activity? activity, string errorMessage)
    {
        activity?.SetStatus(ActivityStatusCode.Error, errorMessage);
        activity?.SetTag("provcache.result", ResultError);
    }

    #endregion

    #region Metrics Recording

    /// <summary>
    /// Record a cache request.
    /// </summary>
    /// <param name="operation">The operation type (get, set, invalidate).</param>
    /// <param name="result">The result (hit, miss, error, created, etc.).</param>
    public static void RecordRequest(string operation, string result)
    {
        if (string.IsNullOrWhiteSpace(operation) || string.IsNullOrWhiteSpace(result))
            return;

        var tags = new TagList
        {
            { "operation", operation },
            { "result", result }
        };

        RequestsCounter.Add(1, tags);
    }

    /// <summary>
    /// Record a cache hit.
    /// </summary>
    /// <param name="source">The source of the hit (valkey, postgres).</param>
    public static void RecordHit(string source)
    {
        if (string.IsNullOrWhiteSpace(source))
            return;

        var tags = new TagList
        {
            { "source", source }
        };

        HitsCounter.Add(1, tags);
    }

    /// <summary>
    /// Record a cache miss.
    /// </summary>
    public static void RecordMiss()
    {
        MissesCounter.Add(1);
    }

    /// <summary>
    /// Record an invalidation.
    /// </summary>
    /// <param name="invalidationType">The type of invalidation.</param>
    /// <param name="count">Number of entries invalidated.</param>
    public static void RecordInvalidation(string invalidationType, long count = 1)
    {
        if (string.IsNullOrWhiteSpace(invalidationType))
            return;

        var tags = new TagList
        {
            { "type", invalidationType }
        };

        InvalidationsCounter.Add(count, tags);
    }

    /// <summary>
    /// Record a write-behind operation result.
    /// </summary>
    /// <param name="result">The result (ok, retry, failed).</param>
    /// <param name="count">Number of entries.</param>
    public static void RecordWriteBehind(string result, long count = 1)
    {
        if (string.IsNullOrWhiteSpace(result))
            return;

        var tags = new TagList
        {
            { "result", result }
        };

        WriteBehindCounter.Add(count, tags);
    }

    /// <summary>
    /// Record operation latency.
    /// </summary>
    /// <param name="operation">The operation type.</param>
    /// <param name="duration">The operation duration.</param>
    public static void RecordLatency(string operation, TimeSpan duration)
    {
        if (string.IsNullOrWhiteSpace(operation))
            return;

        var seconds = duration.TotalSeconds;
        if (double.IsNaN(seconds) || double.IsInfinity(seconds) || seconds < 0)
            seconds = 0d;

        var tags = new TagList
        {
            { "operation", operation }
        };

        LatencyHistogram.Record(seconds, tags);
    }

    /// <summary>
    /// Record cache entry size.
    /// </summary>
    /// <param name="bytes">The size in bytes.</param>
    public static void RecordEntrySize(long bytes)
    {
        if (bytes < 0)
            return;

        EntrySizeHistogram.Record(bytes);
    }

    #endregion

    #region Helper Methods

    /// <summary>
    /// Truncate a VeriKey for logging (avoid huge trace payloads).
    /// </summary>
    private static string TruncateVeriKey(string? veriKey)
    {
        if (string.IsNullOrWhiteSpace(veriKey))
            return "(empty)";

        // Keep the "sha256:" prefix and the first 16 chars of the hash (7 + 16 = 23)
        if (veriKey.StartsWith("sha256:", StringComparison.OrdinalIgnoreCase) && veriKey.Length > 23)
            return veriKey[..23] + "...";

        return veriKey.Length > 32 ? veriKey[..32] + "..." : veriKey;
    }

    #endregion
}
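
// Example (illustrative sketch): exporting these signals with the OpenTelemetry
// SDK. Assumes the OpenTelemetry.Extensions.Hosting package is referenced; only
// the source and meter names come from this file.
//
//     builder.Services.AddOpenTelemetry()
//         .WithTracing(t => t.AddSource(ProvcacheTelemetry.ActivitySourceName))
//         .WithMetrics(m => m.AddMeter(ProvcacheTelemetry.MeterName));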

@@ -0,0 +1,31 @@
<?xml version="1.0" encoding="utf-8"?>
<Project Sdk="Microsoft.NET.Sdk">

  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
    <LangVersion>preview</LangVersion>
    <TreatWarningsAsErrors>false</TreatWarningsAsErrors>
    <RootNamespace>StellaOps.Provcache</RootNamespace>
    <AssemblyName>StellaOps.Provcache</AssemblyName>
    <Description>Provenance Cache for StellaOps - Maximizing trust evidence density</Description>
  </PropertyGroup>

  <ItemGroup>
    <PackageReference Include="Microsoft.Extensions.Configuration.Abstractions" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.Hosting.Abstractions" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.Options" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.Options.ConfigurationExtensions" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.Options.DataAnnotations" Version="10.0.0" />
  </ItemGroup>

  <ItemGroup>
    <ProjectReference Include="../StellaOps.Canonical.Json/StellaOps.Canonical.Json.csproj" />
    <ProjectReference Include="../StellaOps.Cryptography/StellaOps.Cryptography.csproj" />
    <ProjectReference Include="../StellaOps.Messaging/StellaOps.Messaging.csproj" />
  </ItemGroup>

</Project>

389 src/__Libraries/StellaOps.Provcache/VeriKeyBuilder.cs Normal file
@@ -0,0 +1,389 @@
using System.Security.Cryptography;
using System.Text;

namespace StellaOps.Provcache;

/// <summary>
/// Fluent builder for constructing a VeriKey (provenance identity key).
/// VeriKey is a composite hash that uniquely identifies a provenance decision context.
/// </summary>
/// <remarks>
/// <para>
/// VeriKey = SHA256(source_hash || sbom_hash || vex_hash_set_hash || merge_policy_hash || signer_set_hash || time_window)
/// </para>
/// <para>
/// Each component ensures cache invalidation when relevant inputs change:
/// <list type="bullet">
/// <item><c>source_hash</c>: Different artifacts get different keys</item>
/// <item><c>sbom_hash</c>: SBOM changes (new packages) create a new key</item>
/// <item><c>vex_hash_set</c>: VEX updates create a new key</item>
/// <item><c>policy_hash</c>: Policy changes create a new key</item>
/// <item><c>signer_set_hash</c>: Key rotation creates a new key (security)</item>
/// <item><c>time_window</c>: Temporal bucketing enables controlled expiry</item>
/// </list>
/// </para>
/// </remarks>
public sealed class VeriKeyBuilder
{
    private string? _sourceHash;
    private string? _sbomHash;
    private string? _vexHashSetHash;
    private string? _mergePolicyHash;
    private string? _signerSetHash;
    private string? _timeWindow;
    private readonly ProvcacheOptions _options;

    /// <summary>
    /// Creates a new VeriKeyBuilder with default options.
    /// </summary>
    public VeriKeyBuilder() : this(new ProvcacheOptions())
    {
    }

    /// <summary>
    /// Creates a new VeriKeyBuilder with the specified options.
    /// </summary>
    /// <param name="options">Provcache configuration options.</param>
    public VeriKeyBuilder(ProvcacheOptions options)
    {
        _options = options ?? throw new ArgumentNullException(nameof(options));
    }

    /// <summary>
    /// Sets the source artifact digest (image/artifact content-addressed hash).
    /// </summary>
    /// <param name="sourceHash">The artifact digest (e.g., sha256:abc123...).</param>
    /// <returns>This builder for fluent chaining.</returns>
    /// <exception cref="ArgumentException">If sourceHash is null or empty.</exception>
    public VeriKeyBuilder WithSourceHash(string sourceHash)
    {
        if (string.IsNullOrWhiteSpace(sourceHash))
            throw new ArgumentException("Source hash cannot be null or empty.", nameof(sourceHash));

        _sourceHash = NormalizeHash(sourceHash);
        return this;
    }

    /// <summary>
    /// Sets a pre-computed SBOM canonical hash.
    /// Use <see cref="WithSbomBytes"/> to compute the hash from raw SBOM content instead.
    /// </summary>
    /// <param name="sbomHash">The SBOM canonical hash.</param>
    /// <returns>This builder for fluent chaining.</returns>
    public VeriKeyBuilder WithSbomHash(string sbomHash)
    {
        if (string.IsNullOrWhiteSpace(sbomHash))
            throw new ArgumentException("SBOM hash cannot be null or empty.", nameof(sbomHash));

        _sbomHash = NormalizeHash(sbomHash);
        return this;
    }

    /// <summary>
    /// Computes the SBOM hash from raw SBOM bytes.
    /// The caller is expected to provide canonically serialized content.
    /// </summary>
    /// <param name="sbomBytes">Raw SBOM content bytes.</param>
    /// <returns>This builder for fluent chaining.</returns>
    public VeriKeyBuilder WithSbomBytes(ReadOnlySpan<byte> sbomBytes)
    {
        _sbomHash = ComputeHash(sbomBytes);
        return this;
    }

    /// <summary>
    /// Sets the VEX hash set hash (sorted aggregation of VEX statement hashes).
    /// </summary>
    /// <param name="vexHashSetHash">The pre-computed VEX hash set hash.</param>
    /// <returns>This builder for fluent chaining.</returns>
    public VeriKeyBuilder WithVexHashSet(string vexHashSetHash)
    {
        if (string.IsNullOrWhiteSpace(vexHashSetHash))
            throw new ArgumentException("VEX hash set hash cannot be null or empty.", nameof(vexHashSetHash));

        _vexHashSetHash = NormalizeHash(vexHashSetHash);
        return this;
    }

    /// <summary>
    /// Computes the VEX hash set from individual VEX statement hashes.
    /// Hashes are sorted lexicographically before aggregation for determinism.
    /// </summary>
    /// <param name="vexStatementHashes">Individual VEX statement hashes.</param>
    /// <returns>This builder for fluent chaining.</returns>
    public VeriKeyBuilder WithVexStatementHashes(IEnumerable<string> vexStatementHashes)
    {
        ArgumentNullException.ThrowIfNull(vexStatementHashes);

        // Sort hashes for deterministic aggregation
        var sortedHashes = vexStatementHashes
            .Where(h => !string.IsNullOrWhiteSpace(h))
            .Select(NormalizeHash)
            .Order(StringComparer.Ordinal)
            .ToList();

        if (sortedHashes.Count == 0)
        {
            // Empty VEX set gets a well-known hash
            _vexHashSetHash = ComputeHash(Encoding.UTF8.GetBytes("empty-vex-set"));
        }
        else
        {
            // Concatenate sorted hashes and hash the result
            var concatenated = string.Join("|", sortedHashes);
            _vexHashSetHash = ComputeHash(Encoding.UTF8.GetBytes(concatenated));
        }

        return this;
    }

    /// <summary>
    /// Sets the merge policy hash (PolicyBundle digest).
    /// </summary>
    /// <param name="policyHash">The policy bundle hash.</param>
    /// <returns>This builder for fluent chaining.</returns>
    public VeriKeyBuilder WithMergePolicyHash(string policyHash)
    {
        if (string.IsNullOrWhiteSpace(policyHash))
            throw new ArgumentException("Policy hash cannot be null or empty.", nameof(policyHash));

        _mergePolicyHash = NormalizeHash(policyHash);
        return this;
    }

    /// <summary>
    /// Computes the policy hash from raw policy bundle bytes.
    /// </summary>
    /// <param name="policyBytes">Raw policy bundle content bytes.</param>
    /// <returns>This builder for fluent chaining.</returns>
    public VeriKeyBuilder WithMergePolicyBytes(ReadOnlySpan<byte> policyBytes)
    {
        _mergePolicyHash = ComputeHash(policyBytes);
        return this;
    }

    /// <summary>
    /// Sets the signer set hash (sorted certificate chain hashes).
    /// </summary>
    /// <param name="signerSetHash">The pre-computed signer set hash.</param>
    /// <returns>This builder for fluent chaining.</returns>
    public VeriKeyBuilder WithSignerSetHash(string signerSetHash)
    {
        if (string.IsNullOrWhiteSpace(signerSetHash))
            throw new ArgumentException("Signer set hash cannot be null or empty.", nameof(signerSetHash));

        _signerSetHash = NormalizeHash(signerSetHash);
        return this;
    }

    /// <summary>
    /// Computes the signer set hash from individual certificate hashes.
    /// Hashes are sorted lexicographically before aggregation for determinism.
    /// </summary>
    /// <param name="certificateHashes">Individual certificate hashes.</param>
    /// <returns>This builder for fluent chaining.</returns>
    public VeriKeyBuilder WithCertificateHashes(IEnumerable<string> certificateHashes)
    {
        ArgumentNullException.ThrowIfNull(certificateHashes);

        // Sort hashes for deterministic aggregation
        var sortedHashes = certificateHashes
            .Where(h => !string.IsNullOrWhiteSpace(h))
            .Select(NormalizeHash)
            .Order(StringComparer.Ordinal)
            .ToList();

        if (sortedHashes.Count == 0)
        {
            // Empty signer set gets a well-known hash
            _signerSetHash = ComputeHash(Encoding.UTF8.GetBytes("empty-signer-set"));
        }
        else
        {
            // Concatenate sorted hashes and hash the result
            var concatenated = string.Join("|", sortedHashes);
            _signerSetHash = ComputeHash(Encoding.UTF8.GetBytes(concatenated));
        }

        return this;
    }

    /// <summary>
    /// Sets the time window for epoch bucketing.
    /// </summary>
    /// <param name="timeWindow">The time window identifier (e.g., "2024-12-24T12:00:00Z").</param>
    /// <returns>This builder for fluent chaining.</returns>
    public VeriKeyBuilder WithTimeWindow(string timeWindow)
    {
        if (string.IsNullOrWhiteSpace(timeWindow))
            throw new ArgumentException("Time window cannot be null or empty.", nameof(timeWindow));

        _timeWindow = timeWindow;
        return this;
    }

    /// <summary>
    /// Computes the time window from a timestamp using the configured bucket size.
    /// </summary>
    /// <param name="timestamp">The timestamp to bucket.</param>
    /// <returns>This builder for fluent chaining.</returns>
    public VeriKeyBuilder WithTimeWindow(DateTimeOffset timestamp)
    {
        _timeWindow = _options.ComputeTimeWindow(timestamp);
        return this;
    }

    /// <summary>
    /// Builds the final VeriKey by hashing all components together.
    /// </summary>
    /// <returns>The computed VeriKey in the format "sha256:<hex>".</returns>
    /// <exception cref="InvalidOperationException">If required components are missing.</exception>
    public string Build()
    {
        ValidateRequiredComponents();

        // Build the composite hash input: all components concatenated with delimiters
        var components = new StringBuilder();
        components.Append("v1|"); // Version prefix for future compatibility
        components.Append(_sourceHash);
        components.Append('|');
        components.Append(_sbomHash);
        components.Append('|');
        components.Append(_vexHashSetHash);
        components.Append('|');
        components.Append(_mergePolicyHash);
        components.Append('|');
        components.Append(_signerSetHash);
        components.Append('|');
        components.Append(_timeWindow);

        var compositeBytes = Encoding.UTF8.GetBytes(components.ToString());
        return ComputeHash(compositeBytes);
    }

    /// <summary>
    /// Builds a <see cref="VeriKeyComponents"/> record with all individual components.
    /// Useful for debugging and serialization.
    /// </summary>
    /// <returns>A record containing all VeriKey components.</returns>
    public VeriKeyComponents BuildWithComponents()
    {
        ValidateRequiredComponents();

        return new VeriKeyComponents
        {
            VeriKey = Build(),
            SourceHash = _sourceHash!,
            SbomHash = _sbomHash!,
            VexHashSetHash = _vexHashSetHash!,
            MergePolicyHash = _mergePolicyHash!,
            SignerSetHash = _signerSetHash!,
            TimeWindow = _timeWindow!
        };
    }

    /// <summary>
    /// Resets the builder to its initial state.
    /// </summary>
    /// <returns>This builder for fluent chaining.</returns>
    public VeriKeyBuilder Reset()
    {
        _sourceHash = null;
        _sbomHash = null;
        _vexHashSetHash = null;
        _mergePolicyHash = null;
        _signerSetHash = null;
        _timeWindow = null;
        return this;
    }
|
||||
|
||||
private void ValidateRequiredComponents()
|
||||
{
|
||||
var missing = new List<string>();
|
||||
|
||||
if (string.IsNullOrWhiteSpace(_sourceHash))
|
||||
missing.Add("SourceHash");
|
||||
if (string.IsNullOrWhiteSpace(_sbomHash))
|
||||
missing.Add("SbomHash");
|
||||
if (string.IsNullOrWhiteSpace(_vexHashSetHash))
|
||||
missing.Add("VexHashSetHash");
|
||||
if (string.IsNullOrWhiteSpace(_mergePolicyHash))
|
||||
missing.Add("MergePolicyHash");
|
||||
if (string.IsNullOrWhiteSpace(_signerSetHash))
|
||||
missing.Add("SignerSetHash");
|
||||
if (string.IsNullOrWhiteSpace(_timeWindow))
|
||||
missing.Add("TimeWindow");
|
||||
|
||||
if (missing.Count > 0)
|
||||
{
|
||||
throw new InvalidOperationException(
|
||||
$"Cannot build VeriKey: missing required components: {string.Join(", ", missing)}. " +
|
||||
"Use the With* methods to set all required components before calling Build().");
|
||||
}
|
||||
}
|
||||
|
||||
private static string ComputeHash(ReadOnlySpan<byte> data)
|
||||
{
|
||||
Span<byte> hash = stackalloc byte[32];
|
||||
SHA256.HashData(data, hash);
|
||||
return $"sha256:{Convert.ToHexStringLower(hash)}";
|
||||
}
    private static string NormalizeHash(string hash)
    {
        // If the hash already carries the algorithm prefix, lowercase the hex part and return
        if (hash.StartsWith("sha256:", StringComparison.OrdinalIgnoreCase))
        {
            return $"sha256:{hash[7..].ToLowerInvariant()}";
        }

        // Assume SHA-256 if there is no prefix and the value looks like 64 hex characters
        if (hash.Length == 64 && hash.All(char.IsAsciiHexDigit))
        {
            return $"sha256:{hash.ToLowerInvariant()}";
        }

        // Return as-is (lowercased) if not recognized; it may be another hash format
        return hash.ToLowerInvariant();
    }
}
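
Reviewer note - a minimal usage sketch of the builder, for orientation. It assumes the WithSourceHash/WithSbomHash/WithVexHashSetHash setters and the options-taking constructor defined earlier in this file (not visible in this hunk); the digests below are placeholders.

// Sketch only: the constructor shape and the three leading setters are assumed
// from earlier in the file; everything else is shown in this hunk.
var veriKey = new VeriKeyBuilder(options)
    .WithSourceHash(new string('a', 64))            // bare 64-char hex normalizes to "sha256:..."
    .WithSbomHash($"sha256:{new string('b', 64)}")
    .WithVexHashSetHash(new string('c', 64))
    .WithMergePolicyBytes("policy-bundle"u8)        // hashes the raw bytes directly
    .WithCertificateHashes(new[] { new string('d', 64), new string('e', 64) })
    .WithTimeWindow(DateTimeOffset.UtcNow)          // bucketed via options.ComputeTimeWindow
    .Build();                                       // "sha256:<hex>" over the "v1|..." composite

// Build() throws InvalidOperationException naming any missing components.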

/// <summary>
/// Record containing all VeriKey components for debugging and serialization.
/// </summary>
public sealed record VeriKeyComponents
{
    /// <summary>
    /// The final computed VeriKey.
    /// </summary>
    public required string VeriKey { get; init; }

    /// <summary>
    /// Source artifact digest.
    /// </summary>
    public required string SourceHash { get; init; }

    /// <summary>
    /// SBOM canonical hash.
    /// </summary>
    public required string SbomHash { get; init; }

    /// <summary>
    /// VEX hash set hash.
    /// </summary>
    public required string VexHashSetHash { get; init; }

    /// <summary>
    /// Policy bundle hash.
    /// </summary>
    public required string MergePolicyHash { get; init; }

    /// <summary>
    /// Signer certificate set hash.
    /// </summary>
    public required string SignerSetHash { get; init; }

    /// <summary>
    /// Time window identifier.
    /// </summary>
    public required string TimeWindow { get; init; }
}
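
Reviewer note - since the record exists "for debugging and serialization", a hedged round-trip sketch with System.Text.Json (required init-only members serialize and deserialize out of the box on modern .NET):

// Sketch only; 'builder' is a fully populated VeriKeyBuilder as above.
// Requires: using System.Text.Json;
var components = builder.BuildWithComponents();
var json = JsonSerializer.Serialize(components, new JsonSerializerOptions { WriteIndented = true });
var roundTripped = JsonSerializer.Deserialize<VeriKeyComponents>(json)!;
// roundTripped.VeriKey == components.VeriKey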

262  src/__Libraries/StellaOps.Provcache/WriteBehindQueue.cs  Normal file
@@ -0,0 +1,262 @@
using System.Threading.Channels;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;

namespace StellaOps.Provcache;

/// <summary>
/// Background service that manages write-behind persistence for Provcache entries.
/// Batches writes to Postgres for efficiency and provides retry logic for transient failures.
/// </summary>
public sealed class WriteBehindQueue : BackgroundService, IWriteBehindQueue
{
    private readonly Channel<WriteBehindItem> _channel;
    private readonly IProvcacheRepository _repository;
    private readonly ProvcacheOptions _options;
    private readonly ILogger<WriteBehindQueue> _logger;
    private readonly TimeProvider _timeProvider;

    // Metrics
    private long _totalEnqueued;
    private long _totalPersisted;
    private long _totalFailed;
    private long _totalRetries;
    private long _totalBatches;
    private long _currentQueueDepth;

    public WriteBehindQueue(
        IProvcacheRepository repository,
        IOptions<ProvcacheOptions> options,
        ILogger<WriteBehindQueue> logger,
        TimeProvider? timeProvider = null)
    {
        _repository = repository ?? throw new ArgumentNullException(nameof(repository));
        _options = options?.Value ?? throw new ArgumentNullException(nameof(options));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
        _timeProvider = timeProvider ?? TimeProvider.System;

        // Bounded channel to provide backpressure
        _channel = Channel.CreateBounded<WriteBehindItem>(new BoundedChannelOptions(_options.WriteBehindQueueCapacity)
        {
            FullMode = BoundedChannelFullMode.Wait,
            SingleWriter = false,
            SingleReader = true
        });
    }

    /// <inheritdoc />
    public ValueTask EnqueueAsync(ProvcacheEntry entry, CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(entry);

        var item = new WriteBehindItem
        {
            Entry = entry,
            EnqueuedAt = _timeProvider.GetUtcNow(),
            RetryCount = 0
        };

        Interlocked.Increment(ref _totalEnqueued);
        Interlocked.Increment(ref _currentQueueDepth);

        return _channel.Writer.WriteAsync(item, cancellationToken);
    }

    /// <inheritdoc />
    public WriteBehindMetrics GetMetrics()
    {
        return new WriteBehindMetrics
        {
            TotalEnqueued = Interlocked.Read(ref _totalEnqueued),
            TotalPersisted = Interlocked.Read(ref _totalPersisted),
            TotalFailed = Interlocked.Read(ref _totalFailed),
            TotalRetries = Interlocked.Read(ref _totalRetries),
            TotalBatches = Interlocked.Read(ref _totalBatches),
            CurrentQueueDepth = Interlocked.Read(ref _currentQueueDepth),
            Timestamp = _timeProvider.GetUtcNow()
        };
    }

    /// <summary>
    /// Main processing loop that reads from the channel and batches writes.
    /// </summary>
    protected override async Task ExecuteAsync(CancellationToken stoppingToken)
    {
        _logger.LogInformation(
            "Write-behind queue started with batch size {BatchSize}, interval {IntervalMs}ms",
            _options.WriteBehindBatchSize,
            _options.WriteBehindFlushIntervalMs);

        var batch = new List<WriteBehindItem>(_options.WriteBehindBatchSize);
        var flushInterval = TimeSpan.FromMilliseconds(_options.WriteBehindFlushIntervalMs);

        while (!stoppingToken.IsCancellationRequested)
        {
            try
            {
                batch.Clear();

                // Read items until batch is full or timeout
                using var cts = CancellationTokenSource.CreateLinkedTokenSource(stoppingToken);
                cts.CancelAfter(flushInterval);

                try
                {
                    while (batch.Count < _options.WriteBehindBatchSize)
                    {
                        var item = await _channel.Reader.ReadAsync(cts.Token).ConfigureAwait(false);
                        batch.Add(item);
                        Interlocked.Decrement(ref _currentQueueDepth);
                    }
                }
                catch (OperationCanceledException) when (!stoppingToken.IsCancellationRequested)
                {
                    // Timeout reached, process current batch
                }

                if (batch.Count > 0)
                {
                    await ProcessBatchAsync(batch, stoppingToken).ConfigureAwait(false);
                }
            }
            catch (OperationCanceledException) when (stoppingToken.IsCancellationRequested)
            {
                break;
            }
            catch (Exception ex)
            {
                _logger.LogError(ex, "Error in write-behind queue processing loop");
                await Task.Delay(1000, stoppingToken).ConfigureAwait(false); // Backoff on error
            }
        }

        // Drain remaining items on shutdown
        await DrainAsync(CancellationToken.None).ConfigureAwait(false);

        _logger.LogInformation("Write-behind queue stopped");
    }

    /// <summary>
    /// Processes a batch of items with retry logic.
    /// </summary>
    private async Task ProcessBatchAsync(List<WriteBehindItem> batch, CancellationToken cancellationToken)
    {
        var entries = batch.Select(b => b.Entry).ToList();

        try
        {
            await _repository.UpsertManyAsync(entries, cancellationToken).ConfigureAwait(false);

            Interlocked.Add(ref _totalPersisted, batch.Count);
            Interlocked.Increment(ref _totalBatches);

            _logger.LogDebug(
                "Write-behind batch persisted {Count} entries",
                batch.Count);
        }
        catch (Exception ex)
        {
            _logger.LogWarning(
                ex,
                "Write-behind batch failed for {Count} entries, scheduling retries",
                batch.Count);

            // Re-enqueue failed items for retry
            foreach (var item in batch)
            {
                if (item.RetryCount < _options.WriteBehindMaxRetries)
                {
                    var retryItem = item with { RetryCount = item.RetryCount + 1 };
                    Interlocked.Increment(ref _totalRetries);

                    if (_channel.Writer.TryWrite(retryItem))
                    {
                        Interlocked.Increment(ref _currentQueueDepth);
                    }
                    else
                    {
                        Interlocked.Increment(ref _totalFailed);
                        _logger.LogError(
                            "Write-behind queue full, dropping entry for VeriKey {VeriKey}",
                            item.Entry.VeriKey);
                    }
                }
                else
                {
                    Interlocked.Increment(ref _totalFailed);
                    _logger.LogError(
                        "Write-behind max retries exceeded for VeriKey {VeriKey}",
                        item.Entry.VeriKey);
                }
            }
        }
    }

    /// <summary>
    /// Drains remaining items from the queue during shutdown.
    /// </summary>
    private async Task DrainAsync(CancellationToken cancellationToken)
    {
        var batch = new List<WriteBehindItem>();

        while (_channel.Reader.TryRead(out var item))
        {
            batch.Add(item);
            Interlocked.Decrement(ref _currentQueueDepth);

            if (batch.Count >= _options.WriteBehindBatchSize)
            {
                await ProcessBatchAsync(batch, cancellationToken).ConfigureAwait(false);
                batch.Clear();
            }
        }

        if (batch.Count > 0)
        {
            await ProcessBatchAsync(batch, cancellationToken).ConfigureAwait(false);
        }

        _logger.LogInformation("Write-behind queue drained");
    }
}

/// <summary>
/// Interface for write-behind queue operations.
/// </summary>
public interface IWriteBehindQueue
{
    /// <summary>
    /// Enqueues an entry for asynchronous persistence.
    /// </summary>
    ValueTask EnqueueAsync(ProvcacheEntry entry, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets current queue metrics.
    /// </summary>
    WriteBehindMetrics GetMetrics();
}

/// <summary>
/// Item in the write-behind queue with retry metadata.
/// </summary>
internal sealed record WriteBehindItem
{
    public required ProvcacheEntry Entry { get; init; }
    public required DateTimeOffset EnqueuedAt { get; init; }
    public required int RetryCount { get; init; }
}

/// <summary>
/// Metrics for the write-behind queue.
/// </summary>
public sealed record WriteBehindMetrics
{
    public required long TotalEnqueued { get; init; }
    public required long TotalPersisted { get; init; }
    public required long TotalFailed { get; init; }
    public required long TotalRetries { get; init; }
    public required long TotalBatches { get; init; }
    public required long CurrentQueueDepth { get; init; }
    public required DateTimeOffset Timestamp { get; init; }
}
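
Reviewer note - a hedged wiring sketch for the queue in a generic host. 'services' is an IServiceCollection; the IProvcacheRepository registration and the "Provcache" configuration section name are assumptions, only the types in this file are real.

// Sketch only: assumes an IProvcacheRepository registration exists elsewhere
// and that ProvcacheOptions binds from a "Provcache" configuration section.
services.AddOptions<ProvcacheOptions>().BindConfiguration("Provcache");
services.AddSingleton<WriteBehindQueue>();
services.AddSingleton<IWriteBehindQueue>(sp => sp.GetRequiredService<WriteBehindQueue>());
services.AddHostedService(sp => sp.GetRequiredService<WriteBehindQueue>());

// Producers then enqueue without blocking on Postgres; the bounded channel
// applies backpressure once WriteBehindQueueCapacity is reached:
// await queue.EnqueueAsync(entry, cancellationToken);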

@@ -160,106 +160,3 @@ public abstract class ConnectorResilienceTestBase : IDisposable
        GC.SuppressFinalize(this);
    }
}

/// <summary>
/// Base class for connector security tests.
/// Tests URL allowlist, redirect handling, max payload size, decompression bombs.
/// </summary>
public abstract class ConnectorSecurityTestBase : IDisposable
{
    protected readonly ConnectorHttpFixture HttpFixture;
    private bool _disposed;

    protected ConnectorSecurityTestBase()
    {
        HttpFixture = new ConnectorHttpFixture();
    }

    /// <summary>
    /// Attempts to fetch from URL and returns whether it was allowed.
    /// </summary>
    protected abstract Task<bool> IsUrlAllowedAsync(string url, CancellationToken ct = default);

    /// <summary>
    /// Gets the maximum allowed payload size in bytes.
    /// </summary>
    protected abstract long MaxPayloadSizeBytes { get; }

    /// <summary>
    /// Gets the list of allowed URL patterns/domains.
    /// </summary>
    protected abstract IReadOnlyList<string> AllowedUrlPatterns { get; }

    [Fact]
    public async Task AllowlistedUrl_IsAccepted()
    {
        foreach (var pattern in AllowedUrlPatterns)
        {
            var url = pattern.Replace("*", "test");
            HttpFixture.AddJsonResponse(url, "{}");

            var allowed = await IsUrlAllowedAsync(url);
            allowed.Should().BeTrue($"URL '{url}' should be allowed");
        }
    }

    [Fact]
    public async Task NonAllowlistedUrl_IsRejected()
    {
        var disallowedUrls = new[]
        {
            "https://evil.example.com/api",
            "http://malicious.test/data",
            "file:///etc/passwd",
            "data:text/html,<script>alert(1)</script>"
        };

        foreach (var url in disallowedUrls)
        {
            HttpFixture.AddJsonResponse(url, "{}");

            var allowed = await IsUrlAllowedAsync(url);
            allowed.Should().BeFalse($"URL '{url}' should be rejected");
        }
    }

    [Fact]
    public async Task OversizedPayload_IsRejected()
    {
        // Create a payload larger than the configured maximum
        var largePayload = new string('x', (int)MaxPayloadSizeBytes + 1000);
        HttpFixture.AddJsonResponse("https://test.example.com/*", $"{{\"data\":\"{largePayload}\"}}");

        // The connector should either return false or throw; both count as a
        // rejection. The exact behavior is implementation-specific.
        var rejected = false;
        try
        {
            rejected = !await IsUrlAllowedAsync("https://test.example.com/api");
        }
        catch (Exception)
        {
            rejected = true;
        }

        rejected.Should().BeTrue("payloads above MaxPayloadSizeBytes should be rejected");
    }

    [Fact]
    public async Task DecompressionBomb_IsRejected()
    {
        // Create a small gzipped payload that expands to a large size.
        // This is a simplified test - real decompression bombs are more sophisticated.
        var smallCompressed = "{}"; // In reality, this would be crafted maliciously
        HttpFixture.AddGzipJsonResponse("https://test.example.com/*", smallCompressed);

        // The connector should detect and reject decompression bombs. Exact
        // rejection semantics vary by connector, so this base test only
        // requires the fetch path to complete without unbounded expansion.
        await IsUrlAllowedAsync("https://test.example.com/api");
    }

    [Fact]
    public async Task HttpsRedirectToHttp_IsRejected()
    {
        // HTTPS -> HTTP downgrade redirects must be rejected. This requires
        // redirect handling in the concrete connector; placeholder until the
        // shared fixture can express redirects.
        await Task.CompletedTask;
    }

    public void Dispose()
    {
        if (_disposed) return;
        HttpFixture.Dispose();
        _disposed = true;
        GC.SuppressFinalize(this);
    }
}
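
Reviewer note - to make the abstract contract concrete, a hypothetical subclass. MyFeedConnector, TryFetchAsync, and HttpFixture.CreateClient() are invented for illustration; only the abstract surface above is real.

public sealed class MyFeedConnectorSecurityTests : ConnectorSecurityTestBase
{
    protected override long MaxPayloadSizeBytes => 10 * 1024 * 1024; // 10 MiB

    protected override IReadOnlyList<string> AllowedUrlPatterns { get; } =
        new[] { "https://feeds.example.org/*" };

    protected override async Task<bool> IsUrlAllowedAsync(string url, CancellationToken ct = default)
    {
        // Hypothetical connector wired against the shared HTTP fixture.
        var connector = new MyFeedConnector(HttpFixture.CreateClient(), AllowedUrlPatterns);
        return await connector.TryFetchAsync(url, ct) is not null;
    }
}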

@@ -0,0 +1,537 @@
// -----------------------------------------------------------------------------
// CrossModuleEvidenceLinkingTests.cs
// Sprint: SPRINT_8100_0012_0002 - Unified Evidence Model
// Task: EVID-8100-018 - Cross-module evidence linking integration tests
// Description: Integration tests verifying evidence linking across modules:
//   - Same subject can have evidence from multiple modules
//   - Evidence types from Scanner, Attestor, Policy, Excititor
//   - Evidence chain/graph queries work correctly
// -----------------------------------------------------------------------------

using System.Text;
using System.Text.Json;
using FluentAssertions;
using StellaOps.Evidence.Core;
using StellaOps.Evidence.Storage.Postgres.Tests.Fixtures;
using Xunit;
using Xunit.Abstractions;

namespace StellaOps.Evidence.Storage.Postgres.Tests;

/// <summary>
/// Integration tests for cross-module evidence linking.
/// Verifies that the unified evidence model correctly links evidence
/// from different modules (Scanner, Attestor, Policy, Excititor) to the same subject.
/// </summary>
[Collection(EvidencePostgresTestCollection.Name)]
public sealed class CrossModuleEvidenceLinkingTests : IAsyncLifetime
{
    private readonly EvidencePostgresContainerFixture _fixture;
    private readonly ITestOutputHelper _output;
    private readonly string _tenantId = Guid.NewGuid().ToString();
    private PostgresEvidenceStore _store = null!;

    public CrossModuleEvidenceLinkingTests(
        EvidencePostgresContainerFixture fixture,
        ITestOutputHelper output)
    {
        _fixture = fixture;
        _output = output;
    }

    public Task InitializeAsync()
    {
        _store = _fixture.CreateStore(_tenantId);
        return Task.CompletedTask;
    }

    public async Task DisposeAsync()
    {
        await _fixture.TruncateAllTablesAsync();
    }

    #region Multi-Module Evidence for Same Subject

    [Fact]
    public async Task SameSubject_MultipleEvidenceTypes_AllLinked()
    {
        // Arrange - A container image subject with evidence from multiple modules
        var subjectNodeId = $"sha256:{Guid.NewGuid():N}"; // Container image digest

        var scannerEvidence = CreateScannerEvidence(subjectNodeId);
        var reachabilityEvidence = CreateReachabilityEvidence(subjectNodeId);
        var policyEvidence = CreatePolicyEvidence(subjectNodeId);
        var vexEvidence = CreateVexEvidence(subjectNodeId);
        var provenanceEvidence = CreateProvenanceEvidence(subjectNodeId);

        // Act - Store all evidence
        await _store.StoreAsync(scannerEvidence);
        await _store.StoreAsync(reachabilityEvidence);
        await _store.StoreAsync(policyEvidence);
        await _store.StoreAsync(vexEvidence);
        await _store.StoreAsync(provenanceEvidence);

        // Assert - All evidence linked to same subject
        var allEvidence = await _store.GetBySubjectAsync(subjectNodeId);

        allEvidence.Should().HaveCount(5);
        allEvidence.Select(e => e.EvidenceType).Should().Contain(new[]
        {
            EvidenceType.Scan,
            EvidenceType.Reachability,
            EvidenceType.Policy,
            EvidenceType.Vex,
            EvidenceType.Provenance
        });

        _output.WriteLine($"Subject {subjectNodeId} has {allEvidence.Count} evidence records from different modules");
    }

    [Fact]
    public async Task SameSubject_FilterByType_ReturnsCorrectEvidence()
    {
        // Arrange
        var subjectNodeId = $"sha256:{Guid.NewGuid():N}";

        await _store.StoreAsync(CreateScannerEvidence(subjectNodeId));
        await _store.StoreAsync(CreateScannerEvidence(subjectNodeId)); // Another scan finding
        await _store.StoreAsync(CreateReachabilityEvidence(subjectNodeId));
        await _store.StoreAsync(CreatePolicyEvidence(subjectNodeId));

        // Act - Filter by Scan type
        var scanEvidence = await _store.GetBySubjectAsync(subjectNodeId, EvidenceType.Scan);
        var policyEvidence = await _store.GetBySubjectAsync(subjectNodeId, EvidenceType.Policy);

        // Assert
        scanEvidence.Should().HaveCount(2);
        policyEvidence.Should().HaveCount(1);
    }

    #endregion

    #region Evidence Chain Scenarios

    [Fact]
    public async Task EvidenceChain_ScanToVexToPolicy_LinkedCorrectly()
    {
        // Scenario: Vulnerability scan → VEX assessment → Policy decision
        // All evidence points to the same subject (vulnerability finding)

        var vulnerabilitySubject = $"sha256:{Guid.NewGuid():N}";

        // 1. Scanner finds vulnerability
        var scanEvidence = CreateScannerEvidence(vulnerabilitySubject);
        await _store.StoreAsync(scanEvidence);

        // 2. VEX assessment received
        var vexEvidence = CreateVexEvidence(vulnerabilitySubject, referencedEvidenceId: scanEvidence.EvidenceId);
        await _store.StoreAsync(vexEvidence);

        // 3. Policy engine makes decision
        var policyEvidence = CreatePolicyEvidence(vulnerabilitySubject, referencedEvidenceId: vexEvidence.EvidenceId);
        await _store.StoreAsync(policyEvidence);

        // Assert - Chain is queryable
        var allEvidence = await _store.GetBySubjectAsync(vulnerabilitySubject);
        allEvidence.Should().HaveCount(3);

        // Verify order by type represents the chain
        var scan = allEvidence.First(e => e.EvidenceType == EvidenceType.Scan);
        var vex = allEvidence.First(e => e.EvidenceType == EvidenceType.Vex);
        var policy = allEvidence.First(e => e.EvidenceType == EvidenceType.Policy);

        scan.Should().NotBeNull();
        vex.Should().NotBeNull();
        policy.Should().NotBeNull();

        _output.WriteLine($"Evidence chain: Scan({scan.EvidenceId}) → VEX({vex.EvidenceId}) → Policy({policy.EvidenceId})");
    }

    [Fact]
    public async Task EvidenceChain_ReachabilityToEpssToPolicy_LinkedCorrectly()
    {
        // Scenario: Reachability analysis + EPSS score → Policy decision
        var subjectNodeId = $"sha256:{Guid.NewGuid():N}";

        // 1. Reachability analysis
        var reachability = CreateReachabilityEvidence(subjectNodeId);
        await _store.StoreAsync(reachability);

        // 2. EPSS score
        var epss = CreateEpssEvidence(subjectNodeId);
        await _store.StoreAsync(epss);

        // 3. Policy decision based on both
        var policy = CreatePolicyEvidence(subjectNodeId, referencedEvidenceIds: new[]
        {
            reachability.EvidenceId,
            epss.EvidenceId
        });
        await _store.StoreAsync(policy);

        // Assert
        var allEvidence = await _store.GetBySubjectAsync(subjectNodeId);
        allEvidence.Should().HaveCount(3);
    }

    #endregion

    #region Multi-Tenant Evidence Isolation

    [Fact]
    public async Task MultiTenant_SameSubject_IsolatedByTenant()
    {
        // Arrange - Two tenants with evidence for the same subject
        var subjectNodeId = $"sha256:{Guid.NewGuid():N}";
        var tenantA = Guid.NewGuid().ToString();
        var tenantB = Guid.NewGuid().ToString();

        var storeA = _fixture.CreateStore(tenantA);
        var storeB = _fixture.CreateStore(tenantB);

        var evidenceA = CreateScannerEvidence(subjectNodeId);
        var evidenceB = CreateScannerEvidence(subjectNodeId);

        // Act - Store in different tenant stores
        await storeA.StoreAsync(evidenceA);
        await storeB.StoreAsync(evidenceB);

        // Assert - Each tenant only sees their own evidence
        var retrievedA = await storeA.GetBySubjectAsync(subjectNodeId);
        var retrievedB = await storeB.GetBySubjectAsync(subjectNodeId);

        retrievedA.Should().HaveCount(1);
        retrievedA[0].EvidenceId.Should().Be(evidenceA.EvidenceId);

        retrievedB.Should().HaveCount(1);
        retrievedB[0].EvidenceId.Should().Be(evidenceB.EvidenceId);

        _output.WriteLine($"Tenant A evidence: {evidenceA.EvidenceId}");
        _output.WriteLine($"Tenant B evidence: {evidenceB.EvidenceId}");
    }

    #endregion

    #region Evidence Graph Queries

    [Fact]
    public async Task EvidenceGraph_AllTypesForArtifact_ReturnsComplete()
    {
        // Arrange - Simulate a complete evidence graph for a container artifact
        var artifactDigest = $"sha256:{Guid.NewGuid():N}";

        var evidenceRecords = new[]
        {
            CreateArtifactEvidence(artifactDigest),     // SBOM entry
            CreateScannerEvidence(artifactDigest),      // Vulnerability scan
            CreateReachabilityEvidence(artifactDigest), // Reachability analysis
            CreateEpssEvidence(artifactDigest),         // EPSS score
            CreateVexEvidence(artifactDigest),          // VEX statement
            CreatePolicyEvidence(artifactDigest),       // Policy decision
            CreateProvenanceEvidence(artifactDigest),   // Build provenance
            CreateExceptionEvidence(artifactDigest)     // Exception applied
        };

        foreach (var record in evidenceRecords)
        {
            await _store.StoreAsync(record);
        }

        // Act - Query all evidence types
        var allEvidence = await _store.GetBySubjectAsync(artifactDigest);

        // Assert - Complete evidence graph
        allEvidence.Should().HaveCount(8);
        allEvidence.Select(e => e.EvidenceType).Distinct().Should().HaveCount(8);

        // Log evidence graph
        foreach (var evidence in allEvidence)
        {
            _output.WriteLine($"  {evidence.EvidenceType}: {evidence.EvidenceId}");
        }
    }

    [Fact]
    public async Task EvidenceGraph_ExistsCheck_ForAllTypes()
    {
        // Arrange
        var subjectNodeId = $"sha256:{Guid.NewGuid():N}";

        await _store.StoreAsync(CreateScannerEvidence(subjectNodeId));
        await _store.StoreAsync(CreateReachabilityEvidence(subjectNodeId));
        // Note: No Policy evidence

        // Act & Assert
        (await _store.ExistsAsync(subjectNodeId, EvidenceType.Scan)).Should().BeTrue();
        (await _store.ExistsAsync(subjectNodeId, EvidenceType.Reachability)).Should().BeTrue();
        (await _store.ExistsAsync(subjectNodeId, EvidenceType.Policy)).Should().BeFalse();
        (await _store.ExistsAsync(subjectNodeId, EvidenceType.Vex)).Should().BeFalse();
    }

    #endregion

    #region Cross-Module Evidence Correlation

    [Fact]
    public async Task Correlation_SameCorrelationId_FindsRelatedEvidence()
    {
        // Arrange - Evidence from different modules with same correlation ID
        var subjectNodeId = $"sha256:{Guid.NewGuid():N}";
        var correlationId = Guid.NewGuid().ToString();

        var scanEvidence = CreateScannerEvidence(subjectNodeId, correlationId: correlationId);
        var reachEvidence = CreateReachabilityEvidence(subjectNodeId, correlationId: correlationId);
        var policyEvidence = CreatePolicyEvidence(subjectNodeId, correlationId: correlationId);

        await _store.StoreAsync(scanEvidence);
        await _store.StoreAsync(reachEvidence);
        await _store.StoreAsync(policyEvidence);

        // Act - Get all evidence for subject
        var allEvidence = await _store.GetBySubjectAsync(subjectNodeId);

        // Assert - All have same correlation ID
        allEvidence.Should().HaveCount(3);
        allEvidence.Should().OnlyContain(e => e.Provenance.CorrelationId == correlationId);
    }

    [Fact]
    public async Task Generators_MultiplePerSubject_AllPreserved()
    {
        // Arrange - Evidence from different generators
        var subjectNodeId = $"sha256:{Guid.NewGuid():N}";

        var trivyEvidence = CreateScannerEvidence(subjectNodeId, generator: "stellaops/scanner/trivy");
        var grypeEvidence = CreateScannerEvidence(subjectNodeId, generator: "stellaops/scanner/grype");
        var snykEvidence = CreateScannerEvidence(subjectNodeId, generator: "vendor/snyk");

        await _store.StoreAsync(trivyEvidence);
        await _store.StoreAsync(grypeEvidence);
        await _store.StoreAsync(snykEvidence);

        // Act
        var scanEvidence = await _store.GetBySubjectAsync(subjectNodeId, EvidenceType.Scan);

        // Assert
        scanEvidence.Should().HaveCount(3);
        scanEvidence.Select(e => e.Provenance.GeneratorId).Should()
            .Contain(new[] { "stellaops/scanner/trivy", "stellaops/scanner/grype", "vendor/snyk" });
    }

    #endregion

    #region Evidence Count and Statistics

    [Fact]
    public async Task CountBySubject_AfterMultiModuleInserts_ReturnsCorrectCount()
    {
        // Arrange
        var subjectNodeId = $"sha256:{Guid.NewGuid():N}";

        await _store.StoreAsync(CreateScannerEvidence(subjectNodeId));
        await _store.StoreAsync(CreateReachabilityEvidence(subjectNodeId));
        await _store.StoreAsync(CreatePolicyEvidence(subjectNodeId));

        // Act
        var count = await _store.CountBySubjectAsync(subjectNodeId);

        // Assert
        count.Should().Be(3);
    }

    [Fact]
    public async Task GetByType_AcrossSubjects_ReturnsAll()
    {
        // Arrange - Multiple subjects with same evidence type
        var subject1 = $"sha256:{Guid.NewGuid():N}";
        var subject2 = $"sha256:{Guid.NewGuid():N}";
        var subject3 = $"sha256:{Guid.NewGuid():N}";

        await _store.StoreAsync(CreateScannerEvidence(subject1));
        await _store.StoreAsync(CreateScannerEvidence(subject2));
        await _store.StoreAsync(CreateScannerEvidence(subject3));
        await _store.StoreAsync(CreateReachabilityEvidence(subject1)); // Different type

        // Act
        var scanEvidence = await _store.GetByTypeAsync(EvidenceType.Scan);

        // Assert
        scanEvidence.Should().HaveCount(3);
        scanEvidence.Select(e => e.SubjectNodeId).Should()
            .Contain(new[] { subject1, subject2, subject3 });
    }

    #endregion

    #region Helpers

    private static EvidenceRecord CreateScannerEvidence(
        string subjectNodeId,
        string? correlationId = null,
        string generator = "stellaops/scanner/trivy")
    {
        var payload = JsonSerializer.SerializeToUtf8Bytes(new
        {
            cve = $"CVE-2024-{Random.Shared.Next(1000, 9999)}",
            severity = "HIGH",
            affectedPackage = "example-lib@1.0.0"
        });

        var provenance = new EvidenceProvenance
        {
            GeneratorId = generator,
            GeneratorVersion = "1.0.0",
            GeneratedAt = DateTimeOffset.UtcNow,
            CorrelationId = correlationId ?? Guid.NewGuid().ToString()
        };

        return EvidenceRecord.Create(subjectNodeId, EvidenceType.Scan, payload, provenance, "1.0.0");
    }

    private static EvidenceRecord CreateReachabilityEvidence(
        string subjectNodeId,
        string? correlationId = null)
    {
        var payload = JsonSerializer.SerializeToUtf8Bytes(new
        {
            reachable = true,
            confidence = 0.95,
            paths = new[] { "main.go:42", "handler.go:128" }
        });

        var provenance = new EvidenceProvenance
        {
            GeneratorId = "stellaops/scanner/reachability",
            GeneratorVersion = "1.0.0",
            GeneratedAt = DateTimeOffset.UtcNow,
            CorrelationId = correlationId ?? Guid.NewGuid().ToString()
        };

        return EvidenceRecord.Create(subjectNodeId, EvidenceType.Reachability, payload, provenance, "1.0.0");
    }

    private static EvidenceRecord CreatePolicyEvidence(
        string subjectNodeId,
        string? referencedEvidenceId = null,
        string[]? referencedEvidenceIds = null,
        string? correlationId = null)
    {
        var refs = referencedEvidenceIds ?? (referencedEvidenceId is not null ? new[] { referencedEvidenceId } : null);

        var payload = JsonSerializer.SerializeToUtf8Bytes(new
        {
            ruleId = "vuln-severity-block",
            verdict = "BLOCK",
            referencedEvidence = refs
        });

        var provenance = new EvidenceProvenance
        {
            GeneratorId = "stellaops/policy/opa",
            GeneratorVersion = "1.0.0",
            GeneratedAt = DateTimeOffset.UtcNow,
            CorrelationId = correlationId ?? Guid.NewGuid().ToString()
        };

        return EvidenceRecord.Create(subjectNodeId, EvidenceType.Policy, payload, provenance, "1.0.0");
    }

    private static EvidenceRecord CreateVexEvidence(
        string subjectNodeId,
        string? referencedEvidenceId = null)
    {
        var payload = JsonSerializer.SerializeToUtf8Bytes(new
        {
            status = "not_affected",
            justification = "vulnerable_code_not_in_execute_path",
            referencedEvidence = referencedEvidenceId
        });

        var provenance = new EvidenceProvenance
        {
            GeneratorId = "stellaops/excititor/vex",
            GeneratorVersion = "1.0.0",
            GeneratedAt = DateTimeOffset.UtcNow
        };

        return EvidenceRecord.Create(subjectNodeId, EvidenceType.Vex, payload, provenance, "1.0.0");
    }

    private static EvidenceRecord CreateEpssEvidence(string subjectNodeId)
    {
        var payload = JsonSerializer.SerializeToUtf8Bytes(new
        {
            score = 0.0342,
            percentile = 0.89,
            modelDate = "2024-12-25"
        });

        var provenance = new EvidenceProvenance
        {
            GeneratorId = "stellaops/scanner/epss",
            GeneratorVersion = "1.0.0",
            GeneratedAt = DateTimeOffset.UtcNow
        };

        return EvidenceRecord.Create(subjectNodeId, EvidenceType.Epss, payload, provenance, "1.0.0");
    }

    private static EvidenceRecord CreateProvenanceEvidence(string subjectNodeId)
    {
        var payload = JsonSerializer.SerializeToUtf8Bytes(new
        {
            buildId = Guid.NewGuid().ToString(),
            builder = "github-actions",
            inputs = new[] { "go.mod", "main.go" }
        });

        var provenance = new EvidenceProvenance
        {
            GeneratorId = "stellaops/attestor/provenance",
            GeneratorVersion = "1.0.0",
            GeneratedAt = DateTimeOffset.UtcNow
        };

        return EvidenceRecord.Create(subjectNodeId, EvidenceType.Provenance, payload, provenance, "1.0.0");
    }

    private static EvidenceRecord CreateArtifactEvidence(string subjectNodeId)
    {
        var payload = JsonSerializer.SerializeToUtf8Bytes(new
        {
            purl = "pkg:golang/example.com/mylib@1.0.0",
            digest = subjectNodeId,
            sbomFormat = "SPDX-3.0.1"
        });

        var provenance = new EvidenceProvenance
        {
            GeneratorId = "stellaops/scanner/sbom",
            GeneratorVersion = "1.0.0",
            GeneratedAt = DateTimeOffset.UtcNow
        };

        return EvidenceRecord.Create(subjectNodeId, EvidenceType.Artifact, payload, provenance, "1.0.0");
    }

    private static EvidenceRecord CreateExceptionEvidence(string subjectNodeId)
    {
        var payload = JsonSerializer.SerializeToUtf8Bytes(new
        {
            exceptionId = Guid.NewGuid().ToString(),
            reason = "Risk accepted per security review",
            expiry = DateTimeOffset.UtcNow.AddDays(90)
        });

        var provenance = new EvidenceProvenance
        {
            GeneratorId = "stellaops/policy/exceptions",
            GeneratorVersion = "1.0.0",
            GeneratedAt = DateTimeOffset.UtcNow
        };

        return EvidenceRecord.Create(subjectNodeId, EvidenceType.Exception, payload, provenance, "1.0.0");
    }

    #endregion
}
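
Reviewer note - the chain tests assert counts only; a hedged sketch of how a consumer could rebuild chain order from the "referencedEvidence" field the helpers above write into their payloads. The store treats payloads as opaque bytes, so nothing enforces this convention; the sketch assumes a single linear chain.

// Sketch only: not part of the store API. Assumes each record references at
// most one predecessor, either as a string (VEX helper) or a one-element
// array (Policy helper).
private static List<EvidenceRecord> OrderByReferences(IReadOnlyList<EvidenceRecord> records)
{
    static string? RefOf(EvidenceRecord r)
    {
        using var doc = JsonDocument.Parse(r.Payload);
        if (!doc.RootElement.TryGetProperty("referencedEvidence", out var p))
            return null;
        return p.ValueKind switch
        {
            JsonValueKind.String => p.GetString(),
            JsonValueKind.Array when p.GetArrayLength() == 1 => p[0].GetString(),
            _ => null
        };
    }

    // Map "who references me": scan <- vex <- policy.
    var byReferencedId = records
        .Select(r => (Record: r, Ref: RefOf(r)))
        .Where(t => t.Ref is not null)
        .ToDictionary(t => t.Ref!, t => t.Record);

    // The root is the record that references nothing; walk forward from it.
    var chain = new List<EvidenceRecord>();
    var current = records.FirstOrDefault(r => RefOf(r) is null);
    while (current is not null)
    {
        chain.Add(current);
        byReferencedId.TryGetValue(current.EvidenceId, out current);
    }

    return chain;
}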

@@ -0,0 +1,185 @@
// -----------------------------------------------------------------------------
// EvidencePostgresContainerFixture.cs
// Sprint: SPRINT_8100_0012_0002 - Unified Evidence Model
// Task: EVID-8100-017 - PostgreSQL store integration tests
// Description: Collection fixture providing a shared PostgreSQL container for Evidence storage tests
// -----------------------------------------------------------------------------

using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Infrastructure.Postgres.Options;
using StellaOps.Infrastructure.Postgres.Testing;
using Testcontainers.PostgreSql;
using Xunit.Sdk;

namespace StellaOps.Evidence.Storage.Postgres.Tests.Fixtures;

/// <summary>
/// Collection fixture that provides a shared PostgreSQL container for Evidence storage integration tests.
/// Uses Testcontainers to spin up a PostgreSQL instance with the evidence schema.
/// </summary>
public sealed class EvidencePostgresContainerFixture : IAsyncLifetime, IAsyncDisposable
{
    private PostgreSqlContainer? _container;
    private PostgresFixture? _fixture;
    private bool _disposed;

    /// <summary>
    /// Gets whether the container is running.
    /// </summary>
    public bool IsRunning => _container is not null;

    /// <summary>
    /// Gets the connection string for the PostgreSQL container.
    /// </summary>
    public string ConnectionString => _container?.GetConnectionString()
        ?? throw new InvalidOperationException("Container not started");

    /// <summary>
    /// Gets the PostgreSQL fixture for test schema management.
    /// </summary>
    public PostgresFixture Fixture => _fixture
        ?? throw new InvalidOperationException("Fixture not initialized");

    /// <summary>
    /// Creates PostgreSQL options configured for the test container.
    /// </summary>
    public PostgresOptions CreateOptions()
    {
        return new PostgresOptions
        {
            ConnectionString = ConnectionString,
            SchemaName = EvidenceDataSource.DefaultSchemaName,
            CommandTimeoutSeconds = 30,
            AutoMigrate = false
        };
    }

    /// <summary>
    /// Creates an EvidenceDataSource for tests.
    /// </summary>
    public EvidenceDataSource CreateDataSource()
    {
        var options = Options.Create(CreateOptions());
        return new EvidenceDataSource(options, NullLogger<EvidenceDataSource>.Instance);
    }

    /// <summary>
    /// Creates a PostgresEvidenceStore for the specified tenant.
    /// </summary>
    public PostgresEvidenceStore CreateStore(string tenantId)
    {
        var dataSource = CreateDataSource();
        return new PostgresEvidenceStore(
            dataSource,
            tenantId,
            NullLogger<PostgresEvidenceStore>.Instance);
    }

    /// <summary>
    /// Creates a PostgresEvidenceStoreFactory for tests.
    /// </summary>
    public PostgresEvidenceStoreFactory CreateStoreFactory()
    {
        var dataSource = CreateDataSource();
        return new PostgresEvidenceStoreFactory(dataSource, NullLoggerFactory.Instance);
    }

    /// <inheritdoc />
    public async Task InitializeAsync()
    {
        try
        {
            _container = new PostgreSqlBuilder()
                .WithImage("postgres:16-alpine")
                .WithDatabase("stellaops_test")
                .WithUsername("test")
                .WithPassword("test")
                .Build();

            await _container.StartAsync();

            // Create fixture for schema management
            _fixture = PostgresFixtureFactory.CreateRandom(ConnectionString);
            await _fixture.InitializeAsync();

            // Run evidence schema migrations
            await _fixture.RunMigrationsFromAssemblyAsync<EvidenceDataSource>(
                "Evidence",
                resourcePrefix: null);
        }
        catch (Exception ex)
        {
            try
            {
                if (_fixture is not null)
                {
                    await _fixture.DisposeAsync();
                }

                if (_container is not null)
                {
                    await _container.DisposeAsync();
                }
            }
            catch
            {
                // Ignore cleanup failures during skip.
            }

            _container = null;
            _fixture = null;

            throw SkipException.ForSkip(
                $"Evidence PostgreSQL integration tests require Docker/Testcontainers. Skipping because the container failed to start: {ex.Message}");
        }
    }

    /// <inheritdoc />
    public async Task DisposeAsync()
    {
        await DisposeAsyncCore();
    }

    async ValueTask IAsyncDisposable.DisposeAsync()
    {
        await DisposeAsyncCore();
        GC.SuppressFinalize(this);
    }

    private async Task DisposeAsyncCore()
    {
        if (_disposed) return;
        _disposed = true;

        if (_fixture is not null)
        {
            await _fixture.DisposeAsync();
        }

        if (_container is not null)
        {
            await _container.StopAsync();
            await _container.DisposeAsync();
        }
    }

    /// <summary>
    /// Truncates all tables for test isolation.
    /// </summary>
    public async Task TruncateAllTablesAsync()
    {
        if (_fixture is null) return;
        await _fixture.TruncateAllTablesAsync();
    }
}

/// <summary>
/// Collection definition for Evidence PostgreSQL integration tests.
/// All tests in this collection share a single PostgreSQL container.
/// </summary>
[CollectionDefinition(Name)]
public sealed class EvidencePostgresTestCollection : ICollectionFixture<EvidencePostgresContainerFixture>
{
    public const string Name = "Evidence PostgreSQL Integration Tests";
}

@@ -0,0 +1,530 @@
// -----------------------------------------------------------------------------
// PostgresEvidenceStoreIntegrationTests.cs
// Sprint: SPRINT_8100_0012_0002 - Unified Evidence Model
// Task: EVID-8100-017 - PostgreSQL store CRUD integration tests
// Description: Integration tests verifying PostgresEvidenceStore CRUD operations
// -----------------------------------------------------------------------------

using System.Text;
using FluentAssertions;
using StellaOps.Evidence.Core;
using StellaOps.Evidence.Storage.Postgres.Tests.Fixtures;
using Xunit;
using Xunit.Abstractions;

namespace StellaOps.Evidence.Storage.Postgres.Tests;

/// <summary>
/// Integration tests for PostgresEvidenceStore CRUD operations.
/// Tests run against a real PostgreSQL container via Testcontainers.
/// </summary>
[Collection(EvidencePostgresTestCollection.Name)]
public sealed class PostgresEvidenceStoreIntegrationTests : IAsyncLifetime
{
    private readonly EvidencePostgresContainerFixture _fixture;
    private readonly ITestOutputHelper _output;
    private readonly string _tenantId = Guid.NewGuid().ToString();
    private PostgresEvidenceStore _store = null!;

    public PostgresEvidenceStoreIntegrationTests(
        EvidencePostgresContainerFixture fixture,
        ITestOutputHelper output)
    {
        _fixture = fixture;
        _output = output;
    }

    public Task InitializeAsync()
    {
        _store = _fixture.CreateStore(_tenantId);
        return Task.CompletedTask;
    }

    public async Task DisposeAsync()
    {
        await _fixture.TruncateAllTablesAsync();
    }

    #region Store Tests

    [Fact]
    public async Task StoreAsync_NewEvidence_ReturnsEvidenceId()
    {
        // Arrange
        var evidence = CreateTestEvidence();

        // Act
        var storedId = await _store.StoreAsync(evidence);

        // Assert
        storedId.Should().Be(evidence.EvidenceId);
        _output.WriteLine($"Stored evidence: {storedId}");
    }

    [Fact]
    public async Task StoreAsync_DuplicateEvidence_IsIdempotent()
    {
        // Arrange
        var evidence = CreateTestEvidence();

        // Act - Store twice
        var firstId = await _store.StoreAsync(evidence);
        var secondId = await _store.StoreAsync(evidence);

        // Assert - Both return same ID, no error
        firstId.Should().Be(evidence.EvidenceId);
        secondId.Should().Be(evidence.EvidenceId);

        // Verify only one record exists
        var count = await _store.CountBySubjectAsync(evidence.SubjectNodeId);
        count.Should().Be(1);
    }

    [Fact]
    public async Task StoreBatchAsync_MultipleRecords_StoresAllSuccessfully()
    {
        // Arrange
        var subjectId = $"sha256:{Guid.NewGuid():N}";
        var records = Enumerable.Range(1, 5)
            .Select(i => CreateTestEvidence(subjectId, (EvidenceType)(i % 5 + 1)))
            .ToList();

        // Act
        var storedCount = await _store.StoreBatchAsync(records);

        // Assert
        storedCount.Should().Be(5);
        var count = await _store.CountBySubjectAsync(subjectId);
        count.Should().Be(5);
    }

    [Fact]
    public async Task StoreBatchAsync_WithDuplicates_StoresOnlyUnique()
    {
        // Arrange
        var evidence = CreateTestEvidence();
        var records = new[] { evidence, evidence, evidence };

        // Act
        var storedCount = await _store.StoreBatchAsync(records);

        // Assert - Only one should be stored
        storedCount.Should().Be(1);
    }

    #endregion

    #region GetById Tests

    [Fact]
    public async Task GetByIdAsync_ExistingEvidence_ReturnsEvidence()
    {
        // Arrange
        var evidence = CreateTestEvidence();
        await _store.StoreAsync(evidence);

        // Act
        var retrieved = await _store.GetByIdAsync(evidence.EvidenceId);

        // Assert
        retrieved.Should().NotBeNull();
        retrieved!.EvidenceId.Should().Be(evidence.EvidenceId);
        retrieved.SubjectNodeId.Should().Be(evidence.SubjectNodeId);
        retrieved.EvidenceType.Should().Be(evidence.EvidenceType);
        retrieved.PayloadSchemaVersion.Should().Be(evidence.PayloadSchemaVersion);
        retrieved.Payload.ToArray().Should().BeEquivalentTo(evidence.Payload.ToArray());
        retrieved.Provenance.GeneratorId.Should().Be(evidence.Provenance.GeneratorId);
    }

    [Fact]
    public async Task GetByIdAsync_NonExistingEvidence_ReturnsNull()
    {
        // Arrange
        var nonExistentId = $"sha256:{Guid.NewGuid():N}";

        // Act
        var retrieved = await _store.GetByIdAsync(nonExistentId);

        // Assert
        retrieved.Should().BeNull();
    }

    [Fact]
    public async Task GetByIdAsync_WithSignatures_PreservesSignatures()
    {
        // Arrange
        var evidence = CreateTestEvidenceWithSignatures();
        await _store.StoreAsync(evidence);

        // Act
        var retrieved = await _store.GetByIdAsync(evidence.EvidenceId);

        // Assert
        retrieved.Should().NotBeNull();
        retrieved!.Signatures.Should().HaveCount(2);
        retrieved.Signatures[0].SignerId.Should().Be("signer-1");
        retrieved.Signatures[1].SignerId.Should().Be("signer-2");
    }

    #endregion

    #region GetBySubject Tests

    [Fact]
    public async Task GetBySubjectAsync_MultipleEvidence_ReturnsAll()
    {
        // Arrange
        var subjectId = $"sha256:{Guid.NewGuid():N}";
        var records = new[]
        {
            CreateTestEvidence(subjectId, EvidenceType.Scan),
            CreateTestEvidence(subjectId, EvidenceType.Reachability),
            CreateTestEvidence(subjectId, EvidenceType.Policy)
        };

        foreach (var record in records)
        {
            await _store.StoreAsync(record);
        }

        // Act
        var retrieved = await _store.GetBySubjectAsync(subjectId);

        // Assert
        retrieved.Should().HaveCount(3);
        retrieved.Select(e => e.EvidenceType).Should()
            .Contain(new[] { EvidenceType.Scan, EvidenceType.Reachability, EvidenceType.Policy });
    }

    [Fact]
    public async Task GetBySubjectAsync_WithTypeFilter_ReturnsFiltered()
    {
        // Arrange
        var subjectId = $"sha256:{Guid.NewGuid():N}";
        await _store.StoreAsync(CreateTestEvidence(subjectId, EvidenceType.Scan));
        await _store.StoreAsync(CreateTestEvidence(subjectId, EvidenceType.Reachability));
        await _store.StoreAsync(CreateTestEvidence(subjectId, EvidenceType.Policy));

        // Act
        var retrieved = await _store.GetBySubjectAsync(subjectId, EvidenceType.Scan);

        // Assert
        retrieved.Should().HaveCount(1);
        retrieved[0].EvidenceType.Should().Be(EvidenceType.Scan);
    }

    [Fact]
    public async Task GetBySubjectAsync_NoEvidence_ReturnsEmptyList()
    {
        // Arrange
        var nonExistentSubject = $"sha256:{Guid.NewGuid():N}";

        // Act
        var retrieved = await _store.GetBySubjectAsync(nonExistentSubject);

        // Assert
        retrieved.Should().BeEmpty();
    }

    #endregion

    #region GetByType Tests

    [Fact]
    public async Task GetByTypeAsync_MultipleEvidence_ReturnsMatchingType()
    {
        // Arrange
        await _store.StoreAsync(CreateTestEvidence(evidenceType: EvidenceType.Scan));
        await _store.StoreAsync(CreateTestEvidence(evidenceType: EvidenceType.Scan));
        await _store.StoreAsync(CreateTestEvidence(evidenceType: EvidenceType.Reachability));

        // Act
        var retrieved = await _store.GetByTypeAsync(EvidenceType.Scan);

        // Assert
        retrieved.Should().HaveCount(2);
        retrieved.Should().OnlyContain(e => e.EvidenceType == EvidenceType.Scan);
    }

    [Fact]
    public async Task GetByTypeAsync_WithLimit_RespectsLimit()
    {
        // Arrange
        for (int i = 0; i < 10; i++)
        {
            await _store.StoreAsync(CreateTestEvidence(evidenceType: EvidenceType.Vex));
        }

        // Act
        var retrieved = await _store.GetByTypeAsync(EvidenceType.Vex, limit: 5);

        // Assert
        retrieved.Should().HaveCount(5);
    }

    #endregion

    #region Exists Tests

    [Fact]
    public async Task ExistsAsync_ExistingEvidence_ReturnsTrue()
    {
        // Arrange
        var evidence = CreateTestEvidence();
        await _store.StoreAsync(evidence);

        // Act
        var exists = await _store.ExistsAsync(evidence.SubjectNodeId, evidence.EvidenceType);

        // Assert
        exists.Should().BeTrue();
    }

    [Fact]
    public async Task ExistsAsync_NonExistingEvidence_ReturnsFalse()
    {
        // Arrange
        var evidence = CreateTestEvidence();
        await _store.StoreAsync(evidence);

        // Act - Check for different type
        var exists = await _store.ExistsAsync(evidence.SubjectNodeId, EvidenceType.License);

        // Assert
        exists.Should().BeFalse();
    }

    [Fact]
    public async Task ExistsAsync_NonExistingSubject_ReturnsFalse()
    {
        // Arrange
        var nonExistentSubject = $"sha256:{Guid.NewGuid():N}";

        // Act
        var exists = await _store.ExistsAsync(nonExistentSubject, EvidenceType.Scan);

        // Assert
        exists.Should().BeFalse();
    }

    #endregion

    #region Delete Tests

    [Fact]
    public async Task DeleteAsync_ExistingEvidence_ReturnsTrue()
    {
        // Arrange
        var evidence = CreateTestEvidence();
        await _store.StoreAsync(evidence);

        // Act
        var deleted = await _store.DeleteAsync(evidence.EvidenceId);

        // Assert
        deleted.Should().BeTrue();

        // Verify deletion
        var retrieved = await _store.GetByIdAsync(evidence.EvidenceId);
        retrieved.Should().BeNull();
    }

    [Fact]
    public async Task DeleteAsync_NonExistingEvidence_ReturnsFalse()
    {
        // Arrange
        var nonExistentId = $"sha256:{Guid.NewGuid():N}";

        // Act
        var deleted = await _store.DeleteAsync(nonExistentId);

        // Assert
        deleted.Should().BeFalse();
    }

    #endregion

    #region Count Tests

    [Fact]
    public async Task CountBySubjectAsync_MultipleEvidence_ReturnsCorrectCount()
    {
        // Arrange
        var subjectId = $"sha256:{Guid.NewGuid():N}";
        await _store.StoreAsync(CreateTestEvidence(subjectId, EvidenceType.Scan));
        await _store.StoreAsync(CreateTestEvidence(subjectId, EvidenceType.Reachability));
        await _store.StoreAsync(CreateTestEvidence(subjectId, EvidenceType.Policy));

        // Act
        var count = await _store.CountBySubjectAsync(subjectId);

        // Assert
        count.Should().Be(3);
    }

    [Fact]
    public async Task CountBySubjectAsync_NoEvidence_ReturnsZero()
    {
        // Arrange
        var nonExistentSubject = $"sha256:{Guid.NewGuid():N}";

        // Act
        var count = await _store.CountBySubjectAsync(nonExistentSubject);

        // Assert
        count.Should().Be(0);
    }

    #endregion

    #region Integrity Tests

    [Fact]
    public async Task RoundTrip_EvidenceRecord_PreservesIntegrity()
    {
        // Arrange
        var evidence = CreateTestEvidence();
        await _store.StoreAsync(evidence);

        // Act
        var retrieved = await _store.GetByIdAsync(evidence.EvidenceId) as EvidenceRecord;

        // Assert
        retrieved.Should().NotBeNull();
        retrieved!.VerifyIntegrity().Should().BeTrue("evidence ID should match computed hash");
    }

    [Fact]
    public async Task RoundTrip_BinaryPayload_PreservesData()
    {
        // Arrange
        var binaryPayload = new byte[] { 0x00, 0x01, 0x02, 0xFF, 0xFE, 0xFD };
        var provenance = EvidenceProvenance.CreateMinimal("test/binary", "1.0.0");
        var evidence = EvidenceRecord.Create(
            $"sha256:{Guid.NewGuid():N}",
            EvidenceType.Artifact,
            binaryPayload,
            provenance,
            "1.0.0");

        await _store.StoreAsync(evidence);

        // Act
        var retrieved = await _store.GetByIdAsync(evidence.EvidenceId);

        // Assert
        retrieved.Should().NotBeNull();
        retrieved!.Payload.ToArray().Should().BeEquivalentTo(binaryPayload);
    }

    [Fact]
    public async Task RoundTrip_UnicodePayload_PreservesData()
    {
        // Arrange
        var unicodeJson = "{\"message\": \"Hello 世界 🌍 مرحبا\"}";
        var payload = Encoding.UTF8.GetBytes(unicodeJson);
        var provenance = EvidenceProvenance.CreateMinimal("test/unicode", "1.0.0");
        var evidence = EvidenceRecord.Create(
            $"sha256:{Guid.NewGuid():N}",
            EvidenceType.Custom,
            payload,
            provenance,
            "1.0.0");

        await _store.StoreAsync(evidence);

        // Act
        var retrieved = await _store.GetByIdAsync(evidence.EvidenceId);

        // Assert
        retrieved.Should().NotBeNull();
        var retrievedJson = Encoding.UTF8.GetString(retrieved!.Payload.Span);
        retrievedJson.Should().Be(unicodeJson);
    }

    #endregion

    #region Factory Tests

    [Fact]
    public void Factory_CreateStore_ReturnsTenantScopedStore()
    {
        // Arrange
        var factory = _fixture.CreateStoreFactory();
        var tenantId1 = Guid.NewGuid().ToString();
        var tenantId2 = Guid.NewGuid().ToString();

        // Act
        var store1 = factory.Create(tenantId1);
        var store2 = factory.Create(tenantId2);

        // Assert
        store1.Should().NotBeNull();
        store2.Should().NotBeNull();
        store1.Should().NotBeSameAs(store2);
    }

    #endregion

    #region Helpers

    private static EvidenceRecord CreateTestEvidence(
        string? subjectNodeId = null,
        EvidenceType evidenceType = EvidenceType.Scan)
    {
        var subject = subjectNodeId ?? $"sha256:{Guid.NewGuid():N}";
        var payload = Encoding.UTF8.GetBytes($"{{\"test\": \"{Guid.NewGuid()}\"}}");
        var provenance = new EvidenceProvenance
        {
            GeneratorId = "test/scanner",
            GeneratorVersion = "1.0.0",
            GeneratedAt = DateTimeOffset.UtcNow,
            CorrelationId = Guid.NewGuid().ToString(),
            Environment = "test"
        };

        return EvidenceRecord.Create(
            subject,
            evidenceType,
            payload,
            provenance,
            "1.0.0");
    }

    private static EvidenceRecord CreateTestEvidenceWithSignatures()
    {
        var subject = $"sha256:{Guid.NewGuid():N}";
        var payload = Encoding.UTF8.GetBytes("{\"signed\": true}");
        var provenance = EvidenceProvenance.CreateMinimal("test/signer", "1.0.0");

        var signatures = new List<EvidenceSignature>
        {
            new()
            {
                SignerId = "signer-1",
                Algorithm = "ES256",
                SignatureBase64 = Convert.ToBase64String(new byte[] { 1, 2, 3 }),
                SignedAt = DateTimeOffset.UtcNow,
                SignerType = SignerType.Internal
            },
            new()
            {
                SignerId = "signer-2",
                Algorithm = "RS256",
                SignatureBase64 = Convert.ToBase64String(new byte[] { 4, 5, 6 }),
                SignedAt = DateTimeOffset.UtcNow,
                SignerType = SignerType.CI
            }
        };

        return EvidenceRecord.Create(
            subject,
            EvidenceType.Provenance,
            payload,
            provenance,
            "1.0.0",
            signatures);
    }

    #endregion
}
|
||||
@@ -0,0 +1,21 @@
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
    <LangVersion>preview</LangVersion>
    <IsPackable>false</IsPackable>
    <IsTestProject>true</IsTestProject>
    <RootNamespace>StellaOps.Evidence.Storage.Postgres.Tests</RootNamespace>
  </PropertyGroup>

  <ItemGroup>
    <PackageReference Include="FluentAssertions" Version="7.0.0" />
    <PackageReference Include="Testcontainers.PostgreSql" Version="4.1.0" />
  </ItemGroup>

  <ItemGroup>
    <ProjectReference Include="..\..\StellaOps.Evidence.Storage.Postgres\StellaOps.Evidence.Storage.Postgres.csproj" />
    <ProjectReference Include="..\..\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
  </ItemGroup>
</Project>
@@ -0,0 +1,667 @@
// -----------------------------------------------------------------------------
// AtLeastOnceDeliveryTests.cs
// Sprint: SPRINT_5100_0010_0003 - Router + Messaging Test Implementation
// Task: MESSAGING-5100-009 - At-least-once delivery with consumer idempotency
// Description: Integration tests verifying at-least-once delivery semantics:
//   - Messages are never lost (guaranteed delivery)
//   - Consumer idempotency correctly handles duplicate deliveries
//   - Lease expiration triggers redelivery
//   - Simulated failures result in message redelivery
// -----------------------------------------------------------------------------

using FluentAssertions;
using StellaOps.Messaging;
using StellaOps.Messaging.Abstractions;
using StellaOps.Messaging.Transport.Valkey.Tests.Fixtures;
using Xunit;
using Xunit.Abstractions;

namespace StellaOps.Messaging.Transport.Valkey.Tests;

/// <summary>
/// Tests for at-least-once delivery semantics with consumer idempotency.
///
/// At-least-once delivery guarantees:
/// 1. Every message sent is delivered at least once
/// 2. Messages may be delivered multiple times (redelivery on failure)
/// 3. Consumer idempotency handles duplicate deliveries
/// 4. No message is ever lost, even under failure conditions
/// </summary>
[Collection(ValkeyIntegrationTestCollection.Name)]
public sealed class AtLeastOnceDeliveryTests : IAsyncLifetime
{
    private readonly ValkeyContainerFixture _fixture;
    private readonly ITestOutputHelper _output;
    private ValkeyConnectionFactory? _connectionFactory;
    private ValkeyIdempotencyStore? _idempotencyStore;

    public AtLeastOnceDeliveryTests(ValkeyContainerFixture fixture, ITestOutputHelper output)
    {
        _fixture = fixture;
        _output = output;
    }

    public Task InitializeAsync()
    {
        _connectionFactory = _fixture.CreateConnectionFactory();
        _idempotencyStore = new ValkeyIdempotencyStore(
            _connectionFactory,
            $"test-consumer-{Guid.NewGuid():N}",
            null);
        return Task.CompletedTask;
    }

    public async Task DisposeAsync()
    {
        if (_connectionFactory is not null)
        {
            await _connectionFactory.DisposeAsync();
        }
    }
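
    // Illustrative sketch (not part of the production API surface): the canonical
    // idempotent-consumer loop that the tests below exercise piecewise. Lease a
    // batch, claim each message's idempotency key, process only on the first claim,
    // then acknowledge. The key prefix and window are arbitrary example values.
    private async Task ConsumeIdempotentlyAsync(ValkeyMessageQueue<TestMessage> queue)
    {
        var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 10 });
        foreach (var lease in leases)
        {
            var claim = await _idempotencyStore!.TryClaimAsync(
                $"sketch:{lease.Message.Id}",
                lease.MessageId,
                TimeSpan.FromMinutes(5));

            if (claim.IsFirstClaim)
            {
                // Real processing would happen here, exactly once per message.
                _output.WriteLine($"Processing {lease.Message.Id}");
            }

            // Acknowledge regardless: the idempotency claim already recorded the work.
            await lease.AcknowledgeAsync();
        }
    }
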
    #region At-Least-Once Delivery Guarantee Tests

    [ValkeyIntegrationFact]
    public async Task AtLeastOnce_MessageSent_IsDeliveredAtLeastOnce()
    {
        // Arrange - Producer sends message
        var queue = CreateQueue<TestMessage>();
        var messageId = Guid.NewGuid();
        var message = new TestMessage
        {
            Id = messageId,
            Content = "At-least-once test message"
        };

        // Act - Send message
        var enqueueResult = await queue.EnqueueAsync(message);
        enqueueResult.Success.Should().BeTrue("message should be accepted by the queue");

        // Act - Consumer receives message
        var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });

        // Assert - Message is delivered
        leases.Should().HaveCount(1, "message must be delivered at least once");
        leases[0].Message.Id.Should().Be(messageId);
        leases[0].Message.Content.Should().Be("At-least-once test message");

        await leases[0].AcknowledgeAsync();
        _output.WriteLine($"Message {messageId} delivered successfully");
    }

    [ValkeyIntegrationFact]
    public async Task AtLeastOnce_UnacknowledgedLease_MessageRedelivered()
    {
        // Arrange - Create queue with short lease duration
        var queueOptions = _fixture.CreateQueueOptions();
        queueOptions.DefaultLeaseDuration = TimeSpan.FromMilliseconds(200);

        var queue = CreateQueue<TestMessage>(queueOptions);
        var messageId = Guid.NewGuid();
        await queue.EnqueueAsync(new TestMessage { Id = messageId, Content = "Redelivery test" });

        // Act - Lease message but don't acknowledge (simulating consumer crash)
        var firstLease = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });
        firstLease.Should().HaveCount(1);
        firstLease[0].Message.Id.Should().Be(messageId);

        // Don't acknowledge - simulate crash
        _output.WriteLine("Simulating consumer crash (not acknowledging message)");

        // Wait for lease to expire
        await Task.Delay(500);

        // Act - Claim expired message (automatic redelivery)
        var redelivered = await queue.ClaimExpiredAsync(new ClaimRequest
        {
            BatchSize = 10,
            MinIdleTime = TimeSpan.FromMilliseconds(200),
            MinDeliveryAttempts = 1
        });

        // Assert - Message is redelivered
        redelivered.Should().HaveCount(1, "message must be redelivered after lease expiration");
        redelivered[0].Message.Id.Should().Be(messageId);
        redelivered[0].Attempt.Should().BeGreaterThan(1, "this should be a redelivery");

        await redelivered[0].AcknowledgeAsync();
        _output.WriteLine($"Message {messageId} successfully redelivered on attempt {redelivered[0].Attempt}");
    }
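
    // Note: on a Redis-compatible transport this claim-expired path is assumed to
    // map to the consumer-group pending-entries mechanism (XPENDING/XAUTOCLAIM-style
    // commands); the exact commands are an implementation detail of the
    // ValkeyMessageQueue and are not asserted by these tests.
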
    [ValkeyIntegrationFact]
    public async Task AtLeastOnce_MultipleMessages_AllDelivered()
    {
        // Arrange
        var queue = CreateQueue<TestMessage>();
        const int messageCount = 100;
        var sentIds = new HashSet<Guid>();

        // Act - Send multiple messages
        for (int i = 0; i < messageCount; i++)
        {
            var id = Guid.NewGuid();
            sentIds.Add(id);
            await queue.EnqueueAsync(new TestMessage { Id = id, Content = $"Message-{i}" });
        }

        // Act - Receive all messages
        var receivedIds = new HashSet<Guid>();
        int remaining = messageCount;
        while (remaining > 0)
        {
            var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 20 });
            foreach (var lease in leases)
            {
                receivedIds.Add(lease.Message.Id);
                await lease.AcknowledgeAsync();
            }
            remaining -= leases.Count;
        }

        // Assert - All messages delivered
        receivedIds.Should().BeEquivalentTo(sentIds, "all sent messages must be delivered");
        _output.WriteLine($"All {messageCount} messages delivered successfully");
    }

    [ValkeyIntegrationFact]
    public async Task AtLeastOnce_RetryAfterNack_MessageRedelivered()
    {
        // Arrange
        var queueOptions = _fixture.CreateQueueOptions();
        queueOptions.RetryInitialBackoff = TimeSpan.Zero; // Immediate retry for test speed

        var queue = CreateQueue<TestMessage>(queueOptions);
        var messageId = Guid.NewGuid();
        await queue.EnqueueAsync(new TestMessage { Id = messageId, Content = "Retry test" });

        // Act - First delivery, simulate processing failure with retry
        var firstLease = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });
        firstLease.Should().HaveCount(1);
        firstLease[0].Attempt.Should().Be(1);

        // Nack for retry
        await firstLease[0].ReleaseAsync(ReleaseDisposition.Retry);
        _output.WriteLine("Message nacked for retry");

        // Brief delay for retry processing
        await Task.Delay(100);

        // Act - Second delivery after retry
        var secondLease = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });

        // Assert - Message is redelivered
        secondLease.Should().HaveCount(1, "message must be redelivered after nack");
        secondLease[0].Message.Id.Should().Be(messageId);
        secondLease[0].Attempt.Should().Be(2, "this should be attempt 2");

        await secondLease[0].AcknowledgeAsync();
        _output.WriteLine($"Message {messageId} successfully processed on attempt 2");
    }

    #endregion

    #region Consumer Idempotency Tests

    [ValkeyIntegrationFact]
    public async Task ConsumerIdempotency_DuplicateProcessing_DetectedAndSkipped()
    {
        // Arrange - Create a consumer with idempotency tracking
        var queue = CreateQueue<TestMessage>();
        var processedMessageIds = new HashSet<Guid>();
        var processingCount = new Dictionary<Guid, int>();

        var messageId = Guid.NewGuid();
        await queue.EnqueueAsync(new TestMessage { Id = messageId, Content = "Idempotency test" });

        // Act - Simulate receiving the message multiple times
        for (int delivery = 1; delivery <= 3; delivery++)
        {
            // Simulate message delivery (could be redelivery)
            var idempotencyKey = $"consumer-process:{messageId}";
            var claimResult = await _idempotencyStore!.TryClaimAsync(
                idempotencyKey,
                messageId.ToString(),
                TimeSpan.FromMinutes(5));

            if (claimResult.IsFirstClaim)
            {
                // First time processing this message
                processedMessageIds.Add(messageId);
                processingCount[messageId] = 1;
                _output.WriteLine($"Delivery {delivery}: First processing of message {messageId}");
            }
            else
            {
                // Duplicate - skip processing
                processingCount[messageId] = processingCount.GetValueOrDefault(messageId) + 1;
                _output.WriteLine($"Delivery {delivery}: Duplicate detected, skipping message {messageId}");
            }
        }

        // Assert - Message processed exactly once despite multiple deliveries
        processedMessageIds.Should().HaveCount(1);
        processingCount[messageId].Should().BeGreaterThan(1, "we simulated multiple deliveries");

        // Cleanup
        var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });
        if (leases.Count > 0)
        {
            await leases[0].AcknowledgeAsync();
        }
    }

    [ValkeyIntegrationFact]
    public async Task ConsumerIdempotency_ConcurrentDuplicates_OnlyOneProcessed()
    {
        // Arrange
        var messageId = Guid.NewGuid();
        var processedCount = 0;
        var duplicateCount = 0;
        var lockObject = new object();

        // Simulate 10 concurrent consumers trying to process the same message
        var tasks = Enumerable.Range(1, 10).Select(async consumerId =>
        {
            var idempotencyKey = $"concurrent-test:{messageId}";
            var claimResult = await _idempotencyStore!.TryClaimAsync(
                idempotencyKey,
                $"consumer-{consumerId}",
                TimeSpan.FromMinutes(5));

            lock (lockObject)
            {
                if (claimResult.IsFirstClaim)
                {
                    processedCount++;
                    _output.WriteLine($"Consumer {consumerId}: Processing message (first claim)");
                }
                else
                {
                    duplicateCount++;
                    _output.WriteLine($"Consumer {consumerId}: Duplicate detected, existing value: {claimResult.ExistingValue}");
                }
            }
        });

        // Act
        await Task.WhenAll(tasks);

        // Assert - Exactly one consumer processed the message
        processedCount.Should().Be(1, "only one consumer should process the message");
        duplicateCount.Should().Be(9, "9 consumers should detect duplicate");
        _output.WriteLine($"Processed: {processedCount}, Duplicates: {duplicateCount}");
    }
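
    // The atomic first-claim behavior exercised above is what a conditional set
    // with a TTL provides on a Redis-compatible store. A plausible (assumed, not
    // verified) underlying primitive is:
    //
    //   SET test:idem:{key} {value} NX PX {windowMilliseconds}
    //
    // where a nil reply means the key was already claimed and the caller is a duplicate.
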
    [ValkeyIntegrationFact]
    public async Task ConsumerIdempotency_IdempotencyWindowExpires_ReprocessingAllowed()
    {
        // Arrange
        var messageId = Guid.NewGuid();
        var shortWindow = TimeSpan.FromMilliseconds(200);
        var idempotencyKey = $"window-test:{messageId}";

        // Act - First claim
        var firstClaim = await _idempotencyStore!.TryClaimAsync(
            idempotencyKey,
            "first-processor",
            shortWindow);
        firstClaim.IsFirstClaim.Should().BeTrue();
        _output.WriteLine("First claim successful");

        // Duplicate should be detected
        var duplicateClaim = await _idempotencyStore!.TryClaimAsync(
            idempotencyKey,
            "duplicate-processor",
            shortWindow);
        duplicateClaim.IsDuplicate.Should().BeTrue();
        _output.WriteLine("Duplicate detected as expected");

        // Wait for window to expire
        await Task.Delay(500);

        // Act - After expiration, claim should succeed again
        var afterExpiration = await _idempotencyStore!.TryClaimAsync(
            idempotencyKey,
            "new-processor",
            shortWindow);

        // Assert - Reprocessing allowed after window expiration
        afterExpiration.IsFirstClaim.Should().BeTrue(
            "after idempotency window expires, message can be reprocessed");
        _output.WriteLine("After window expiration, new claim succeeded");
    }

    [ValkeyIntegrationFact]
    public async Task ConsumerIdempotency_DifferentMessages_IndependentProcessing()
    {
        // Arrange - Three different messages
        var messageIds = Enumerable.Range(1, 3).Select(_ => Guid.NewGuid()).ToList();
        var processedIds = new List<Guid>();

        // Act - Process each message (simulating first-time delivery)
        foreach (var messageId in messageIds)
        {
            var idempotencyKey = $"different-msg-test:{messageId}";
            var claimResult = await _idempotencyStore!.TryClaimAsync(
                idempotencyKey,
                messageId.ToString(),
                TimeSpan.FromMinutes(5));

            if (claimResult.IsFirstClaim)
            {
                processedIds.Add(messageId);
            }
        }

        // Assert - All different messages processed
        processedIds.Should().BeEquivalentTo(messageIds);
        _output.WriteLine($"All {messageIds.Count} different messages processed independently");
    }

    #endregion

    #region End-to-End At-Least-Once with Idempotency Tests

    [ValkeyIntegrationFact]
    public async Task EndToEnd_AtLeastOnceWithIdempotency_NoDuplicateProcessing()
    {
        // Arrange
        var queueOptions = _fixture.CreateQueueOptions();
        queueOptions.DefaultLeaseDuration = TimeSpan.FromMilliseconds(200);
        var queue = CreateQueue<TestMessage>(queueOptions);

        var messageId = Guid.NewGuid();
        var processedIds = new HashSet<Guid>();
        var deliveryCount = 0;

        await queue.EnqueueAsync(new TestMessage { Id = messageId, Content = "E2E test" });

        // Act - Consumer with idempotency-aware processing
        // Simulate: first delivery - lease but crash, second delivery - process successfully

        // First delivery (crash simulation - don't ack)
        var firstLease = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });
        firstLease.Should().HaveCount(1);
        deliveryCount++;

        // Attempt to claim for processing
        var firstClaim = await _idempotencyStore!.TryClaimAsync(
            $"e2e-test:{firstLease[0].Message.Id}",
            firstLease[0].MessageId,
            TimeSpan.FromMinutes(5));

        if (firstClaim.IsFirstClaim)
        {
            processedIds.Add(firstLease[0].Message.Id);
        }

        // Simulate crash - don't acknowledge
        _output.WriteLine("First delivery: Processing started but consumer crashed");

        // Wait for lease expiration
        await Task.Delay(500);

        // Claim expired message (redelivery)
        var redelivered = await queue.ClaimExpiredAsync(new ClaimRequest
        {
            BatchSize = 1,
            MinIdleTime = TimeSpan.FromMilliseconds(200),
            MinDeliveryAttempts = 1
        });

        if (redelivered.Count > 0)
        {
            deliveryCount++;

            // Attempt to claim again (should be duplicate)
            var secondClaim = await _idempotencyStore!.TryClaimAsync(
                $"e2e-test:{redelivered[0].Message.Id}",
                redelivered[0].MessageId,
                TimeSpan.FromMinutes(5));

            if (secondClaim.IsFirstClaim)
            {
                processedIds.Add(redelivered[0].Message.Id);
            }
            else
            {
                _output.WriteLine("Second delivery: Duplicate detected, skipping processing");
            }

            // This time, acknowledge
            await redelivered[0].AcknowledgeAsync();
            _output.WriteLine("Second delivery: Message acknowledged");
        }

        // Assert
        processedIds.Should().HaveCount(1, "message should be processed exactly once");
        deliveryCount.Should().BeGreaterThan(1, "message should be delivered at least twice (crash + redelivery)");
        _output.WriteLine($"Total deliveries: {deliveryCount}, Unique processing: {processedIds.Count}");
    }

    [ValkeyIntegrationFact]
    public async Task EndToEnd_BulkMessages_AtLeastOnceWithIdempotency()
    {
        // Arrange
        var queue = CreateQueue<TestMessage>();
        const int messageCount = 50;
        var processedIds = new ConcurrentHashSet<Guid>();
        var deliveryAttempts = new Dictionary<Guid, int>();

        // Send messages
        var sentIds = new List<Guid>();
        for (int i = 0; i < messageCount; i++)
        {
            var id = Guid.NewGuid();
            sentIds.Add(id);
            await queue.EnqueueAsync(new TestMessage { Id = id, Content = $"Bulk-{i}" });
        }

        // Act - Process all messages with idempotency
        int remaining = messageCount;
        while (remaining > 0)
        {
            var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 10 });
            if (leases.Count == 0) break;

            foreach (var lease in leases)
            {
                var msgId = lease.Message.Id;
                deliveryAttempts[msgId] = deliveryAttempts.GetValueOrDefault(msgId) + 1;

                // Check idempotency before processing
                var claim = await _idempotencyStore!.TryClaimAsync(
                    $"bulk-test:{msgId}",
                    lease.MessageId,
                    TimeSpan.FromMinutes(5));

                if (claim.IsFirstClaim)
                {
                    processedIds.Add(msgId);
                }

                await lease.AcknowledgeAsync();
            }

            remaining -= leases.Count;
        }

        // Assert - All messages processed exactly once
        processedIds.Count.Should().Be(messageCount, "all messages should be processed");
        sentIds.Should().BeEquivalentTo(processedIds.ToList(), "all sent messages should be processed");
        _output.WriteLine($"Processed {processedIds.Count}/{messageCount} messages with idempotency");
    }

    #endregion

    #region Edge Cases

    [ValkeyIntegrationFact]
    public async Task EdgeCase_IdempotencyStore_ExtendWindow()
    {
        // Arrange
        var messageId = Guid.NewGuid();
        var idempotencyKey = $"extend-test:{messageId}";
        var shortWindow = TimeSpan.FromSeconds(1);

        // Act - Claim with short window
        var claim = await _idempotencyStore!.TryClaimAsync(
            idempotencyKey,
            "original-value",
            shortWindow);
        claim.IsFirstClaim.Should().BeTrue();

        // Extend the window
        var extended = await _idempotencyStore!.ExtendAsync(
            idempotencyKey,
            TimeSpan.FromMinutes(5));

        // Assert - Window extended
        extended.Should().BeTrue();

        // Duplicate should still be detected after original window would have expired
        await Task.Delay(1500);
        var afterOriginalExpiry = await _idempotencyStore!.TryClaimAsync(
            idempotencyKey,
            "new-value",
            shortWindow);

        afterOriginalExpiry.IsDuplicate.Should().BeTrue(
            "window was extended, so duplicate should still be detected");
        _output.WriteLine("Window extension verified - duplicate detected after original expiry");
    }

    [ValkeyIntegrationFact]
    public async Task EdgeCase_IdempotencyStore_Release()
    {
        // Arrange
        var messageId = Guid.NewGuid();
        var idempotencyKey = $"release-test:{messageId}";

        // Claim the key
        var firstClaim = await _idempotencyStore!.TryClaimAsync(
            idempotencyKey,
            "first-value",
            TimeSpan.FromMinutes(5));
        firstClaim.IsFirstClaim.Should().BeTrue();

        // Duplicate should be detected
        var duplicate = await _idempotencyStore!.TryClaimAsync(
            idempotencyKey,
            "duplicate-value",
            TimeSpan.FromMinutes(5));
        duplicate.IsDuplicate.Should().BeTrue();

        // Act - Release the key
        var released = await _idempotencyStore!.ReleaseAsync(idempotencyKey);
        released.Should().BeTrue();

        // Assert - After release, key can be claimed again
        var afterRelease = await _idempotencyStore!.TryClaimAsync(
            idempotencyKey,
            "new-value",
            TimeSpan.FromMinutes(5));

        afterRelease.IsFirstClaim.Should().BeTrue(
            "after release, key should be claimable again");
        _output.WriteLine("Release verified - key claimable after release");
    }

    [ValkeyIntegrationFact]
    public async Task EdgeCase_IdempotencyStore_Exists()
    {
        // Arrange
        var messageId = Guid.NewGuid();
        var idempotencyKey = $"exists-test:{messageId}";

        // Act - Check before claiming
        var existsBefore = await _idempotencyStore!.ExistsAsync(idempotencyKey);
        existsBefore.Should().BeFalse();

        // Claim
        await _idempotencyStore!.TryClaimAsync(idempotencyKey, "value", TimeSpan.FromMinutes(5));

        // Check after claiming
        var existsAfter = await _idempotencyStore!.ExistsAsync(idempotencyKey);
        existsAfter.Should().BeTrue();

        _output.WriteLine("Exists check verified");
    }

    [ValkeyIntegrationFact]
    public async Task EdgeCase_IdempotencyStore_Get()
    {
        // Arrange
        var messageId = Guid.NewGuid();
        var idempotencyKey = $"get-test:{messageId}";
        var storedValue = "stored-processor-id";

        // Act - Get before claiming
        var valueBefore = await _idempotencyStore!.GetAsync(idempotencyKey);
        valueBefore.Should().BeNull();

        // Claim
        await _idempotencyStore!.TryClaimAsync(idempotencyKey, storedValue, TimeSpan.FromMinutes(5));

        // Get after claiming
        var valueAfter = await _idempotencyStore!.GetAsync(idempotencyKey);

        // Assert
        valueAfter.Should().Be(storedValue);
        _output.WriteLine($"Get verified - stored value: {valueAfter}");
    }

    #endregion

    #region Helpers

    private ValkeyMessageQueue<TMessage> CreateQueue<TMessage>(
        MessageQueueOptions? queueOptions = null)
        where TMessage : class
    {
        queueOptions ??= _fixture.CreateQueueOptions();
        var transportOptions = _fixture.CreateOptions();

        return new ValkeyMessageQueue<TMessage>(
            _connectionFactory!,
            queueOptions,
            transportOptions,
            _fixture.GetLogger<ValkeyMessageQueue<TMessage>>());
    }

    #endregion

    #region Test Types

    public sealed class TestMessage
    {
        public Guid Id { get; set; }
        public string? Content { get; set; }
    }

    /// <summary>
    /// Thread-safe hash set for concurrent test scenarios.
    /// </summary>
    private sealed class ConcurrentHashSet<T> where T : notnull
    {
        private readonly HashSet<T> _set = new();
        private readonly object _lock = new();

        public bool Add(T item)
        {
            lock (_lock) return _set.Add(item);
        }

        public int Count
        {
            get { lock (_lock) return _set.Count; }
        }

        public List<T> ToList()
        {
            lock (_lock) return _set.ToList();
        }
    }

    #endregion
}
@@ -0,0 +1,203 @@
// -----------------------------------------------------------------------------
// ValkeyContainerFixture.cs
// Sprint: SPRINT_5100_0010_0003 - Router + Messaging Test Implementation
// Task: MESSAGING-5100-004 - Valkey transport compliance tests
// Description: Collection fixture providing a shared Valkey container for integration tests
// -----------------------------------------------------------------------------

using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Router.Testing.Fixtures;
using Testcontainers.Redis;
using Xunit.Sdk;

namespace StellaOps.Messaging.Transport.Valkey.Tests.Fixtures;

/// <summary>
/// Collection fixture that provides a shared Valkey container for integration tests.
/// Uses the Redis Testcontainers builder (Valkey is Redis-compatible).
/// Implements IAsyncLifetime to start/stop the container with the test collection.
/// </summary>
public sealed class ValkeyContainerFixture : RouterCollectionFixture, IAsyncDisposable
{
    private RedisContainer? _container;
    private bool _disposed;

    /// <summary>
    /// Gets the Valkey container hostname.
    /// </summary>
    public string HostName => _container?.Hostname ?? "localhost";

    /// <summary>
    /// Gets the Valkey container mapped port.
    /// </summary>
    public int Port => _container?.GetMappedPublicPort(6379) ?? 6379;

    /// <summary>
    /// Gets the connection string for the Valkey container.
    /// </summary>
    public string ConnectionString => $"{HostName}:{Port}";

    /// <summary>
    /// Gets a null logger for tests.
    /// </summary>
    public ILogger<T> GetLogger<T>() => NullLogger<T>.Instance;

    /// <summary>
    /// Gets whether the container has been started.
    /// </summary>
    public bool IsRunning => _container is not null;

    /// <summary>
    /// Creates Valkey transport options configured for the test container.
    /// </summary>
    public ValkeyTransportOptions CreateOptions(int? database = null)
    {
        return new ValkeyTransportOptions
        {
            ConnectionString = ConnectionString,
            Database = database,
            InitializationTimeout = TimeSpan.FromSeconds(30),
            ConnectRetry = 3,
            AbortOnConnectFail = false,
            IdempotencyKeyPrefix = "test:idem:"
        };
    }

    /// <summary>
    /// Creates a ValkeyConnectionFactory configured for the test container.
    /// </summary>
    public ValkeyConnectionFactory CreateConnectionFactory(int? database = null)
    {
        var options = CreateOptions(database);
        return new ValkeyConnectionFactory(
            Options.Create(options),
            GetLogger<ValkeyConnectionFactory>());
    }

    /// <summary>
    /// Creates message queue options for testing.
    /// </summary>
    public StellaOps.Messaging.MessageQueueOptions CreateQueueOptions(
        string? queueName = null,
        string? consumerGroup = null,
        string? consumerName = null)
    {
        return new StellaOps.Messaging.MessageQueueOptions
        {
            QueueName = queueName ?? $"test:queue:{Guid.NewGuid():N}",
            ConsumerGroup = consumerGroup ?? "test-group",
            ConsumerName = consumerName ?? $"consumer-{Environment.ProcessId}",
            DefaultLeaseDuration = TimeSpan.FromSeconds(30),
            MaxDeliveryAttempts = 3,
            IdempotencyWindow = TimeSpan.FromMinutes(5),
            ApproximateMaxLength = 10000,
            RetryInitialBackoff = TimeSpan.FromMilliseconds(100),
            RetryMaxBackoff = TimeSpan.FromSeconds(10),
            RetryBackoffMultiplier = 2.0
        };
    }
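
    // With the defaults above, the assumed retry backoff schedule is exponential:
    //   delay(n) = min(RetryInitialBackoff * RetryBackoffMultiplier^(n - 1), RetryMaxBackoff)
    // i.e. 100 ms, 200 ms, 400 ms, ... capped at 10 s. The exact formula lives in
    // the transport; this is a worked illustration of the three options, not a contract.
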
    /// <summary>
    /// Creates a ValkeyMessageQueue for testing.
    /// </summary>
    public ValkeyMessageQueue<TMessage> CreateMessageQueue<TMessage>(
        ValkeyConnectionFactory? connectionFactory = null,
        StellaOps.Messaging.MessageQueueOptions? queueOptions = null,
        TimeProvider? timeProvider = null)
        where TMessage : class
    {
        connectionFactory ??= CreateConnectionFactory();
        queueOptions ??= CreateQueueOptions();
        var transportOptions = CreateOptions();

        return new ValkeyMessageQueue<TMessage>(
            connectionFactory,
            queueOptions,
            transportOptions,
            GetLogger<ValkeyMessageQueue<TMessage>>(),
            timeProvider);
    }
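
    // Typical composition from a test body (illustrative only; MyMessage is a
    // placeholder type):
    //
    //   var queue = fixture.CreateMessageQueue<MyMessage>();
    //   await queue.EnqueueAsync(new MyMessage());
    //   var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });
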
    /// <summary>
    /// Restarts the container.
    /// </summary>
    public async Task RestartAsync()
    {
        if (_container is null)
        {
            throw new InvalidOperationException("Valkey container is not running.");
        }

        await _container.StopAsync();
        await _container.StartAsync();
    }

    /// <inheritdoc />
    public override async Task InitializeAsync()
    {
        try
        {
            _container = new RedisBuilder()
                .WithImage("valkey/valkey:8-alpine")
                .WithPortBinding(6379, true)
                .Build();

            await _container.StartAsync();
        }
        catch (Exception ex)
        {
            try
            {
                if (_container is not null)
                {
                    await _container.DisposeAsync();
                }
            }
            catch
            {
                // Ignore cleanup failures during skip.
            }

            _container = null;

            throw SkipException.ForSkip(
                $"Valkey integration tests require Docker/Testcontainers. Skipping because the container failed to start: {ex.Message}");
        }
    }

    /// <inheritdoc />
    public override async Task DisposeAsync()
    {
        await DisposeAsyncCore();
    }

    async ValueTask IAsyncDisposable.DisposeAsync()
    {
        await DisposeAsyncCore();
        GC.SuppressFinalize(this);
    }

    private async Task DisposeAsyncCore()
    {
        if (_disposed) return;
        _disposed = true;

        if (_container is not null)
        {
            await _container.StopAsync();
            await _container.DisposeAsync();
        }
    }
}

/// <summary>
/// Collection definition for Valkey integration tests.
/// All tests in this collection share a single Valkey container.
/// </summary>
[CollectionDefinition(Name)]
public sealed class ValkeyIntegrationTestCollection : ICollectionFixture<ValkeyContainerFixture>
{
    public const string Name = "Valkey Integration Tests";
}
@@ -0,0 +1,46 @@
// -----------------------------------------------------------------------------
// ValkeyIntegrationFactAttribute.cs
// Sprint: SPRINT_5100_0010_0003 - Router + Messaging Test Implementation
// Task: MESSAGING-5100-004 - Valkey transport compliance tests
// Description: Attribute that skips Valkey integration tests when Docker is not available
// -----------------------------------------------------------------------------

using Xunit;

namespace StellaOps.Messaging.Transport.Valkey.Tests.Fixtures;

/// <summary>
/// Fact attribute for Valkey integration tests.
/// Skips tests when the STELLAOPS_TEST_VALKEY environment variable is not set.
/// </summary>
[AttributeUsage(AttributeTargets.Method)]
public sealed class ValkeyIntegrationFactAttribute : FactAttribute
{
    public ValkeyIntegrationFactAttribute()
    {
        var enabled = Environment.GetEnvironmentVariable("STELLAOPS_TEST_VALKEY");
        if (!string.Equals(enabled, "1", StringComparison.OrdinalIgnoreCase) &&
            !string.Equals(enabled, "true", StringComparison.OrdinalIgnoreCase))
        {
            Skip = "Valkey integration tests are opt-in. Set STELLAOPS_TEST_VALKEY=1 (requires Docker/Testcontainers).";
        }
    }
}
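
// To run the gated tests locally (shell invocation shown as a comment; the exact
// test filter is an example, not a project convention):
//
//   STELLAOPS_TEST_VALKEY=1 dotnet test --filter "FullyQualifiedName~Valkey"
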
/// <summary>
/// Theory attribute for Valkey integration tests.
/// Skips tests when the STELLAOPS_TEST_VALKEY environment variable is not set.
/// </summary>
[AttributeUsage(AttributeTargets.Method)]
public sealed class ValkeyIntegrationTheoryAttribute : TheoryAttribute
{
    public ValkeyIntegrationTheoryAttribute()
    {
        var enabled = Environment.GetEnvironmentVariable("STELLAOPS_TEST_VALKEY");
        if (!string.Equals(enabled, "1", StringComparison.OrdinalIgnoreCase) &&
            !string.Equals(enabled, "true", StringComparison.OrdinalIgnoreCase))
        {
            Skip = "Valkey integration tests are opt-in. Set STELLAOPS_TEST_VALKEY=1 (requires Docker/Testcontainers).";
        }
    }
}
@@ -0,0 +1,41 @@
<Project Sdk="Microsoft.NET.Sdk">

  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <LangVersion>preview</LangVersion>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
    <TreatWarningsAsErrors>false</TreatWarningsAsErrors>
    <!-- Suppress CA2255 from OpenSSL auto-init shim included via Directory.Build.props -->
    <NoWarn>$(NoWarn);CA2255</NoWarn>
    <IsPackable>false</IsPackable>
    <RootNamespace>StellaOps.Messaging.Transport.Valkey.Tests</RootNamespace>
    <!-- Disable Concelier test infrastructure since it is not needed for Messaging tests -->
    <UseConcelierTestInfra>false</UseConcelierTestInfra>
  </PropertyGroup>

  <ItemGroup>
    <Using Include="Xunit" />
    <Using Include="FluentAssertions" />
  </ItemGroup>

  <ItemGroup>
    <PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.0" />
    <PackageReference Include="xunit" Version="2.9.2" />
    <PackageReference Include="xunit.runner.visualstudio" Version="2.8.2">
      <PrivateAssets>all</PrivateAssets>
      <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
    </PackageReference>
    <PackageReference Include="FluentAssertions" Version="6.12.0" />
    <PackageReference Include="Moq" Version="4.20.70" />
    <PackageReference Include="Testcontainers.Redis" Version="3.9.0" />
    <PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" />
    <PackageReference Include="Microsoft.Extensions.Options" Version="10.0.0" />
  </ItemGroup>

  <ItemGroup>
    <ProjectReference Include="..\..\StellaOps.Messaging.Transport.Valkey\StellaOps.Messaging.Transport.Valkey.csproj" />
    <ProjectReference Include="..\StellaOps.Router.Testing\StellaOps.Router.Testing.csproj" />
  </ItemGroup>

</Project>
@@ -0,0 +1,723 @@
// -----------------------------------------------------------------------------
// ValkeyTransportComplianceTests.cs
// Sprint: SPRINT_5100_0010_0003 - Router + Messaging Test Implementation
// Task: MESSAGING-5100-004 - Valkey transport compliance tests
// Description: Transport compliance tests for Valkey transport covering roundtrip,
//   pub/sub semantics, consumer groups, ack/nack, and backpressure.
// -----------------------------------------------------------------------------

using System.Text.Json;
using FluentAssertions;
using StellaOps.Messaging;
using StellaOps.Messaging.Abstractions;
using StellaOps.Messaging.Transport.Valkey.Tests.Fixtures;
using Xunit;
using Xunit.Abstractions;

namespace StellaOps.Messaging.Transport.Valkey.Tests;

/// <summary>
/// Transport compliance tests for Valkey transport.
/// Validates:
/// - Message roundtrip (enqueue → lease → message preserved)
/// - Consumer group semantics (exclusive delivery, multiple consumers)
/// - Ack/Nack behavior (acknowledge, release, dead-letter)
/// - Idempotency (duplicate detection)
/// - Backpressure (batch limits, pending counts)
/// - Lease management (renewal, expiration, claiming)
/// </summary>
[Collection(ValkeyIntegrationTestCollection.Name)]
public sealed class ValkeyTransportComplianceTests : IAsyncLifetime
{
    private readonly ValkeyContainerFixture _fixture;
    private readonly ITestOutputHelper _output;
    private ValkeyConnectionFactory? _connectionFactory;

    public ValkeyTransportComplianceTests(ValkeyContainerFixture fixture, ITestOutputHelper output)
    {
        _fixture = fixture;
        _output = output;
    }

    public Task InitializeAsync()
    {
        _connectionFactory = _fixture.CreateConnectionFactory();
        return Task.CompletedTask;
    }

    public async Task DisposeAsync()
    {
        if (_connectionFactory is not null)
        {
            await _connectionFactory.DisposeAsync();
        }
    }
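
    // Orientation note: on a Redis-compatible backend the queue operations under
    // test are assumed to map roughly to stream primitives (enqueue → XADD,
    // lease → XREADGROUP, acknowledge → XACK, claim expired → XAUTOCLAIM). The
    // tests below assert only the transport contract, not these specific commands.
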
    #region Message Roundtrip Tests

    [ValkeyIntegrationFact]
    public async Task Roundtrip_SimpleMessage_AllFieldsPreserved()
    {
        // Arrange
        var queue = CreateQueue<TestMessage>();
        var original = new TestMessage
        {
            Id = Guid.NewGuid(),
            Content = "Hello Valkey!",
            Timestamp = DateTimeOffset.UtcNow,
            Tags = new[] { "tag1", "tag2" }
        };

        // Act
        var enqueueResult = await queue.EnqueueAsync(original);
        enqueueResult.Success.Should().BeTrue();
        enqueueResult.MessageId.Should().NotBeNullOrEmpty();

        var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });

        // Assert
        leases.Should().HaveCount(1);
        var lease = leases[0];
        lease.Message.Id.Should().Be(original.Id);
        lease.Message.Content.Should().Be(original.Content);
        lease.Message.Tags.Should().BeEquivalentTo(original.Tags);
        lease.Attempt.Should().Be(1);

        await lease.AcknowledgeAsync();
        _output.WriteLine("Roundtrip test passed");
    }

    [ValkeyIntegrationFact]
    public async Task Roundtrip_ComplexMessage_PreservedAfterSerialization()
    {
        // Arrange
        var queue = CreateQueue<ComplexMessage>();
        var original = new ComplexMessage
        {
            Id = Guid.NewGuid(),
            Metadata = new Dictionary<string, object>
            {
                ["key1"] = "value1",
                ["key2"] = 42,
                ["key3"] = true
            },
            NestedData = new NestedObject
            {
                Name = "nested",
                Value = 123.45m
            }
        };

        // Act
        await queue.EnqueueAsync(original);
        var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });

        // Assert
        var lease = leases[0];
        lease.Message.Id.Should().Be(original.Id);
        lease.Message.NestedData.Name.Should().Be(original.NestedData.Name);
        lease.Message.NestedData.Value.Should().Be(original.NestedData.Value);

        await lease.AcknowledgeAsync();
        _output.WriteLine("Complex message roundtrip test passed");
    }

    [ValkeyIntegrationFact]
    public async Task Roundtrip_BinaryData_PreservesAllBytes()
    {
        // Arrange
        var queue = CreateQueue<BinaryMessage>();
        var binaryPayload = Enumerable.Range(0, 256).Select(i => (byte)i).ToArray();
        var original = new BinaryMessage
        {
            Id = Guid.NewGuid(),
            Data = binaryPayload
        };

        // Act
        await queue.EnqueueAsync(original);
        var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });

        // Assert
        leases[0].Message.Data.Should().BeEquivalentTo(binaryPayload);

        await leases[0].AcknowledgeAsync();
        _output.WriteLine("Binary data roundtrip test passed");
    }

    [ValkeyIntegrationTheory]
    [InlineData(1)]
    [InlineData(10)]
    [InlineData(100)]
    [InlineData(1000)]
    public async Task Roundtrip_MultipleMessages_OrderPreserved(int messageCount)
    {
        // Arrange
        var queue = CreateQueue<TestMessage>();
        var messages = Enumerable.Range(1, messageCount)
            .Select(i => new TestMessage
            {
                Id = Guid.NewGuid(),
                Content = $"Message-{i:D5}",
                Timestamp = DateTimeOffset.UtcNow.AddMilliseconds(i)
            })
            .ToList();

        // Act - Enqueue all
        foreach (var msg in messages)
        {
            await queue.EnqueueAsync(msg);
        }

        // Lease and verify order
        var receivedContents = new List<string>();
        int remaining = messageCount;
        while (remaining > 0)
        {
            var batchSize = Math.Min(remaining, 50);
            var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = batchSize });

            foreach (var lease in leases)
            {
                receivedContents.Add(lease.Message.Content!);
                await lease.AcknowledgeAsync();
            }

            remaining -= leases.Count;
        }

        // Assert - FIFO order preserved
        var expectedContents = messages.Select(m => m.Content).ToList();
        receivedContents.Should().BeEquivalentTo(expectedContents, options => options.WithStrictOrdering());
        _output.WriteLine($"Order preserved for {messageCount} messages");
    }

    #endregion

    #region Consumer Group Semantics Tests

    [ValkeyIntegrationFact]
    public async Task ConsumerGroup_MultipleConsumers_ExclusiveDelivery()
    {
        // Arrange - Two consumers in the same group
        var queueOptions = _fixture.CreateQueueOptions();
        var queue1 = CreateQueue<TestMessage>(queueOptions: queueOptions, consumerName: "consumer-1");
        var queue2 = CreateQueue<TestMessage>(queueOptions: queueOptions, consumerName: "consumer-2");

        var messages = Enumerable.Range(1, 20)
            .Select(i => new TestMessage { Id = Guid.NewGuid(), Content = $"Msg-{i}" })
            .ToList();

        foreach (var msg in messages)
        {
            await queue1.EnqueueAsync(msg);
        }

        // Act - Both consumers lease
        var leases1 = await queue1.LeaseAsync(new LeaseRequest { BatchSize = 10 });
        var leases2 = await queue2.LeaseAsync(new LeaseRequest { BatchSize = 10 });

        // Assert - Messages should be distributed (no duplicates)
        var allIds = leases1.Concat(leases2).Select(l => l.Message.Id).ToList();
        allIds.Should().OnlyHaveUniqueItems("each message should be delivered to only one consumer");
        allIds.Should().HaveCount(20, "all messages should be delivered");

        // Cleanup
        foreach (var lease in leases1.Concat(leases2))
        {
            await lease.AcknowledgeAsync();
        }

        _output.WriteLine("Exclusive delivery test passed");
    }

    [ValkeyIntegrationFact]
    public async Task ConsumerGroup_DifferentGroups_EachReceivesAllMessages()
    {
        // Arrange - Two different consumer groups over the same stream
        var queueName = $"test:queue:{Guid.NewGuid():N}";
        var options1 = _fixture.CreateQueueOptions(queueName: queueName, consumerGroup: "group-1");
        var options2 = _fixture.CreateQueueOptions(queueName: queueName, consumerGroup: "group-2");

        var queue1 = CreateQueue<TestMessage>(queueOptions: options1);
        var queue2 = CreateQueue<TestMessage>(queueOptions: options2);

        var message = new TestMessage { Id = Guid.NewGuid(), Content = "Shared message" };

        // Act - Enqueue through one queue (same underlying stream)
        await queue1.EnqueueAsync(message);

        // Both groups should receive the message
        var leases1 = await queue1.LeaseAsync(new LeaseRequest { BatchSize = 1 });
        var leases2 = await queue2.LeaseAsync(new LeaseRequest { BatchSize = 1 });

        // Assert
        leases1.Should().HaveCount(1);
        leases2.Should().HaveCount(1);
        leases1[0].Message.Id.Should().Be(message.Id);
        leases2[0].Message.Id.Should().Be(message.Id);

        await leases1[0].AcknowledgeAsync();
        await leases2[0].AcknowledgeAsync();

        _output.WriteLine("Different groups test passed");
    }

    #endregion

    #region Ack/Nack/Release Semantics Tests

    [ValkeyIntegrationFact]
    public async Task Acknowledge_RemovesMessageFromQueue()
    {
        // Arrange
        var queue = CreateQueue<TestMessage>();
        await queue.EnqueueAsync(new TestMessage { Id = Guid.NewGuid(), Content = "Ack test" });

        // Act
        var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });
        await leases[0].AcknowledgeAsync();

        // Assert - No more messages
        var pending = await queue.GetPendingCountAsync();
        pending.Should().Be(0);

        var moreLeases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });
        moreLeases.Should().BeEmpty();

        _output.WriteLine("Acknowledge removes message test passed");
    }

    [ValkeyIntegrationFact]
    public async Task Release_Retry_MessageBecomesAvailableAgain()
    {
        // Arrange
        var queueOptions = _fixture.CreateQueueOptions();
        queueOptions.RetryInitialBackoff = TimeSpan.Zero; // No backoff for test speed
        var queue = CreateQueue<TestMessage>(queueOptions: queueOptions);
        var message = new TestMessage { Id = Guid.NewGuid(), Content = "Retry test" };
        await queue.EnqueueAsync(message);

        // Act - Lease and release for retry
        var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });
        leases.Should().HaveCount(1);
        leases[0].Attempt.Should().Be(1);
        await leases[0].ReleaseAsync(ReleaseDisposition.Retry);

        // Wait briefly for re-enqueue
        await Task.Delay(100);

        // Lease again
        var retryLeases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });

        // Assert
        retryLeases.Should().HaveCount(1);
        retryLeases[0].Message.Id.Should().Be(message.Id);
        retryLeases[0].Attempt.Should().Be(2);

        await retryLeases[0].AcknowledgeAsync();
        _output.WriteLine("Release retry test passed");
    }

    [ValkeyIntegrationFact]
    public async Task DeadLetter_MovesMessageToDeadLetterQueue()
    {
        // Arrange
        var mainQueueName = $"test:main:{Guid.NewGuid():N}";
        var dlqName = $"test:dlq:{Guid.NewGuid():N}";

        var mainOptions = _fixture.CreateQueueOptions(queueName: mainQueueName);
        mainOptions.DeadLetterQueue = dlqName;

        var dlqOptions = _fixture.CreateQueueOptions(queueName: dlqName);

        var mainQueue = CreateQueue<TestMessage>(queueOptions: mainOptions);
        var dlqQueue = CreateQueue<TestMessage>(queueOptions: dlqOptions);

        var message = new TestMessage { Id = Guid.NewGuid(), Content = "DLQ test" };
        await mainQueue.EnqueueAsync(message);

        // Act
        var leases = await mainQueue.LeaseAsync(new LeaseRequest { BatchSize = 1 });
        await leases[0].DeadLetterAsync("test-reason");

        // Assert - Message should be in the DLQ
        var dlqLeases = await dlqQueue.LeaseAsync(new LeaseRequest { BatchSize = 1 });
        dlqLeases.Should().HaveCount(1);
        dlqLeases[0].Message.Id.Should().Be(message.Id);

        // Main queue should be empty
        var mainPending = await mainQueue.GetPendingCountAsync();
        mainPending.Should().Be(0);

        await dlqLeases[0].AcknowledgeAsync();
        _output.WriteLine("Dead letter test passed");
    }

    [ValkeyIntegrationFact]
    public async Task MaxDeliveryAttempts_ExceededCausesDeadLetter()
    {
        // Arrange
        var mainQueueName = $"test:main:{Guid.NewGuid():N}";
        var dlqName = $"test:dlq:{Guid.NewGuid():N}";

        var mainOptions = _fixture.CreateQueueOptions(queueName: mainQueueName);
        mainOptions.MaxDeliveryAttempts = 3;
        mainOptions.DeadLetterQueue = dlqName;
        mainOptions.RetryInitialBackoff = TimeSpan.Zero;

        var dlqOptions = _fixture.CreateQueueOptions(queueName: dlqName);

        var mainQueue = CreateQueue<TestMessage>(queueOptions: mainOptions);
        var dlqQueue = CreateQueue<TestMessage>(queueOptions: dlqOptions);

        var message = new TestMessage { Id = Guid.NewGuid(), Content = "Max attempts test" };
        await mainQueue.EnqueueAsync(message);

        // Act - Retry until max attempts exceeded
        for (int i = 0; i < 3; i++)
        {
            var leases = await mainQueue.LeaseAsync(new LeaseRequest { BatchSize = 1 });
            if (leases.Count == 0) break;
            await leases[0].ReleaseAsync(ReleaseDisposition.Retry);
            await Task.Delay(50);
        }

        // Wait for the final retry to dead-letter
        await Task.Delay(200);

        // Assert - Message should be in the DLQ
        var dlqLeases = await dlqQueue.LeaseAsync(new LeaseRequest { BatchSize = 1 });
        dlqLeases.Should().HaveCount(1);
        dlqLeases[0].Message.Id.Should().Be(message.Id);

        await dlqLeases[0].AcknowledgeAsync();
        _output.WriteLine("Max delivery attempts test passed");
    }

    #endregion

    #region Idempotency Tests

    [ValkeyIntegrationFact]
    public async Task Idempotency_DuplicateKey_ReturnsExistingMessage()
    {
        // Arrange
        var queue = CreateQueue<TestMessage>();
        var idempotencyKey = Guid.NewGuid().ToString();
        var message = new TestMessage { Id = Guid.NewGuid(), Content = "Idempotent message" };

        // Act - Enqueue twice with the same key
        var result1 = await queue.EnqueueAsync(message, EnqueueOptions.WithIdempotencyKey(idempotencyKey));
        var result2 = await queue.EnqueueAsync(message, EnqueueOptions.WithIdempotencyKey(idempotencyKey));

        // Assert
        result1.Success.Should().BeTrue();
        result1.WasDuplicate.Should().BeFalse();

        result2.Success.Should().BeTrue();
        result2.WasDuplicate.Should().BeTrue();
        result2.MessageId.Should().Be(result1.MessageId);

        // Only one message should be in the queue
        var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 10 });
        leases.Should().HaveCount(1);

        await leases[0].AcknowledgeAsync();
        _output.WriteLine("Idempotency test passed");
    }
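
    // Producer-side idempotency is assumed to work by remembering key → message id
    // for the configured window, so a duplicate enqueue returns the original
    // MessageId with WasDuplicate = true instead of appending a second stream entry.
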
[ValkeyIntegrationFact]
|
||||
public async Task Idempotency_DifferentKeys_BothMessagesEnqueued()
|
||||
{
|
||||
// Arrange
|
||||
var queue = CreateQueue<TestMessage>();
|
||||
var message1 = new TestMessage { Id = Guid.NewGuid(), Content = "Message 1" };
|
||||
var message2 = new TestMessage { Id = Guid.NewGuid(), Content = "Message 2" };
|
||||
|
||||
// Act
|
||||
await queue.EnqueueAsync(message1, EnqueueOptions.WithIdempotencyKey("key-1"));
|
||||
await queue.EnqueueAsync(message2, EnqueueOptions.WithIdempotencyKey("key-2"));
|
||||
|
||||
// Assert
|
||||
var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 10 });
|
||||
leases.Should().HaveCount(2);
|
||||
|
||||
foreach (var lease in leases)
|
||||
{
|
||||
await lease.AcknowledgeAsync();
|
||||
}
|
||||
|
||||
_output.WriteLine("Different idempotency keys test passed");
|
||||
}
|
||||
|
||||
#endregion
|
||||
|
||||
#region Backpressure Tests
|
||||
|
||||
[ValkeyIntegrationFact]
|
||||
public async Task Backpressure_BatchSize_LimitsMessageCount()
|
||||
{
|
||||
// Arrange
|
||||
var queue = CreateQueue<TestMessage>();
|
||||
for (int i = 0; i < 100; i++)
|
||||
{
|
||||
await queue.EnqueueAsync(new TestMessage { Id = Guid.NewGuid(), Content = $"Msg-{i}" });
|
||||
}
|
||||
|
||||
// Act - Request only 10
|
||||
var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 10 });
|
||||
|
||||
// Assert
|
||||
leases.Should().HaveCount(10);
|
||||
|
||||
// Cleanup
|
||||
foreach (var lease in leases)
|
||||
{
|
||||
await lease.AcknowledgeAsync();
|
||||
}
|
||||
|
||||
// Remaining messages
|
||||
var pending = await queue.GetPendingCountAsync();
|
||||
pending.Should().Be(0); // Not pending because not leased yet
|
||||

_output.WriteLine("Batch size backpressure test passed");
}

[ValkeyIntegrationFact]
public async Task Backpressure_PendingCount_ReflectsUnacknowledged()
{
// Arrange
var queue = CreateQueue<TestMessage>();
for (int i = 0; i < 50; i++)
{
await queue.EnqueueAsync(new TestMessage { Id = Guid.NewGuid(), Content = $"Msg-{i}" });
}

// Act - Lease 30, ack 10
var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 30 });
leases.Should().HaveCount(30);

for (int i = 0; i < 10; i++)
{
await leases[i].AcknowledgeAsync();
}

// Assert - 20 still pending
var pending = await queue.GetPendingCountAsync();
pending.Should().Be(20);

// Cleanup
for (int i = 10; i < 30; i++)
{
await leases[i].AcknowledgeAsync();
}

_output.WriteLine("Pending count test passed");
}

[ValkeyIntegrationFact]
public async Task Backpressure_EmptyQueue_ReturnsEmpty()
{
// Arrange
var queue = CreateQueue<TestMessage>();

// Act
var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 10 });

// Assert
leases.Should().BeEmpty();
_output.WriteLine("Empty queue test passed");
}

#endregion

#region Lease Management Tests

[ValkeyIntegrationFact]
public async Task LeaseRenewal_ExtendsLeaseTime()
{
// Arrange
var queue = CreateQueue<TestMessage>();
await queue.EnqueueAsync(new TestMessage { Id = Guid.NewGuid(), Content = "Renewal test" });

// Act
var leases = await queue.LeaseAsync(new LeaseRequest
{
BatchSize = 1,
LeaseDuration = TimeSpan.FromSeconds(30)
});

var originalExpiry = leases[0].LeaseExpiresAt;

await leases[0].RenewAsync(TimeSpan.FromMinutes(5));

// Assert - Lease should be extended
leases[0].LeaseExpiresAt.Should().BeAfter(originalExpiry);

await leases[0].AcknowledgeAsync();
_output.WriteLine("Lease renewal test passed");
}

[ValkeyIntegrationFact]
public async Task ClaimExpired_RecoversStaleMessages()
{
// Arrange
var queueOptions = _fixture.CreateQueueOptions();
queueOptions.DefaultLeaseDuration = TimeSpan.FromMilliseconds(100);

var queue = CreateQueue<TestMessage>(queueOptions: queueOptions);
await queue.EnqueueAsync(new TestMessage { Id = Guid.NewGuid(), Content = "Stale test" });

// Lease and let expire
var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });
leases.Should().HaveCount(1);

// Wait for lease to expire
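// 500 ms is five times the 100 ms lease, so the lease is reliably expired before claiming; shorter delays risk flakiness on slow CI runners.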
await Task.Delay(500);

// Act - Claim expired
var claimed = await queue.ClaimExpiredAsync(new ClaimRequest
{
BatchSize = 10,
MinIdleTime = TimeSpan.FromMilliseconds(100),
MinDeliveryAttempts = 1
});

// Assert
claimed.Should().HaveCount(1);
claimed[0].Message.Content.Should().Be("Stale test");
claimed[0].Attempt.Should().BeGreaterThan(1);

await claimed[0].AcknowledgeAsync();
_output.WriteLine("Claim expired test passed");
}

#endregion

#region Metadata/Headers Tests

[ValkeyIntegrationFact]
public async Task Metadata_CorrelationId_PreservedInLease()
{
// Arrange
var queue = CreateQueue<TestMessage>();
var correlationId = Guid.NewGuid().ToString();
var message = new TestMessage { Id = Guid.NewGuid(), Content = "Correlation test" };

// Act
await queue.EnqueueAsync(message, EnqueueOptions.WithCorrelation(correlationId));
var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });

// Assert
leases[0].CorrelationId.Should().Be(correlationId);

await leases[0].AcknowledgeAsync();
_output.WriteLine("Correlation ID test passed");
}

[ValkeyIntegrationFact]
public async Task Metadata_TenantId_PreservedInLease()
{
// Arrange
var queue = CreateQueue<TestMessage>();
var tenantId = "tenant-123";
var message = new TestMessage { Id = Guid.NewGuid(), Content = "Tenant test" };

// Act
await queue.EnqueueAsync(message, new EnqueueOptions { TenantId = tenantId });
var leases = await queue.LeaseAsync(new LeaseRequest { BatchSize = 1 });

// Assert
leases[0].TenantId.Should().Be(tenantId);

await leases[0].AcknowledgeAsync();
_output.WriteLine("Tenant ID test passed");
}

#endregion

#region Connection Resilience Tests

[ValkeyIntegrationFact]
public async Task ConnectionResilience_Ping_Succeeds()
{
// Arrange & Act
var act = async () => await _connectionFactory!.PingAsync();

// Assert
await act.Should().NotThrowAsync();
_output.WriteLine("Ping test passed");
}

[ValkeyIntegrationFact]
public async Task ConnectionResilience_QueueProviderName_IsValkey()
{
// Arrange
var queue = CreateQueue<TestMessage>();

// Assert
queue.ProviderName.Should().Be("valkey");
queue.QueueName.Should().NotBeNullOrEmpty();

_output.WriteLine("Provider name test passed");
await Task.CompletedTask;
}

#endregion

#region Helpers

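// Builds a queue bound to the shared test connection; each test pulls fresh options from the fixture, which is assumed to keep queue names from colliding across tests.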
private ValkeyMessageQueue<TMessage> CreateQueue<TMessage>(
MessageQueueOptions? queueOptions = null,
string? consumerName = null)
where TMessage : class
{
queueOptions ??= _fixture.CreateQueueOptions();
if (consumerName is not null)
{
queueOptions.ConsumerName = consumerName;
}

var transportOptions = _fixture.CreateOptions();

return new ValkeyMessageQueue<TMessage>(
_connectionFactory!,
queueOptions,
transportOptions,
_fixture.GetLogger<ValkeyMessageQueue<TMessage>>());
}

#endregion

#region Test Message Types

public sealed class TestMessage
{
public Guid Id { get; set; }
public string? Content { get; set; }
public DateTimeOffset Timestamp { get; set; }
public string[]? Tags { get; set; }
}

public sealed class ComplexMessage
{
public Guid Id { get; set; }
public Dictionary<string, object>? Metadata { get; set; }
public NestedObject? NestedData { get; set; }
}

public sealed class NestedObject
{
public string? Name { get; set; }
public decimal Value { get; set; }
}

public sealed class BinaryMessage
{
public Guid Id { get; set; }
public byte[]? Data { get; set; }
}

#endregion
}
@@ -0,0 +1,319 @@
using FluentAssertions;
using Xunit;

namespace StellaOps.Provcache.Tests;

/// <summary>
/// Determinism tests for DecisionDigestBuilder.
/// Verifies that same inputs always produce the same DecisionDigest.
/// </summary>
public class DecisionDigestBuilderDeterminismTests
{
private readonly ProvcacheOptions _options = new()
{
DigestVersion = "v1",
DefaultTtl = TimeSpan.FromHours(24)
};

private readonly FakeTimeProvider _timeProvider;

public DecisionDigestBuilderDeterminismTests()
{
_timeProvider = new FakeTimeProvider(new DateTimeOffset(2024, 12, 24, 12, 0, 0, TimeSpan.Zero));
}

[Fact]
public void Build_SameInputs_ProducesSameDigest()
{
// Arrange
var dispositions = new Dictionary<string, string>
{
["CVE-2024-001"] = "fixed",
["CVE-2024-002"] = "affected",
["CVE-2024-003"] = "not_affected"
};

var evidenceChunks = new List<string>
{
"sha256:a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2",
"sha256:b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3",
"sha256:c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4"
};

// Act
var digest1 = CreateBuilder()
.WithDispositions(dispositions)
.WithEvidenceChunks(evidenceChunks)
.Build();

var digest2 = CreateBuilder()
.WithDispositions(dispositions)
.WithEvidenceChunks(evidenceChunks)
.Build();

// Assert
digest1.VerdictHash.Should().Be(digest2.VerdictHash);
digest1.ProofRoot.Should().Be(digest2.ProofRoot);
digest1.TrustScore.Should().Be(digest2.TrustScore);
}

[Fact]
public void Build_DispositionsInDifferentOrder_ProducesSameVerdictHash()
{
// Arrange - Same dispositions, different insertion order
var dispositions1 = new Dictionary<string, string>
{
["CVE-2024-001"] = "fixed",
["CVE-2024-002"] = "affected",
["CVE-2024-003"] = "not_affected"
};

var dispositions2 = new Dictionary<string, string>
{
["CVE-2024-003"] = "not_affected",
["CVE-2024-001"] = "fixed",
["CVE-2024-002"] = "affected"
};

// Act
var digest1 = CreateBuilder().WithDispositions(dispositions1).Build();
var digest2 = CreateBuilder().WithDispositions(dispositions2).Build();

// Assert - Should be same because dispositions are sorted by key
digest1.VerdictHash.Should().Be(digest2.VerdictHash);
}

[Fact]
public void Build_DifferentDispositions_ProducesDifferentVerdictHash()
{
// Arrange
var dispositions1 = new Dictionary<string, string> { ["CVE-2024-001"] = "fixed" };
var dispositions2 = new Dictionary<string, string> { ["CVE-2024-001"] = "affected" };

// Act
var digest1 = CreateBuilder().WithDispositions(dispositions1).Build();
var digest2 = CreateBuilder().WithDispositions(dispositions2).Build();

// Assert
digest1.VerdictHash.Should().NotBe(digest2.VerdictHash);
}

[Fact]
public void Build_SameEvidenceChunks_ProducesSameMerkleRoot()
{
// Arrange - valid SHA256 hex hashes (64 characters each)
var chunks = new List<string>
{
"sha256:a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1",
"sha256:b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2",
"sha256:c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3",
"sha256:d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4"
};

// Act
var digest1 = CreateBuilder().WithEvidenceChunks(chunks).Build();
var digest2 = CreateBuilder().WithEvidenceChunks(chunks).Build();

// Assert
digest1.ProofRoot.Should().Be(digest2.ProofRoot);
}

[Fact]
public void Build_DifferentEvidenceChunkOrder_ProducesDifferentMerkleRoot()
{
// Arrange - Merkle tree is order-sensitive (valid SHA256 hex hashes)
var chunks1 = new List<string>
{
"sha256:aaaa1111aaaa1111aaaa1111aaaa1111aaaa1111aaaa1111aaaa1111aaaa1111",
"sha256:bbbb2222bbbb2222bbbb2222bbbb2222bbbb2222bbbb2222bbbb2222bbbb2222"
};
var chunks2 = new List<string>
{
"sha256:bbbb2222bbbb2222bbbb2222bbbb2222bbbb2222bbbb2222bbbb2222bbbb2222",
"sha256:aaaa1111aaaa1111aaaa1111aaaa1111aaaa1111aaaa1111aaaa1111aaaa1111"
};

// Act
var digest1 = CreateBuilder().WithEvidenceChunks(chunks1).Build();
var digest2 = CreateBuilder().WithEvidenceChunks(chunks2).Build();

// Assert - Merkle tree preserves order, so roots should differ
digest1.ProofRoot.Should().NotBe(digest2.ProofRoot);
}

[Fact]
public void WithTrustScore_ComponentWeights_ProducesConsistentScore()
{
// Arrange - Using weighted formula: 25% reach + 20% sbom + 20% vex + 15% policy + 20% signer
// 100 * 0.25 + 100 * 0.20 + 100 * 0.20 + 100 * 0.15 + 100 * 0.20 = 100

// Act
var digest = CreateBuilder()
.WithTrustScore(
reachabilityScore: 100,
sbomCompletenessScore: 100,
vexCoverageScore: 100,
policyFreshnessScore: 100,
signerTrustScore: 100)
.Build();

// Assert
digest.TrustScore.Should().Be(100);
}

[Fact]
public void WithTrustScore_MixedScores_CalculatesCorrectWeight()
{
// Arrange - 80 * 0.25 + 60 * 0.20 + 70 * 0.20 + 50 * 0.15 + 90 * 0.20
// = 20 + 12 + 14 + 7.5 + 18 = 71.5 → 72

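// The 71.5 → 72 step assumes half-up (away-from-zero) rounding; banker's rounding happens to agree here because 72 is the nearest even value.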
// Act
var digest = CreateBuilder()
.WithTrustScore(
reachabilityScore: 80,
sbomCompletenessScore: 60,
vexCoverageScore: 70,
policyFreshnessScore: 50,
signerTrustScore: 90)
.Build();

// Assert
digest.TrustScore.Should().Be(72);
}

[Fact]
public void WithDefaultTimestamps_UsesFrozenTime()
{
// Arrange
var frozenTime = new DateTimeOffset(2024, 12, 24, 12, 0, 0, TimeSpan.Zero);
var timeProvider = new FakeTimeProvider(frozenTime);
var builder = new DecisionDigestBuilder(_options, timeProvider);

// Act
var digest = builder
.WithVeriKey("sha256:verikey")
.WithVerdictHash("sha256:verdict")
.WithProofRoot("sha256:proof")
.WithReplaySeed(["feed1"], ["rule1"])
.WithTrustScore(85)
.WithDefaultTimestamps()
.Build();

// Assert
digest.CreatedAt.Should().Be(frozenTime);
digest.ExpiresAt.Should().Be(frozenTime.Add(_options.DefaultTtl));
}

[Fact]
public void Build_MultipleTimes_ReturnsConsistentDigest()
{
// Arrange
var dispositions = new Dictionary<string, string> { ["CVE-1"] = "fixed" };
var builder = CreateBuilder().WithDispositions(dispositions);

// Act - Build multiple times
var digests = Enumerable.Range(0, 100)
.Select(_ => builder.Build())
.Select(d => (d.VerdictHash, d.ProofRoot))
.Distinct()
.ToList();

// Assert - All should be identical
digests.Should().HaveCount(1);
}

[Fact]
public void Build_EmptyDispositions_ProducesConsistentHash()
{
// Arrange
var empty1 = new Dictionary<string, string>();
var empty2 = new Dictionary<string, string>();

// Act
var digest1 = CreateBuilder().WithDispositions(empty1).Build();
var digest2 = CreateBuilder().WithDispositions(empty2).Build();

// Assert
digest1.VerdictHash.Should().Be(digest2.VerdictHash);
digest1.VerdictHash.Should().StartWith("sha256:");
}

[Fact]
public void Build_EmptyEvidenceChunks_ProducesConsistentHash()
{
// Arrange
var empty1 = new List<string>();
var empty2 = Array.Empty<string>();

// Act
var digest1 = CreateBuilder().WithEvidenceChunks(empty1).Build();
var digest2 = CreateBuilder().WithEvidenceChunks(empty2).Build();

// Assert
digest1.ProofRoot.Should().Be(digest2.ProofRoot);
digest1.ProofRoot.Should().StartWith("sha256:");
}

[Fact]
public void Build_ReplaySeedPreservedCorrectly()
{
// Arrange
var feedIds = new[] { "cve-2024", "ghsa-2024" };
var ruleIds = new[] { "policy-v1", "exceptions" };
var frozenEpoch = new DateTimeOffset(2024, 12, 24, 0, 0, 0, TimeSpan.Zero);

// Act
var digest = CreateBuilder()
.WithReplaySeed(feedIds, ruleIds, frozenEpoch)
.Build();

// Assert
digest.ReplaySeed.FeedIds.Should().BeEquivalentTo(feedIds);
digest.ReplaySeed.RuleIds.Should().BeEquivalentTo(ruleIds);
digest.ReplaySeed.FrozenEpoch.Should().Be(frozenEpoch);
}

[Fact]
public void Build_MissingComponent_ThrowsInvalidOperationException()
{
// Arrange
var builder = new DecisionDigestBuilder(_options, _timeProvider)
.WithVeriKey("sha256:abc");
// Missing other required components

// Act
var act = () => builder.Build();

// Assert
act.Should().Throw<InvalidOperationException>()
.WithMessage("*missing required components*");
}

private DecisionDigestBuilder CreateBuilder()
{
return new DecisionDigestBuilder(_options, _timeProvider)
.WithVeriKey("sha256:testverikey")
.WithVerdictHash("sha256:defaultverdict")
.WithProofRoot("sha256:defaultproof")
.WithReplaySeed(["feed1"], ["rule1"])
.WithTrustScore(85)
.WithTimestamps(
_timeProvider.GetUtcNow(),
_timeProvider.GetUtcNow().AddHours(24));
}

/// <summary>
/// Fake time provider for deterministic timestamp testing.
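/// Only GetUtcNow is overridden; timer and timestamp APIs fall back to the TimeProvider base implementation.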
/// </summary>
private sealed class FakeTimeProvider : TimeProvider
{
private readonly DateTimeOffset _frozenTime;

public FakeTimeProvider(DateTimeOffset frozenTime)
{
_frozenTime = frozenTime;
}

public override DateTimeOffset GetUtcNow() => _frozenTime;
}
}
@@ -0,0 +1,478 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright (C) 2025 StellaOps Contributors

using FluentAssertions;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.TestHost;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using Moq;
using StellaOps.Provcache.Api;
using System.Net;
using System.Net.Http.Json;
using System.Text.Json;
using Xunit;

namespace StellaOps.Provcache.Tests;

/// <summary>
/// Integration tests for the Provcache API endpoints.
/// </summary>
public sealed class ProvcacheApiTests : IAsyncDisposable
{
private readonly Mock<IProvcacheService> _mockService;
private readonly IHost _host;
private readonly HttpClient _client;

public ProvcacheApiTests()
{
_mockService = new Mock<IProvcacheService>();

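// Host the mapped endpoints on an in-memory TestServer so requests exercise real routing and JSON binding without opening a socket.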
_host = Host.CreateDefaultBuilder()
.ConfigureWebHost(webBuilder =>
{
webBuilder.UseTestServer()
.ConfigureServices(services =>
{
services.AddSingleton(_mockService.Object);
services.AddRouting();
services.AddLogging(b => b.SetMinimumLevel(LogLevel.Warning));
})
.Configure(app =>
{
app.UseRouting();
app.UseEndpoints(endpoints =>
{
endpoints.MapProvcacheEndpoints();
});
});
})
.Build();

_host.Start();
_client = _host.GetTestClient();
}

public async ValueTask DisposeAsync()
{
_client.Dispose();
await _host.StopAsync();
_host.Dispose();
}

#region GET /v1/provcache/{veriKey}

[Fact]
public async Task GetByVeriKey_CacheHit_Returns200WithEntry()
{
// Arrange
const string veriKey = "sha256:abc123abc123abc123abc123abc123abc123abc123abc123abc123abc123abc1";
var entry = CreateTestEntry(veriKey);
var result = ProvcacheServiceResult.Hit(entry, "valkey", 1.5);

_mockService.Setup(s => s.GetAsync(veriKey, false, It.IsAny<CancellationToken>()))
.ReturnsAsync(result);

// Act
var response = await _client.GetAsync($"/v1/provcache/{Uri.EscapeDataString(veriKey)}");

// Assert
response.StatusCode.Should().Be(HttpStatusCode.OK);
var content = await response.Content.ReadFromJsonAsync<ProvcacheGetResponse>();
content.Should().NotBeNull();
content!.VeriKey.Should().Be(veriKey);
content.Status.Should().Be("hit");
content.Source.Should().Be("valkey");
}

[Fact]
public async Task GetByVeriKey_CacheMiss_Returns204()
{
// Arrange
const string veriKey = "sha256:miss123miss123miss123miss123miss123miss123miss123miss123miss123m";
var result = ProvcacheServiceResult.Miss(2.0);

_mockService.Setup(s => s.GetAsync(veriKey, false, It.IsAny<CancellationToken>()))
.ReturnsAsync(result);

// Act
var response = await _client.GetAsync($"/v1/provcache/{Uri.EscapeDataString(veriKey)}");

// Assert
response.StatusCode.Should().Be(HttpStatusCode.NoContent);
}

[Fact]
public async Task GetByVeriKey_Expired_Returns410Gone()
{
// Arrange
const string veriKey = "sha256:exp123exp123exp123exp123exp123exp123exp123exp123exp123exp123exp1";
var entry = CreateTestEntry(veriKey, expired: true);
var result = ProvcacheServiceResult.Expired(entry, 5.0);

_mockService.Setup(s => s.GetAsync(veriKey, false, It.IsAny<CancellationToken>()))
.ReturnsAsync(result);

// Act
var response = await _client.GetAsync($"/v1/provcache/{Uri.EscapeDataString(veriKey)}");

// Assert
response.StatusCode.Should().Be(HttpStatusCode.Gone);
}

[Fact]
public async Task GetByVeriKey_WithBypassCache_PassesFlagToService()
{
// Arrange
const string veriKey = "sha256:bypass123bypass123bypass123bypass123bypass123bypass123bypass1234";
var result = ProvcacheServiceResult.Miss(0.5);

_mockService.Setup(s => s.GetAsync(veriKey, true, It.IsAny<CancellationToken>()))
.ReturnsAsync(result);

// Act
await _client.GetAsync($"/v1/provcache/{Uri.EscapeDataString(veriKey)}?bypassCache=true");

// Assert
_mockService.Verify(s => s.GetAsync(veriKey, true, It.IsAny<CancellationToken>()), Times.Once);
}

#endregion

#region POST /v1/provcache

[Fact]
public async Task CreateOrUpdate_ValidRequest_Returns201Created()
{
// Arrange
const string veriKey = "sha256:new123new123new123new123new123new123new123new123new123new123new1";
var entry = CreateTestEntry(veriKey);

_mockService.Setup(s => s.SetAsync(It.IsAny<ProvcacheEntry>(), It.IsAny<CancellationToken>()))
.ReturnsAsync(true);

var request = new ProvcacheCreateRequest
{
Entry = entry
};

// Act
var response = await _client.PostAsJsonAsync("/v1/provcache", request);

// Assert
response.StatusCode.Should().Be(HttpStatusCode.Created);
var content = await response.Content.ReadFromJsonAsync<ProvcacheCreateResponse>();
content.Should().NotBeNull();
content!.VeriKey.Should().Be(veriKey);
content.Success.Should().BeTrue();
}

[Fact]
public async Task CreateOrUpdate_NullEntry_Returns400BadRequest()
{
// Arrange
var request = new ProvcacheCreateRequest { Entry = null };

// Act
var response = await _client.PostAsJsonAsync("/v1/provcache", request);

// Assert
response.StatusCode.Should().Be(HttpStatusCode.BadRequest);
}

#endregion

#region POST /v1/provcache/invalidate

[Fact]
public async Task Invalidate_SingleVeriKey_Returns200WithAffectedCount()
{
// Arrange
const string veriKey = "sha256:inv123inv123inv123inv123inv123inv123inv123inv123inv123inv123inv1";

_mockService.Setup(s => s.InvalidateAsync(veriKey, It.IsAny<string?>(), It.IsAny<CancellationToken>()))
.ReturnsAsync(true);

var request = new ProvcacheInvalidateRequest
{
Type = null, // null means single VeriKey invalidation
Value = veriKey,
Reason = "Test invalidation"
};

// Act
var response = await _client.PostAsJsonAsync("/v1/provcache/invalidate", request);

// Assert
response.StatusCode.Should().Be(HttpStatusCode.OK);
var content = await response.Content.ReadFromJsonAsync<ProvcacheInvalidateResponse>();
content.Should().NotBeNull();
content!.EntriesAffected.Should().Be(1);
content.Type.Should().Be("verikey");
}

[Fact]
public async Task Invalidate_ByPolicyHash_Returns200WithBulkResult()
{
// Arrange
const string policyHash = "sha256:policyhash123policyhash123policyhash123policyhash123policyhash";
var invalidationRequest = InvalidationRequest.ByPolicyHash(policyHash, "Policy updated");
var invalidationResult = new InvalidationResult
{
EntriesAffected = 5,
Request = invalidationRequest,
Timestamp = DateTimeOffset.UtcNow
};

_mockService.Setup(s => s.InvalidateByAsync(
It.Is<InvalidationRequest>(r => r.Type == InvalidationType.PolicyHash),
It.IsAny<CancellationToken>()))
.ReturnsAsync(invalidationResult);

var request = new ProvcacheInvalidateRequest
{
Type = InvalidationType.PolicyHash,
Value = policyHash,
Reason = "Policy updated"
};

// Act
var response = await _client.PostAsJsonAsync("/v1/provcache/invalidate", request);

// Assert
response.StatusCode.Should().Be(HttpStatusCode.OK);
var content = await response.Content.ReadFromJsonAsync<ProvcacheInvalidateResponse>();
content.Should().NotBeNull();
content!.EntriesAffected.Should().Be(5);
}

[Fact]
public async Task Invalidate_ByPattern_Returns200WithPatternResult()
{
// Arrange
const string pattern = "sha256:test*";
var invalidationRequest = InvalidationRequest.ByPattern(pattern, "Cleanup");
var invalidationResult = new InvalidationResult
{
EntriesAffected = 10,
Request = invalidationRequest,
Timestamp = DateTimeOffset.UtcNow
};

_mockService.Setup(s => s.InvalidateByAsync(
It.Is<InvalidationRequest>(r => r.Type == InvalidationType.Pattern),
It.IsAny<CancellationToken>()))
.ReturnsAsync(invalidationResult);

var request = new ProvcacheInvalidateRequest
{
Type = InvalidationType.Pattern,
Value = pattern,
Reason = "Cleanup"
};

// Act
var response = await _client.PostAsJsonAsync("/v1/provcache/invalidate", request);

// Assert
response.StatusCode.Should().Be(HttpStatusCode.OK);
var content = await response.Content.ReadFromJsonAsync<ProvcacheInvalidateResponse>();
content.Should().NotBeNull();
content!.EntriesAffected.Should().Be(10);
}

#endregion

#region GET /v1/provcache/metrics

[Fact]
public async Task GetMetrics_Returns200WithMetrics()
{
// Arrange
var metrics = new ProvcacheMetrics
{
TotalRequests = 1000,
TotalHits = 800,
TotalMisses = 200,
TotalInvalidations = 50,
CurrentEntryCount = 500,
AvgLatencyMs = 2.5,
P99LatencyMs = 10.0,
ValkeyCacheHealthy = true,
PostgresRepositoryHealthy = true,
CollectedAt = DateTimeOffset.UtcNow
};

_mockService.Setup(s => s.GetMetricsAsync(It.IsAny<CancellationToken>()))
.ReturnsAsync(metrics);

// Act
var response = await _client.GetAsync("/v1/provcache/metrics");

// Assert
response.StatusCode.Should().Be(HttpStatusCode.OK);
var content = await response.Content.ReadFromJsonAsync<ProvcacheMetricsResponse>();
content.Should().NotBeNull();
content!.TotalRequests.Should().Be(1000);
content.TotalHits.Should().Be(800);
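// HitRate is presumably derived from the counters above: 800 hits / 1000 requests = 0.8.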
content.HitRate.Should().BeApproximately(0.8, 0.01);
}

#endregion

#region Contract Verification Tests

[Fact]
public async Task GetByVeriKey_ResponseContract_HasRequiredFields()
{
// Arrange
const string veriKey = "sha256:contract123contract123contract123contract123contract123contract";
var entry = CreateTestEntry(veriKey);
var result = ProvcacheServiceResult.Hit(entry, "valkey", 1.0);

_mockService.Setup(s => s.GetAsync(veriKey, false, It.IsAny<CancellationToken>()))
.ReturnsAsync(result);

// Act
var response = await _client.GetAsync($"/v1/provcache/{Uri.EscapeDataString(veriKey)}");
var json = await response.Content.ReadAsStringAsync();
var doc = JsonDocument.Parse(json);
var root = doc.RootElement;

// Assert - Verify contract structure
root.TryGetProperty("veriKey", out _).Should().BeTrue("Response must have 'veriKey' field");
root.TryGetProperty("status", out _).Should().BeTrue("Response must have 'status' field");
root.TryGetProperty("source", out _).Should().BeTrue("Response must have 'source' field");
root.TryGetProperty("elapsedMs", out _).Should().BeTrue("Response must have 'elapsedMs' field");
root.TryGetProperty("entry", out _).Should().BeTrue("Response must have 'entry' field");
}

[Fact]
public async Task CreateOrUpdate_ResponseContract_HasRequiredFields()
{
// Arrange
const string veriKey = "sha256:contractcreate123contractcreate123contractcreate123contractcre";
var entry = CreateTestEntry(veriKey);

_mockService.Setup(s => s.SetAsync(It.IsAny<ProvcacheEntry>(), It.IsAny<CancellationToken>()))
.ReturnsAsync(true);

var request = new ProvcacheCreateRequest { Entry = entry };

// Act
var response = await _client.PostAsJsonAsync("/v1/provcache", request);
var json = await response.Content.ReadAsStringAsync();
var doc = JsonDocument.Parse(json);
var root = doc.RootElement;

// Assert - Verify contract structure
root.TryGetProperty("veriKey", out _).Should().BeTrue("Response must have 'veriKey' field");
root.TryGetProperty("success", out _).Should().BeTrue("Response must have 'success' field");
root.TryGetProperty("expiresAt", out _).Should().BeTrue("Response must have 'expiresAt' field");
}

[Fact]
public async Task InvalidateResponse_Contract_HasRequiredFields()
{
// Arrange
const string veriKey = "sha256:contractinv123contractinv123contractinv123contractinv123contra";

_mockService.Setup(s => s.InvalidateAsync(veriKey, It.IsAny<string?>(), It.IsAny<CancellationToken>()))
.ReturnsAsync(true);

var request = new ProvcacheInvalidateRequest
{
Type = null,
Value = veriKey
};

// Act
var response = await _client.PostAsJsonAsync("/v1/provcache/invalidate", request);
var json = await response.Content.ReadAsStringAsync();
var doc = JsonDocument.Parse(json);
var root = doc.RootElement;

// Assert - Verify contract structure
root.TryGetProperty("entriesAffected", out _).Should().BeTrue("Response must have 'entriesAffected' field");
root.TryGetProperty("type", out _).Should().BeTrue("Response must have 'type' field");
root.TryGetProperty("value", out _).Should().BeTrue("Response must have 'value' field");
}

[Fact]
public async Task MetricsResponse_Contract_HasRequiredFields()
{
// Arrange
var metrics = new ProvcacheMetrics
{
TotalRequests = 100,
TotalHits = 80,
TotalMisses = 20,
TotalInvalidations = 5,
CurrentEntryCount = 50,
AvgLatencyMs = 1.0,
P99LatencyMs = 5.0,
ValkeyCacheHealthy = true,
PostgresRepositoryHealthy = true,
CollectedAt = DateTimeOffset.UtcNow
};

_mockService.Setup(s => s.GetMetricsAsync(It.IsAny<CancellationToken>()))
.ReturnsAsync(metrics);

// Act
var response = await _client.GetAsync("/v1/provcache/metrics");
var json = await response.Content.ReadAsStringAsync();
var doc = JsonDocument.Parse(json);
var root = doc.RootElement;

// Assert - Verify contract structure
root.TryGetProperty("totalRequests", out _).Should().BeTrue("Response must have 'totalRequests' field");
root.TryGetProperty("totalHits", out _).Should().BeTrue("Response must have 'totalHits' field");
root.TryGetProperty("totalMisses", out _).Should().BeTrue("Response must have 'totalMisses' field");
root.TryGetProperty("hitRate", out _).Should().BeTrue("Response must have 'hitRate' field");
root.TryGetProperty("currentEntryCount", out _).Should().BeTrue("Response must have 'currentEntryCount' field");
}

#endregion

#region Test Helpers

private static ProvcacheEntry CreateTestEntry(string veriKey, bool expired = false)
{
var now = DateTimeOffset.UtcNow;
return new ProvcacheEntry
{
VeriKey = veriKey,
Decision = CreateTestDecisionDigest(veriKey),
PolicyHash = "sha256:policy123policy123policy123policy123policy123policy123policy1234",
SignerSetHash = "sha256:signer123signer123signer123signer123signer123signer123signer12",
FeedEpoch = "2025-W01",
CreatedAt = now.AddMinutes(-5),
ExpiresAt = expired ? now.AddMinutes(-1) : now.AddHours(1),
HitCount = 0
};
}

private static DecisionDigest CreateTestDecisionDigest(string veriKey)
{
return new DecisionDigest
{
VeriKey = veriKey,
DigestVersion = "v1",
VerdictHash = "sha256:verdict123verdict123verdict123verdict123verdict123verdict12345",
ProofRoot = "sha256:proof123proof123proof123proof123proof123proof123proof1234567",
ReplaySeed = new ReplaySeed
{
FeedIds = ["cve-nvd", "ghsa-2024"],
RuleIds = ["base-policy"]
},
CreatedAt = DateTimeOffset.UtcNow,
ExpiresAt = DateTimeOffset.UtcNow.AddHours(1),
TrustScore = 85
};
}

#endregion
}
@@ -0,0 +1,38 @@
<?xml version="1.0" encoding="utf-8"?>
<Project Sdk="Microsoft.NET.Sdk">

<PropertyGroup>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<LangVersion>preview</LangVersion>
<IsPackable>false</IsPackable>
<RootNamespace>StellaOps.Provcache.Tests</RootNamespace>
</PropertyGroup>

<ItemGroup>
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.0" />
<PackageReference Include="xunit" Version="2.9.3" />
<PackageReference Include="xunit.runner.visualstudio" Version="3.0.2">
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
<PrivateAssets>all</PrivateAssets>
</PackageReference>
<PackageReference Include="FluentAssertions" Version="8.2.0" />
<PackageReference Include="Moq" Version="4.20.72" />
<PackageReference Include="coverlet.collector" Version="6.0.2">
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
<PrivateAssets>all</PrivateAssets>
</PackageReference>
<PackageReference Include="Microsoft.AspNetCore.TestHost" Version="10.0.0" />
</ItemGroup>

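<!-- TestServer and the hosting types used by ProvcacheApiTests live in the shared ASP.NET Core framework, so the test project needs an explicit framework reference. -->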
<ItemGroup>
<FrameworkReference Include="Microsoft.AspNetCore.App" />
</ItemGroup>

<ItemGroup>
<ProjectReference Include="../../StellaOps.Provcache/StellaOps.Provcache.csproj" />
<ProjectReference Include="../../StellaOps.Provcache.Api/StellaOps.Provcache.Api.csproj" />
</ItemGroup>

</Project>
@@ -0,0 +1,459 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using Moq;
using Xunit;

namespace StellaOps.Provcache.Tests;

/// <summary>
/// Integration tests for the write-behind queue.
/// Tests batching, retry logic, and metrics.
/// </summary>
public class WriteBehindQueueTests
{
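// A small batch size and fast flush interval keep the background loop observable within this class's short test delays.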
private readonly ProvcacheOptions _options = new()
{
EnableWriteBehind = true,
WriteBehindMaxBatchSize = 5,
WriteBehindFlushInterval = TimeSpan.FromMilliseconds(100),
WriteBehindQueueCapacity = 100,
WriteBehindMaxRetries = 3
};

private static ProvcacheEntry CreateTestEntry(string id) => new()
{
VeriKey = $"sha256:verikey_{id}",
Decision = new DecisionDigest
{
DigestVersion = "v1",
VeriKey = $"sha256:verikey_{id}",
VerdictHash = $"sha256:verdict_{id}",
ProofRoot = $"sha256:proof_{id}",
TrustScore = 85,
ReplaySeed = new ReplaySeed
{
FeedIds = ["feed1"],
RuleIds = ["rule1"]
},
CreatedAt = DateTimeOffset.UtcNow,
ExpiresAt = DateTimeOffset.UtcNow.AddHours(24)
},
PolicyHash = "sha256:policy",
SignerSetHash = "sha256:signers",
FeedEpoch = "2024-12-24T12:00:00Z",
CreatedAt = DateTimeOffset.UtcNow,
ExpiresAt = DateTimeOffset.UtcNow.AddHours(24),
HitCount = 0
};

[Fact]
public async Task EnqueueAsync_SingleEntry_UpdatesMetrics()
{
// Arrange
var repository = new Mock<IProvcacheRepository>();
var queue = new WriteBehindQueue(
repository.Object,
Options.Create(_options),
NullLogger<WriteBehindQueue>.Instance);

var entry = CreateTestEntry("1");

// Act
await queue.EnqueueAsync(entry);

// Assert
var metrics = queue.GetMetrics();
metrics.TotalEnqueued.Should().Be(1);
metrics.CurrentQueueDepth.Should().Be(1);
}

[Fact]
public async Task EnqueueAsync_MultipleEntries_TracksQueueDepth()
{
// Arrange
var repository = new Mock<IProvcacheRepository>();
var queue = new WriteBehindQueue(
repository.Object,
Options.Create(_options),
NullLogger<WriteBehindQueue>.Instance);

// Act
for (int i = 0; i < 10; i++)
{
await queue.EnqueueAsync(CreateTestEntry(i.ToString()));
}

// Assert
var metrics = queue.GetMetrics();
metrics.TotalEnqueued.Should().Be(10);
metrics.CurrentQueueDepth.Should().Be(10);
}

[Fact]
public void GetMetrics_InitialState_AllZeros()
{
// Arrange
var repository = new Mock<IProvcacheRepository>();
var queue = new WriteBehindQueue(
repository.Object,
Options.Create(_options),
NullLogger<WriteBehindQueue>.Instance);

// Act
var metrics = queue.GetMetrics();

// Assert
metrics.TotalEnqueued.Should().Be(0);
metrics.TotalPersisted.Should().Be(0);
metrics.TotalFailed.Should().Be(0);
metrics.TotalRetries.Should().Be(0);
metrics.TotalBatches.Should().Be(0);
metrics.CurrentQueueDepth.Should().Be(0);
}

[Fact]
public async Task ProcessBatch_SuccessfulPersist_UpdatesPersistMetrics()
{
// Arrange
var repository = new Mock<IProvcacheRepository>();
repository.Setup(r => r.UpsertManyAsync(It.IsAny<IEnumerable<ProvcacheEntry>>(), It.IsAny<CancellationToken>()))
.Returns(Task.CompletedTask);

var queue = new WriteBehindQueue(
repository.Object,
Options.Create(_options),
NullLogger<WriteBehindQueue>.Instance);

// Enqueue entries
for (int i = 0; i < 5; i++)
{
await queue.EnqueueAsync(CreateTestEntry(i.ToString()));
}

// Act - Start the queue and let it process
using var cts = new CancellationTokenSource();
var task = queue.StartAsync(cts.Token);

// Wait for processing
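// 500 ms spans several 100 ms flush intervals; polling GetMetrics() until TotalPersisted reaches 5 would be less timing-sensitive, but a fixed delay keeps the test simple.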
await Task.Delay(500);

// Stop
await queue.StopAsync(CancellationToken.None);

// Assert
var metrics = queue.GetMetrics();
metrics.TotalPersisted.Should().BeGreaterThanOrEqualTo(5);
metrics.TotalBatches.Should().BeGreaterThanOrEqualTo(1);
}

[Fact]
public void WriteBehindMetrics_Timestamp_IsRecent()
{
// Arrange
var repository = new Mock<IProvcacheRepository>();
var queue = new WriteBehindQueue(
repository.Object,
Options.Create(_options),
NullLogger<WriteBehindQueue>.Instance);

// Act
var metrics = queue.GetMetrics();

// Assert
metrics.Timestamp.Should().BeCloseTo(DateTimeOffset.UtcNow, TimeSpan.FromSeconds(5));
}
}

/// <summary>
/// Tests for the Provcache service orchestration over mocked store and repository layers.
/// </summary>
public class ProvcacheServiceStorageIntegrationTests
{
private readonly ProvcacheOptions _options = new()
{
DefaultTtl = TimeSpan.FromHours(24),
MaxTtl = TimeSpan.FromDays(7),
TimeWindowBucket = TimeSpan.FromHours(1),
EnableWriteBehind = false, // Disable write-behind for sync tests
AllowCacheBypass = true
};

private static ProvcacheEntry CreateTestEntry(string veriKey) => new()
{
VeriKey = veriKey,
Decision = new DecisionDigest
{
DigestVersion = "v1",
VeriKey = veriKey,
VerdictHash = "sha256:verdict123",
ProofRoot = "sha256:proof456",
TrustScore = 90,
ReplaySeed = new ReplaySeed
{
FeedIds = ["nvd:2024"],
RuleIds = ["rule:cve-critical"]
},
CreatedAt = DateTimeOffset.UtcNow,
ExpiresAt = DateTimeOffset.UtcNow.AddHours(24)
},
PolicyHash = "sha256:policy789",
SignerSetHash = "sha256:signers000",
FeedEpoch = "2024-12-24T00:00:00Z",
CreatedAt = DateTimeOffset.UtcNow,
ExpiresAt = DateTimeOffset.UtcNow.AddHours(24),
HitCount = 0
};

[Fact]
public async Task SetAsync_ThenGetAsync_ReturnsEntry()
{
// Arrange
var veriKey = "sha256:test_verikey_1";
var entry = CreateTestEntry(veriKey);

var store = new Mock<IProvcacheStore>();
store.Setup(s => s.SetAsync(It.IsAny<ProvcacheEntry>(), It.IsAny<CancellationToken>()))
.Returns(ValueTask.CompletedTask);
store.Setup(s => s.GetAsync(veriKey, It.IsAny<CancellationToken>()))
.ReturnsAsync(new ProvcacheLookupResult { IsHit = true, Entry = entry, Source = "valkey" });

var repository = new Mock<IProvcacheRepository>();
repository.Setup(r => r.UpsertAsync(It.IsAny<ProvcacheEntry>(), It.IsAny<CancellationToken>()))
.Returns(Task.CompletedTask);

var service = new ProvcacheService(
store.Object,
repository.Object,
Options.Create(_options),
NullLogger<ProvcacheService>.Instance);

// Act
await service.SetAsync(entry);
var result = await service.GetAsync(veriKey);

// Assert
result.WasCached.Should().BeTrue();
result.Entry.Should().NotBeNull();
result.Entry!.VeriKey.Should().Be(veriKey);
result.Source.Should().Be("valkey");
}

[Fact]
public async Task GetAsync_CacheMissWithDbHit_BackfillsCache()
{
// Arrange
var veriKey = "sha256:test_verikey_2";
var entry = CreateTestEntry(veriKey);

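// Valkey misses but Postgres has the entry: the service should serve from the repository and write the entry back into the hot cache.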
var store = new Mock<IProvcacheStore>();
store.Setup(s => s.GetAsync(veriKey, It.IsAny<CancellationToken>()))
.ReturnsAsync(new ProvcacheLookupResult { IsHit = false });
store.Setup(s => s.SetAsync(It.IsAny<ProvcacheEntry>(), It.IsAny<CancellationToken>()))
.Returns(ValueTask.CompletedTask);

var repository = new Mock<IProvcacheRepository>();
repository.Setup(r => r.GetAsync(veriKey, It.IsAny<CancellationToken>()))
.ReturnsAsync(entry);

var service = new ProvcacheService(
store.Object,
repository.Object,
Options.Create(_options),
NullLogger<ProvcacheService>.Instance);

// Act
var result = await service.GetAsync(veriKey);

// Assert
result.WasCached.Should().BeTrue();
result.Entry.Should().NotBeNull();
result.Source.Should().Be("postgres");

// Verify backfill
store.Verify(s => s.SetAsync(It.Is<ProvcacheEntry>(e => e.VeriKey == veriKey), It.IsAny<CancellationToken>()), Times.Once);
}

[Fact]
public async Task GetAsync_FullMiss_ReturnsMissResult()
{
// Arrange
var veriKey = "sha256:test_verikey_3";

var store = new Mock<IProvcacheStore>();
store.Setup(s => s.GetAsync(veriKey, It.IsAny<CancellationToken>()))
.ReturnsAsync(new ProvcacheLookupResult { IsHit = false });

var repository = new Mock<IProvcacheRepository>();
repository.Setup(r => r.GetAsync(veriKey, It.IsAny<CancellationToken>()))
.ReturnsAsync((ProvcacheEntry?)null);

var service = new ProvcacheService(
store.Object,
repository.Object,
Options.Create(_options),
NullLogger<ProvcacheService>.Instance);

// Act
var result = await service.GetAsync(veriKey);

// Assert
result.WasCached.Should().BeFalse();
result.Status.Should().Be(ProvcacheResultStatus.CacheMiss);
result.Entry.Should().BeNull();
}

[Fact]
public async Task GetOrComputeAsync_CacheHit_DoesNotCallFactory()
{
// Arrange
var veriKey = "sha256:test_verikey_4";
var entry = CreateTestEntry(veriKey);
var factoryCalled = false;

var store = new Mock<IProvcacheStore>();
store.Setup(s => s.GetAsync(veriKey, It.IsAny<CancellationToken>()))
.ReturnsAsync(new ProvcacheLookupResult { IsHit = true, Entry = entry, Source = "valkey" });

var repository = new Mock<IProvcacheRepository>();

var service = new ProvcacheService(
store.Object,
repository.Object,
Options.Create(_options),
NullLogger<ProvcacheService>.Instance);

// Act
var result = await service.GetOrComputeAsync(veriKey, async _ =>
{
factoryCalled = true;
return entry;
});

// Assert
factoryCalled.Should().BeFalse();
result.VeriKey.Should().Be(veriKey);
}

[Fact]
public async Task GetOrComputeAsync_CacheMiss_CallsFactoryAndStores()
{
// Arrange
var veriKey = "sha256:test_verikey_5";
var entry = CreateTestEntry(veriKey);
var factoryCalled = false;

var store = new Mock<IProvcacheStore>();
store.Setup(s => s.GetAsync(veriKey, It.IsAny<CancellationToken>()))
.ReturnsAsync(new ProvcacheLookupResult { IsHit = false });
store.Setup(s => s.SetAsync(It.IsAny<ProvcacheEntry>(), It.IsAny<CancellationToken>()))
.Returns(ValueTask.CompletedTask);

var repository = new Mock<IProvcacheRepository>();
repository.Setup(r => r.GetAsync(veriKey, It.IsAny<CancellationToken>()))
.ReturnsAsync((ProvcacheEntry?)null);
repository.Setup(r => r.UpsertAsync(It.IsAny<ProvcacheEntry>(), It.IsAny<CancellationToken>()))
.Returns(Task.CompletedTask);

var service = new ProvcacheService(
store.Object,
repository.Object,
Options.Create(_options),
NullLogger<ProvcacheService>.Instance);

// Act
var result = await service.GetOrComputeAsync(veriKey, async _ =>
{
factoryCalled = true;
return entry;
});

// Assert
factoryCalled.Should().BeTrue();
result.VeriKey.Should().Be(veriKey);
store.Verify(s => s.SetAsync(It.Is<ProvcacheEntry>(e => e.VeriKey == veriKey), It.IsAny<CancellationToken>()), Times.Once);
}

[Fact]
public async Task InvalidateAsync_RemovesFromBothStoreLayers()
{
// Arrange
var veriKey = "sha256:test_verikey_6";

var store = new Mock<IProvcacheStore>();
store.Setup(s => s.InvalidateAsync(veriKey, It.IsAny<CancellationToken>()))
.ReturnsAsync(true);

var repository = new Mock<IProvcacheRepository>();
repository.Setup(r => r.DeleteAsync(veriKey, It.IsAny<CancellationToken>()))
.ReturnsAsync(true);

var service = new ProvcacheService(
store.Object,
repository.Object,
Options.Create(_options),
NullLogger<ProvcacheService>.Instance);

// Act
var result = await service.InvalidateAsync(veriKey, "test invalidation");

// Assert
result.Should().BeTrue();
store.Verify(s => s.InvalidateAsync(veriKey, It.IsAny<CancellationToken>()), Times.Once);
repository.Verify(r => r.DeleteAsync(veriKey, It.IsAny<CancellationToken>()), Times.Once);
}

[Fact]
public async Task GetAsync_BypassCache_ReturnsBypassedResult()
{
// Arrange
var veriKey = "sha256:test_verikey_7";

var store = new Mock<IProvcacheStore>();
var repository = new Mock<IProvcacheRepository>();

var service = new ProvcacheService(
store.Object,
repository.Object,
Options.Create(_options),
NullLogger<ProvcacheService>.Instance);

// Act
var result = await service.GetAsync(veriKey, bypassCache: true);

// Assert
result.Status.Should().Be(ProvcacheResultStatus.Bypassed);
store.Verify(s => s.GetAsync(It.IsAny<string>(), It.IsAny<CancellationToken>()), Times.Never);
}

[Fact]
public async Task GetMetricsAsync_ReturnsCurrentMetrics()
{
// Arrange
var store = new Mock<IProvcacheStore>();
store.Setup(s => s.GetAsync(It.IsAny<string>(), It.IsAny<CancellationToken>()))
.ReturnsAsync(new ProvcacheLookupResult { IsHit = false });

var repository = new Mock<IProvcacheRepository>();
repository.Setup(r => r.GetAsync(It.IsAny<string>(), It.IsAny<CancellationToken>()))
.ReturnsAsync((ProvcacheEntry?)null);
repository.Setup(r => r.GetStatisticsAsync(It.IsAny<CancellationToken>()))
.ReturnsAsync(new ProvcacheStatistics { TotalEntries = 0, TotalHits = 0 });

var service = new ProvcacheService(
store.Object,
repository.Object,
Options.Create(_options),
NullLogger<ProvcacheService>.Instance);

// Generate some traffic
await service.GetAsync("sha256:miss1");
await service.GetAsync("sha256:miss2");

// Act
var metrics = await service.GetMetricsAsync();

// Assert
metrics.TotalRequests.Should().BeGreaterThanOrEqualTo(2);
}
}
@@ -0,0 +1,244 @@
|
||||
using FluentAssertions;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Provcache.Tests;
|
||||
|
||||
/// <summary>
|
||||
/// Determinism tests for VeriKeyBuilder.
|
||||
/// Verifies that same inputs always produce the same VeriKey.
|
||||
/// </summary>
|
||||
public class VeriKeyBuilderDeterminismTests
|
||||
{
|
||||
private readonly ProvcacheOptions _options = new()
|
||||
{
|
||||
TimeWindowBucket = TimeSpan.FromHours(1)
|
||||
};
|
||||
|
||||
[Fact]
|
||||
public void Build_SameInputs_ProducesSameVeriKey()
|
||||
{
|
||||
// Arrange
|
||||
var sourceHash = "sha256:abc123def456";
|
||||
var sbomHash = "sha256:sbom111222333";
|
||||
var vexHashes = new[] { "sha256:vex111", "sha256:vex222", "sha256:vex333" };
|
||||
var policyHash = "sha256:policy999";
|
||||
var signerHashes = new[] { "sha256:cert111", "sha256:cert222" };
|
||||
var timeWindow = "2024-12-24T12:00:00Z";
|
||||
|
||||
// Act
|
||||
var veriKey1 = new VeriKeyBuilder(_options)
|
||||
.WithSourceHash(sourceHash)
|
||||
.WithSbomHash(sbomHash)
|
||||
.WithVexStatementHashes(vexHashes)
|
||||
.WithMergePolicyHash(policyHash)
|
||||
.WithCertificateHashes(signerHashes)
|
||||
            .WithTimeWindow(timeWindow)
            .Build();

        var veriKey2 = new VeriKeyBuilder(_options)
            .WithSourceHash(sourceHash)
            .WithSbomHash(sbomHash)
            .WithVexStatementHashes(vexHashes)
            .WithMergePolicyHash(policyHash)
            .WithCertificateHashes(signerHashes)
            .WithTimeWindow(timeWindow)
            .Build();

        // Assert
        veriKey1.Should().Be(veriKey2);
        veriKey1.Should().StartWith("sha256:");
    }

    [Fact]
    public void Build_DifferentInputOrder_VexHashes_ProducesSameVeriKey()
    {
        // Arrange - VEX hashes in different orders
        var vexHashesOrder1 = new[] { "sha256:vex333", "sha256:vex111", "sha256:vex222" };
        var vexHashesOrder2 = new[] { "sha256:vex111", "sha256:vex222", "sha256:vex333" };

        // Act
        var veriKey1 = CreateBuilder().WithVexStatementHashes(vexHashesOrder1).Build();
        var veriKey2 = CreateBuilder().WithVexStatementHashes(vexHashesOrder2).Build();

        // Assert - Should be same because hashes are sorted
        veriKey1.Should().Be(veriKey2);
    }

    [Fact]
    public void Build_DifferentInputOrder_CertificateHashes_ProducesSameVeriKey()
    {
        // Arrange - Certificate hashes in different orders
        var certOrder1 = new[] { "sha256:cert222", "sha256:cert111" };
        var certOrder2 = new[] { "sha256:cert111", "sha256:cert222" };

        // Act
        var veriKey1 = CreateBuilder().WithCertificateHashes(certOrder1).Build();
        var veriKey2 = CreateBuilder().WithCertificateHashes(certOrder2).Build();

        // Assert - Should be same because hashes are sorted
        veriKey1.Should().Be(veriKey2);
    }

    [Fact]
    public void Build_DifferentSourceHash_ProducesDifferentVeriKey()
    {
        // Arrange
        var veriKey1 = CreateBuilder().WithSourceHash("sha256:source111").Build();
        var veriKey2 = CreateBuilder().WithSourceHash("sha256:source222").Build();

        // Assert
        veriKey1.Should().NotBe(veriKey2);
    }

    [Fact]
    public void Build_DifferentSbomHash_ProducesDifferentVeriKey()
    {
        // Arrange
        var veriKey1 = CreateBuilder().WithSbomHash("sha256:sbom111").Build();
        var veriKey2 = CreateBuilder().WithSbomHash("sha256:sbom222").Build();

        // Assert
        veriKey1.Should().NotBe(veriKey2);
    }

    [Fact]
    public void Build_DifferentTimeWindow_ProducesDifferentVeriKey()
    {
        // Arrange
        var veriKey1 = CreateBuilder().WithTimeWindow("2024-12-24T12:00:00Z").Build();
        var veriKey2 = CreateBuilder().WithTimeWindow("2024-12-24T13:00:00Z").Build();

        // Assert
        veriKey1.Should().NotBe(veriKey2);
    }

    [Fact]
    public void Build_MultipleTimes_ReturnsConsistentResult()
    {
        // Arrange & Act - Create multiple builder instances with same inputs
        var results = Enumerable.Range(0, 100)
            .Select(_ => CreateBuilder().Build())
            .Distinct()
            .ToList();

        // Assert - All should be identical
        results.Should().HaveCount(1);
    }

    [Fact]
    public void Build_AcrossMultipleBuilders_ProducesSameResult()
    {
        // Act - Create 10 different builder instances
        var results = Enumerable.Range(0, 10)
            .Select(_ => CreateBuilder().Build())
            .Distinct()
            .ToList();

        // Assert - All should be identical
        results.Should().HaveCount(1);
    }

    [Fact]
    public void Build_WithHashPrefixNormalization_ProducesSameVeriKey()
    {
        // Arrange - Same hash values, differing only in the casing of the
        // algorithm prefix and hex digits
        var veriKey1 = new VeriKeyBuilder(_options)
            .WithSourceHash("SHA256:ABC123")
            .WithSbomHash("sha256:def456")
            .WithVexStatementHashes(["sha256:vex1"])
            .WithMergePolicyHash("sha256:policy")
            .WithCertificateHashes(["sha256:cert"])
            .WithTimeWindow("2024-12-24T12:00:00Z")
            .Build();

        var veriKey2 = new VeriKeyBuilder(_options)
            .WithSourceHash("sha256:abc123")
            .WithSbomHash("sha256:def456")
            .WithVexStatementHashes(["sha256:vex1"])
            .WithMergePolicyHash("sha256:policy")
            .WithCertificateHashes(["sha256:cert"])
            .WithTimeWindow("2024-12-24T12:00:00Z")
            .Build();

        // Assert
        veriKey1.Should().Be(veriKey2);
    }

    [Fact]
    public void WithTimeWindow_Timestamp_BucketsDeterministically()
    {
        // Arrange
        var timestamp1 = new DateTimeOffset(2024, 12, 24, 12, 30, 0, TimeSpan.Zero);
        var timestamp2 = new DateTimeOffset(2024, 12, 24, 12, 45, 0, TimeSpan.Zero);
        var timestamp3 = new DateTimeOffset(2024, 12, 24, 13, 15, 0, TimeSpan.Zero);

        // Act
        var builder1 = CreateBuilder().WithTimeWindow(timestamp1);
        var builder2 = CreateBuilder().WithTimeWindow(timestamp2);
        var builder3 = CreateBuilder().WithTimeWindow(timestamp3);

        // Assert - timestamps 1 and 2 are in the same hour bucket, 3 is different
        builder1.Build().Should().Be(builder2.Build());
        builder1.Build().Should().NotBe(builder3.Build());
    }

    [Fact]
    public void BuildWithComponents_ReturnsSameVeriKeyAsIndividualComponents()
    {
        // Arrange & Act - Create two identical builders
        var veriKey = CreateBuilder().Build();
        var components = CreateBuilder().BuildWithComponents();

        // Assert
        components.VeriKey.Should().Be(veriKey);
        components.SourceHash.Should().StartWith("sha256:");
        components.SbomHash.Should().StartWith("sha256:");
    }

    [Fact]
    public void Build_EmptyVexSet_ProducesConsistentHash()
    {
        // Arrange
        var emptyVex1 = Array.Empty<string>();
        var emptyVex2 = new List<string>();

        // Act
        var veriKey1 = CreateBuilder()
            .WithVexStatementHashes(emptyVex1)
            .Build();

        var veriKey2 = CreateBuilder()
            .WithVexStatementHashes(emptyVex2)
            .Build();

        // Assert
        veriKey1.Should().Be(veriKey2);
    }

    [Fact]
    public void Build_MissingComponent_ThrowsInvalidOperationException()
    {
        // Arrange
        var builder = new VeriKeyBuilder(_options)
            .WithSourceHash("sha256:abc123");
        // Missing other required components

        // Act
        var act = () => builder.Build();

        // Assert
        act.Should().Throw<InvalidOperationException>()
            .WithMessage("*missing required components*");
    }

    private VeriKeyBuilder CreateBuilder()
    {
        return new VeriKeyBuilder(_options)
            .WithSourceHash("sha256:defaultsource")
            .WithSbomHash("sha256:defaultsbom")
            .WithVexStatementHashes(["sha256:vex1", "sha256:vex2"])
            .WithMergePolicyHash("sha256:defaultpolicy")
            .WithCertificateHashes(["sha256:cert1"])
            .WithTimeWindow("2024-12-24T12:00:00Z");
    }
}
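
// The tests above pin down the VeriKey determinism contract: set-valued inputs
// are order-insensitive, hash casing is normalized, and timestamps are bucketed
// by hour. A minimal sketch of a derivation satisfying that contract follows;
// it is illustrative only: the names and canonical format below are
// assumptions, not the actual VeriKeyBuilder internals (which this diff does
// not show).

using System;
using System.Collections.Generic;
using System.Linq;
using System.Security.Cryptography;
using System.Text;

internal static class VeriKeyDerivationSketch
{
    // Hypothetical derivation: normalize casing, sort set-valued inputs,
    // bucket the timestamp to the hour, then SHA-256 the canonical string.
    public static string BuildVeriKey(
        string sourceHash,
        string sbomHash,
        IEnumerable<string> vexStatementHashes,
        string mergePolicyHash,
        IEnumerable<string> certificateHashes,
        DateTimeOffset timestamp)
    {
        static string Norm(string hash) => hash.ToLowerInvariant();

        // Sorting makes the VEX and certificate inputs order-insensitive.
        var vex = string.Join(",", vexStatementHashes.Select(Norm).OrderBy(h => h, StringComparer.Ordinal));
        var certs = string.Join(",", certificateHashes.Select(Norm).OrderBy(h => h, StringComparer.Ordinal));

        // Hour bucketing: 12:30 and 12:45 collapse to the same window; 13:15 does not.
        var window = $"{timestamp.ToUniversalTime():yyyy-MM-dd'T'HH}:00:00Z";

        var canonical = string.Join("|", Norm(sourceHash), Norm(sbomHash), vex, Norm(mergePolicyHash), certs, window);
        var digest = SHA256.HashData(Encoding.UTF8.GetBytes(canonical));
        return "sha256:" + Convert.ToHexString(digest).ToLowerInvariant();
    }
}
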
@@ -17,3 +17,21 @@ public sealed class RabbitMqIntegrationFactAttribute : FactAttribute
    }
}

/// <summary>
/// Theory attribute for RabbitMQ integration tests.
/// Skips tests when the STELLAOPS_TEST_RABBITMQ environment variable is not set.
/// </summary>
[AttributeUsage(AttributeTargets.Method)]
public sealed class RabbitMqIntegrationTheoryAttribute : TheoryAttribute
{
    public RabbitMqIntegrationTheoryAttribute()
    {
        var enabled = Environment.GetEnvironmentVariable("STELLAOPS_TEST_RABBITMQ");
        if (!string.Equals(enabled, "1", StringComparison.OrdinalIgnoreCase) &&
            !string.Equals(enabled, "true", StringComparison.OrdinalIgnoreCase))
        {
            Skip = "RabbitMQ integration tests are opt-in. Set STELLAOPS_TEST_RABBITMQ=1 (requires Docker/Testcontainers).";
        }
    }
}
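
// Both integration attributes repeat the same opt-in check. One possible
// consolidation (a sketch, not part of this commit; RabbitMqTestGate is a
// hypothetical name) is a shared gate that each attribute constructor calls:

internal static class RabbitMqTestGate
{
    /// <summary>
    /// Returns a skip reason when STELLAOPS_TEST_RABBITMQ is not opted in; otherwise null.
    /// </summary>
    public static string? GetSkipReason()
    {
        var enabled = Environment.GetEnvironmentVariable("STELLAOPS_TEST_RABBITMQ");
        var optedIn = string.Equals(enabled, "1", StringComparison.OrdinalIgnoreCase)
                      || string.Equals(enabled, "true", StringComparison.OrdinalIgnoreCase);
        return optedIn
            ? null
            : "RabbitMQ integration tests are opt-in. Set STELLAOPS_TEST_RABBITMQ=1 (requires Docker/Testcontainers).";
    }
}

// Each attribute constructor then reduces to:
//     if (RabbitMqTestGate.GetSkipReason() is { } reason) { Skip = reason; }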

@@ -0,0 +1,634 @@
// -----------------------------------------------------------------------------
// RabbitMqTransportComplianceTests.cs
// Sprint: SPRINT_5100_0010_0003 - Router + Messaging Test Implementation
// Task: MESSAGING-5100-005 - RabbitMQ transport compliance tests
// Description: Transport compliance tests for the RabbitMQ transport covering
//              protocol roundtrip, frame protocol behavior, connection
//              semantics, message delivery, broker resilience, and queue
//              configuration.
// -----------------------------------------------------------------------------

using System.Text;
using FluentAssertions;
using Microsoft.Extensions.Options;
using StellaOps.Router.Common.Enums;
using StellaOps.Router.Common.Frames;
using StellaOps.Router.Common.Models;
using StellaOps.Router.Transport.RabbitMq.Tests.Fixtures;
using Xunit;
using Xunit.Abstractions;

namespace StellaOps.Router.Transport.RabbitMq.Tests;

/// <summary>
/// Transport compliance tests for the RabbitMQ transport.
/// Validates:
/// - Protocol roundtrip (frame encoding → publish → consume → decode)
/// - Frame type discrimination
/// - Message delivery guarantees
/// - Connection resilience and recovery
/// </summary>
[Collection(RabbitMqIntegrationTestCollection.Name)]
public sealed class RabbitMqTransportComplianceTests : IAsyncLifetime
{
    private readonly RabbitMqContainerFixture _fixture;
    private readonly ITestOutputHelper _output;
    private RabbitMqTransportServer? _server;
    private RabbitMqTransportClient? _client;

    public RabbitMqTransportComplianceTests(RabbitMqContainerFixture fixture, ITestOutputHelper output)
    {
        _fixture = fixture;
        _output = output;
    }

    public Task InitializeAsync()
    {
        return Task.CompletedTask;
    }

    public async Task DisposeAsync()
    {
        if (_client is not null)
        {
            await _client.DisposeAsync();
        }

        if (_server is not null)
        {
            await _server.DisposeAsync();
        }
    }

    #region Protocol Roundtrip Tests

    [RabbitMqIntegrationFact]
    public async Task ProtocolRoundtrip_HelloFrame_ReceivedByServer()
    {
        // Arrange
        const string nodeId = "gw-hello-roundtrip";
        _server = CreateServer(nodeId);
        _client = CreateClient("svc-hello-roundtrip", nodeId);

        Frame? receivedFrame = null;
        var frameReceived = new TaskCompletionSource<bool>();

        _server.OnFrame += (connectionId, frame) =>
        {
            if (frame.Type == FrameType.Hello)
            {
                receivedFrame = frame;
                frameReceived.TrySetResult(true);
            }
        };

        await _server.StartAsync(CancellationToken.None);

        // Act
        await ConnectClientAsync(_client, "svc-hello-roundtrip");

        // Assert
        using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(10));
        await frameReceived.Task.WaitAsync(cts.Token);

        receivedFrame.Should().NotBeNull();
        receivedFrame!.Type.Should().Be(FrameType.Hello);
        receivedFrame.CorrelationId.Should().NotBeNullOrEmpty();

        _output.WriteLine("Hello frame roundtrip test passed");
    }

    [RabbitMqIntegrationFact]
    public async Task ProtocolRoundtrip_HeartbeatFrame_ReceivedByServer()
    {
        // Arrange
        const string nodeId = "gw-heartbeat-roundtrip";
        _server = CreateServer(nodeId);
        _client = CreateClient("svc-heartbeat-roundtrip", nodeId);

        var heartbeatReceived = new TaskCompletionSource<bool>();

        _server.OnFrame += (connectionId, frame) =>
        {
            if (frame.Type == FrameType.Heartbeat)
            {
                heartbeatReceived.TrySetResult(true);
            }
        };

        await _server.StartAsync(CancellationToken.None);
        await ConnectClientAsync(_client, "svc-heartbeat-roundtrip");
        await Task.Delay(500); // Wait for HELLO to establish connection

        // Act
        var heartbeat = new HeartbeatPayload
        {
            InstanceId = "svc-heartbeat-roundtrip",
            Status = InstanceHealthStatus.Healthy,
            InFlightRequestCount = 0,
            ErrorRate = 0,
            TimestampUtc = DateTime.UtcNow
        };
        await _client.SendHeartbeatAsync(heartbeat, CancellationToken.None);

        // Assert
        using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(10));
        await heartbeatReceived.Task.WaitAsync(cts.Token);

        _output.WriteLine("Heartbeat frame roundtrip test passed");
    }

    #endregion

    #region Frame Protocol Tests

    [RabbitMqIntegrationFact]
    public void FrameProtocol_ParseFrame_AllFrameTypes()
    {
        // Test that all frame types parse correctly
        var frameTypes = new[] { "Request", "Response", "Hello", "Heartbeat", "Cancel" };

        foreach (var typeName in frameTypes)
        {
            var properties = new TestBasicProperties { Type = typeName, CorrelationId = "test" };
            var body = new byte[] { 1, 2, 3 };

            var frame = RabbitMqFrameProtocol.ParseFrame(body, properties);

            var expectedType = Enum.Parse<FrameType>(typeName);
            frame.Type.Should().Be(expectedType, $"Frame type {typeName} should parse correctly");
        }

        _output.WriteLine("All frame types parse correctly");
    }

    [RabbitMqIntegrationFact]
    public void FrameProtocol_CreateProperties_PreservesFrameType()
    {
        // Test that frame type is preserved in properties
        var frameTypes = new[] { FrameType.Request, FrameType.Response, FrameType.Hello, FrameType.Heartbeat, FrameType.Cancel };

        foreach (var frameType in frameTypes)
        {
            var frame = new Frame
            {
                Type = frameType,
                CorrelationId = Guid.NewGuid().ToString("N"),
                Payload = Array.Empty<byte>()
            };

            var properties = RabbitMqFrameProtocol.CreateProperties(frame, null);

            properties.Type.Should().Be(frameType.ToString(), $"Frame type {frameType} should be preserved in properties");
        }

        _output.WriteLine("Frame types preserved in properties");
    }

    [RabbitMqIntegrationTheory]
    [InlineData(0)]
    [InlineData(1)]
    [InlineData(100)]
    [InlineData(1000)]
    public void FrameProtocol_BinaryPayload_Preserved(int payloadSize)
    {
        // Test that binary payloads are preserved byte-for-byte during parsing
        var payload = new byte[payloadSize];
        if (payloadSize > 0)
        {
            new Random(payloadSize).NextBytes(payload);
        }

        var properties = new TestBasicProperties { Type = "Request", CorrelationId = "test" };
        var frame = RabbitMqFrameProtocol.ParseFrame(payload, properties);

        // Equal (rather than BeEquivalentTo) asserts both content and order.
        frame.Payload.ToArray().Should().Equal(payload);

        _output.WriteLine($"Binary payload size {payloadSize} preserved");
    }

    [RabbitMqIntegrationFact]
    public void FrameProtocol_CorrelationId_Preserved()
    {
        // Test that correlation ID is preserved
        var correlationId = Guid.NewGuid().ToString("N");
        var properties = new TestBasicProperties { Type = "Request", CorrelationId = correlationId };

        var frame = RabbitMqFrameProtocol.ParseFrame(Array.Empty<byte>(), properties);

        frame.CorrelationId.Should().Be(correlationId);

        _output.WriteLine("Correlation ID preserved");
    }

    #endregion
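
    // The frame-protocol tests above jointly specify the ParseFrame contract:
    // the AMQP "type" property carries the FrameType enum name, the correlation
    // id passes through unchanged, and the message body becomes the payload
    // byte-for-byte. A behavioral sketch consistent with those tests follows;
    // the body is an assumption, not the real RabbitMqFrameProtocol
    // implementation, which lives in the transport project.
    private static Frame ParseFrameSketch(ReadOnlyMemory<byte> body, RabbitMQ.Client.IReadOnlyBasicProperties props)
    {
        return new Frame
        {
            // The enum name roundtrips through the "type" property. Handling of
            // a missing type is an assumption; the tests never exercise it.
            Type = Enum.Parse<FrameType>(
                props.Type ?? throw new InvalidOperationException("Missing frame type")),
            CorrelationId = props.CorrelationId ?? string.Empty,
            Payload = body.ToArray()
        };
    }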

    #region Connection Semantics Tests

    [RabbitMqIntegrationFact]
    public async Task ConnectionSemantics_ServerStart_Succeeds()
    {
        // Arrange
        _server = CreateServer("gw-start");

        // Act
        var act = async () => await _server.StartAsync(CancellationToken.None);

        // Assert
        await act.Should().NotThrowAsync();

        _output.WriteLine("Server start test passed");
    }

    [RabbitMqIntegrationFact]
    public async Task ConnectionSemantics_ServerStop_Succeeds()
    {
        // Arrange
        _server = CreateServer("gw-stop");
        await _server.StartAsync(CancellationToken.None);

        // Act
        var act = async () => await _server.StopAsync(CancellationToken.None);

        // Assert
        await act.Should().NotThrowAsync();

        _output.WriteLine("Server stop test passed");
    }

    [RabbitMqIntegrationFact]
    public async Task ConnectionSemantics_ClientConnect_Succeeds()
    {
        // Arrange
        _client = CreateClient("svc-connect");

        // Act
        var act = async () => await ConnectClientAsync(_client, "svc-connect");

        // Assert
        await act.Should().NotThrowAsync();

        _output.WriteLine("Client connect test passed");
    }

    [RabbitMqIntegrationFact]
    public async Task ConnectionSemantics_ClientDisconnect_Succeeds()
    {
        // Arrange
        _client = CreateClient("svc-disconnect");
        await ConnectClientAsync(_client, "svc-disconnect");

        // Act
        var act = async () => await _client.DisconnectAsync();

        // Assert
        await act.Should().NotThrowAsync();

        _output.WriteLine("Client disconnect test passed");
    }

    [RabbitMqIntegrationFact]
    public async Task ConnectionSemantics_MultipleClients_CanConnectSimultaneously()
    {
        // Arrange
        var client1 = CreateClient("svc-multi-1");
        var client2 = CreateClient("svc-multi-2");

        try
        {
            // Act
            await Task.WhenAll(
                ConnectClientAsync(client1, "svc-multi-1"),
                ConnectClientAsync(client2, "svc-multi-2"));

            // Assert - both connections succeeded
            _output.WriteLine("Multiple clients connected simultaneously");
        }
        finally
        {
            await client1.DisposeAsync();
            await client2.DisposeAsync();
        }
    }

    #endregion

    #region Message Delivery Tests

    [RabbitMqIntegrationFact]
    public async Task MessageDelivery_HelloFromClient_ServerReceives()
    {
        // Arrange
        const string nodeId = "gw-delivery";
        _server = CreateServer(nodeId);
        _client = CreateClient("svc-delivery", nodeId);

        string? receivedConnectionId = null;
        var helloReceived = new TaskCompletionSource<bool>();

        _server.OnFrame += (connectionId, frame) =>
        {
            if (frame.Type == FrameType.Hello)
            {
                receivedConnectionId = connectionId;
                helloReceived.TrySetResult(true);
            }
        };

        await _server.StartAsync(CancellationToken.None);

        // Act
        await ConnectClientAsync(_client, "svc-delivery");

        // Assert
        using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(10));
        await helloReceived.Task.WaitAsync(cts.Token);

        receivedConnectionId.Should().NotBeNullOrEmpty();

        _output.WriteLine($"Server received HELLO from connection: {receivedConnectionId}");
    }

    [RabbitMqIntegrationFact]
    public async Task MessageDelivery_MultipleHeartbeats_AllReceived()
    {
        // Arrange
        const string nodeId = "gw-multi-heartbeat";
        const int heartbeatCount = 5;
        _server = CreateServer(nodeId);
        _client = CreateClient("svc-multi-heartbeat", nodeId);

        var receivedCount = 0;
        var allReceived = new TaskCompletionSource<bool>();

        _server.OnFrame += (connectionId, frame) =>
        {
            if (frame.Type == FrameType.Heartbeat)
            {
                var count = Interlocked.Increment(ref receivedCount);
                if (count == heartbeatCount)
                {
                    allReceived.TrySetResult(true);
                }
            }
        };

        await _server.StartAsync(CancellationToken.None);
        await ConnectClientAsync(_client, "svc-multi-heartbeat");
        await Task.Delay(500);

        // Act
        for (int i = 0; i < heartbeatCount; i++)
        {
            var heartbeat = new HeartbeatPayload
            {
                InstanceId = "svc-multi-heartbeat",
                Status = InstanceHealthStatus.Healthy,
                InFlightRequestCount = i,
                ErrorRate = 0,
                TimestampUtc = DateTime.UtcNow
            };
            await _client.SendHeartbeatAsync(heartbeat, CancellationToken.None);
        }

        // Assert
        using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(15));
        await allReceived.Task.WaitAsync(cts.Token);

        receivedCount.Should().Be(heartbeatCount);

        _output.WriteLine($"All {heartbeatCount} heartbeats received");
    }

    #endregion

    #region Connection Resilience Tests

    [RabbitMqIntegrationFact]
    public async Task ConnectionResilience_BrokerRestart_ClientRecovers()
    {
        // Arrange
        const string nodeId = "gw-resilience";
        _server = CreateServer(nodeId);
        _client = CreateClient("svc-resilience", nodeId);

        var postRestartReceived = new TaskCompletionSource<bool>();

        _server.OnFrame += (_, frame) =>
        {
            if (frame.Type == FrameType.Heartbeat)
            {
                postRestartReceived.TrySetResult(true);
            }
        };

        await _server.StartAsync(CancellationToken.None);
        await ConnectClientAsync(_client, "svc-resilience");

        // Wait for the connection to be established
        await Task.Delay(1000);

        // Act - Restart the broker
        await _fixture.RestartAsync();

        // Wait for recovery, retrying the send until it succeeds
        await EventuallyAsync(
            async () =>
            {
                var heartbeat = new HeartbeatPayload
                {
                    InstanceId = "svc-resilience",
                    Status = InstanceHealthStatus.Healthy,
                    InFlightRequestCount = 0,
                    ErrorRate = 0,
                    TimestampUtc = DateTime.UtcNow
                };
                await _client.SendHeartbeatAsync(heartbeat, CancellationToken.None);
                return true;
            },
            timeout: TimeSpan.FromSeconds(30),
            swallowExceptions: true);

        // Assert
        using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(15));
        await postRestartReceived.Task.WaitAsync(cts.Token);

        _output.WriteLine("Connection resilience test passed");
    }

    #endregion

    #region Queue Configuration Tests

    [RabbitMqIntegrationFact]
    public async Task QueueConfig_AutoDeleteQueues_CleanedUpOnDisconnect()
    {
        // Arrange
        var options = _fixture.CreateOptions(instanceId: "svc-autodelete");
        options.AutoDeleteQueues = true;

        _client = new RabbitMqTransportClient(
            Options.Create(options),
            _fixture.GetLogger<RabbitMqTransportClient>());

        await ConnectClientAsync(_client, "svc-autodelete");

        // Act
        await _client.DisconnectAsync();
        await _client.DisposeAsync();
        _client = null;

        // Assert - queue should be auto-deleted (no way to verify without management API)
        // Success is indicated by no exceptions
        _output.WriteLine("Auto-delete queue test passed");
    }

    [RabbitMqIntegrationFact]
    public async Task QueueConfig_PrefetchCount_AppliedOnConnect()
    {
        // Arrange
        var options = _fixture.CreateOptions(instanceId: "svc-prefetch");
        options.PrefetchCount = 50;

        _client = new RabbitMqTransportClient(
            Options.Create(options),
            _fixture.GetLogger<RabbitMqTransportClient>());

        // Act & Assert - success indicates prefetch was set
        var act = async () => await ConnectClientAsync(_client, "svc-prefetch");

        await act.Should().NotThrowAsync();

        _output.WriteLine("Prefetch count test passed");
    }

    #endregion

    #region Determinism Tests

    [RabbitMqIntegrationFact]
    public void Determinism_SameFrame_SameProperties()
    {
        // Test that same input produces same output
        for (int run = 0; run < 10; run++)
        {
            var frame = new Frame
            {
                Type = FrameType.Request,
                CorrelationId = "deterministic-test",
                Payload = Encoding.UTF8.GetBytes("consistent-data")
            };

            var props1 = RabbitMqFrameProtocol.CreateProperties(frame, "reply-queue");
            var props2 = RabbitMqFrameProtocol.CreateProperties(frame, "reply-queue");

            props1.Type.Should().Be(props2.Type);
            props1.CorrelationId.Should().Be(props2.CorrelationId);
            props1.ReplyTo.Should().Be(props2.ReplyTo);
        }

        _output.WriteLine("Determinism test passed");
    }

    #endregion

    #region Helpers

    private RabbitMqTransportServer CreateServer(string? nodeId = null)
    {
        var options = _fixture.CreateOptions(nodeId: nodeId ?? $"gw-{Guid.NewGuid():N}"[..12]);
        return new RabbitMqTransportServer(
            Options.Create(options),
            _fixture.GetLogger<RabbitMqTransportServer>());
    }

    private RabbitMqTransportClient CreateClient(string? instanceId = null, string? nodeId = null)
    {
        var options = _fixture.CreateOptions(
            instanceId: instanceId ?? $"svc-{Guid.NewGuid():N}"[..12],
            nodeId: nodeId);
        return new RabbitMqTransportClient(
            Options.Create(options),
            _fixture.GetLogger<RabbitMqTransportClient>());
    }

    private static async Task ConnectClientAsync(RabbitMqTransportClient client, string instanceId)
    {
        var instance = new InstanceDescriptor
        {
            InstanceId = instanceId,
            ServiceName = "test-service",
            Version = "1.0.0",
            Region = "us-east-1"
        };
        await client.ConnectAsync(instance, [], CancellationToken.None);
    }

    private static async Task EventuallyAsync(
        Func<Task<bool>> predicate,
        TimeSpan timeout,
        bool swallowExceptions,
        TimeSpan? pollInterval = null)
    {
        pollInterval ??= TimeSpan.FromMilliseconds(500);
        var deadline = DateTime.UtcNow + timeout;

        while (DateTime.UtcNow < deadline)
        {
            try
            {
                if (await predicate())
                {
                    return;
                }
            }
            catch when (swallowExceptions)
            {
                // Retry
            }

            await Task.Delay(pollInterval.Value);
        }

        (await predicate()).Should().BeTrue("condition should become true within {0}", timeout);
    }

    #endregion

    #region Test Helpers

    private sealed class TestBasicProperties : RabbitMQ.Client.IReadOnlyBasicProperties
    {
        public string? AppId { get; init; }
        public string? ClusterId { get; init; }
        public string? ContentEncoding { get; init; }
        public string? ContentType { get; init; }
        public string? CorrelationId { get; init; }
        public RabbitMQ.Client.DeliveryModes DeliveryMode { get; init; }
        public string? Expiration { get; init; }
        public IDictionary<string, object?>? Headers { get; init; }
        public string? MessageId { get; init; }
        public bool Persistent { get; init; }
        public byte Priority { get; init; }
        public string? ReplyTo { get; init; }
        public RabbitMQ.Client.PublicationAddress? ReplyToAddress { get; init; }
        public RabbitMQ.Client.AmqpTimestamp Timestamp { get; init; }
        public string? Type { get; init; }
        public string? UserId { get; init; }

        public bool IsAppIdPresent() => AppId != null;
        public bool IsClusterIdPresent() => ClusterId != null;
        public bool IsContentEncodingPresent() => ContentEncoding != null;
        public bool IsContentTypePresent() => ContentType != null;
        public bool IsCorrelationIdPresent() => CorrelationId != null;
        public bool IsDeliveryModePresent() => true;
        public bool IsExpirationPresent() => Expiration != null;
        public bool IsHeadersPresent() => Headers != null;
        public bool IsMessageIdPresent() => MessageId != null;
        public bool IsPriorityPresent() => true;
        public bool IsReplyToPresent() => ReplyTo != null;
        public bool IsTimestampPresent() => true;
        public bool IsTypePresent() => Type != null;
        public bool IsUserIdPresent() => UserId != null;
    }

    #endregion
}
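
// To run these opt-in integration tests locally, one approach (assuming a
// local Docker daemon for Testcontainers, per the skip message in the
// integration attributes) is:
//
//     STELLAOPS_TEST_RABBITMQ=1 dotnet test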