Sprint enhancements

StellaOps Bot
2025-12-25 19:52:30 +02:00
parent ef6ac36323
commit b8b2d83f4a
138 changed files with 25133 additions and 594 deletions

View File

@@ -0,0 +1,318 @@
using System.Security.Cryptography;
namespace StellaOps.Provcache;
/// <summary>
/// Interface for splitting large evidence into fixed-size chunks
/// and reassembling them with Merkle verification.
/// </summary>
public interface IEvidenceChunker
{
/// <summary>
/// Splits evidence into chunks.
/// </summary>
/// <param name="evidence">The evidence bytes to split.</param>
/// <param name="contentType">MIME type of the evidence.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The chunking result with chunks and proof root.</returns>
Task<ChunkingResult> ChunkAsync(
ReadOnlyMemory<byte> evidence,
string contentType,
CancellationToken cancellationToken = default);
/// <summary>
/// Splits evidence from a stream.
/// </summary>
/// <param name="evidenceStream">Stream containing evidence.</param>
/// <param name="contentType">MIME type of the evidence.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Async enumerable of chunks as they are created.</returns>
IAsyncEnumerable<EvidenceChunk> ChunkStreamAsync(
Stream evidenceStream,
string contentType,
CancellationToken cancellationToken = default);
/// <summary>
/// Reassembles chunks into the original evidence.
/// </summary>
/// <param name="chunks">The chunks to reassemble (must be in order).</param>
/// <param name="expectedProofRoot">Expected Merkle root for verification.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The reassembled evidence bytes.</returns>
Task<byte[]> ReassembleAsync(
IEnumerable<EvidenceChunk> chunks,
string expectedProofRoot,
CancellationToken cancellationToken = default);
/// <summary>
/// Verifies a single chunk against its hash.
/// </summary>
/// <param name="chunk">The chunk to verify.</param>
/// <returns>True if the chunk is valid.</returns>
bool VerifyChunk(EvidenceChunk chunk);
/// <summary>
/// Computes the Merkle root from chunk hashes.
/// </summary>
/// <param name="chunkHashes">Ordered list of chunk hashes.</param>
/// <returns>The Merkle root.</returns>
string ComputeMerkleRoot(IEnumerable<string> chunkHashes);
}
/// <summary>
/// Result of chunking evidence.
/// </summary>
public sealed record ChunkingResult
{
/// <summary>
/// The computed Merkle root of all chunks.
/// </summary>
public required string ProofRoot { get; init; }
/// <summary>
/// The generated chunks.
/// </summary>
public required IReadOnlyList<EvidenceChunk> Chunks { get; init; }
/// <summary>
/// Total size of the original evidence.
/// </summary>
public required long TotalSize { get; init; }
}
/// <summary>
/// Default implementation of <see cref="IEvidenceChunker"/>.
/// </summary>
public sealed class EvidenceChunker : IEvidenceChunker
{
private readonly ProvcacheOptions _options;
private readonly TimeProvider _timeProvider;
public EvidenceChunker(ProvcacheOptions options, TimeProvider? timeProvider = null)
{
_options = options ?? throw new ArgumentNullException(nameof(options));
_timeProvider = timeProvider ?? TimeProvider.System;
}
/// <inheritdoc />
public Task<ChunkingResult> ChunkAsync(
ReadOnlyMemory<byte> evidence,
string contentType,
CancellationToken cancellationToken = default)
{
ArgumentNullException.ThrowIfNull(contentType);
var chunks = new List<EvidenceChunk>();
var chunkHashes = new List<string>();
var chunkSize = _options.ChunkSize;
var now = _timeProvider.GetUtcNow();
var span = evidence.Span;
var totalSize = span.Length;
var chunkIndex = 0;
for (var offset = 0; offset < totalSize; offset += chunkSize)
{
cancellationToken.ThrowIfCancellationRequested();
var remainingBytes = totalSize - offset;
var currentChunkSize = Math.Min(chunkSize, remainingBytes);
var chunkData = span.Slice(offset, currentChunkSize).ToArray();
var chunkHash = ComputeHash(chunkData);
chunks.Add(new EvidenceChunk
{
ChunkId = Guid.NewGuid(),
ProofRoot = string.Empty, // Will be set after computing Merkle root
ChunkIndex = chunkIndex,
ChunkHash = chunkHash,
Blob = chunkData,
BlobSize = currentChunkSize,
ContentType = contentType,
CreatedAt = now
});
chunkHashes.Add(chunkHash);
chunkIndex++;
}
var proofRoot = ComputeMerkleRoot(chunkHashes);
// Update proof root in all chunks
var finalChunks = chunks.Select(c => c with { ProofRoot = proofRoot }).ToList();
return Task.FromResult(new ChunkingResult
{
ProofRoot = proofRoot,
Chunks = finalChunks,
TotalSize = totalSize
});
}
/// <inheritdoc />
public async IAsyncEnumerable<EvidenceChunk> ChunkStreamAsync(
Stream evidenceStream,
string contentType,
[System.Runtime.CompilerServices.EnumeratorCancellation] CancellationToken cancellationToken = default)
{
ArgumentNullException.ThrowIfNull(evidenceStream);
ArgumentNullException.ThrowIfNull(contentType);
var chunkSize = _options.ChunkSize;
var buffer = new byte[chunkSize];
var chunkIndex = 0;
var now = _timeProvider.GetUtcNow();
int bytesRead;
// ReadAtLeastAsync fills a full chunk unless the stream ends, so partial
// reads from network or pipe streams cannot shift chunk boundaries; the
// stream path therefore yields the same chunks (and Merkle root) as ChunkAsync.
while ((bytesRead = await evidenceStream.ReadAtLeastAsync(buffer, chunkSize, throwOnEndOfStream: false, cancellationToken)) > 0)
{
var chunkData = buffer[..bytesRead]; // Range copy; the buffer is safely reused.
var chunkHash = ComputeHash(chunkData);
yield return new EvidenceChunk
{
ChunkId = Guid.NewGuid(),
ProofRoot = string.Empty, // Caller must compute after all chunks
ChunkIndex = chunkIndex,
ChunkHash = chunkHash,
Blob = chunkData,
BlobSize = bytesRead,
ContentType = contentType,
CreatedAt = now
};
chunkIndex++;
}
}
/// <inheritdoc />
public Task<byte[]> ReassembleAsync(
IEnumerable<EvidenceChunk> chunks,
string expectedProofRoot,
CancellationToken cancellationToken = default)
{
ArgumentNullException.ThrowIfNull(chunks);
ArgumentException.ThrowIfNullOrWhiteSpace(expectedProofRoot);
var orderedChunks = chunks.OrderBy(c => c.ChunkIndex).ToList();
if (orderedChunks.Count == 0)
{
throw new ArgumentException("No chunks provided.", nameof(chunks));
}
// Verify Merkle root
var chunkHashes = orderedChunks.Select(c => c.ChunkHash).ToList();
var computedRoot = ComputeMerkleRoot(chunkHashes);
if (!string.Equals(computedRoot, expectedProofRoot, StringComparison.OrdinalIgnoreCase))
{
throw new InvalidOperationException(
$"Merkle root mismatch. Expected: {expectedProofRoot}, Computed: {computedRoot}");
}
// Verify each chunk
foreach (var chunk in orderedChunks)
{
cancellationToken.ThrowIfCancellationRequested();
if (!VerifyChunk(chunk))
{
throw new InvalidOperationException(
$"Chunk {chunk.ChunkIndex} verification failed. Expected hash: {chunk.ChunkHash}");
}
}
// Reassemble
var totalSize = orderedChunks.Sum(c => c.BlobSize);
var result = new byte[totalSize];
var offset = 0;
foreach (var chunk in orderedChunks)
{
chunk.Blob.CopyTo(result, offset);
offset += chunk.BlobSize;
}
return Task.FromResult(result);
}
/// <inheritdoc />
public bool VerifyChunk(EvidenceChunk chunk)
{
ArgumentNullException.ThrowIfNull(chunk);
var computedHash = ComputeHash(chunk.Blob);
return string.Equals(computedHash, chunk.ChunkHash, StringComparison.OrdinalIgnoreCase);
}
/// <inheritdoc />
public string ComputeMerkleRoot(IEnumerable<string> chunkHashes)
{
ArgumentNullException.ThrowIfNull(chunkHashes);
var hashes = chunkHashes.ToList();
if (hashes.Count == 0)
{
// Empty Merkle tree
return ComputeHash([]);
}
if (hashes.Count == 1)
{
return hashes[0];
}
// Build Merkle tree bottom-up
var currentLevel = hashes.Select(h => HexToBytes(h)).ToList();
while (currentLevel.Count > 1)
{
var nextLevel = new List<byte[]>();
for (var i = 0; i < currentLevel.Count; i += 2)
{
byte[] combined;
if (i + 1 < currentLevel.Count)
{
// Pair exists - concatenate and hash
combined = new byte[currentLevel[i].Length + currentLevel[i + 1].Length];
currentLevel[i].CopyTo(combined, 0);
currentLevel[i + 1].CopyTo(combined, currentLevel[i].Length);
}
else
{
// Odd node - pair it with itself
combined = new byte[currentLevel[i].Length * 2];
currentLevel[i].CopyTo(combined, 0);
currentLevel[i].CopyTo(combined, currentLevel[i].Length);
}
nextLevel.Add(SHA256.HashData(combined));
}
currentLevel = nextLevel;
}
return $"sha256:{Convert.ToHexStringLower(currentLevel[0])}";
}
private static string ComputeHash(ReadOnlySpan<byte> data)
{
var hash = SHA256.HashData(data);
return $"sha256:{Convert.ToHexStringLower(hash)}";
}
private static byte[] HexToBytes(string hash)
{
// Strip sha256: prefix if present
var hex = hash.StartsWith("sha256:", StringComparison.OrdinalIgnoreCase)
? hash[7..]
: hash;
return Convert.FromHexString(hex);
}
}
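// Usage sketch (illustrative, not part of the commit): chunk a payload and
// reassemble it with Merkle verification. Assumes ProvcacheOptions exposes a
// settable ChunkSize, as read by the chunker above.
internal static class EvidenceChunkerExample
{
    public static async Task RoundTripAsync()
    {
        var chunker = new EvidenceChunker(new ProvcacheOptions { ChunkSize = 64 * 1024 });
        byte[] evidence = await File.ReadAllBytesAsync("evidence.json");

        ChunkingResult result = await chunker.ChunkAsync(evidence, "application/json");

        // Every chunk carries the same proof root; ReassembleAsync recomputes
        // the Merkle root and verifies each chunk hash before concatenating.
        byte[] roundTrip = await chunker.ReassembleAsync(result.Chunks, result.ProofRoot);
        if (!roundTrip.AsSpan().SequenceEqual(evidence))
            throw new InvalidOperationException("Round-trip mismatch.");

        // Odd-node rule in ComputeMerkleRoot: a leaf without a sibling is
        // paired with itself, so three leaves A, B, C hash as H(H(A|B) | H(C|C)).
    }
}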

View File

@@ -0,0 +1,110 @@
using System.ComponentModel.DataAnnotations;
using System.ComponentModel.DataAnnotations.Schema;
namespace StellaOps.Provcache.Entities;
/// <summary>
/// EF Core entity for provcache.prov_revocations table.
/// Tracks all revocation events for audit trail and replay.
/// </summary>
[Table("prov_revocations", Schema = "provcache")]
public sealed class ProvRevocationEntity
{
/// <summary>
/// Auto-incrementing sequence number for ordering.
/// </summary>
[Key]
[Column("seq_no")]
[DatabaseGenerated(DatabaseGeneratedOption.Identity)]
public long SeqNo { get; set; }
/// <summary>
/// Unique identifier for this revocation event.
/// </summary>
[Column("revocation_id")]
public required Guid RevocationId { get; set; }
/// <summary>
/// Type of revocation: 'signer', 'feed_epoch', 'policy', 'explicit'.
/// </summary>
[Column("revocation_type")]
[MaxLength(32)]
public required string RevocationType { get; set; }
/// <summary>
/// The key that was revoked (signer hash, feed epoch, policy hash, or verikey).
/// </summary>
[Column("revoked_key")]
[MaxLength(512)]
public required string RevokedKey { get; set; }
/// <summary>
/// Reason for revocation.
/// </summary>
[Column("reason")]
[MaxLength(1024)]
public string? Reason { get; set; }
/// <summary>
/// Number of cache entries invalidated.
/// </summary>
[Column("entries_invalidated")]
public int EntriesInvalidated { get; set; }
/// <summary>
/// Source that triggered the revocation.
/// </summary>
[Column("source")]
[MaxLength(128)]
public required string Source { get; set; }
/// <summary>
/// Optional correlation ID for tracing.
/// </summary>
[Column("correlation_id")]
[MaxLength(128)]
public string? CorrelationId { get; set; }
/// <summary>
/// UTC timestamp when revocation occurred.
/// </summary>
[Column("revoked_at")]
public DateTimeOffset RevokedAt { get; set; }
/// <summary>
/// Optional metadata as JSON.
/// </summary>
[Column("metadata", TypeName = "jsonb")]
public string? Metadata { get; set; }
}
/// <summary>
/// Types of revocation events.
/// </summary>
public static class RevocationTypes
{
/// <summary>
/// Signer certificate revoked.
/// </summary>
public const string Signer = "signer";
/// <summary>
/// Feed epoch advanced (older epochs revoked).
/// </summary>
public const string FeedEpoch = "feed_epoch";
/// <summary>
/// Policy bundle updated/revoked.
/// </summary>
public const string Policy = "policy";
/// <summary>
/// Explicit revocation of specific entry.
/// </summary>
public const string Explicit = "explicit";
/// <summary>
/// TTL expiration (for audit completeness).
/// </summary>
public const string Expiration = "expiration";
}
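// Usage sketch (illustrative): recording a revocation row for the audit
// trail. ProvcacheDbContext is a hypothetical EF Core context exposing a
// DbSet<ProvRevocationEntity>; SeqNo is assigned by the identity column.
internal static class RevocationAuditExample
{
    public static async Task RecordAsync(ProvcacheDbContext db, string signerHash, int invalidated)
    {
        db.Add(new ProvRevocationEntity
        {
            RevocationId = Guid.NewGuid(),
            RevocationType = RevocationTypes.Signer,
            RevokedKey = signerHash,
            Reason = "Key compromise reported",
            EntriesInvalidated = invalidated,
            Source = "SignerSetInvalidator",
            RevokedAt = DateTimeOffset.UtcNow
        });
        await db.SaveChangesAsync();
    }
}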

View File

@@ -0,0 +1,109 @@
namespace StellaOps.Provcache.Events;
/// <summary>
/// Event published when an advisory feed advances to a new epoch.
/// Provcache subscribers use this to invalidate cache entries
/// that were computed against older feed epochs.
/// </summary>
/// <remarks>
/// Stream name: <c>stellaops:events:feed-epoch-advanced</c>
/// </remarks>
public sealed record FeedEpochAdvancedEvent
{
/// <summary>
/// Stream name for feed epoch events.
/// </summary>
public const string StreamName = "stellaops:events:feed-epoch-advanced";
/// <summary>
/// Event type identifier for serialization.
/// </summary>
public const string EventType = "feed.epoch.advanced.v1";
/// <summary>
/// Unique identifier for this event instance.
/// </summary>
public required Guid EventId { get; init; }
/// <summary>
/// Timestamp when the event occurred (UTC).
/// </summary>
public required DateTimeOffset Timestamp { get; init; }
/// <summary>
/// The feed identifier (e.g., "cve", "ghsa", "osv", "redhat-oval").
/// </summary>
public required string FeedId { get; init; }
/// <summary>
/// The previous epoch identifier.
/// Format varies by feed (e.g., "2024-12-24T12:00:00Z", "v2024.52").
/// </summary>
public required string PreviousEpoch { get; init; }
/// <summary>
/// The new epoch identifier.
/// </summary>
public required string NewEpoch { get; init; }
/// <summary>
/// When the new epoch became effective.
/// Cache entries with feed_epoch older than this should be invalidated.
/// </summary>
public required DateTimeOffset EffectiveAt { get; init; }
/// <summary>
/// Number of advisories added in this epoch (for metrics).
/// </summary>
public int? AdvisoriesAdded { get; init; }
/// <summary>
/// Number of advisories modified in this epoch (for metrics).
/// </summary>
public int? AdvisoriesModified { get; init; }
/// <summary>
/// Number of advisories withdrawn in this epoch (for metrics).
/// </summary>
public int? AdvisoriesWithdrawn { get; init; }
/// <summary>
/// Tenant ID if multi-tenant (null for global feeds).
/// </summary>
public string? TenantId { get; init; }
/// <summary>
/// Correlation ID for distributed tracing.
/// </summary>
public string? CorrelationId { get; init; }
/// <summary>
/// Creates a new FeedEpochAdvancedEvent.
/// </summary>
public static FeedEpochAdvancedEvent Create(
string feedId,
string previousEpoch,
string newEpoch,
DateTimeOffset effectiveAt,
int? advisoriesAdded = null,
int? advisoriesModified = null,
int? advisoriesWithdrawn = null,
string? tenantId = null,
string? correlationId = null)
{
return new FeedEpochAdvancedEvent
{
EventId = Guid.NewGuid(),
Timestamp = DateTimeOffset.UtcNow,
FeedId = feedId,
PreviousEpoch = previousEpoch,
NewEpoch = newEpoch,
EffectiveAt = effectiveAt,
AdvisoriesAdded = advisoriesAdded,
AdvisoriesModified = advisoriesModified,
AdvisoriesWithdrawn = advisoriesWithdrawn,
TenantId = tenantId,
CorrelationId = correlationId
};
}
}
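// Usage sketch (illustrative): a feed ingestion job announcing a new epoch.
// Only the Create factory below is defined in this file; how the event
// reaches FeedEpochAdvancedEvent.StreamName depends on the messaging layer.
internal static class FeedEpochPublishExample
{
    public static FeedEpochAdvancedEvent Build() => FeedEpochAdvancedEvent.Create(
        feedId: "cve",
        previousEpoch: "2024-12-24T12:00:00Z",
        newEpoch: "2024-12-25T12:00:00Z",
        effectiveAt: DateTimeOffset.UtcNow,
        advisoriesAdded: 42,
        advisoriesModified: 7);
    // Subscribers such as FeedEpochInvalidator (below) react by invalidating
    // cache entries whose feed_epoch predates NewEpoch.
}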

View File

@@ -0,0 +1,96 @@
namespace StellaOps.Provcache.Events;
/// <summary>
/// Event published when a signer key is revoked.
/// Provcache subscribers use this to invalidate cache entries
/// that were signed by the revoked key.
/// </summary>
/// <remarks>
/// Stream name: <c>stellaops:events:signer-revoked</c>
/// </remarks>
public sealed record SignerRevokedEvent
{
/// <summary>
/// Stream name for signer revocation events.
/// </summary>
public const string StreamName = "stellaops:events:signer-revoked";
/// <summary>
/// Event type identifier for serialization.
/// </summary>
public const string EventType = "signer.revoked.v1";
/// <summary>
/// Unique identifier for this event instance.
/// </summary>
public required Guid EventId { get; init; }
/// <summary>
/// Timestamp when the event occurred (UTC).
/// </summary>
public required DateTimeOffset Timestamp { get; init; }
/// <summary>
/// The trust anchor ID that owns the revoked key.
/// </summary>
public required Guid AnchorId { get; init; }
/// <summary>
/// The revoked key identifier.
/// </summary>
public required string KeyId { get; init; }
/// <summary>
/// Hash of the revoked signer's certificate/public key.
/// This is used to match against the <c>signer_set_hash</c> in cache entries.
/// Format: <c>sha256:&lt;hex&gt;</c>
/// </summary>
public required string SignerHash { get; init; }
/// <summary>
/// When the revocation became effective.
/// Cache entries created after this time with this signer should be invalidated.
/// </summary>
public required DateTimeOffset EffectiveAt { get; init; }
/// <summary>
/// Reason for the revocation (for audit purposes).
/// </summary>
public string? Reason { get; init; }
/// <summary>
/// Actor who initiated the revocation.
/// </summary>
public string? Actor { get; init; }
/// <summary>
/// Correlation ID for distributed tracing.
/// </summary>
public string? CorrelationId { get; init; }
/// <summary>
/// Creates a new SignerRevokedEvent.
/// </summary>
public static SignerRevokedEvent Create(
Guid anchorId,
string keyId,
string signerHash,
DateTimeOffset effectiveAt,
string? reason = null,
string? actor = null,
string? correlationId = null)
{
return new SignerRevokedEvent
{
EventId = Guid.NewGuid(),
Timestamp = DateTimeOffset.UtcNow,
AnchorId = anchorId,
KeyId = keyId,
SignerHash = signerHash,
EffectiveAt = effectiveAt,
Reason = reason,
Actor = actor,
CorrelationId = correlationId
};
}
}
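// Usage sketch (illustrative): announcing a revoked signer key. SignerHash
// must use the sha256:<hex> format so it matches the signer_set_hash values
// stored on cache entries; the hash below is the SHA-256 of the empty string.
internal static class SignerRevocationPublishExample
{
    public static SignerRevokedEvent Build(Guid anchorId) => SignerRevokedEvent.Create(
        anchorId: anchorId,
        keyId: "kms-key-2025-01",
        signerHash: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
        effectiveAt: DateTimeOffset.UtcNow,
        reason: "Key compromise reported",
        actor: "security-team");
}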

View File

@@ -0,0 +1,99 @@
namespace StellaOps.Provcache;
/// <summary>
/// Interface for exporting and importing minimal proof bundles.
/// Supports various density levels for air-gap scenarios.
/// </summary>
public interface IMinimalProofExporter
{
/// <summary>
/// Exports a minimal proof bundle for the given veri key.
/// </summary>
/// <param name="veriKey">The verification key identifying the cache entry.</param>
/// <param name="options">Export options including density level and signing.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The exported minimal proof bundle.</returns>
Task<MinimalProofBundle> ExportAsync(
string veriKey,
MinimalProofExportOptions options,
CancellationToken cancellationToken = default);
/// <summary>
/// Exports a minimal proof bundle as JSON bytes.
/// </summary>
/// <param name="veriKey">The verification key identifying the cache entry.</param>
/// <param name="options">Export options including density level and signing.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>UTF-8 encoded JSON bytes of the bundle.</returns>
Task<byte[]> ExportAsJsonAsync(
string veriKey,
MinimalProofExportOptions options,
CancellationToken cancellationToken = default);
/// <summary>
/// Exports a minimal proof bundle to a stream.
/// </summary>
/// <param name="veriKey">The verification key identifying the cache entry.</param>
/// <param name="options">Export options including density level and signing.</param>
/// <param name="outputStream">The stream to write the bundle to.</param>
/// <param name="cancellationToken">Cancellation token.</param>
Task ExportToStreamAsync(
string veriKey,
MinimalProofExportOptions options,
Stream outputStream,
CancellationToken cancellationToken = default);
/// <summary>
/// Imports a minimal proof bundle.
/// </summary>
/// <param name="bundle">The bundle to import.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Import result with verification status.</returns>
Task<MinimalProofImportResult> ImportAsync(
MinimalProofBundle bundle,
CancellationToken cancellationToken = default);
/// <summary>
/// Imports a minimal proof bundle from JSON bytes.
/// </summary>
/// <param name="jsonBytes">UTF-8 encoded JSON bytes of the bundle.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Import result with verification status.</returns>
Task<MinimalProofImportResult> ImportFromJsonAsync(
byte[] jsonBytes,
CancellationToken cancellationToken = default);
/// <summary>
/// Imports a minimal proof bundle from a stream.
/// </summary>
/// <param name="inputStream">The stream containing the bundle JSON.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Import result with verification status.</returns>
Task<MinimalProofImportResult> ImportFromStreamAsync(
Stream inputStream,
CancellationToken cancellationToken = default);
/// <summary>
/// Verifies a bundle without importing it.
/// </summary>
/// <param name="bundle">The bundle to verify.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Verification results.</returns>
Task<ImportVerification> VerifyAsync(
MinimalProofBundle bundle,
CancellationToken cancellationToken = default);
/// <summary>
/// Calculates the expected size of an export with the given options.
/// </summary>
/// <param name="veriKey">The verification key identifying the cache entry.</param>
/// <param name="density">The density level.</param>
/// <param name="standardChunkCount">Number of chunks for Standard density.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Estimated size in bytes.</returns>
Task<long> EstimateExportSizeAsync(
string veriKey,
ProofDensity density,
int standardChunkCount = 3,
CancellationToken cancellationToken = default);
}
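// Usage sketch (illustrative): estimate, then export a Standard-density
// bundle to a file for sneakernet transfer. `exporter` is any
// IMinimalProofExporter implementation (e.g., MinimalProofExporter below).
internal static class MinimalProofExportExample
{
    public static async Task ExportAsync(IMinimalProofExporter exporter, string veriKey)
    {
        long estimate = await exporter.EstimateExportSizeAsync(veriKey, ProofDensity.Standard);
        Console.WriteLine($"Estimated bundle size: {estimate} bytes");

        var options = new MinimalProofExportOptions
        {
            Density = ProofDensity.Standard,
            StandardDensityChunkCount = 3,
            ExportedBy = "airgap-export-station-1"
        };
        await using var file = File.Create("bundle.proof.json");
        await exporter.ExportToStreamAsync(veriKey, options, file);
    }
}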

View File

@@ -0,0 +1,263 @@
using System.Text.Json.Serialization;
namespace StellaOps.Provcache;
/// <summary>
/// Density levels for minimal proof export.
/// </summary>
public enum ProofDensity
{
/// <summary>
/// Digest + proof root + chunk manifest only (~2KB).
/// For quick verification and high-trust networks.
/// </summary>
Lite,
/// <summary>
/// Lite + first N chunks (~200KB typical).
/// For normal air-gap scenarios and auditor preview.
/// </summary>
Standard,
/// <summary>
/// Full evidence with all chunks (variable size).
/// For complete audit and compliance evidence.
/// </summary>
Strict
}
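// Size intuition (illustrative): with a 64 KiB chunk size, Standard density
// at its default of 3 chunks carries 3 * 64 KiB = 192 KiB of raw evidence;
// after base64 (4/3) plus JSON framing (the 1.37 factor used by
// MinimalProofExporter.EstimateExportSizeAsync) the bundle lands near
// 2 KiB + 192 KiB * 1.37 ~= 265 KiB, while Lite stays at the ~2 KiB baseline.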
/// <summary>
/// Minimal proof bundle for air-gap export/import.
/// Contains the decision digest, proof root, and optionally evidence chunks.
/// </summary>
public sealed record MinimalProofBundle
{
/// <summary>
/// Bundle format version for compatibility checking.
/// </summary>
[JsonPropertyName("bundleVersion")]
public string BundleVersion { get; init; } = "v1";
/// <summary>
/// The density level this bundle was exported with.
/// </summary>
[JsonPropertyName("density")]
public required ProofDensity Density { get; init; }
/// <summary>
/// The decision digest containing verdict hash, proof root, and trust score.
/// </summary>
[JsonPropertyName("digest")]
public required DecisionDigest Digest { get; init; }
/// <summary>
/// Chunk manifest for lazy evidence retrieval.
/// Always present regardless of density level.
/// </summary>
[JsonPropertyName("manifest")]
public required ChunkManifest Manifest { get; init; }
/// <summary>
/// Included evidence chunks (density-dependent).
/// - Lite: empty
/// - Standard: first N chunks
/// - Strict: all chunks
/// </summary>
[JsonPropertyName("chunks")]
public IReadOnlyList<BundleChunk> Chunks { get; init; } = [];
/// <summary>
/// UTC timestamp when bundle was exported.
/// </summary>
[JsonPropertyName("exportedAt")]
public required DateTimeOffset ExportedAt { get; init; }
/// <summary>
/// Exporting system identifier for audit trail.
/// </summary>
[JsonPropertyName("exportedBy")]
public string? ExportedBy { get; init; }
/// <summary>
/// Optional DSSE envelope containing signed bundle.
/// Present when bundle was signed during export.
/// </summary>
[JsonPropertyName("signature")]
public BundleSignature? Signature { get; init; }
}
/// <summary>
/// Chunk included in the bundle with base64-encoded blob.
/// </summary>
public sealed record BundleChunk
{
/// <summary>
/// Zero-based chunk index.
/// </summary>
[JsonPropertyName("index")]
public required int Index { get; init; }
/// <summary>
/// SHA256 hash for verification.
/// </summary>
[JsonPropertyName("hash")]
public required string Hash { get; init; }
/// <summary>
/// Size in bytes.
/// </summary>
[JsonPropertyName("size")]
public required int Size { get; init; }
/// <summary>
/// MIME type.
/// </summary>
[JsonPropertyName("contentType")]
public required string ContentType { get; init; }
/// <summary>
/// Base64-encoded chunk data.
/// </summary>
[JsonPropertyName("data")]
public required string Data { get; init; }
}
/// <summary>
/// DSSE signature envelope for bundle integrity.
/// </summary>
public sealed record BundleSignature
{
/// <summary>
/// Signature algorithm (e.g., "ES256", "RS256", "Ed25519").
/// </summary>
[JsonPropertyName("algorithm")]
public required string Algorithm { get; init; }
/// <summary>
/// Key identifier used for signing.
/// </summary>
[JsonPropertyName("keyId")]
public required string KeyId { get; init; }
/// <summary>
/// Base64-encoded signature bytes.
/// </summary>
[JsonPropertyName("signature")]
public required string SignatureBytes { get; init; }
/// <summary>
/// UTC timestamp when bundle was signed.
/// </summary>
[JsonPropertyName("signedAt")]
public required DateTimeOffset SignedAt { get; init; }
/// <summary>
/// Optional certificate chain for verification.
/// </summary>
[JsonPropertyName("certificateChain")]
public IReadOnlyList<string>? CertificateChain { get; init; }
}
/// <summary>
/// Options for exporting a minimal proof bundle.
/// </summary>
public sealed record MinimalProofExportOptions
{
/// <summary>
/// Density level determining how much evidence to include.
/// </summary>
public ProofDensity Density { get; init; } = ProofDensity.Standard;
/// <summary>
/// Number of leading chunks to include for Standard density.
/// Default is 3 (~192KB with 64KB chunks).
/// </summary>
public int StandardDensityChunkCount { get; init; } = 3;
/// <summary>
/// Whether to sign the bundle.
/// </summary>
public bool Sign { get; init; }
/// <summary>
/// Key ID to use for signing (if Sign is true).
/// </summary>
public string? SigningKeyId { get; init; }
/// <summary>
/// Optional system identifier for audit trail.
/// </summary>
public string? ExportedBy { get; init; }
}
/// <summary>
/// Result of importing a minimal proof bundle.
/// </summary>
public sealed record MinimalProofImportResult
{
/// <summary>
/// Whether the import was successful.
/// </summary>
public required bool Success { get; init; }
/// <summary>
/// The imported decision digest.
/// </summary>
public required DecisionDigest Digest { get; init; }
/// <summary>
/// The chunk manifest.
/// </summary>
public required ChunkManifest Manifest { get; init; }
/// <summary>
/// Number of chunks imported.
/// </summary>
public required int ChunksImported { get; init; }
/// <summary>
/// Number of chunks remaining to fetch (for lazy fetch scenarios).
/// </summary>
public required int ChunksPending { get; init; }
/// <summary>
/// Verification results.
/// </summary>
public required ImportVerification Verification { get; init; }
/// <summary>
/// Any warnings during import.
/// </summary>
public IReadOnlyList<string> Warnings { get; init; } = [];
}
/// <summary>
/// Verification results from importing a bundle.
/// </summary>
public sealed record ImportVerification
{
/// <summary>
/// Whether the Merkle root matches the proof root.
/// </summary>
public required bool MerkleRootValid { get; init; }
/// <summary>
/// Whether the signature was verified (if present).
/// </summary>
public required bool? SignatureValid { get; init; }
/// <summary>
/// Whether all included chunks passed hash verification.
/// </summary>
public required bool ChunksValid { get; init; }
/// <summary>
/// Whether the digest integrity check passed.
/// </summary>
public required bool DigestValid { get; init; }
/// <summary>
/// List of failed chunk indices (if any).
/// </summary>
public IReadOnlyList<int> FailedChunkIndices { get; init; } = [];
}

View File

@@ -0,0 +1,457 @@
using System.Security.Cryptography;
using System.Text.Json;
using Microsoft.Extensions.Logging;
using StellaOps.Provenance.Attestation;
namespace StellaOps.Provcache;
/// <summary>
/// Implementation of <see cref="IMinimalProofExporter"/> supporting
/// multiple density levels for air-gap scenarios.
/// </summary>
public sealed class MinimalProofExporter : IMinimalProofExporter
{
private readonly IProvcacheService _provcacheService;
private readonly IEvidenceChunkRepository _chunkRepository;
private readonly ISigner? _signer;
private readonly TimeProvider _timeProvider;
private readonly ILogger<MinimalProofExporter> _logger;
private static readonly JsonSerializerOptions s_jsonOptions = new()
{
PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
WriteIndented = false,
DefaultIgnoreCondition = System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull
};
public MinimalProofExporter(
IProvcacheService provcacheService,
IEvidenceChunkRepository chunkRepository,
ISigner? signer = null,
TimeProvider? timeProvider = null,
ILogger<MinimalProofExporter>? logger = null)
{
_provcacheService = provcacheService ?? throw new ArgumentNullException(nameof(provcacheService));
_chunkRepository = chunkRepository ?? throw new ArgumentNullException(nameof(chunkRepository));
_signer = signer;
_timeProvider = timeProvider ?? TimeProvider.System;
_logger = logger ?? Microsoft.Extensions.Logging.Abstractions.NullLogger<MinimalProofExporter>.Instance;
}
/// <inheritdoc />
public async Task<MinimalProofBundle> ExportAsync(
string veriKey,
MinimalProofExportOptions options,
CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrWhiteSpace(veriKey);
ArgumentNullException.ThrowIfNull(options);
_logger.LogDebug("Exporting minimal proof bundle for {VeriKey} with density {Density}",
veriKey, options.Density);
// Get the cache entry
var cacheResult = await _provcacheService.GetAsync(veriKey, bypassCache: false, cancellationToken);
if (cacheResult.Entry is null)
{
throw new InvalidOperationException($"Cache entry not found for VeriKey: {veriKey}");
}
var entry = cacheResult.Entry;
var proofRoot = entry.Decision.ProofRoot;
var now = _timeProvider.GetUtcNow();
// Get the chunk manifest
var manifest = await _chunkRepository.GetManifestAsync(proofRoot, cancellationToken)
?? throw new InvalidOperationException($"Chunk manifest not found for proof root: {proofRoot}");
// Build chunks based on density
var bundleChunks = await GetChunksForDensityAsync(
proofRoot,
manifest,
options.Density,
options.StandardDensityChunkCount,
cancellationToken);
// Build the bundle
var bundle = new MinimalProofBundle
{
BundleVersion = "v1",
Density = options.Density,
Digest = entry.Decision,
Manifest = manifest,
Chunks = bundleChunks,
ExportedAt = now,
ExportedBy = options.ExportedBy
};
// Sign if requested
if (options.Sign)
{
if (_signer is null)
{
throw new InvalidOperationException("Signing requested but no signer is configured.");
}
bundle = await SignBundleAsync(bundle, options.SigningKeyId, cancellationToken);
}
_logger.LogInformation(
"Exported minimal proof bundle for {VeriKey}: density={Density}, chunks={ChunkCount}/{TotalChunks}, signed={Signed}",
veriKey, options.Density, bundleChunks.Count, manifest.TotalChunks, options.Sign);
return bundle;
}
/// <inheritdoc />
public async Task<byte[]> ExportAsJsonAsync(
string veriKey,
MinimalProofExportOptions options,
CancellationToken cancellationToken = default)
{
var bundle = await ExportAsync(veriKey, options, cancellationToken);
return JsonSerializer.SerializeToUtf8Bytes(bundle, s_jsonOptions);
}
/// <inheritdoc />
public async Task ExportToStreamAsync(
string veriKey,
MinimalProofExportOptions options,
Stream outputStream,
CancellationToken cancellationToken = default)
{
ArgumentNullException.ThrowIfNull(outputStream);
var bundle = await ExportAsync(veriKey, options, cancellationToken);
await JsonSerializer.SerializeAsync(outputStream, bundle, s_jsonOptions, cancellationToken);
}
/// <inheritdoc />
public async Task<MinimalProofImportResult> ImportAsync(
MinimalProofBundle bundle,
CancellationToken cancellationToken = default)
{
ArgumentNullException.ThrowIfNull(bundle);
_logger.LogDebug("Importing minimal proof bundle: density={Density}, chunks={ChunkCount}",
bundle.Density, bundle.Chunks.Count);
var warnings = new List<string>();
// Verify the bundle
var verification = await VerifyAsync(bundle, cancellationToken);
if (!verification.DigestValid)
{
return new MinimalProofImportResult
{
Success = false,
Digest = bundle.Digest,
Manifest = bundle.Manifest,
ChunksImported = 0,
ChunksPending = bundle.Manifest.TotalChunks,
Verification = verification,
Warnings = ["Digest verification failed."]
};
}
if (!verification.MerkleRootValid)
{
return new MinimalProofImportResult
{
Success = false,
Digest = bundle.Digest,
Manifest = bundle.Manifest,
ChunksImported = 0,
ChunksPending = bundle.Manifest.TotalChunks,
Verification = verification,
Warnings = ["Merkle root verification failed."]
};
}
if (!verification.ChunksValid)
{
warnings.Add($"Some chunks failed verification: indices {string.Join(", ", verification.FailedChunkIndices)}");
}
if (verification.SignatureValid == false)
{
warnings.Add("Signature verification failed.");
}
// Store chunks
var chunksToStore = new List<EvidenceChunk>();
var now = _timeProvider.GetUtcNow();
foreach (var bundleChunk in bundle.Chunks)
{
if (verification.FailedChunkIndices.Contains(bundleChunk.Index))
{
continue; // Skip failed chunks
}
chunksToStore.Add(new EvidenceChunk
{
ChunkId = Guid.NewGuid(),
ProofRoot = bundle.Digest.ProofRoot,
ChunkIndex = bundleChunk.Index,
ChunkHash = bundleChunk.Hash,
Blob = Convert.FromBase64String(bundleChunk.Data),
BlobSize = bundleChunk.Size,
ContentType = bundleChunk.ContentType,
CreatedAt = now
});
}
if (chunksToStore.Count > 0)
{
await _chunkRepository.StoreChunksAsync(bundle.Digest.ProofRoot, chunksToStore, cancellationToken);
}
var chunksImported = chunksToStore.Count;
var chunksPending = bundle.Manifest.TotalChunks - chunksImported;
_logger.LogInformation(
"Imported minimal proof bundle: chunksImported={ChunksImported}, chunksPending={ChunksPending}",
chunksImported, chunksPending);
return new MinimalProofImportResult
{
Success = verification.DigestValid && verification.MerkleRootValid,
Digest = bundle.Digest,
Manifest = bundle.Manifest,
ChunksImported = chunksImported,
ChunksPending = chunksPending,
Verification = verification,
Warnings = warnings
};
}
/// <inheritdoc />
public async Task<MinimalProofImportResult> ImportFromJsonAsync(
byte[] jsonBytes,
CancellationToken cancellationToken = default)
{
ArgumentNullException.ThrowIfNull(jsonBytes);
var bundle = JsonSerializer.Deserialize<MinimalProofBundle>(jsonBytes, s_jsonOptions)
?? throw new InvalidOperationException("Failed to deserialize bundle.");
return await ImportAsync(bundle, cancellationToken);
}
/// <inheritdoc />
public async Task<MinimalProofImportResult> ImportFromStreamAsync(
Stream inputStream,
CancellationToken cancellationToken = default)
{
ArgumentNullException.ThrowIfNull(inputStream);
var bundle = await JsonSerializer.DeserializeAsync<MinimalProofBundle>(inputStream, s_jsonOptions, cancellationToken)
?? throw new InvalidOperationException("Failed to deserialize bundle.");
return await ImportAsync(bundle, cancellationToken);
}
/// <inheritdoc />
public Task<ImportVerification> VerifyAsync(
MinimalProofBundle bundle,
CancellationToken cancellationToken = default)
{
ArgumentNullException.ThrowIfNull(bundle);
// Verify digest integrity
var digestValid = VerifyDigest(bundle.Digest);
// Verify Merkle root matches digest
var merkleRootValid = string.Equals(
bundle.Manifest.ProofRoot,
bundle.Digest.ProofRoot,
StringComparison.OrdinalIgnoreCase);
// Verify included chunks
var failedChunks = new List<int>();
foreach (var chunk in bundle.Chunks)
{
if (!VerifyChunk(chunk))
{
failedChunks.Add(chunk.Index);
}
}
var chunksValid = failedChunks.Count == 0;
// Verify signature if present
bool? signatureValid = null;
if (bundle.Signature is not null)
{
signatureValid = VerifySignature(bundle);
}
return Task.FromResult(new ImportVerification
{
DigestValid = digestValid,
MerkleRootValid = merkleRootValid,
ChunksValid = chunksValid,
SignatureValid = signatureValid,
FailedChunkIndices = failedChunks
});
}
/// <inheritdoc />
public async Task<long> EstimateExportSizeAsync(
string veriKey,
ProofDensity density,
int standardChunkCount = 3,
CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrWhiteSpace(veriKey);
var cacheResult = await _provcacheService.GetAsync(veriKey, bypassCache: false, cancellationToken);
if (cacheResult.Entry is null)
{
return 0;
}
var proofRoot = cacheResult.Entry.Decision.ProofRoot;
var manifest = await _chunkRepository.GetManifestAsync(proofRoot, cancellationToken);
if (manifest is null)
{
return 0;
}
// Base size: digest + manifest (roughly 2KB)
const long baseSize = 2048;
return density switch
{
ProofDensity.Lite => baseSize,
ProofDensity.Standard => baseSize + CalculateChunkDataSize(manifest, standardChunkCount),
ProofDensity.Strict => baseSize + CalculateChunkDataSize(manifest, manifest.TotalChunks),
_ => baseSize
};
}
private async Task<IReadOnlyList<BundleChunk>> GetChunksForDensityAsync(
string proofRoot,
ChunkManifest manifest,
ProofDensity density,
int standardChunkCount,
CancellationToken cancellationToken)
{
var chunkCount = density switch
{
ProofDensity.Lite => 0,
ProofDensity.Standard => Math.Min(standardChunkCount, manifest.TotalChunks),
ProofDensity.Strict => manifest.TotalChunks,
_ => 0
};
if (chunkCount == 0)
{
return [];
}
var chunks = await _chunkRepository.GetChunkRangeAsync(
proofRoot,
startIndex: 0,
count: chunkCount,
cancellationToken);
return chunks.Select(c => new BundleChunk
{
Index = c.ChunkIndex,
Hash = c.ChunkHash,
Size = c.BlobSize,
ContentType = c.ContentType,
Data = Convert.ToBase64String(c.Blob)
}).ToList();
}
private async Task<MinimalProofBundle> SignBundleAsync(
MinimalProofBundle bundle,
string? signingKeyId,
CancellationToken cancellationToken)
{
if (_signer is null)
{
throw new InvalidOperationException("Signer is not configured.");
}
// Serialize bundle without signature for signing
var bundleWithoutSig = bundle with { Signature = null };
var payload = JsonSerializer.SerializeToUtf8Bytes(bundleWithoutSig, s_jsonOptions);
var signRequest = new SignRequest(
Payload: payload,
ContentType: "application/vnd.stellaops.proof-bundle+json");
var signResult = await _signer.SignAsync(signRequest, cancellationToken);
return bundle with
{
Signature = new BundleSignature
{
Algorithm = "HMAC-SHA256", // Could be made configurable
KeyId = signResult.KeyId,
SignatureBytes = Convert.ToBase64String(signResult.Signature),
SignedAt = signResult.SignedAt
}
};
}
private static bool VerifyDigest(DecisionDigest digest)
{
// Basic integrity checks
if (string.IsNullOrWhiteSpace(digest.VeriKey)) return false;
if (string.IsNullOrWhiteSpace(digest.VerdictHash)) return false;
if (string.IsNullOrWhiteSpace(digest.ProofRoot)) return false;
if (digest.TrustScore < 0 || digest.TrustScore > 100) return false;
if (digest.CreatedAt > digest.ExpiresAt) return false;
return true;
}
private static bool VerifyChunk(BundleChunk chunk)
{
try
{
var data = Convert.FromBase64String(chunk.Data);
if (data.Length != chunk.Size) return false;
var hash = SHA256.HashData(data);
var computedHash = $"sha256:{Convert.ToHexStringLower(hash)}";
return string.Equals(computedHash, chunk.Hash, StringComparison.OrdinalIgnoreCase);
}
catch
{
return false;
}
}
private bool VerifySignature(MinimalProofBundle bundle)
{
// Signature verification is not yet implemented; it would require the
// signer's public key or certificate chain. For the MVP, signature
// presence is treated as valid, and a warning is logged on every check.
_logger.LogWarning("Signature verification not fully implemented - assuming valid");
return bundle.Signature is not null;
}
private static long CalculateChunkDataSize(ChunkManifest manifest, int chunkCount)
{
if (chunkCount <= 0 || manifest.Chunks.Count == 0)
{
return 0;
}
var actualCount = Math.Min(chunkCount, manifest.TotalChunks);
var rawSize = manifest.Chunks
.Take(actualCount)
.Sum(c => (long)c.Size);
// Base64 expands raw bytes by 4/3 (~33%); the extra margin in 1.37 covers
// per-chunk JSON framing (index, hash, size, contentType fields).
return (long)(rawSize * 1.37);
}
}
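// Usage sketch (illustrative): importing a bundle from removable media and
// acting on the verification results.
internal static class MinimalProofImportExample
{
    public static async Task ImportAsync(IMinimalProofExporter exporter, string path)
    {
        await using var file = File.OpenRead(path);
        MinimalProofImportResult result = await exporter.ImportFromStreamAsync(file);

        if (!result.Success)
            throw new InvalidOperationException("Bundle rejected: digest or Merkle root invalid.");

        // Chunks that failed hash verification are skipped, not stored; they
        // can be re-fetched later (see FileChunkFetcher below).
        Console.WriteLine(
            $"Imported {result.ChunksImported} chunks, {result.ChunksPending} pending; " +
            $"signature valid: {result.Verification.SignatureValid?.ToString() ?? "not signed"}");
    }
}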

View File

@@ -0,0 +1,203 @@
namespace StellaOps.Provcache;
/// <summary>
/// Repository for evidence chunk storage and retrieval.
/// </summary>
public interface IEvidenceChunkRepository
{
/// <summary>
/// Gets all chunks for a proof root.
/// </summary>
/// <param name="proofRoot">The proof root to get chunks for.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Ordered list of chunks.</returns>
Task<IReadOnlyList<EvidenceChunk>> GetChunksAsync(
string proofRoot,
CancellationToken cancellationToken = default);
/// <summary>
/// Gets a specific chunk by index.
/// </summary>
/// <param name="proofRoot">The proof root.</param>
/// <param name="chunkIndex">The chunk index.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The chunk or null if not found.</returns>
Task<EvidenceChunk?> GetChunkAsync(
string proofRoot,
int chunkIndex,
CancellationToken cancellationToken = default);
/// <summary>
/// Gets chunks in a range (for paged retrieval).
/// </summary>
/// <param name="proofRoot">The proof root.</param>
/// <param name="startIndex">Starting chunk index (inclusive).</param>
/// <param name="count">Number of chunks to retrieve.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Ordered list of chunks in the range.</returns>
Task<IReadOnlyList<EvidenceChunk>> GetChunkRangeAsync(
string proofRoot,
int startIndex,
int count,
CancellationToken cancellationToken = default);
/// <summary>
/// Gets the chunk manifest (metadata without blobs).
/// </summary>
/// <param name="proofRoot">The proof root.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The chunk manifest.</returns>
Task<ChunkManifest?> GetManifestAsync(
string proofRoot,
CancellationToken cancellationToken = default);
/// <summary>
/// Stores multiple chunks for a proof root.
/// </summary>
/// <param name="proofRoot">The proof root.</param>
/// <param name="chunks">The chunks to store.</param>
/// <param name="cancellationToken">Cancellation token.</param>
Task StoreChunksAsync(
string proofRoot,
IEnumerable<EvidenceChunk> chunks,
CancellationToken cancellationToken = default);
/// <summary>
/// Deletes all chunks for a proof root.
/// </summary>
/// <param name="proofRoot">The proof root.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Number of chunks deleted.</returns>
Task<int> DeleteChunksAsync(
string proofRoot,
CancellationToken cancellationToken = default);
/// <summary>
/// Gets total chunk count for a proof root.
/// </summary>
/// <param name="proofRoot">The proof root.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Number of chunks.</returns>
Task<int> GetChunkCountAsync(
string proofRoot,
CancellationToken cancellationToken = default);
/// <summary>
/// Gets total storage size for a proof root.
/// </summary>
/// <param name="proofRoot">The proof root.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Total bytes stored.</returns>
Task<long> GetTotalSizeAsync(
string proofRoot,
CancellationToken cancellationToken = default);
}
/// <summary>
/// Represents an evidence chunk.
/// </summary>
public sealed record EvidenceChunk
{
/// <summary>
/// Unique chunk identifier.
/// </summary>
public required Guid ChunkId { get; init; }
/// <summary>
/// The proof root this chunk belongs to.
/// </summary>
public required string ProofRoot { get; init; }
/// <summary>
/// Zero-based index within the proof.
/// </summary>
public required int ChunkIndex { get; init; }
/// <summary>
/// SHA256 hash of the chunk for verification.
/// </summary>
public required string ChunkHash { get; init; }
/// <summary>
/// The binary content.
/// </summary>
public required byte[] Blob { get; init; }
/// <summary>
/// Size of the blob in bytes.
/// </summary>
public required int BlobSize { get; init; }
/// <summary>
/// MIME type of the content.
/// </summary>
public required string ContentType { get; init; }
/// <summary>
/// When the chunk was created.
/// </summary>
public required DateTimeOffset CreatedAt { get; init; }
}
/// <summary>
/// Manifest describing all chunks for a proof root (metadata only).
/// Used for lazy fetching where blobs are retrieved on demand.
/// </summary>
public sealed record ChunkManifest
{
/// <summary>
/// The proof root (Merkle root of all chunks).
/// </summary>
public required string ProofRoot { get; init; }
/// <summary>
/// Total number of chunks.
/// </summary>
public required int TotalChunks { get; init; }
/// <summary>
/// Total size of all chunks in bytes.
/// </summary>
public required long TotalSize { get; init; }
/// <summary>
/// Ordered list of chunk metadata.
/// </summary>
public required IReadOnlyList<ChunkMetadata> Chunks { get; init; }
/// <summary>
/// When the manifest was generated.
/// </summary>
public required DateTimeOffset GeneratedAt { get; init; }
}
/// <summary>
/// Metadata for a single chunk (no blob).
/// </summary>
public sealed record ChunkMetadata
{
/// <summary>
/// Chunk identifier.
/// </summary>
public required Guid ChunkId { get; init; }
/// <summary>
/// Zero-based index.
/// </summary>
public required int Index { get; init; }
/// <summary>
/// SHA256 hash for verification.
/// </summary>
public required string Hash { get; init; }
/// <summary>
/// Size in bytes.
/// </summary>
public required int Size { get; init; }
/// <summary>
/// Content type.
/// </summary>
public required string ContentType { get; init; }
}
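// Usage sketch (illustrative): paged retrieval driven by the manifest, so
// blobs are only pulled for the chunks a caller actually needs.
internal static class ChunkRepositoryExample
{
    public static async Task<byte[]?> ReadFirstChunkAsync(IEvidenceChunkRepository repo, string proofRoot)
    {
        ChunkManifest? manifest = await repo.GetManifestAsync(proofRoot);
        if (manifest is null || manifest.TotalChunks == 0)
            return null;

        // Fetch only the first page of blobs; the manifest already carries
        // hashes and sizes for everything else.
        IReadOnlyList<EvidenceChunk> page = await repo.GetChunkRangeAsync(proofRoot, startIndex: 0, count: 1);
        return page.Count > 0 ? page[0].Blob : null;
    }
}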

View File

@@ -0,0 +1,184 @@
using Microsoft.Extensions.Logging;
using StellaOps.Messaging;
using StellaOps.Messaging.Abstractions;
using StellaOps.Provcache.Events;
namespace StellaOps.Provcache.Invalidation;
/// <summary>
/// Invalidator that handles feed epoch advancement events.
/// When a feed advances to a new epoch, cache entries with older feed_epoch are invalidated.
/// </summary>
public sealed class FeedEpochInvalidator : IProvcacheInvalidator
{
private readonly IEventStream<FeedEpochAdvancedEvent> _eventStream;
private readonly IProvcacheService _provcacheService;
private readonly ILogger<FeedEpochInvalidator> _logger;
private readonly TimeProvider _timeProvider;
private CancellationTokenSource? _cts;
private Task? _processingTask;
private bool _isRunning;
// Metrics
private long _eventsProcessed;
private long _entriesInvalidated;
private long _errors;
private DateTimeOffset? _lastEventAt;
public FeedEpochInvalidator(
IEventStream<FeedEpochAdvancedEvent> eventStream,
IProvcacheService provcacheService,
ILogger<FeedEpochInvalidator> logger,
TimeProvider? timeProvider = null)
{
_eventStream = eventStream ?? throw new ArgumentNullException(nameof(eventStream));
_provcacheService = provcacheService ?? throw new ArgumentNullException(nameof(provcacheService));
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
_timeProvider = timeProvider ?? TimeProvider.System;
}
/// <inheritdoc />
public string Name => "FeedEpochInvalidator";
/// <inheritdoc />
public bool IsRunning => _isRunning;
/// <inheritdoc />
public Task StartAsync(CancellationToken cancellationToken = default)
{
if (_isRunning)
{
_logger.LogWarning("FeedEpochInvalidator is already running");
return Task.CompletedTask;
}
_cts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken);
_processingTask = ProcessEventsAsync(_cts.Token);
_isRunning = true;
_logger.LogInformation("FeedEpochInvalidator started, subscribing to {StreamName}", FeedEpochAdvancedEvent.StreamName);
return Task.CompletedTask;
}
/// <inheritdoc />
public async Task StopAsync(CancellationToken cancellationToken = default)
{
if (!_isRunning)
{
return;
}
_logger.LogInformation("FeedEpochInvalidator stopping...");
if (_cts is not null)
{
await _cts.CancelAsync();
}
if (_processingTask is not null)
{
try
{
await _processingTask.WaitAsync(cancellationToken);
}
catch (OperationCanceledException)
{
// Expected during shutdown
}
}
_isRunning = false;
_logger.LogInformation("FeedEpochInvalidator stopped");
}
/// <inheritdoc />
public InvalidatorMetrics GetMetrics()
{
return new InvalidatorMetrics
{
EventsProcessed = Interlocked.Read(ref _eventsProcessed),
EntriesInvalidated = Interlocked.Read(ref _entriesInvalidated),
Errors = Interlocked.Read(ref _errors),
LastEventAt = _lastEventAt,
CollectedAt = _timeProvider.GetUtcNow()
};
}
/// <inheritdoc />
public async ValueTask DisposeAsync()
{
await StopAsync();
_cts?.Dispose();
}
private async Task ProcessEventsAsync(CancellationToken cancellationToken)
{
try
{
// Start from latest events
await foreach (var streamEvent in _eventStream.SubscribeAsync(StreamPosition.End, cancellationToken))
{
try
{
await HandleEventAsync(streamEvent.Event, cancellationToken);
Interlocked.Increment(ref _eventsProcessed);
_lastEventAt = _timeProvider.GetUtcNow();
}
catch (Exception ex)
{
Interlocked.Increment(ref _errors);
_logger.LogError(ex,
"Error processing FeedEpochAdvancedEvent {EventId} for feed {FeedId}",
streamEvent.Event.EventId,
streamEvent.Event.FeedId);
}
}
}
catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested)
{
// Normal shutdown
}
catch (Exception ex)
{
_logger.LogError(ex, "Fatal error in FeedEpochInvalidator event processing loop");
throw;
}
}
private async Task HandleEventAsync(FeedEpochAdvancedEvent @event, CancellationToken cancellationToken)
{
_logger.LogInformation(
"Processing feed epoch advancement: FeedId={FeedId}, PreviousEpoch={PreviousEpoch}, NewEpoch={NewEpoch}",
@event.FeedId,
@event.PreviousEpoch,
@event.NewEpoch);
// Invalidate entries with feed_epoch older than the new epoch
// The feed_epoch in cache entries is formatted as "feed:epoch" (e.g., "cve:2024-12-24T12:00:00Z")
var request = InvalidationRequest.ByFeedEpochOlderThan(
@event.NewEpoch,
$"Feed {FormatFeedEpoch(@event.FeedId, @event.NewEpoch)} advanced");
var result = await _provcacheService.InvalidateByAsync(request, cancellationToken);
Interlocked.Add(ref _entriesInvalidated, result.EntriesAffected);
_logger.LogInformation(
"Feed epoch advancement invalidated {Count} cache entries for feed {FeedId} epoch {NewEpoch}",
result.EntriesAffected,
@event.FeedId,
@event.NewEpoch);
// Record telemetry
ProvcacheTelemetry.RecordInvalidation("feed_epoch", result.EntriesAffected);
}
/// <summary>
/// Formats a feed epoch identifier.
/// </summary>
private static string FormatFeedEpoch(string feedId, string epoch)
{
return $"{feedId}:{epoch}";
}
}

View File

@@ -0,0 +1,66 @@
namespace StellaOps.Provcache.Invalidation;
/// <summary>
/// Interface for cache invalidation handlers that respond to external events.
/// Implementations subscribe to event streams and invalidate cache entries accordingly.
/// </summary>
public interface IProvcacheInvalidator : IAsyncDisposable
{
/// <summary>
/// Gets the name of this invalidator for diagnostics.
/// </summary>
string Name { get; }
/// <summary>
/// Gets whether this invalidator is currently subscribed and processing events.
/// </summary>
bool IsRunning { get; }
/// <summary>
/// Starts processing invalidation events.
/// </summary>
/// <param name="cancellationToken">Cancellation token.</param>
Task StartAsync(CancellationToken cancellationToken = default);
/// <summary>
/// Stops processing invalidation events.
/// </summary>
/// <param name="cancellationToken">Cancellation token.</param>
Task StopAsync(CancellationToken cancellationToken = default);
/// <summary>
/// Gets metrics for this invalidator.
/// </summary>
InvalidatorMetrics GetMetrics();
}
/// <summary>
/// Metrics for a cache invalidator.
/// </summary>
public sealed record InvalidatorMetrics
{
/// <summary>
/// Total number of events processed.
/// </summary>
public required long EventsProcessed { get; init; }
/// <summary>
/// Total number of cache entries invalidated.
/// </summary>
public required long EntriesInvalidated { get; init; }
/// <summary>
/// Number of processing errors encountered.
/// </summary>
public required long Errors { get; init; }
/// <summary>
/// Last event processed timestamp.
/// </summary>
public DateTimeOffset? LastEventAt { get; init; }
/// <summary>
/// When these metrics were collected.
/// </summary>
public required DateTimeOffset CollectedAt { get; init; }
}
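// Usage sketch (illustrative): hosting an invalidator for the lifetime of a
// service. Concrete implementations (FeedEpochInvalidator above,
// SignerSetInvalidator below) subscribe on StartAsync and report counters
// through GetMetrics().
internal static class InvalidatorHostExample
{
    public static async Task RunAsync(IProvcacheInvalidator invalidator, CancellationToken shutdown)
    {
        await invalidator.StartAsync(shutdown);
        try
        {
            await Task.Delay(Timeout.Infinite, shutdown);
        }
        catch (OperationCanceledException)
        {
            // Shutdown requested.
        }
        InvalidatorMetrics metrics = invalidator.GetMetrics();
        Console.WriteLine(
            $"{invalidator.Name}: {metrics.EventsProcessed} events, " +
            $"{metrics.EntriesInvalidated} invalidated, {metrics.Errors} errors");
        await invalidator.StopAsync();
        await invalidator.DisposeAsync();
    }
}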

View File

@@ -0,0 +1,177 @@
using Microsoft.Extensions.Logging;
using StellaOps.Messaging;
using StellaOps.Messaging.Abstractions;
using StellaOps.Provcache.Events;
namespace StellaOps.Provcache.Invalidation;
/// <summary>
/// Invalidator that handles signer revocation events.
/// When a signer is revoked, all cache entries with matching signer_set_hash are invalidated.
/// </summary>
public sealed class SignerSetInvalidator : IProvcacheInvalidator
{
private readonly IEventStream<SignerRevokedEvent> _eventStream;
private readonly IProvcacheService _provcacheService;
private readonly ILogger<SignerSetInvalidator> _logger;
private readonly TimeProvider _timeProvider;
private CancellationTokenSource? _cts;
private Task? _processingTask;
private bool _isRunning;
// Metrics
private long _eventsProcessed;
private long _entriesInvalidated;
private long _errors;
private DateTimeOffset? _lastEventAt;
public SignerSetInvalidator(
IEventStream<SignerRevokedEvent> eventStream,
IProvcacheService provcacheService,
ILogger<SignerSetInvalidator> logger,
TimeProvider? timeProvider = null)
{
_eventStream = eventStream ?? throw new ArgumentNullException(nameof(eventStream));
_provcacheService = provcacheService ?? throw new ArgumentNullException(nameof(provcacheService));
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
_timeProvider = timeProvider ?? TimeProvider.System;
}
/// <inheritdoc />
public string Name => "SignerSetInvalidator";
/// <inheritdoc />
public bool IsRunning => _isRunning;
/// <inheritdoc />
public Task StartAsync(CancellationToken cancellationToken = default)
{
if (_isRunning)
{
_logger.LogWarning("SignerSetInvalidator is already running");
return Task.CompletedTask;
}
_cts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken);
_processingTask = ProcessEventsAsync(_cts.Token);
_isRunning = true;
_logger.LogInformation("SignerSetInvalidator started, subscribing to {StreamName}", SignerRevokedEvent.StreamName);
return Task.CompletedTask;
}
/// <inheritdoc />
public async Task StopAsync(CancellationToken cancellationToken = default)
{
if (!_isRunning)
{
return;
}
_logger.LogInformation("SignerSetInvalidator stopping...");
if (_cts is not null)
{
await _cts.CancelAsync();
}
if (_processingTask is not null)
{
try
{
await _processingTask.WaitAsync(cancellationToken);
}
catch (OperationCanceledException)
{
// Expected during shutdown
}
}
_isRunning = false;
_logger.LogInformation("SignerSetInvalidator stopped");
}
/// <inheritdoc />
public InvalidatorMetrics GetMetrics()
{
return new InvalidatorMetrics
{
EventsProcessed = Interlocked.Read(ref _eventsProcessed),
EntriesInvalidated = Interlocked.Read(ref _entriesInvalidated),
Errors = Interlocked.Read(ref _errors),
LastEventAt = _lastEventAt,
CollectedAt = _timeProvider.GetUtcNow()
};
}
/// <inheritdoc />
public async ValueTask DisposeAsync()
{
await StopAsync();
_cts?.Dispose();
}
private async Task ProcessEventsAsync(CancellationToken cancellationToken)
{
try
{
// Start from latest events (we don't want to replay old revocations)
await foreach (var streamEvent in _eventStream.SubscribeAsync(StreamPosition.End, cancellationToken))
{
try
{
await HandleEventAsync(streamEvent.Event, cancellationToken);
Interlocked.Increment(ref _eventsProcessed);
_lastEventAt = _timeProvider.GetUtcNow();
}
catch (Exception ex)
{
Interlocked.Increment(ref _errors);
_logger.LogError(ex,
"Error processing SignerRevokedEvent {EventId} for signer {SignerHash}",
streamEvent.Event.EventId,
streamEvent.Event.SignerHash);
}
}
}
catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested)
{
// Normal shutdown
}
catch (Exception ex)
{
_logger.LogError(ex, "Fatal error in SignerSetInvalidator event processing loop");
throw;
}
}
private async Task HandleEventAsync(SignerRevokedEvent @event, CancellationToken cancellationToken)
{
_logger.LogInformation(
"Processing signer revocation: AnchorId={AnchorId}, KeyId={KeyId}, SignerHash={SignerHash}, Reason={Reason}",
@event.AnchorId,
@event.KeyId,
@event.SignerHash,
@event.Reason);
// Create invalidation request for entries with this signer hash
var request = InvalidationRequest.BySignerSetHash(
@event.SignerHash,
$"Signer revoked: {@event.Reason ?? "unspecified"}");
request = request with { Actor = @event.Actor ?? "SignerSetInvalidator" };
var result = await _provcacheService.InvalidateByAsync(request, cancellationToken);
Interlocked.Add(ref _entriesInvalidated, result.EntriesAffected);
_logger.LogInformation(
"Signer revocation invalidated {Count} cache entries for signer {SignerHash}",
result.EntriesAffected,
@event.SignerHash);
// Record telemetry
ProvcacheTelemetry.RecordInvalidation("signer_revocation", result.EntriesAffected);
}
}

View File

@@ -0,0 +1,257 @@
using System.Runtime.CompilerServices;
using System.Security.Cryptography;
using System.Text.Json;
using Microsoft.Extensions.Logging;
namespace StellaOps.Provcache;
/// <summary>
/// File-based lazy evidence chunk fetcher for sneakernet mode.
/// Fetches chunks from a local directory (e.g., USB drive, NFS mount).
/// </summary>
public sealed class FileChunkFetcher : ILazyEvidenceFetcher
{
private readonly string _basePath;
private readonly ILogger<FileChunkFetcher> _logger;
private readonly JsonSerializerOptions _jsonOptions;
/// <inheritdoc />
public string FetcherType => "file";
/// <summary>
/// Creates a file chunk fetcher with the specified base directory.
/// </summary>
/// <param name="basePath">The base directory containing evidence files.</param>
/// <param name="logger">Logger instance.</param>
public FileChunkFetcher(string basePath, ILogger<FileChunkFetcher> logger)
{
ArgumentException.ThrowIfNullOrWhiteSpace(basePath);
_basePath = Path.GetFullPath(basePath);
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
_jsonOptions = new JsonSerializerOptions
{
PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
PropertyNameCaseInsensitive = true
};
_logger.LogDebug("FileChunkFetcher initialized with base path: {BasePath}", _basePath);
}
/// <inheritdoc />
public async Task<FetchedChunk?> FetchChunkAsync(
string proofRoot,
int chunkIndex,
CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrWhiteSpace(proofRoot);
ArgumentOutOfRangeException.ThrowIfNegative(chunkIndex);
var chunkPath = GetChunkPath(proofRoot, chunkIndex);
_logger.LogDebug("Looking for chunk at {Path}", chunkPath);
if (!File.Exists(chunkPath))
{
_logger.LogDebug("Chunk file not found: {Path}", chunkPath);
return null;
}
try
{
await using var stream = File.OpenRead(chunkPath);
var chunk = await JsonSerializer.DeserializeAsync<FetchedChunk>(stream, _jsonOptions, cancellationToken);
_logger.LogDebug("Successfully loaded chunk {Index}, {Bytes} bytes", chunkIndex, chunk?.Data.Length ?? 0);
return chunk;
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Error reading chunk file {Path}", chunkPath);
throw;
}
}
/// <inheritdoc />
public async IAsyncEnumerable<FetchedChunk> FetchChunksAsync(
string proofRoot,
IEnumerable<int> chunkIndices,
[EnumeratorCancellation] CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrWhiteSpace(proofRoot);
ArgumentNullException.ThrowIfNull(chunkIndices);
var indices = chunkIndices.ToList();
_logger.LogInformation("Fetching {Count} chunks from file system for proof root {ProofRoot}", indices.Count, proofRoot);
foreach (var index in indices)
{
cancellationToken.ThrowIfCancellationRequested();
var chunk = await FetchChunkAsync(proofRoot, index, cancellationToken);
if (chunk is not null)
{
yield return chunk;
}
}
}
/// <inheritdoc />
public async IAsyncEnumerable<FetchedChunk> FetchRemainingChunksAsync(
string proofRoot,
ChunkManifest manifest,
IReadOnlySet<int> existingIndices,
[EnumeratorCancellation] CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrWhiteSpace(proofRoot);
ArgumentNullException.ThrowIfNull(manifest);
ArgumentNullException.ThrowIfNull(existingIndices);
var missingIndices = Enumerable.Range(0, manifest.TotalChunks)
.Where(i => !existingIndices.Contains(i))
.ToList();
_logger.LogInformation(
"Fetching {MissingCount} remaining chunks from files (have {ExistingCount}/{TotalCount})",
missingIndices.Count, existingIndices.Count, manifest.TotalChunks);
await foreach (var chunk in FetchChunksAsync(proofRoot, missingIndices, cancellationToken))
{
yield return chunk;
}
}
/// <inheritdoc />
public Task<bool> IsAvailableAsync(CancellationToken cancellationToken = default)
{
var isAvailable = Directory.Exists(_basePath);
_logger.LogDebug("File fetcher availability check: {IsAvailable}", isAvailable);
return Task.FromResult(isAvailable);
}
/// <inheritdoc />
public async Task<ChunkManifest?> FetchManifestAsync(
string proofRoot,
CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrWhiteSpace(proofRoot);
var manifestPath = GetManifestPath(proofRoot);
_logger.LogDebug("Looking for manifest at {Path}", manifestPath);
if (!File.Exists(manifestPath))
{
_logger.LogDebug("Manifest file not found: {Path}", manifestPath);
return null;
}
try
{
await using var stream = File.OpenRead(manifestPath);
return await JsonSerializer.DeserializeAsync<ChunkManifest>(stream, _jsonOptions, cancellationToken);
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Error reading manifest file {Path}", manifestPath);
throw;
}
}
/// <summary>
/// Gets the file path for a chunk.
/// </summary>
private string GetChunkPath(string proofRoot, int chunkIndex)
{
// Sanitize proof root for use in file paths
var safeProofRoot = SanitizeForPath(proofRoot);
return Path.Combine(_basePath, safeProofRoot, $"chunk_{chunkIndex:D4}.json");
}
/// <summary>
/// Gets the file path for a manifest.
/// </summary>
private string GetManifestPath(string proofRoot)
{
var safeProofRoot = SanitizeForPath(proofRoot);
return Path.Combine(_basePath, safeProofRoot, "manifest.json");
}
/// <summary>
/// Sanitizes a proof root for use in file paths.
/// </summary>
private static string SanitizeForPath(string input)
{
// Use hash prefix to ensure consistent directory naming
var hash = Convert.ToHexString(SHA256.HashData(System.Text.Encoding.UTF8.GetBytes(input))).ToLowerInvariant();
// Return first 16 chars of hash for reasonable directory names
return hash[..16];
}
/// <summary>
/// Exports chunks to files for sneakernet transfer.
/// </summary>
/// <param name="proofRoot">The proof root.</param>
/// <param name="manifest">The chunk manifest.</param>
/// <param name="chunks">The chunks to export.</param>
/// <param name="cancellationToken">Cancellation token.</param>
public async Task ExportToFilesAsync(
string proofRoot,
ChunkManifest manifest,
IEnumerable<FetchedChunk> chunks,
CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrWhiteSpace(proofRoot);
ArgumentNullException.ThrowIfNull(manifest);
ArgumentNullException.ThrowIfNull(chunks);
var safeProofRoot = SanitizeForPath(proofRoot);
var proofDir = Path.Combine(_basePath, safeProofRoot);
Directory.CreateDirectory(proofDir);
_logger.LogInformation("Exporting to {Directory}", proofDir);
// Write manifest
var manifestPath = GetManifestPath(proofRoot);
await using (var manifestStream = File.Create(manifestPath))
{
await JsonSerializer.SerializeAsync(manifestStream, manifest, _jsonOptions, cancellationToken);
}
_logger.LogDebug("Wrote manifest to {Path}", manifestPath);
// Write chunks
var count = 0;
foreach (var chunk in chunks)
{
cancellationToken.ThrowIfCancellationRequested();
var chunkPath = GetChunkPath(proofRoot, chunk.Index);
await using var chunkStream = File.Create(chunkPath);
await JsonSerializer.SerializeAsync(chunkStream, chunk, _jsonOptions, cancellationToken);
count++;
}
_logger.LogInformation("Exported {Count} chunks to {Directory}", count, proofDir);
}
/// <summary>
/// Exports EvidenceChunks to files (converts to FetchedChunk format).
/// </summary>
/// <param name="proofRoot">The proof root.</param>
/// <param name="manifest">The chunk manifest.</param>
/// <param name="chunks">The evidence chunks to export.</param>
/// <param name="cancellationToken">Cancellation token.</param>
public Task ExportEvidenceChunksToFilesAsync(
string proofRoot,
ChunkManifest manifest,
IEnumerable<EvidenceChunk> chunks,
CancellationToken cancellationToken = default)
{
var fetchedChunks = chunks.Select(c => new FetchedChunk
{
Index = c.ChunkIndex,
Data = c.Blob,
Hash = c.ChunkHash
});
return ExportToFilesAsync(proofRoot, manifest, fetchedChunks, cancellationToken);
}
}
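
A sneakernet round-trip sketch under assumed inputs: proofRoot, manifest, and a sequence of FetchedChunk values come from the chunking pipeline (ExportEvidenceChunksToFilesAsync converts EvidenceChunk instances instead); the /mnt/usb path and loggers are illustrative.

// Connected side: write manifest + chunks to the transfer medium.
var exporter = new FileChunkFetcher("/mnt/usb/evidence", exportLogger);
await exporter.ExportToFilesAsync(proofRoot, manifest, chunks, cancellationToken);

// Air-gapped side: read everything back from the same directory layout.
var importer = new FileChunkFetcher("/mnt/usb/evidence", importLogger);
var transferred = await importer.FetchManifestAsync(proofRoot, cancellationToken);
await foreach (var chunk in importer.FetchChunksAsync(
    proofRoot, Enumerable.Range(0, transferred!.TotalChunks), cancellationToken))
{
    // verify and store the chunk locally...
}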

View File

@@ -0,0 +1,194 @@
using System.Net.Http.Json;
using System.Runtime.CompilerServices;
using System.Text.Json;
using Microsoft.Extensions.Logging;
namespace StellaOps.Provcache;
/// <summary>
/// HTTP-based lazy evidence chunk fetcher for connected mode.
/// Fetches chunks from a remote Stella API endpoint.
/// </summary>
public sealed class HttpChunkFetcher : ILazyEvidenceFetcher, IDisposable
{
private readonly HttpClient _httpClient;
private readonly bool _ownsClient;
private readonly ILogger<HttpChunkFetcher> _logger;
private readonly JsonSerializerOptions _jsonOptions;
/// <inheritdoc />
public string FetcherType => "http";
/// <summary>
/// Creates an HTTP chunk fetcher with the specified base URL.
/// </summary>
/// <param name="baseUrl">The base URL of the Stella API.</param>
/// <param name="logger">Logger instance.</param>
public HttpChunkFetcher(Uri baseUrl, ILogger<HttpChunkFetcher> logger)
: this(CreateClient(baseUrl), ownsClient: true, logger)
{
}
/// <summary>
/// Creates an HTTP chunk fetcher with an existing HTTP client.
/// </summary>
/// <param name="httpClient">The HTTP client to use.</param>
/// <param name="ownsClient">Whether this fetcher owns the client lifecycle.</param>
/// <param name="logger">Logger instance.</param>
public HttpChunkFetcher(HttpClient httpClient, bool ownsClient, ILogger<HttpChunkFetcher> logger)
{
_httpClient = httpClient ?? throw new ArgumentNullException(nameof(httpClient));
_ownsClient = ownsClient;
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
_jsonOptions = new JsonSerializerOptions
{
PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
PropertyNameCaseInsensitive = true
};
}
private static HttpClient CreateClient(Uri baseUrl)
{
var client = new HttpClient { BaseAddress = baseUrl };
client.DefaultRequestHeaders.Add("Accept", "application/json");
return client;
}
/// <inheritdoc />
public async Task<FetchedChunk?> FetchChunkAsync(
string proofRoot,
int chunkIndex,
CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrWhiteSpace(proofRoot);
ArgumentOutOfRangeException.ThrowIfNegative(chunkIndex);
var url = $"api/v1/evidence/{Uri.EscapeDataString(proofRoot)}/chunks/{chunkIndex}";
_logger.LogDebug("Fetching chunk {Index} from {Url}", chunkIndex, url);
try
{
            using var response = await _httpClient.GetAsync(url, cancellationToken);
if (response.StatusCode == System.Net.HttpStatusCode.NotFound)
{
_logger.LogDebug("Chunk {Index} not found at remote", chunkIndex);
return null;
}
response.EnsureSuccessStatusCode();
var chunk = await response.Content.ReadFromJsonAsync<FetchedChunk>(_jsonOptions, cancellationToken);
_logger.LogDebug("Successfully fetched chunk {Index}, {Bytes} bytes", chunkIndex, chunk?.Data.Length ?? 0);
return chunk;
}
catch (HttpRequestException ex)
{
_logger.LogWarning(ex, "HTTP error fetching chunk {Index}", chunkIndex);
throw;
}
}
/// <inheritdoc />
public async IAsyncEnumerable<FetchedChunk> FetchChunksAsync(
string proofRoot,
IEnumerable<int> chunkIndices,
[EnumeratorCancellation] CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrWhiteSpace(proofRoot);
ArgumentNullException.ThrowIfNull(chunkIndices);
var indices = chunkIndices.ToList();
_logger.LogInformation("Fetching {Count} chunks for proof root {ProofRoot}", indices.Count, proofRoot);
foreach (var index in indices)
{
cancellationToken.ThrowIfCancellationRequested();
var chunk = await FetchChunkAsync(proofRoot, index, cancellationToken);
if (chunk is not null)
{
yield return chunk;
}
}
}
/// <inheritdoc />
public async IAsyncEnumerable<FetchedChunk> FetchRemainingChunksAsync(
string proofRoot,
ChunkManifest manifest,
IReadOnlySet<int> existingIndices,
[EnumeratorCancellation] CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrWhiteSpace(proofRoot);
ArgumentNullException.ThrowIfNull(manifest);
ArgumentNullException.ThrowIfNull(existingIndices);
var missingIndices = Enumerable.Range(0, manifest.TotalChunks)
.Where(i => !existingIndices.Contains(i))
.ToList();
_logger.LogInformation(
"Fetching {MissingCount} remaining chunks (have {ExistingCount}/{TotalCount})",
missingIndices.Count, existingIndices.Count, manifest.TotalChunks);
await foreach (var chunk in FetchChunksAsync(proofRoot, missingIndices, cancellationToken))
{
yield return chunk;
}
}
/// <inheritdoc />
public async Task<bool> IsAvailableAsync(CancellationToken cancellationToken = default)
{
try
{
var response = await _httpClient.GetAsync("api/v1/health", cancellationToken);
return response.IsSuccessStatusCode;
}
catch (Exception ex)
{
_logger.LogDebug(ex, "Health check failed");
return false;
}
}
/// <inheritdoc />
public async Task<ChunkManifest?> FetchManifestAsync(
string proofRoot,
CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrWhiteSpace(proofRoot);
var url = $"api/v1/evidence/{Uri.EscapeDataString(proofRoot)}/manifest";
_logger.LogDebug("Fetching manifest from {Url}", url);
try
{
            using var response = await _httpClient.GetAsync(url, cancellationToken);
if (response.StatusCode == System.Net.HttpStatusCode.NotFound)
{
_logger.LogDebug("Manifest not found for proof root {ProofRoot}", proofRoot);
return null;
}
response.EnsureSuccessStatusCode();
return await response.Content.ReadFromJsonAsync<ChunkManifest>(_jsonOptions, cancellationToken);
}
catch (HttpRequestException ex)
{
_logger.LogWarning(ex, "HTTP error fetching manifest for {ProofRoot}", proofRoot);
throw;
}
}
/// <inheritdoc />
public void Dispose()
{
if (_ownsClient)
{
_httpClient.Dispose();
}
}
}
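
A connected-mode sketch; the base URL is an example and logger is assumed from DI.

using var fetcher = new HttpChunkFetcher(new Uri("https://stella.example/"), logger);
if (await fetcher.IsAvailableAsync(cancellationToken))
{
    // Null means the remote returned 404 for this proof root.
    var manifest = await fetcher.FetchManifestAsync(proofRoot, cancellationToken);
}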

View File

@@ -0,0 +1,131 @@
namespace StellaOps.Provcache;
/// <summary>
/// Interface for lazy evidence chunk fetching from various sources.
/// Enables on-demand evidence retrieval for air-gapped auditors.
/// </summary>
public interface ILazyEvidenceFetcher
{
/// <summary>
/// Gets the fetcher type (e.g., "http", "file").
/// </summary>
string FetcherType { get; }
/// <summary>
/// Fetches a single chunk by index.
/// </summary>
/// <param name="proofRoot">The proof root identifying the evidence.</param>
/// <param name="chunkIndex">The chunk index to fetch.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The fetched chunk or null if not found.</returns>
Task<FetchedChunk?> FetchChunkAsync(
string proofRoot,
int chunkIndex,
CancellationToken cancellationToken = default);
/// <summary>
/// Fetches multiple chunks by index.
/// </summary>
/// <param name="proofRoot">The proof root identifying the evidence.</param>
/// <param name="chunkIndices">The chunk indices to fetch.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Async enumerable of fetched chunks.</returns>
IAsyncEnumerable<FetchedChunk> FetchChunksAsync(
string proofRoot,
IEnumerable<int> chunkIndices,
CancellationToken cancellationToken = default);
/// <summary>
/// Fetches all remaining chunks for a proof root.
/// </summary>
/// <param name="proofRoot">The proof root identifying the evidence.</param>
/// <param name="manifest">The chunk manifest for reference.</param>
/// <param name="existingIndices">Indices of chunks already present locally.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Async enumerable of fetched chunks.</returns>
IAsyncEnumerable<FetchedChunk> FetchRemainingChunksAsync(
string proofRoot,
ChunkManifest manifest,
IReadOnlySet<int> existingIndices,
CancellationToken cancellationToken = default);
/// <summary>
/// Checks if the source is available for fetching.
/// </summary>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>True if the source is available.</returns>
Task<bool> IsAvailableAsync(CancellationToken cancellationToken = default);
/// <summary>
/// Gets the manifest from the source.
/// </summary>
/// <param name="proofRoot">The proof root to get manifest for.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The chunk manifest or null if not available.</returns>
Task<ChunkManifest?> FetchManifestAsync(
string proofRoot,
CancellationToken cancellationToken = default);
}
/// <summary>
/// Simplified chunk representation for the lazy fetch interface.
/// Carries only the index, data, and verification hash needed for transport.
/// </summary>
public sealed record FetchedChunk
{
/// <summary>
/// Zero-based chunk index.
/// </summary>
public required int Index { get; init; }
/// <summary>
/// The chunk data.
/// </summary>
public required byte[] Data { get; init; }
/// <summary>
/// SHA256 hash of the data for verification.
/// </summary>
public required string Hash { get; init; }
}
/// <summary>
/// Result of a lazy fetch operation.
/// </summary>
public sealed record LazyFetchResult
{
/// <summary>
/// Whether the fetch was successful.
/// </summary>
public required bool Success { get; init; }
/// <summary>
/// Number of chunks fetched.
/// </summary>
public required int ChunksFetched { get; init; }
/// <summary>
/// Total bytes fetched.
/// </summary>
public required long BytesFetched { get; init; }
/// <summary>
/// Number of chunks that failed verification.
/// </summary>
public required int ChunksFailedVerification { get; init; }
/// <summary>
/// Indices of failed chunks.
/// </summary>
public IReadOnlyList<int> FailedIndices { get; init; } = [];
/// <summary>
/// Any errors encountered.
/// </summary>
public IReadOnlyList<string> Errors { get; init; } = [];
/// <summary>
/// Time taken for the fetch operation.
/// </summary>
public TimeSpan Duration { get; init; }
}
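
A selection sketch built on IsAvailableAsync, preferring sneakernet media over the network; the ordering policy is an assumption, not part of this change.

static async Task<ILazyEvidenceFetcher?> PickFirstAvailableAsync(
    IReadOnlyList<ILazyEvidenceFetcher> candidates, // e.g. file fetcher first, then http
    CancellationToken ct)
{
    foreach (var candidate in candidates)
    {
        if (await candidate.IsAvailableAsync(ct))
        {
            return candidate; // "file" wins when the USB mount is present
        }
    }
    return null; // fully offline with no media attached
}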

View File

@@ -0,0 +1,296 @@
using System.Diagnostics;
using System.Security.Cryptography;
using Microsoft.Extensions.Logging;
namespace StellaOps.Provcache;
/// <summary>
/// Orchestrates lazy evidence fetching with verification.
/// Coordinates between fetchers and the local evidence store.
/// </summary>
public sealed class LazyFetchOrchestrator
{
private readonly IEvidenceChunkRepository _repository;
private readonly ILogger<LazyFetchOrchestrator> _logger;
private readonly TimeProvider _timeProvider;
/// <summary>
/// Creates a lazy fetch orchestrator.
/// </summary>
/// <param name="repository">The chunk repository for local storage.</param>
/// <param name="logger">Logger instance.</param>
/// <param name="timeProvider">Optional time provider.</param>
public LazyFetchOrchestrator(
IEvidenceChunkRepository repository,
ILogger<LazyFetchOrchestrator> logger,
TimeProvider? timeProvider = null)
{
_repository = repository ?? throw new ArgumentNullException(nameof(repository));
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
_timeProvider = timeProvider ?? TimeProvider.System;
}
/// <summary>
/// Fetches remaining chunks for a proof root and stores them locally.
/// </summary>
/// <param name="proofRoot">The proof root.</param>
/// <param name="fetcher">The fetcher to use.</param>
/// <param name="options">Fetch options.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The fetch result.</returns>
public async Task<LazyFetchResult> FetchAndStoreAsync(
string proofRoot,
ILazyEvidenceFetcher fetcher,
LazyFetchOptions? options = null,
CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrWhiteSpace(proofRoot);
ArgumentNullException.ThrowIfNull(fetcher);
options ??= new LazyFetchOptions();
var stopwatch = Stopwatch.StartNew();
var errors = new List<string>();
var failedIndices = new List<int>();
var chunksFetched = 0;
long bytesFetched = 0;
var chunksFailedVerification = 0;
_logger.LogInformation(
"Starting lazy fetch for {ProofRoot} using {FetcherType} fetcher",
proofRoot, fetcher.FetcherType);
try
{
// Check fetcher availability
if (!await fetcher.IsAvailableAsync(cancellationToken))
{
_logger.LogWarning("Fetcher {FetcherType} is not available", fetcher.FetcherType);
return new LazyFetchResult
{
Success = false,
ChunksFetched = 0,
BytesFetched = 0,
ChunksFailedVerification = 0,
Errors = [$"Fetcher {fetcher.FetcherType} is not available"],
Duration = stopwatch.Elapsed
};
}
// Get local manifest
var localManifest = await _repository.GetManifestAsync(proofRoot, cancellationToken);
if (localManifest is null)
{
// Try to fetch manifest from remote
localManifest = await fetcher.FetchManifestAsync(proofRoot, cancellationToken);
if (localManifest is null)
{
_logger.LogWarning("No manifest found for {ProofRoot}", proofRoot);
return new LazyFetchResult
{
Success = false,
ChunksFetched = 0,
BytesFetched = 0,
ChunksFailedVerification = 0,
Errors = [$"No manifest found for proof root {proofRoot}"],
Duration = stopwatch.Elapsed
};
}
}
// Get existing chunks
var existingChunks = (await _repository.GetChunksAsync(proofRoot, cancellationToken))
.Select(c => c.ChunkIndex)
.ToHashSet();
var totalChunks = localManifest.TotalChunks;
var missingCount = totalChunks - existingChunks.Count;
_logger.LogInformation(
"Have {Existing}/{Total} chunks, need to fetch {Missing}",
existingChunks.Count, totalChunks, missingCount);
if (missingCount == 0)
{
_logger.LogInformation("All chunks already present, nothing to fetch");
return new LazyFetchResult
{
Success = true,
ChunksFetched = 0,
BytesFetched = 0,
ChunksFailedVerification = 0,
Duration = stopwatch.Elapsed
};
}
// Fetch remaining chunks
var chunksToStore = new List<EvidenceChunk>();
var now = _timeProvider.GetUtcNow();
await foreach (var fetchedChunk in fetcher.FetchRemainingChunksAsync(
proofRoot, localManifest, existingChunks, cancellationToken))
{
// Verify chunk if enabled
if (options.VerifyOnFetch)
{
var isValid = VerifyChunk(fetchedChunk, localManifest);
if (!isValid)
{
chunksFailedVerification++;
failedIndices.Add(fetchedChunk.Index);
errors.Add($"Chunk {fetchedChunk.Index} failed verification");
if (options.FailOnVerificationError)
{
_logger.LogError("Chunk {Index} failed verification, aborting", fetchedChunk.Index);
break;
}
_logger.LogWarning("Chunk {Index} failed verification, skipping", fetchedChunk.Index);
continue;
}
}
// Convert FetchedChunk to EvidenceChunk for storage
var evidenceChunk = new EvidenceChunk
{
ChunkId = Guid.NewGuid(),
ProofRoot = proofRoot,
ChunkIndex = fetchedChunk.Index,
ChunkHash = fetchedChunk.Hash,
Blob = fetchedChunk.Data,
BlobSize = fetchedChunk.Data.Length,
ContentType = "application/octet-stream",
CreatedAt = now
};
chunksToStore.Add(evidenceChunk);
bytesFetched += fetchedChunk.Data.Length;
chunksFetched++;
// Batch store to reduce database round-trips
if (chunksToStore.Count >= options.BatchSize)
{
await _repository.StoreChunksAsync(proofRoot, chunksToStore, cancellationToken);
_logger.LogDebug("Stored batch of {Count} chunks", chunksToStore.Count);
chunksToStore.Clear();
}
// Check max chunks limit
if (options.MaxChunksToFetch > 0 && chunksFetched >= options.MaxChunksToFetch)
{
_logger.LogInformation("Reached max chunks limit ({Max})", options.MaxChunksToFetch);
break;
}
}
// Store any remaining chunks
if (chunksToStore.Count > 0)
{
await _repository.StoreChunksAsync(proofRoot, chunksToStore, cancellationToken);
_logger.LogDebug("Stored final batch of {Count} chunks", chunksToStore.Count);
}
stopwatch.Stop();
var success = chunksFailedVerification == 0 || !options.FailOnVerificationError;
_logger.LogInformation(
"Lazy fetch complete: {Fetched} chunks, {Bytes} bytes, {Failed} verification failures in {Duration}",
chunksFetched, bytesFetched, chunksFailedVerification, stopwatch.Elapsed);
return new LazyFetchResult
{
Success = success,
ChunksFetched = chunksFetched,
BytesFetched = bytesFetched,
ChunksFailedVerification = chunksFailedVerification,
FailedIndices = failedIndices,
Errors = errors,
Duration = stopwatch.Elapsed
};
}
catch (Exception ex)
{
_logger.LogError(ex, "Error during lazy fetch for {ProofRoot}", proofRoot);
errors.Add(ex.Message);
return new LazyFetchResult
{
Success = false,
ChunksFetched = chunksFetched,
BytesFetched = bytesFetched,
ChunksFailedVerification = chunksFailedVerification,
FailedIndices = failedIndices,
Errors = errors,
Duration = stopwatch.Elapsed
};
}
}
/// <summary>
/// Verifies a chunk against the manifest.
/// </summary>
private bool VerifyChunk(FetchedChunk chunk, ChunkManifest manifest)
{
// Check index bounds
if (chunk.Index < 0 || chunk.Index >= manifest.TotalChunks)
{
_logger.LogWarning("Chunk index {Index} out of bounds (max {Max})", chunk.Index, manifest.TotalChunks - 1);
return false;
}
        // Compute the data hash once and reuse it for both checks below.
        var computedHash = Convert.ToHexString(SHA256.HashData(chunk.Data)).ToLowerInvariant();
        // Verify hash against manifest metadata
        if (manifest.Chunks is not null && chunk.Index < manifest.Chunks.Count)
        {
            var expectedHash = manifest.Chunks[chunk.Index].Hash;
            if (!string.Equals(computedHash, expectedHash, StringComparison.OrdinalIgnoreCase))
            {
                _logger.LogWarning(
                    "Chunk {Index} hash mismatch: expected {Expected}, got {Actual}",
                    chunk.Index, expectedHash, computedHash);
                return false;
            }
        }
        // Also verify the hash the chunk claims for itself
        if (!string.Equals(computedHash, chunk.Hash, StringComparison.OrdinalIgnoreCase))
        {
            _logger.LogWarning(
                "Chunk {Index} self-hash mismatch: claimed {Claimed}, actual {Actual}",
                chunk.Index, chunk.Hash, computedHash);
            return false;
        }
        return true;
}
}
/// <summary>
/// Options for lazy fetch operations.
/// </summary>
public sealed class LazyFetchOptions
{
/// <summary>
/// Whether to verify chunks on fetch.
/// </summary>
public bool VerifyOnFetch { get; init; } = true;
/// <summary>
/// Whether to fail the entire operation on verification error.
/// </summary>
public bool FailOnVerificationError { get; init; } = false;
/// <summary>
/// Batch size for storing chunks.
/// </summary>
public int BatchSize { get; init; } = 100;
/// <summary>
/// Maximum number of chunks to fetch (0 = unlimited).
/// </summary>
public int MaxChunksToFetch { get; init; } = 0;
}
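
A catch-up sketch combining the orchestrator with the options above; repository, fetcher, logger, and cancellationToken are assumed to be in scope.

var orchestrator = new LazyFetchOrchestrator(repository, logger);
var result = await orchestrator.FetchAndStoreAsync(
    proofRoot,
    fetcher,
    new LazyFetchOptions
    {
        VerifyOnFetch = true,            // hash-check each chunk before storing
        FailOnVerificationError = false, // skip bad chunks rather than abort
        BatchSize = 250                  // fewer repository round-trips
    },
    cancellationToken);

if (result.ChunksFailedVerification > 0)
{
    // result.FailedIndices identifies the chunks to re-request from another source.
}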

View File

@@ -142,7 +142,7 @@ public sealed class ProvcacheService : IProvcacheService
ArgumentNullException.ThrowIfNull(entry);
var sw = Stopwatch.StartNew();
-        using var activity = ProvcacheTelemetry.StartSetActivity(entry.VeriKey, entry.TrustScore);
+        using var activity = ProvcacheTelemetry.StartSetActivity(entry.VeriKey, entry.Decision.TrustScore);
try
{
@@ -247,7 +247,7 @@ public sealed class ProvcacheService : IProvcacheService
{
ArgumentNullException.ThrowIfNull(request);
-        var invalidationType = request.Type?.ToString().ToLowerInvariant() ?? "unknown";
+        var invalidationType = request.Type.ToString().ToLowerInvariant();
using var activity = ProvcacheTelemetry.StartInvalidateActivity(invalidationType, request.Value);
try

View File

@@ -0,0 +1,160 @@
namespace StellaOps.Provcache;
/// <summary>
/// Interface for the revocation ledger.
/// Provides audit trail and replay capabilities for revocation events.
/// </summary>
public interface IRevocationLedger
{
/// <summary>
/// Records a revocation event in the ledger.
/// </summary>
/// <param name="entry">The revocation entry to record.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The recorded entry with sequence number.</returns>
Task<RevocationEntry> RecordAsync(
RevocationEntry entry,
CancellationToken cancellationToken = default);
/// <summary>
/// Gets revocation entries since a given sequence number.
/// </summary>
/// <param name="sinceSeqNo">The sequence number to start from (exclusive).</param>
/// <param name="limit">Maximum number of entries to return.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Ordered list of revocation entries.</returns>
Task<IReadOnlyList<RevocationEntry>> GetEntriesSinceAsync(
long sinceSeqNo,
int limit = 1000,
CancellationToken cancellationToken = default);
/// <summary>
/// Gets revocation entries by type.
/// </summary>
/// <param name="revocationType">The type of revocation to filter by.</param>
/// <param name="since">Only return entries after this time.</param>
/// <param name="limit">Maximum number of entries to return.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Ordered list of revocation entries.</returns>
Task<IReadOnlyList<RevocationEntry>> GetEntriesByTypeAsync(
string revocationType,
DateTimeOffset? since = null,
int limit = 1000,
CancellationToken cancellationToken = default);
/// <summary>
/// Gets the latest sequence number in the ledger.
/// </summary>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The latest sequence number, or 0 if empty.</returns>
Task<long> GetLatestSeqNoAsync(CancellationToken cancellationToken = default);
/// <summary>
/// Gets revocations for a specific key.
/// </summary>
/// <param name="revokedKey">The key to look up.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>List of revocation entries for the key.</returns>
Task<IReadOnlyList<RevocationEntry>> GetRevocationsForKeyAsync(
string revokedKey,
CancellationToken cancellationToken = default);
/// <summary>
/// Gets summary statistics for the ledger.
/// </summary>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Summary statistics.</returns>
Task<RevocationLedgerStats> GetStatsAsync(CancellationToken cancellationToken = default);
}
/// <summary>
/// A revocation entry in the ledger.
/// </summary>
public sealed record RevocationEntry
{
/// <summary>
/// Sequence number (set after recording).
/// </summary>
public long SeqNo { get; init; }
/// <summary>
/// Unique identifier for this revocation event.
/// </summary>
public required Guid RevocationId { get; init; }
/// <summary>
/// Type of revocation.
/// </summary>
public required string RevocationType { get; init; }
/// <summary>
/// The key that was revoked.
/// </summary>
public required string RevokedKey { get; init; }
/// <summary>
/// Reason for revocation.
/// </summary>
public string? Reason { get; init; }
/// <summary>
/// Number of entries invalidated.
/// </summary>
public int EntriesInvalidated { get; init; }
/// <summary>
/// Source of the revocation.
/// </summary>
public required string Source { get; init; }
/// <summary>
/// Correlation ID for tracing.
/// </summary>
public string? CorrelationId { get; init; }
/// <summary>
/// When the revocation occurred.
/// </summary>
public required DateTimeOffset RevokedAt { get; init; }
/// <summary>
/// Optional metadata.
/// </summary>
public IDictionary<string, object>? Metadata { get; init; }
}
/// <summary>
/// Summary statistics for the revocation ledger.
/// </summary>
public sealed record RevocationLedgerStats
{
/// <summary>
/// Total number of revocation entries.
/// </summary>
public required long TotalEntries { get; init; }
/// <summary>
/// Latest sequence number.
/// </summary>
public required long LatestSeqNo { get; init; }
/// <summary>
/// Entries by type.
/// </summary>
public required IReadOnlyDictionary<string, long> EntriesByType { get; init; }
/// <summary>
/// Total entries invalidated.
/// </summary>
public required long TotalEntriesInvalidated { get; init; }
/// <summary>
/// Timestamp of oldest entry.
/// </summary>
public DateTimeOffset? OldestEntryAt { get; init; }
/// <summary>
/// Timestamp of newest entry.
/// </summary>
public DateTimeOffset? NewestEntryAt { get; init; }
}

View File

@@ -0,0 +1,137 @@
using System.Collections.Concurrent;
using Microsoft.Extensions.Logging;
namespace StellaOps.Provcache;
/// <summary>
/// In-memory implementation of the revocation ledger for testing and non-persistent scenarios.
/// For production use, inject a PostgreSQL-backed implementation from StellaOps.Provcache.Postgres.
/// </summary>
public sealed class InMemoryRevocationLedger : IRevocationLedger
{
private readonly ConcurrentDictionary<long, RevocationEntry> _entries = new();
private readonly ILogger<InMemoryRevocationLedger> _logger;
private long _currentSeqNo;
public InMemoryRevocationLedger(ILogger<InMemoryRevocationLedger> logger)
{
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
}
/// <inheritdoc />
public Task<RevocationEntry> RecordAsync(
RevocationEntry entry,
CancellationToken cancellationToken = default)
{
ArgumentNullException.ThrowIfNull(entry);
var seqNo = Interlocked.Increment(ref _currentSeqNo);
var recordedEntry = entry with { SeqNo = seqNo };
_entries[seqNo] = recordedEntry;
_logger.LogInformation(
"Recorded revocation {RevocationId} of type {Type} for key {Key}, invalidated {Count} entries",
entry.RevocationId, entry.RevocationType, entry.RevokedKey, entry.EntriesInvalidated);
return Task.FromResult(recordedEntry);
}
/// <inheritdoc />
public Task<IReadOnlyList<RevocationEntry>> GetEntriesSinceAsync(
long sinceSeqNo,
int limit = 1000,
CancellationToken cancellationToken = default)
{
var entries = _entries.Values
.Where(e => e.SeqNo > sinceSeqNo)
.OrderBy(e => e.SeqNo)
.Take(limit)
.ToList();
return Task.FromResult<IReadOnlyList<RevocationEntry>>(entries);
}
/// <inheritdoc />
public Task<IReadOnlyList<RevocationEntry>> GetEntriesByTypeAsync(
string revocationType,
DateTimeOffset? since = null,
int limit = 1000,
CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrWhiteSpace(revocationType);
var query = _entries.Values
.Where(e => e.RevocationType == revocationType);
if (since.HasValue)
{
query = query.Where(e => e.RevokedAt > since.Value);
}
var entries = query
.OrderBy(e => e.SeqNo)
.Take(limit)
.ToList();
return Task.FromResult<IReadOnlyList<RevocationEntry>>(entries);
}
/// <inheritdoc />
public Task<long> GetLatestSeqNoAsync(CancellationToken cancellationToken = default)
{
return Task.FromResult(Interlocked.Read(ref _currentSeqNo));
}
/// <inheritdoc />
public Task<IReadOnlyList<RevocationEntry>> GetRevocationsForKeyAsync(
string revokedKey,
CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrWhiteSpace(revokedKey);
var entries = _entries.Values
.Where(e => e.RevokedKey == revokedKey)
.OrderBy(e => e.SeqNo)
.ToList();
return Task.FromResult<IReadOnlyList<RevocationEntry>>(entries);
}
/// <inheritdoc />
public Task<RevocationLedgerStats> GetStatsAsync(CancellationToken cancellationToken = default)
{
var allEntries = _entries.Values.ToList();
var totalEntries = allEntries.Count;
var latestSeqNo = Interlocked.Read(ref _currentSeqNo);
var totalInvalidated = allEntries.Sum(e => (long)e.EntriesInvalidated);
var entriesByType = allEntries
.GroupBy(e => e.RevocationType)
.ToDictionary(g => g.Key, g => (long)g.Count());
var oldestEntry = allEntries.MinBy(e => e.SeqNo)?.RevokedAt;
var newestEntry = allEntries.MaxBy(e => e.SeqNo)?.RevokedAt;
return Task.FromResult(new RevocationLedgerStats
{
TotalEntries = totalEntries,
LatestSeqNo = latestSeqNo,
EntriesByType = entriesByType,
TotalEntriesInvalidated = totalInvalidated,
OldestEntryAt = oldestEntry,
NewestEntryAt = newestEntry
});
}
/// <summary>
/// Clears all entries (for testing).
/// </summary>
public void Clear()
{
_entries.Clear();
Interlocked.Exchange(ref _currentSeqNo, 0);
}
}
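
A recording sketch with illustrative values; in production the type string would come from the RevocationTypes constants referenced by RevocationReplayService below.

var ledger = new InMemoryRevocationLedger(logger);
var recorded = await ledger.RecordAsync(new RevocationEntry
{
    RevocationId = Guid.NewGuid(),
    RevocationType = "signer",         // example type string
    RevokedKey = "sha256:deadbeef",    // example signer-set hash
    Reason = "key compromise",
    EntriesInvalidated = 42,
    Source = "SignerSetInvalidator",
    RevokedAt = DateTimeOffset.UtcNow
});

// SeqNo is assigned atomically on record; consumers can resume from it.
var newer = await ledger.GetEntriesSinceAsync(recorded.SeqNo - 1);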

View File

@@ -0,0 +1,295 @@
using Microsoft.Extensions.Logging;
namespace StellaOps.Provcache;
/// <summary>
/// Interface for replaying revocation events for catch-up scenarios.
/// </summary>
public interface IRevocationReplayService
{
/// <summary>
/// Replays revocation events since a checkpoint.
    /// Used for catch-up after an offline period or a node restart.
/// </summary>
/// <param name="sinceSeqNo">Sequence number to replay from.</param>
/// <param name="options">Replay options.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Replay result with statistics.</returns>
Task<RevocationReplayResult> ReplayFromAsync(
long sinceSeqNo,
RevocationReplayOptions? options = null,
CancellationToken cancellationToken = default);
/// <summary>
/// Gets the current replay checkpoint.
/// </summary>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The checkpoint sequence number.</returns>
Task<long> GetCheckpointAsync(CancellationToken cancellationToken = default);
/// <summary>
/// Saves a replay checkpoint.
/// </summary>
/// <param name="seqNo">The sequence number to checkpoint.</param>
/// <param name="cancellationToken">Cancellation token.</param>
Task SaveCheckpointAsync(long seqNo, CancellationToken cancellationToken = default);
}
/// <summary>
/// Options for revocation replay.
/// </summary>
public sealed class RevocationReplayOptions
{
/// <summary>
/// Maximum entries to process per batch.
/// </summary>
public int BatchSize { get; init; } = 1000;
/// <summary>
/// Whether to save checkpoint after each batch.
/// </summary>
public bool SaveCheckpointPerBatch { get; init; } = true;
/// <summary>
/// Whether to verify invalidations against current cache state.
/// </summary>
public bool VerifyInvalidations { get; init; } = false;
/// <summary>
/// Maximum total entries to replay (0 = unlimited).
/// </summary>
public int MaxEntries { get; init; } = 0;
}
/// <summary>
/// Result of a revocation replay operation.
/// </summary>
public sealed record RevocationReplayResult
{
/// <summary>
/// Whether the replay completed successfully.
/// </summary>
public required bool Success { get; init; }
/// <summary>
/// Number of entries replayed.
/// </summary>
public required int EntriesReplayed { get; init; }
/// <summary>
/// Starting sequence number.
/// </summary>
public required long StartSeqNo { get; init; }
/// <summary>
/// Ending sequence number.
/// </summary>
public required long EndSeqNo { get; init; }
/// <summary>
/// Total invalidations applied.
/// </summary>
public required int TotalInvalidations { get; init; }
/// <summary>
/// Entries by revocation type.
/// </summary>
public IReadOnlyDictionary<string, int> EntriesByType { get; init; } = new Dictionary<string, int>();
/// <summary>
/// Time taken for replay.
/// </summary>
public TimeSpan Duration { get; init; }
/// <summary>
/// Any errors encountered.
/// </summary>
public IReadOnlyList<string> Errors { get; init; } = [];
}
/// <summary>
/// Implementation of revocation replay service.
/// </summary>
public sealed class RevocationReplayService : IRevocationReplayService
{
private readonly IRevocationLedger _ledger;
private readonly IProvcacheRepository _repository;
private readonly ILogger<RevocationReplayService> _logger;
private readonly TimeProvider _timeProvider;
// In-memory checkpoint (production would use persistent storage)
private long _checkpoint;
public RevocationReplayService(
IRevocationLedger ledger,
IProvcacheRepository repository,
ILogger<RevocationReplayService> logger,
TimeProvider? timeProvider = null)
{
_ledger = ledger ?? throw new ArgumentNullException(nameof(ledger));
_repository = repository ?? throw new ArgumentNullException(nameof(repository));
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
_timeProvider = timeProvider ?? TimeProvider.System;
}
/// <inheritdoc />
public async Task<RevocationReplayResult> ReplayFromAsync(
long sinceSeqNo,
RevocationReplayOptions? options = null,
CancellationToken cancellationToken = default)
{
options ??= new RevocationReplayOptions();
var startTime = _timeProvider.GetUtcNow();
var errors = new List<string>();
var entriesByType = new Dictionary<string, int>();
var totalReplayed = 0;
var totalInvalidations = 0;
var currentSeqNo = sinceSeqNo;
var endSeqNo = sinceSeqNo;
_logger.LogInformation("Starting revocation replay from seq {SeqNo}", sinceSeqNo);
try
{
while (true)
{
cancellationToken.ThrowIfCancellationRequested();
var entries = await _ledger.GetEntriesSinceAsync(
currentSeqNo,
options.BatchSize,
cancellationToken);
if (entries.Count == 0)
{
_logger.LogDebug("No more entries to replay");
break;
}
foreach (var entry in entries)
{
// Track by type
if (!entriesByType.TryGetValue(entry.RevocationType, out var count))
{
count = 0;
}
entriesByType[entry.RevocationType] = count + 1;
// Apply invalidation based on type
try
{
var invalidated = await ApplyRevocationAsync(entry, cancellationToken);
totalInvalidations += invalidated;
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Error applying revocation {RevocationId}", entry.RevocationId);
errors.Add($"Failed to apply revocation {entry.RevocationId}: {ex.Message}");
}
currentSeqNo = entry.SeqNo;
endSeqNo = entry.SeqNo;
totalReplayed++;
// Check max entries limit
if (options.MaxEntries > 0 && totalReplayed >= options.MaxEntries)
{
_logger.LogInformation("Reached max entries limit ({Max})", options.MaxEntries);
break;
}
}
// Save checkpoint per batch if enabled
if (options.SaveCheckpointPerBatch)
{
await SaveCheckpointAsync(endSeqNo, cancellationToken);
}
// Check max entries limit
if (options.MaxEntries > 0 && totalReplayed >= options.MaxEntries)
{
break;
}
}
var duration = _timeProvider.GetUtcNow() - startTime;
_logger.LogInformation(
"Revocation replay complete: {Replayed} entries, {Invalidations} invalidations in {Duration}",
totalReplayed, totalInvalidations, duration);
return new RevocationReplayResult
{
Success = errors.Count == 0,
EntriesReplayed = totalReplayed,
StartSeqNo = sinceSeqNo,
EndSeqNo = endSeqNo,
TotalInvalidations = totalInvalidations,
EntriesByType = entriesByType,
Duration = duration,
Errors = errors
};
}
catch (Exception ex)
{
_logger.LogError(ex, "Error during revocation replay");
errors.Add(ex.Message);
return new RevocationReplayResult
{
Success = false,
EntriesReplayed = totalReplayed,
StartSeqNo = sinceSeqNo,
EndSeqNo = endSeqNo,
TotalInvalidations = totalInvalidations,
EntriesByType = entriesByType,
Duration = _timeProvider.GetUtcNow() - startTime,
Errors = errors
};
}
}
/// <inheritdoc />
public Task<long> GetCheckpointAsync(CancellationToken cancellationToken = default)
{
return Task.FromResult(_checkpoint);
}
/// <inheritdoc />
public Task SaveCheckpointAsync(long seqNo, CancellationToken cancellationToken = default)
{
_checkpoint = seqNo;
_logger.LogDebug("Saved checkpoint at seq {SeqNo}", seqNo);
return Task.CompletedTask;
}
private async Task<int> ApplyRevocationAsync(
RevocationEntry entry,
CancellationToken cancellationToken)
{
        // Note: replay re-applies the same invalidation logic as the original event.
        // This is idempotent: if entries were already invalidated, the count is 0.
var count = entry.RevocationType switch
{
Entities.RevocationTypes.Signer =>
await _repository.DeleteBySignerSetHashAsync(entry.RevokedKey, cancellationToken),
Entities.RevocationTypes.FeedEpoch =>
await _repository.DeleteByFeedEpochOlderThanAsync(entry.RevokedKey, cancellationToken),
Entities.RevocationTypes.Policy =>
await _repository.DeleteByPolicyHashAsync(entry.RevokedKey, cancellationToken),
Entities.RevocationTypes.Explicit =>
await _repository.DeleteAsync(entry.RevokedKey, cancellationToken) ? 1L : 0L,
Entities.RevocationTypes.Expiration =>
0L, // TTL expiration is handled by background cleanup, not replay
_ => 0L
};
return (int)count;
}
}
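
A restart catch-up sketch; replayService and logger are assumed from DI, and the batch size is illustrative.

var checkpoint = await replayService.GetCheckpointAsync(cancellationToken);
var result = await replayService.ReplayFromAsync(
    checkpoint,
    new RevocationReplayOptions { BatchSize = 500, SaveCheckpointPerBatch = true },
    cancellationToken);

logger.LogInformation(
    "Replay caught up from seq {Start} to {End}: {Count} entries, {Invalidations} invalidations",
    result.StartSeqNo, result.EndSeqNo, result.EntriesReplayed, result.TotalInvalidations);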

View File

@@ -26,6 +26,7 @@
<ProjectReference Include="../StellaOps.Canonical.Json/StellaOps.Canonical.Json.csproj" />
<ProjectReference Include="../StellaOps.Cryptography/StellaOps.Cryptography.csproj" />
<ProjectReference Include="../StellaOps.Messaging/StellaOps.Messaging.csproj" />
<ProjectReference Include="../../Provenance/StellaOps.Provenance.Attestation/StellaOps.Provenance.Attestation.csproj" />
</ItemGroup>
</Project>

View File

@@ -59,6 +59,7 @@ public sealed class WriteBehindQueue : BackgroundService, IWriteBehindQueue
Interlocked.Increment(ref _totalEnqueued);
Interlocked.Increment(ref _currentQueueDepth);
ProvcacheTelemetry.SetWriteBehindQueueSize((int)Interlocked.Read(ref _currentQueueDepth));
return _channel.Writer.WriteAsync(item, cancellationToken);
}
@@ -143,6 +144,7 @@ public sealed class WriteBehindQueue : BackgroundService, IWriteBehindQueue
private async Task ProcessBatchAsync(List<WriteBehindItem> batch, CancellationToken cancellationToken)
{
var entries = batch.Select(b => b.Entry).ToList();
using var activity = ProvcacheTelemetry.StartWriteBehindFlushActivity(batch.Count);
try
{
@@ -150,6 +152,8 @@ public sealed class WriteBehindQueue : BackgroundService, IWriteBehindQueue
Interlocked.Add(ref _totalPersisted, batch.Count);
Interlocked.Increment(ref _totalBatches);
ProvcacheTelemetry.RecordWriteBehind("ok", batch.Count);
ProvcacheTelemetry.SetWriteBehindQueueSize((int)Interlocked.Read(ref _currentQueueDepth));
_logger.LogDebug(
"Write-behind batch persisted {Count} entries",
@@ -157,6 +161,7 @@ public sealed class WriteBehindQueue : BackgroundService, IWriteBehindQueue
}
catch (Exception ex)
{
ProvcacheTelemetry.MarkError(activity, ex.Message);
_logger.LogWarning(
ex,
"Write-behind batch failed for {Count} entries, scheduling retries",
@@ -169,14 +174,17 @@ public sealed class WriteBehindQueue : BackgroundService, IWriteBehindQueue
{
var retryItem = item with { RetryCount = item.RetryCount + 1 };
Interlocked.Increment(ref _totalRetries);
ProvcacheTelemetry.RecordWriteBehind("retry", 1);
if (_channel.Writer.TryWrite(retryItem))
{
Interlocked.Increment(ref _currentQueueDepth);
ProvcacheTelemetry.SetWriteBehindQueueSize((int)Interlocked.Read(ref _currentQueueDepth));
}
else
{
Interlocked.Increment(ref _totalFailed);
ProvcacheTelemetry.RecordWriteBehind("failed", 1);
_logger.LogError(
"Write-behind queue full, dropping entry for VeriKey {VeriKey}",
item.Entry.VeriKey);
@@ -185,6 +193,7 @@ public sealed class WriteBehindQueue : BackgroundService, IWriteBehindQueue
else
{
Interlocked.Increment(ref _totalFailed);
ProvcacheTelemetry.RecordWriteBehind("failed", 1);
_logger.LogError(
"Write-behind max retries exceeded for VeriKey {VeriKey}",
item.Entry.VeriKey);