notify doctors work, audit work, new product advisory sprints

This commit is contained in:
master
2026-01-13 08:36:29 +02:00
parent b8868a5f13
commit 9ca7cb183e
343 changed files with 24492 additions and 3544 deletions

View File

@@ -0,0 +1,262 @@
// -----------------------------------------------------------------------------
// RekorReceiptTests.cs
// Description: Unit tests for standardized Rekor receipt schema.
// -----------------------------------------------------------------------------
using System.Text.Json;
using FluentAssertions;
using StellaOps.Attestor.Core.Rekor;
using Xunit;
namespace StellaOps.Attestor.Core.Tests.Rekor;
[Trait("Category", "Unit")]
[Trait("Category", "Rekor")]
public sealed class RekorReceiptTests
{
    [Fact]
    public void RekorReceipt_SerializesToValidJson()
    {
        // Arrange
        var sut = BuildReceipt();

        // Act
        var serialized = JsonSerializer.Serialize(sut, new JsonSerializerOptions { WriteIndented = true });

        // Assert: every top-level schema field must appear under its camelCase name.
        serialized.Should().NotBeNullOrEmpty();
        serialized.Should().Contain("\"schemaVersion\":");
        serialized.Should().Contain("\"uuid\":");
        serialized.Should().Contain("\"logIndex\":");
        serialized.Should().Contain("\"checkpoint\":");
        serialized.Should().Contain("\"inclusionProof\":");
    }

    [Fact]
    public void RekorReceipt_RoundtripsCorrectly()
    {
        // Arrange
        var source = BuildReceipt();

        // Act: serialize, then deserialize, and compare field by field.
        var payload = JsonSerializer.Serialize(source);
        var roundTripped = JsonSerializer.Deserialize<RekorReceipt>(payload);

        // Assert
        roundTripped.Should().NotBeNull();
        roundTripped!.Uuid.Should().Be(source.Uuid);
        roundTripped.LogIndex.Should().Be(source.LogIndex);
        roundTripped.LogId.Should().Be(source.LogId);
        roundTripped.IntegratedTime.Should().Be(source.IntegratedTime);
        roundTripped.EntryKind.Should().Be(source.EntryKind);
        roundTripped.EntryBodyHash.Should().Be(source.EntryBodyHash);
        roundTripped.Checkpoint.Origin.Should().Be(source.Checkpoint.Origin);
        roundTripped.InclusionProof.LeafHash.Should().Be(source.InclusionProof.LeafHash);
    }

    [Fact]
    public void RekorReceipt_IntegratedTimeUtc_ConvertsCorrectly()
    {
        // Arrange
        const long epochSeconds = 1704067200L; // 2024-01-01 00:00:00 UTC
        var sut = BuildReceipt() with { IntegratedTime = epochSeconds };

        // Act
        var utc = sut.IntegratedTimeUtc;

        // Assert: every component of the converted timestamp matches the epoch value.
        utc.Year.Should().Be(2024);
        utc.Month.Should().Be(1);
        utc.Day.Should().Be(1);
        utc.Hour.Should().Be(0);
        utc.Minute.Should().Be(0);
        utc.Second.Should().Be(0);
    }

    [Fact]
    public void RekorReceipt_EntryUrl_FormsCorrectly()
    {
        // Arrange
        var sut = BuildReceipt() with
        {
            LogUrl = "https://rekor.sigstore.dev",
            Uuid = "abc123def456"
        };

        // Act
        var url = sut.EntryUrl;

        // Assert
        url.Should().Be("https://rekor.sigstore.dev/api/v1/log/entries/abc123def456");
    }

    [Fact]
    public void RekorReceipt_EntryUrl_HandlesTrailingSlash()
    {
        // Arrange: a trailing slash on the log URL must not produce a double slash.
        var sut = BuildReceipt() with
        {
            LogUrl = "https://rekor.sigstore.dev/",
            Uuid = "abc123"
        };

        // Act
        var url = sut.EntryUrl;

        // Assert
        url.Should().Be("https://rekor.sigstore.dev/api/v1/log/entries/abc123");
    }

    [Fact]
    public void RekorCheckpointV2_TimestampUtc_ConvertsCorrectly()
    {
        // Arrange
        var cp = new RekorCheckpointV2
        {
            Origin = "test-origin",
            Size = 1000,
            RootHash = "abc123",
            Timestamp = 1704067200L,
            Signature = "sig123"
        };

        // Act
        var utc = cp.TimestampUtc;

        // Assert
        utc.Year.Should().Be(2024);
    }

    [Fact]
    public void RekorInclusionProofV2_SerializesHashesCorrectly()
    {
        // Arrange
        var subject = new RekorInclusionProofV2
        {
            LogIndex = 1000,
            TreeSize = 2000,
            RootHash = "root123",
            LeafHash = "leaf456",
            Hashes = new[] { "hash1", "hash2", "hash3" }
        };

        // Act
        var payload = JsonSerializer.Serialize(subject);
        var roundTripped = JsonSerializer.Deserialize<RekorInclusionProofV2>(payload);

        // Assert: hash ordering is significant for Merkle proof verification.
        roundTripped.Should().NotBeNull();
        roundTripped!.Hashes.Should().HaveCount(3);
        roundTripped.Hashes.Should().ContainInOrder("hash1", "hash2", "hash3");
    }

    [Fact]
    public void RekorReceiptVerificationResult_WhenValid_IsHealthy()
    {
        // Arrange
        var verification = new RekorReceiptVerificationResult
        {
            IsValid = true,
            CheckpointSignatureValid = true,
            InclusionProofValid = true,
            EntryHashValid = true,
            TimeSkewAcceptable = true,
            VerifiedAt = DateTimeOffset.UtcNow
        };

        // Assert: a fully valid result carries no errors.
        verification.IsValid.Should().BeTrue();
        verification.Errors.Should().BeEmpty();
    }

    [Fact]
    public void RekorReceiptVerificationResult_WhenInvalid_ContainsErrors()
    {
        // Arrange
        var verification = new RekorReceiptVerificationResult
        {
            IsValid = false,
            CheckpointSignatureValid = false,
            InclusionProofValid = true,
            EntryHashValid = true,
            TimeSkewAcceptable = true,
            Errors = new[] { "Checkpoint signature verification failed" },
            VerifiedAt = DateTimeOffset.UtcNow
        };

        // Assert
        verification.IsValid.Should().BeFalse();
        verification.Errors.Should().Contain("Checkpoint signature verification failed");
    }

    [Fact]
    public void RekorReceiptVerificationOptions_HasSensibleDefaults()
    {
        // Arrange & Act
        var options = new RekorReceiptVerificationOptions();

        // Assert
        options.MaxClockSkewSeconds.Should().Be(300); // 5 minutes
        options.AllowOfflineVerification.Should().BeTrue();
        options.MaxOfflineCheckpointAgeHours.Should().Be(24);
        options.RequireCheckpointSignature.Should().BeTrue();
    }

    [Fact]
    public void RekorReceipt_IncludesOptionalPolicyFields()
    {
        // Arrange: optional policy-linkage fields must survive a round trip.
        var sut = BuildReceipt() with
        {
            PolicyHash = "sha256:policy123",
            GraphRevision = "rev-456",
            IdempotencyKey = "idem-789"
        };

        // Act
        var payload = JsonSerializer.Serialize(sut);
        var roundTripped = JsonSerializer.Deserialize<RekorReceipt>(payload);

        // Assert
        roundTripped!.PolicyHash.Should().Be("sha256:policy123");
        roundTripped.GraphRevision.Should().Be("rev-456");
        roundTripped.IdempotencyKey.Should().Be("idem-789");
    }

    [Fact]
    public void RekorReceipt_SchemaVersion_DefaultsTo1_0_0()
    {
        // Arrange
        var sut = BuildReceipt();

        // Assert
        sut.SchemaVersion.Should().Be("1.0.0");
    }

    /// <summary>
    /// Builds a structurally valid receipt with representative values, reused by all tests.
    /// </summary>
    private static RekorReceipt BuildReceipt() => new()
    {
        Uuid = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
        LogIndex = 12345,
        LogId = "rekor.sigstore.dev - 2605736670972794746",
        LogUrl = "https://rekor.sigstore.dev",
        IntegratedTime = DateTimeOffset.UtcNow.ToUnixTimeSeconds(),
        EntryKind = "dsse",
        EntryBodyHash = "sha256:abcdef123456",
        Checkpoint = new RekorCheckpointV2
        {
            Origin = "rekor.sigstore.dev - 2605736670972794746",
            Size = 50000,
            RootHash = "abc123def456",
            Timestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds(),
            Signature = "MEUCIQDtest..."
        },
        InclusionProof = new RekorInclusionProofV2
        {
            LogIndex = 12345,
            TreeSize = 50000,
            RootHash = "abc123def456",
            LeafHash = "leaf789xyz",
            Hashes = new[] { "hash1", "hash2", "hash3" }
        }
    };
}

View File

@@ -0,0 +1,249 @@
// -----------------------------------------------------------------------------
// TransparencyStatusProviderTests.cs
// Description: Unit tests for transparency status provider.
// -----------------------------------------------------------------------------
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Attestor.Core.Transparency;
using Xunit;
using OptionsFactory = Microsoft.Extensions.Options.Options;
namespace StellaOps.Attestor.Core.Tests.Transparency;
[Trait("Category", "Unit")]
[Trait("Category", "Transparency")]
public sealed class TransparencyStatusProviderTests : IDisposable
{
    private readonly FakeTimeProvider _timeProvider;
    private readonly TransparencyStatusProvider _provider;

    /// <summary>
    /// Builds a provider with a fake clock and the default thresholds used by these tests
    /// (24h stale, 72h critical) pointing at the public Rekor backend URL.
    /// </summary>
    public TransparencyStatusProviderTests()
    {
        _timeProvider = new FakeTimeProvider(DateTimeOffset.UtcNow);
        var options = OptionsFactory.Create(new TransparencyStatusOptions
        {
            MaxCheckpointAgeHours = 24,
            CriticalCheckpointAgeHours = 72,
            RekorBackendUrl = "https://rekor.sigstore.dev"
        });
        _provider = new TransparencyStatusProvider(
            NullLogger<TransparencyStatusProvider>.Instance,
            options,
            _timeProvider);
    }

    [Fact]
    public async Task GetStatusAsync_WhenNeverSynced_ReturnsUnknown()
    {
        // Act
        var status = await _provider.GetStatusAsync();
        // Assert
        status.Status.Should().Be(TransparencyStatusLevel.Unknown);
        status.LastSyncAt.Should().BeNull();
        status.Message.Should().Contain("never synced");
    }

    [Fact]
    public async Task GetStatusAsync_WhenRecentlySync_ReturnsHealthy()
    {
        // Arrange
        var syncTime = _timeProvider.GetUtcNow().AddHours(-1);
        _provider.RecordSync(syncTime, 12345);
        // Act
        var status = await _provider.GetStatusAsync();
        // Assert
        status.Status.Should().Be(TransparencyStatusLevel.Healthy);
        status.LastSyncAt.Should().Be(syncTime);
        status.LastSyncAgeHours.Should().BeApproximately(1, 0.1);
        status.LastCheckpointTreeSize.Should().Be(12345);
    }

    [Fact]
    public async Task GetStatusAsync_WhenSyncStale_ReturnsDegraded()
    {
        // Arrange - sync 30 hours ago (exceeds 24h threshold)
        var syncTime = _timeProvider.GetUtcNow().AddHours(-30);
        _provider.RecordSync(syncTime, 12345);
        // Act
        var status = await _provider.GetStatusAsync();
        // Assert
        status.Status.Should().Be(TransparencyStatusLevel.Degraded);
        status.LastSyncAgeHours.Should().BeApproximately(30, 0.1);
        status.Message.Should().Contain("stale");
    }

    [Fact]
    public async Task GetStatusAsync_WhenSyncCriticallyStale_ReturnsUnhealthy()
    {
        // Arrange - sync 80 hours ago (exceeds 72h critical threshold)
        var syncTime = _timeProvider.GetUtcNow().AddHours(-80);
        _provider.RecordSync(syncTime, 12345);
        // Act
        var status = await _provider.GetStatusAsync();
        // Assert
        status.Status.Should().Be(TransparencyStatusLevel.Unhealthy);
        status.Message.Should().Contain("critically stale");
    }

    [Fact]
    public async Task GetStatusAsync_WhenOfflineModeWithFreshCheckpoint_ReturnsOffline()
    {
        // Arrange - create provider without backend URL (offline mode)
        var offlineOptions = OptionsFactory.Create(new TransparencyStatusOptions
        {
            MaxCheckpointAgeHours = 24,
            RekorBackendUrl = null // Offline mode
        });
        using var offlineProvider = new TransparencyStatusProvider(
            NullLogger<TransparencyStatusProvider>.Instance,
            offlineOptions,
            _timeProvider);
        var syncTime = _timeProvider.GetUtcNow().AddHours(-1);
        offlineProvider.RecordSync(syncTime, 12345);
        // Act
        var status = await offlineProvider.GetStatusAsync();
        // Assert
        status.Status.Should().Be(TransparencyStatusLevel.Offline);
        status.OfflineMode.Should().BeTrue();
        status.Message.Should().Contain("offline mode");
    }

    [Fact]
    public async Task RecordSubmission_TracksMetrics()
    {
        // Arrange
        var latency1 = TimeSpan.FromMilliseconds(100);
        var latency2 = TimeSpan.FromMilliseconds(200);
        var latency3 = TimeSpan.FromMilliseconds(150);
        // Act
        _provider.RecordSubmission(true, latency1);
        _provider.RecordSubmission(true, latency2);
        _provider.RecordSubmission(false, latency3);
        // Assert
        // FIX: previously used GetStatusAsync().Result, which blocks a thread-pool
        // thread on async work (deadlock/starvation hazard); await instead.
        var status = await _provider.GetStatusAsync();
        status.Metrics.Should().NotBeNull();
        status.Metrics!.SubmissionsLastHour.Should().Be(3);
        status.Metrics.SuccessfulSubmissionsLastHour.Should().Be(2);
        status.Metrics.FailedSubmissionsLastHour.Should().Be(1);
        status.Metrics.AvgSubmissionLatencyMs.Should().Be(150); // (100+200)/2 = 150 (only successful)
    }

    [Fact]
    public async Task RecordVerification_TracksMetrics()
    {
        // Act
        _provider.RecordVerification(true, false);
        _provider.RecordVerification(true, true);
        _provider.RecordVerification(false, false);
        // Assert
        // FIX: await instead of blocking on .Result (sync-over-async anti-pattern).
        var status = await _provider.GetStatusAsync();
        status.Metrics.Should().NotBeNull();
        status.Metrics!.VerificationsLastHour.Should().Be(3);
        status.Metrics.SuccessfulVerificationsLastHour.Should().Be(2);
        status.Metrics.OfflineVerificationsLastHour.Should().Be(1);
    }

    [Fact]
    public async Task GetStatusAsync_ReportsQueueDepths()
    {
        // Arrange
        _provider.UpdateQueueDepths(submissionQueue: 5, deadLetterQueue: 2);
        // Act
        var status = await _provider.GetStatusAsync();
        // Assert
        status.SubmissionQueueDepth.Should().Be(5);
        status.DeadLetterQueueDepth.Should().Be(2);
    }

    [Fact]
    public async Task GetStatusAsync_ReportsConfiguration()
    {
        // Act
        var status = await _provider.GetStatusAsync();
        // Assert
        status.MaxCheckpointAgeHours.Should().Be(24);
        status.RekorBackend.Should().Be("https://rekor.sigstore.dev");
        status.EnforcementEnabled.Should().BeFalse(); // default
    }

    [Fact]
    public async Task IsCheckpointFresh_WhenWithinThreshold_ReturnsTrue()
    {
        // Arrange
        var syncTime = _timeProvider.GetUtcNow().AddHours(-12);
        _provider.RecordSync(syncTime, 12345);
        // Act
        var status = await _provider.GetStatusAsync();
        // Assert
        status.IsCheckpointFresh.Should().BeTrue();
    }

    [Fact]
    public async Task IsCheckpointFresh_WhenExceedsThreshold_ReturnsFalse()
    {
        // Arrange
        var syncTime = _timeProvider.GetUtcNow().AddHours(-30);
        _provider.RecordSync(syncTime, 12345);
        // Act
        var status = await _provider.GetStatusAsync();
        // Assert
        status.IsCheckpointFresh.Should().BeFalse();
    }

    [Fact]
    public async Task IsHealthy_WhenHealthyOrDegraded_ReturnsTrue()
    {
        // Arrange - fresh sync (healthy)
        var syncTime = _timeProvider.GetUtcNow().AddHours(-1);
        _provider.RecordSync(syncTime, 12345);
        // Act
        var status = await _provider.GetStatusAsync();
        // Assert
        status.IsHealthy.Should().BeTrue();
    }

    public void Dispose()
    {
        _provider.Dispose();
    }

    /// <summary>
    /// Deterministic clock for tests; Advance() allows time-travel scenarios.
    /// </summary>
    private sealed class FakeTimeProvider : TimeProvider
    {
        private DateTimeOffset _utcNow;
        public FakeTimeProvider(DateTimeOffset utcNow)
        {
            _utcNow = utcNow;
        }
        public override DateTimeOffset GetUtcNow() => _utcNow;
        public void Advance(TimeSpan duration) => _utcNow = _utcNow.Add(duration);
    }
}

View File

@@ -121,6 +121,30 @@ public sealed class AttestorOptions
/// <summary>
/// Interval in milliseconds between attempts (default: 250).
/// NOTE(review): presumably paces proof polling against the backend — confirm against usage.
/// </summary>
public int PollIntervalMs { get; set; } = 250;
/// <summary>
/// Maximum number of attempts before giving up (default: 60).
/// </summary>
public int MaxAttempts { get; set; } = 60;
/// <summary>
/// Log version to use: Auto, V1, or V2.
/// V2 uses tile-based (Sunlight) log structure.
/// Default: Auto (backward compatible).
/// </summary>
public string Version { get; set; } = "Auto";
/// <summary>
/// Base URL for tile fetching in Rekor v2.
/// If not specified, defaults to {Url}/tile/.
/// </summary>
public string? TileBaseUrl { get; set; }
/// <summary>
/// Log ID (SHA-256 of log's public key) for multi-log environments.
/// Production Rekor: c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d
/// </summary>
public string? LogId { get; set; }
/// <summary>
/// When true and Version is Auto, prefer tile-based proofs over v1 proofs.
/// </summary>
public bool PreferTileProofs { get; set; } = false;
}
public sealed class RekorMirrorOptions : RekorBackendOptions

View File

@@ -0,0 +1,208 @@
using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
namespace StellaOps.Attestor.Core.Rekor;
/// <summary>
/// Client for fetching proofs from Rekor v2 tile-based logs.
/// Tile-based logs store the Merkle tree in fixed-size chunks (tiles)
/// that can be fetched directly for offline-capable verification.
/// </summary>
/// <remarks>
/// All members accept a <see cref="RekorBackend"/> so a single client instance
/// can serve multiple configured logs. Each method returns null (rather than
/// throwing) when the requested artifact is unavailable.
/// </remarks>
public interface IRekorTileClient
{
    /// <summary>
    /// Fetches the latest signed checkpoint from the tile log.
    /// The checkpoint contains the current tree size and root hash.
    /// </summary>
    /// <param name="backend">Rekor backend configuration</param>
    /// <param name="cancellationToken">Cancellation token</param>
    /// <returns>The checkpoint response, or null if not available</returns>
    Task<RekorTileCheckpoint?> GetCheckpointAsync(
        RekorBackend backend,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Fetches a tile from the log.
    /// Tiles are fixed-size chunks of the Merkle tree.
    /// </summary>
    /// <param name="backend">Rekor backend configuration</param>
    /// <param name="level">The tree level (0 = leaves)</param>
    /// <param name="index">The tile index at this level</param>
    /// <param name="cancellationToken">Cancellation token</param>
    /// <returns>The tile data, or null if not found</returns>
    Task<RekorTileData?> GetTileAsync(
        RekorBackend backend,
        int level,
        long index,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Fetches an entry from the log by its index.
    /// </summary>
    /// <param name="backend">Rekor backend configuration</param>
    /// <param name="logIndex">The log index of the entry</param>
    /// <param name="cancellationToken">Cancellation token</param>
    /// <returns>The entry data, or null if not found</returns>
    Task<RekorTileEntry?> GetEntryAsync(
        RekorBackend backend,
        long logIndex,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Computes an inclusion proof for an entry using tile data.
    /// This fetches the necessary tiles and constructs the proof path,
    /// so it may issue multiple tile requests per call.
    /// </summary>
    /// <param name="backend">Rekor backend configuration</param>
    /// <param name="logIndex">The log index of the entry</param>
    /// <param name="treeSize">The tree size for the proof (from checkpoint)</param>
    /// <param name="cancellationToken">Cancellation token</param>
    /// <returns>The computed proof, or null if tiles are unavailable</returns>
    Task<RekorTileInclusionProof?> ComputeInclusionProofAsync(
        RekorBackend backend,
        long logIndex,
        long treeSize,
        CancellationToken cancellationToken = default);
}
/// <summary>
/// Checkpoint from a Rekor v2 tile-based log.
/// </summary>
public sealed class RekorTileCheckpoint
{
    /// <summary>
    /// The log origin identifier.
    /// </summary>
    public required string Origin { get; init; }

    /// <summary>
    /// Current tree size (number of entries).
    /// </summary>
    public required long TreeSize { get; init; }

    /// <summary>
    /// Root hash of the Merkle tree at this size.
    /// </summary>
    public required byte[] RootHash { get; init; }

    /// <summary>
    /// Raw checkpoint note text for signature verification.
    /// Kept verbatim because signatures cover the exact note bytes.
    /// </summary>
    public required string RawCheckpoint { get; init; }

    /// <summary>
    /// Signatures on the checkpoint (a checkpoint may carry more than one,
    /// e.g. in witnessed/co-signed configurations).
    /// </summary>
    public required IReadOnlyList<RekorCheckpointSignature> Signatures { get; init; }
}
/// <summary>
/// A signature on a Rekor checkpoint.
/// </summary>
public sealed class RekorCheckpointSignature
{
    /// <summary>
    /// Key ID or hint for the signing key, used to select the
    /// verification key before checking <see cref="Signature"/>.
    /// </summary>
    public required string KeyHint { get; init; }

    /// <summary>
    /// The signature bytes.
    /// </summary>
    public required byte[] Signature { get; init; }
}
/// <summary>
/// Data from a Merkle tree tile.
/// </summary>
public sealed class RekorTileData
{
    // Size in bytes of each hash stored in a tile (SHA-256 digest length).
    private const int HashSize = 32;

    /// <summary>
    /// The level in the tree (0 = leaf level).
    /// </summary>
    public required int Level { get; init; }

    /// <summary>
    /// The tile index at this level.
    /// </summary>
    public required long Index { get; init; }

    /// <summary>
    /// The tile width (number of entries in this tile, may be partial).
    /// </summary>
    public required int Width { get; init; }

    /// <summary>
    /// The hash data in this tile.
    /// Each hash is 32 bytes (SHA-256), so this buffer is expected to hold
    /// at least Width * 32 bytes.
    /// </summary>
    public required byte[] Hashes { get; init; }

    /// <summary>
    /// Gets the hash at the given position within the tile.
    /// </summary>
    /// <param name="position">Zero-based position within the tile; must be in [0, Width).</param>
    /// <returns>A fresh 32-byte array containing the hash.</returns>
    /// <exception cref="ArgumentOutOfRangeException">When position is outside [0, Width).</exception>
    /// <exception cref="InvalidOperationException">When the hash buffer is shorter than the requested slice (truncated tile data).</exception>
    public byte[] GetHash(int position)
    {
        if (position < 0 || position >= Width)
        {
            // Include the offending value and valid range in the diagnostics
            // (the previous exception carried no actual-value information).
            throw new ArgumentOutOfRangeException(
                nameof(position),
                position,
                $"Position must be in [0, {Width}).");
        }

        var offset = position * HashSize;
        if (Hashes.Length < offset + HashSize)
        {
            // Previously a truncated buffer surfaced as an opaque Array.Copy
            // ArgumentException; report corrupted/truncated tile data explicitly.
            throw new InvalidOperationException(
                $"Tile hash data is truncated: expected at least {offset + HashSize} bytes but found {Hashes.Length}.");
        }

        var result = new byte[HashSize];
        Array.Copy(Hashes, offset, result, 0, HashSize);
        return result;
    }
}
/// <summary>
/// An entry from a Rekor tile-based log.
/// </summary>
public sealed class RekorTileEntry
{
    /// <summary>
    /// The log index of this entry.
    /// </summary>
    public required long LogIndex { get; init; }

    /// <summary>
    /// The entry data (typically the leaf hash input).
    /// </summary>
    public required byte[] Data { get; init; }

    /// <summary>
    /// The integrated time when this entry was added.
    /// Null when the log does not report an integration timestamp for the entry.
    /// </summary>
    public DateTimeOffset? IntegratedTime { get; init; }
}
/// <summary>
/// An inclusion proof computed from tile data.
/// </summary>
public sealed class RekorTileInclusionProof
{
    /// <summary>
    /// The log index of the entry.
    /// </summary>
    public required long LogIndex { get; init; }

    /// <summary>
    /// The tree size for this proof.
    /// </summary>
    public required long TreeSize { get; init; }

    /// <summary>
    /// The leaf hash of the entry.
    /// </summary>
    public required byte[] LeafHash { get; init; }

    /// <summary>
    /// The proof path (sibling hashes from leaf to root); order is significant.
    /// </summary>
    public required IReadOnlyList<byte[]> Path { get; init; }

    /// <summary>
    /// The expected root hash for verification.
    /// </summary>
    public required byte[] RootHash { get; init; }
}

View File

@@ -2,15 +2,82 @@ using System;
namespace StellaOps.Attestor.Core.Rekor;
/// <summary>
/// Specifies the Rekor log version/format to use.
/// </summary>
public enum RekorLogVersion
{
    /// <summary>
    /// Automatically detect log version from server capabilities.
    /// </summary>
    Auto = 0,

    /// <summary>
    /// Rekor v1 with Trillian-backed Merkle tree.
    /// </summary>
    V1 = 1,

    /// <summary>
    /// Rekor v2 with tile-based (Sunlight) log structure.
    /// Provides cheaper operation and simpler verification.
    /// </summary>
    V2 = 2
}
/// <summary>
/// Configuration for a single Rekor transparency-log backend.
/// </summary>
public sealed class RekorBackend
{
    public required string Name { get; init; }

    public required Uri Url { get; init; }

    /// <summary>
    /// Log version to use. Default is Auto for backward compatibility.
    /// Set to V2 to explicitly opt into tile-based verification.
    /// </summary>
    public RekorLogVersion Version { get; init; } = RekorLogVersion.Auto;

    /// <summary>
    /// Base URL for tile fetching in Rekor v2.
    /// If not specified, tiles are fetched from {Url}/tile/.
    /// Only used when Version is V2 or Auto detects v2 capabilities.
    /// </summary>
    public Uri? TileBaseUrl { get; init; }

    /// <summary>
    /// Log ID (SHA-256 of the log's public key) for multi-log environments.
    /// Used to match entries to the correct log when verifying bundles.
    /// Production Rekor: c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d
    /// </summary>
    public string? LogId { get; init; }

    /// <summary>
    /// Whether to prefer tile-based proofs when available.
    /// When true and Version is Auto, will attempt tile fetching first.
    /// </summary>
    public bool PreferTileProofs { get; init; } = false;

    public TimeSpan ProofTimeout { get; init; } = TimeSpan.FromSeconds(15);

    public TimeSpan PollInterval { get; init; } = TimeSpan.FromMilliseconds(250);

    public int MaxAttempts { get; init; } = 60;

    /// <summary>
    /// Resolves the tile base URL: an explicitly configured <see cref="TileBaseUrl"/>
    /// wins; otherwise {Url}/tile/ is derived (trailing slash on Url is normalized away
    /// before appending, so no double slash can appear).
    /// </summary>
    public Uri GetEffectiveTileBaseUrl() =>
        TileBaseUrl ?? new Uri($"{Url.ToString().TrimEnd('/')}/tile/", UriKind.Absolute);

    /// <summary>
    /// Known log ID for the public Sigstore Rekor production instance.
    /// </summary>
    public const string SigstoreProductionLogId = "c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d";
}

View File

@@ -0,0 +1,429 @@
// -----------------------------------------------------------------------------
// RekorReceipt.cs
// Description: Standardized Rekor transparency log receipt per Sigstore conventions.
// Implements receipt schema standardization from SBOM-VEX-policy advisory.
// References: https://docs.sigstore.dev/logging/overview/, Rekor v2 GA
// -----------------------------------------------------------------------------
using System.Text.Json.Serialization;
namespace StellaOps.Attestor.Core.Rekor;
/// <summary>
/// Standardized Rekor transparency log receipt following Sigstore conventions.
/// </summary>
/// <remarks>
/// This receipt format aligns with:
/// - Rekor v2 GA specification (https://blog.sigstore.dev/rekor-v2-ga/)
/// - Sigstore bundle format (https://docs.sigstore.dev/bundle/)
/// - RFC 6962 certificate transparency log semantics
///
/// Design principles:
/// - All fields use deterministic JSON property names
/// - Timestamps use Unix seconds for interoperability
/// - Hashes use lowercase hex encoding
/// - Inclusion proofs follow RFC 6962 structure
/// </remarks>
public sealed record RekorReceipt
{
    /// <summary>
    /// Schema version for this receipt format.
    /// </summary>
    [JsonPropertyName("schemaVersion")]
    public string SchemaVersion { get; init; } = "1.0.0";

    /// <summary>
    /// Unique entry identifier (64-character hex string derived from entry hash).
    /// </summary>
    [JsonPropertyName("uuid")]
    public required string Uuid { get; init; }

    /// <summary>
    /// Log index (position in the log, monotonically increasing).
    /// </summary>
    [JsonPropertyName("logIndex")]
    public required long LogIndex { get; init; }

    /// <summary>
    /// Log ID identifying the specific Rekor instance/shard.
    /// </summary>
    [JsonPropertyName("logId")]
    public required string LogId { get; init; }

    /// <summary>
    /// Base URL of the Rekor log instance.
    /// </summary>
    [JsonPropertyName("logUrl")]
    public required string LogUrl { get; init; }

    /// <summary>
    /// Unix timestamp (seconds) when the entry was integrated into the log.
    /// </summary>
    [JsonPropertyName("integratedTime")]
    public required long IntegratedTime { get; init; }

    /// <summary>
    /// Entry kind (e.g., "intoto", "hashedrekord", "dsse").
    /// </summary>
    [JsonPropertyName("entryKind")]
    public required string EntryKind { get; init; }

    /// <summary>
    /// Entry API version within the kind.
    /// </summary>
    [JsonPropertyName("entryVersion")]
    public string EntryVersion { get; init; } = "0.0.2";

    /// <summary>
    /// SHA-256 hash of the canonicalized entry body (lowercase hex).
    /// </summary>
    [JsonPropertyName("entryBodyHash")]
    public required string EntryBodyHash { get; init; }

    /// <summary>
    /// Signed checkpoint (signed tree head) in note format.
    /// </summary>
    [JsonPropertyName("checkpoint")]
    public required RekorCheckpointV2 Checkpoint { get; init; }

    /// <summary>
    /// Inclusion proof demonstrating entry is in the log.
    /// </summary>
    [JsonPropertyName("inclusionProof")]
    public required RekorInclusionProofV2 InclusionProof { get; init; }

    /// <summary>
    /// Optional SET (Signed Entry Timestamp) for backward compatibility.
    /// </summary>
    [JsonPropertyName("signedEntryTimestamp")]
    public string? SignedEntryTimestamp { get; init; }

    /// <summary>
    /// Policy hash linking this receipt to a specific policy evaluation.
    /// </summary>
    [JsonPropertyName("policyHash")]
    public string? PolicyHash { get; init; }

    /// <summary>
    /// Graph revision ID for reachability context.
    /// </summary>
    [JsonPropertyName("graphRevision")]
    public string? GraphRevision { get; init; }

    /// <summary>
    /// Idempotency key used for submission (for deduplication tracking).
    /// </summary>
    [JsonPropertyName("idempotencyKey")]
    public string? IdempotencyKey { get; init; }

    // Computed properties (excluded from serialization via [JsonIgnore]).

    /// <summary>
    /// Gets the integrated time as a DateTimeOffset (UTC).
    /// </summary>
    [JsonIgnore]
    public DateTimeOffset IntegratedTimeUtc =>
        DateTimeOffset.FromUnixTimeSeconds(IntegratedTime);

    /// <summary>
    /// Gets the full entry URL for direct access.
    /// A trailing slash on <see cref="LogUrl"/> is normalized away before joining.
    /// Note: this builds the Rekor v1 entries API path ("/api/v1/log/entries/").
    /// </summary>
    [JsonIgnore]
    public string EntryUrl => $"{LogUrl.TrimEnd('/')}/api/v1/log/entries/{Uuid}";
}
/// <summary>
/// Rekor v2 checkpoint (signed tree head) following note format.
/// </summary>
/// <remarks>
/// Checkpoint format per Rekor v2 specification:
/// - Origin identifies the log
/// - Size is the tree size at checkpoint
/// - RootHash is the Merkle root
/// - Signature is over the checkpoint note body
/// </remarks>
public sealed record RekorCheckpointV2
{
    /// <summary>
    /// Origin line identifying the log (e.g., "rekor.sigstore.dev - 2605736670972794746").
    /// </summary>
    [JsonPropertyName("origin")]
    public required string Origin { get; init; }

    /// <summary>
    /// Tree size at time of checkpoint.
    /// </summary>
    [JsonPropertyName("size")]
    public required long Size { get; init; }

    /// <summary>
    /// Merkle tree root hash (lowercase hex).
    /// </summary>
    [JsonPropertyName("rootHash")]
    public required string RootHash { get; init; }

    /// <summary>
    /// Unix timestamp (seconds) of the checkpoint.
    /// </summary>
    [JsonPropertyName("timestamp")]
    public required long Timestamp { get; init; }

    /// <summary>
    /// Base64-encoded signature over the checkpoint note.
    /// </summary>
    [JsonPropertyName("signature")]
    public required string Signature { get; init; }

    /// <summary>
    /// Key ID or hint for signature verification.
    /// </summary>
    [JsonPropertyName("keyHint")]
    public string? KeyHint { get; init; }

    /// <summary>
    /// Raw note body for signature verification (base64-encoded).
    /// Preserved verbatim because the signature covers these exact bytes.
    /// </summary>
    [JsonPropertyName("noteBody")]
    public string? NoteBody { get; init; }

    /// <summary>
    /// Gets the timestamp as a DateTimeOffset (UTC). Not serialized.
    /// </summary>
    [JsonIgnore]
    public DateTimeOffset TimestampUtc =>
        DateTimeOffset.FromUnixTimeSeconds(Timestamp);
}
/// <summary>
/// Rekor v2 inclusion proof following RFC 6962.
/// </summary>
/// <remarks>
/// Inclusion proof structure:
/// - LeafHash is H(0x00 || entry)
/// - Hashes are the sibling nodes from leaf to root
/// - TreeSize and LogIndex define the proof context
/// </remarks>
public sealed record RekorInclusionProofV2
{
    /// <summary>
    /// Log index of the entry being proven.
    /// </summary>
    [JsonPropertyName("logIndex")]
    public required long LogIndex { get; init; }

    /// <summary>
    /// Tree size at time of proof generation.
    /// </summary>
    [JsonPropertyName("treeSize")]
    public required long TreeSize { get; init; }

    /// <summary>
    /// Root hash at time of proof (lowercase hex).
    /// </summary>
    [JsonPropertyName("rootHash")]
    public required string RootHash { get; init; }

    /// <summary>
    /// Leaf hash (SHA-256 of 0x00 || entry body, lowercase hex).
    /// </summary>
    [JsonPropertyName("leafHash")]
    public required string LeafHash { get; init; }

    /// <summary>
    /// Inclusion proof hashes from leaf to root (lowercase hex, ordered).
    /// Order is significant for recomputing the root.
    /// </summary>
    [JsonPropertyName("hashes")]
    public required IReadOnlyList<string> Hashes { get; init; }

    /// <summary>
    /// Checkpoint reference containing the signed tree head.
    /// </summary>
    [JsonPropertyName("checkpoint")]
    public string? CheckpointRef { get; init; }
}
/// <summary>
/// Result of verifying a Rekor receipt.
/// </summary>
public sealed record RekorReceiptVerificationResult
{
    /// <summary>
    /// Whether the receipt is valid.
    /// </summary>
    public required bool IsValid { get; init; }

    /// <summary>
    /// Whether the checkpoint signature verified.
    /// </summary>
    public required bool CheckpointSignatureValid { get; init; }

    /// <summary>
    /// Whether the inclusion proof verified against the root.
    /// </summary>
    public required bool InclusionProofValid { get; init; }

    /// <summary>
    /// Whether the entry hash matches the leaf.
    /// </summary>
    public required bool EntryHashValid { get; init; }

    /// <summary>
    /// Time skew in seconds (positive = receipt ahead of local clock).
    /// </summary>
    public double TimeSkewSeconds { get; init; }

    /// <summary>
    /// Whether time skew is within acceptable bounds.
    /// </summary>
    public required bool TimeSkewAcceptable { get; init; }

    /// <summary>
    /// Any verification errors encountered. Empty by default.
    /// </summary>
    public IReadOnlyList<string> Errors { get; init; } = [];

    /// <summary>
    /// Verification diagnostics for debugging. Empty by default.
    /// </summary>
    public IReadOnlyDictionary<string, string> Diagnostics { get; init; } =
        new Dictionary<string, string>();

    /// <summary>
    /// When the verification was performed (UTC).
    /// </summary>
    public required DateTimeOffset VerifiedAt { get; init; }

    /// <summary>
    /// Whether this was verified in offline mode.
    /// </summary>
    public bool OfflineVerification { get; init; }
}
/// <summary>
/// Options for Rekor receipt verification.
/// </summary>
public sealed record RekorReceiptVerificationOptions
{
    /// <summary>
    /// Maximum allowed clock skew in seconds (default: 300 = 5 minutes).
    /// </summary>
    public int MaxClockSkewSeconds { get; init; } = 300;

    /// <summary>
    /// Whether to allow offline verification using cached checkpoints.
    /// </summary>
    public bool AllowOfflineVerification { get; init; } = true;

    /// <summary>
    /// Path to offline checkpoint bundle for air-gapped verification.
    /// </summary>
    public string? OfflineCheckpointBundlePath { get; init; }

    /// <summary>
    /// Maximum checkpoint age in hours for offline verification (default: 24).
    /// </summary>
    public int MaxOfflineCheckpointAgeHours { get; init; } = 24;

    /// <summary>
    /// Whether to require checkpoint signature verification.
    /// </summary>
    public bool RequireCheckpointSignature { get; init; } = true;

    /// <summary>
    /// Trusted public keys for checkpoint verification (PEM or base64 DER).
    /// </summary>
    public IReadOnlyList<string> TrustedPublicKeys { get; init; } = [];

    /// <summary>
    /// Trusted log IDs (if empty, all known logs are trusted).
    /// </summary>
    public IReadOnlyList<string> TrustedLogIds { get; init; } = [];
}
/// <summary>
/// Service for verifying Rekor receipts.
/// </summary>
public interface IRekorReceiptVerifier
{
    /// <summary>
    /// Verifies a Rekor receipt.
    /// </summary>
    /// <param name="receipt">The receipt to verify.</param>
    /// <param name="options">Verification options; implementation defaults apply when null.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task<RekorReceiptVerificationResult> VerifyAsync(
        RekorReceipt receipt,
        RekorReceiptVerificationOptions? options = null,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Verifies the inclusion proof without network access.
    /// Synchronous by design: all inputs are supplied by the caller.
    /// </summary>
    /// <param name="receipt">The receipt whose inclusion proof is checked.</param>
    /// <param name="entryBody">The raw entry body to hash against the proof's leaf.</param>
    /// <param name="options">Verification options; implementation defaults apply when null.</param>
    RekorReceiptVerificationResult VerifyInclusionProofOffline(
        RekorReceipt receipt,
        byte[] entryBody,
        RekorReceiptVerificationOptions? options = null);
}
/// <summary>
/// Factory for creating Rekor receipts from submission responses.
/// </summary>
public static class RekorReceiptFactory
{
    /// <summary>
    /// Creates a standardized receipt from a submission response.
    /// </summary>
    /// <param name="response">Submission response; must carry uuid, index, integrated time, checkpoint and inclusion proof.</param>
    /// <param name="logId">Identifier of the Rekor log the entry was submitted to.</param>
    /// <param name="logUrl">Base URL of the Rekor log.</param>
    /// <param name="entryKind">Rekor entry kind for the submitted entry.</param>
    /// <param name="entryBodyHash">Hash of the canonical entry body.</param>
    /// <param name="policyHash">Optional policy hash to bind into the receipt.</param>
    /// <param name="graphRevision">Optional graph revision to bind into the receipt.</param>
    /// <param name="idempotencyKey">Optional idempotency key used for the submission.</param>
    /// <returns>A fully populated <see cref="RekorReceipt"/>.</returns>
    /// <exception cref="ArgumentException">Thrown when a required field is missing from the response.</exception>
    public static RekorReceipt FromSubmissionResponse(
        RekorSubmissionResponse response,
        string logId,
        string logUrl,
        string entryKind,
        string entryBodyHash,
        string? policyHash = null,
        string? graphRevision = null,
        string? idempotencyKey = null)
    {
        ArgumentNullException.ThrowIfNull(response);
        ArgumentException.ThrowIfNullOrEmpty(logId);
        ArgumentException.ThrowIfNullOrEmpty(logUrl);
        ArgumentException.ThrowIfNullOrEmpty(entryKind);
        ArgumentException.ThrowIfNullOrEmpty(entryBodyHash);
        // Validate all required response fields once up front, with a consistent
        // paramName on every ArgumentException, so later uses need no null-forgiving access.
        var checkpoint = response.Proof?.Checkpoint
            ?? throw new ArgumentException("Response must include checkpoint proof", nameof(response));
        var inclusion = response.Proof?.Inclusion
            ?? throw new ArgumentException("Response must include inclusion proof", nameof(response));
        var logIndex = response.Index
            ?? throw new ArgumentException("Response must include index", nameof(response));
        var integratedTime = response.IntegratedTime
            ?? throw new ArgumentException("Response must include integrated time", nameof(response));
        var rootHash = checkpoint.RootHash
            ?? throw new ArgumentException("Checkpoint must include root hash", nameof(response));
        var leafHash = inclusion.LeafHash
            ?? throw new ArgumentException("Inclusion proof must include leaf hash", nameof(response));
        return new RekorReceipt
        {
            Uuid = response.Uuid,
            LogIndex = logIndex,
            LogId = logId,
            LogUrl = logUrl,
            IntegratedTime = integratedTime,
            EntryKind = entryKind,
            EntryBodyHash = entryBodyHash,
            Checkpoint = new RekorCheckpointV2
            {
                Origin = checkpoint.Origin ?? logId,
                Size = checkpoint.Size,
                RootHash = rootHash,
                // Fall back to the entry's integrated time when the checkpoint carries no timestamp.
                Timestamp = checkpoint.Timestamp?.ToUnixTimeSeconds() ?? integratedTime,
                Signature = "" // Will be populated from actual response
            },
            InclusionProof = new RekorInclusionProofV2
            {
                LogIndex = logIndex,
                TreeSize = checkpoint.Size,
                RootHash = rootHash,
                LeafHash = leafHash,
                Hashes = inclusion.Path
            },
            PolicyHash = policyHash,
            GraphRevision = graphRevision,
            IdempotencyKey = idempotencyKey
        };
    }
}

View File

@@ -0,0 +1,46 @@
// -----------------------------------------------------------------------------
// TransparencyServiceExtensions.cs
// Description: DI extensions for transparency status services.
// -----------------------------------------------------------------------------
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
namespace StellaOps.Attestor.Core.Transparency;
/// <summary>
/// Extension methods for registering transparency services.
/// </summary>
public static class TransparencyServiceExtensions
{
    /// <summary>
    /// Adds transparency status services to the service collection.
    /// </summary>
    /// <param name="services">The service collection.</param>
    /// <param name="configure">Optional configuration action.</param>
    /// <returns>The service collection for chaining.</returns>
    public static IServiceCollection AddTransparencyStatus(
        this IServiceCollection services,
        Action<TransparencyStatusOptions>? configure = null)
    {
        ArgumentNullException.ThrowIfNull(services);

        // Bind options: apply the caller's delegate when supplied, otherwise
        // register the options type so it resolves with its defaults.
        if (configure is null)
        {
            services.AddOptions<TransparencyStatusOptions>();
        }
        else
        {
            services.Configure(configure);
        }

        // TryAdd* preserves any registrations the host has already made.
        services.TryAddSingleton<ITransparencyStatusProvider, TransparencyStatusProvider>();
        services.TryAddSingleton(TimeProvider.System);

        return services;
    }
}

View File

@@ -0,0 +1,425 @@
// -----------------------------------------------------------------------------
// TransparencyStatus.cs
// Description: Transparency log freshness status for health endpoints.
// Implements "last sync" freshness badge from SBOM-VEX-policy advisory.
// -----------------------------------------------------------------------------
using System.Text.Json.Serialization;
namespace StellaOps.Attestor.Core.Transparency;
/// <summary>
/// Transparency log freshness status for health endpoints and status bars.
/// </summary>
/// <remarks>
/// Implements the "last sync" freshness badge pattern:
/// - Shows when transparency log was last successfully synced
/// - Indicates whether operations are using verified or degraded mode
/// - Provides clear guidance for air-gapped environments
/// </remarks>
public sealed record TransparencyStatus
{
    /// <summary>
    /// Overall transparency status.
    /// </summary>
    [JsonPropertyName("status")]
    public required TransparencyStatusLevel Status { get; init; }
    /// <summary>
    /// Human-readable status message describing the current level.
    /// </summary>
    [JsonPropertyName("message")]
    public required string Message { get; init; }
    /// <summary>
    /// When the transparency log was last successfully synced (UTC); null if never synced.
    /// </summary>
    [JsonPropertyName("lastSyncAt")]
    public DateTimeOffset? LastSyncAt { get; init; }
    /// <summary>
    /// Age of the last sync in hours at the time this status was built; null if never synced.
    /// </summary>
    [JsonPropertyName("lastSyncAgeHours")]
    public double? LastSyncAgeHours { get; init; }
    /// <summary>
    /// When the checkpoint was last verified (UTC).
    /// </summary>
    [JsonPropertyName("lastCheckpointVerifiedAt")]
    public DateTimeOffset? LastCheckpointVerifiedAt { get; init; }
    /// <summary>
    /// Latest verified checkpoint tree size; null when no checkpoint has been recorded.
    /// </summary>
    [JsonPropertyName("lastCheckpointTreeSize")]
    public long? LastCheckpointTreeSize { get; init; }
    /// <summary>
    /// Whether the service is operating in offline/air-gapped mode.
    /// </summary>
    [JsonPropertyName("offlineMode")]
    public bool OfflineMode { get; init; }
    /// <summary>
    /// Whether transparency verification is enforced (vs. best-effort).
    /// </summary>
    [JsonPropertyName("enforcementEnabled")]
    public bool EnforcementEnabled { get; init; }
    /// <summary>
    /// Configured maximum checkpoint age before warning (hours).
    /// </summary>
    [JsonPropertyName("maxCheckpointAgeHours")]
    public int MaxCheckpointAgeHours { get; init; }
    /// <summary>
    /// Primary Rekor backend URL; null when no backend is configured (offline mode).
    /// </summary>
    [JsonPropertyName("rekorBackend")]
    public string? RekorBackend { get; init; }
    /// <summary>
    /// Mirror Rekor backend URL (for air-gapped or fallback).
    /// </summary>
    [JsonPropertyName("rekorMirror")]
    public string? RekorMirror { get; init; }
    /// <summary>
    /// Submission queue depth (pending entries awaiting transparency anchoring).
    /// </summary>
    [JsonPropertyName("submissionQueueDepth")]
    public int SubmissionQueueDepth { get; init; }
    /// <summary>
    /// Number of entries in dead-letter queue (failed submissions).
    /// </summary>
    [JsonPropertyName("deadLetterQueueDepth")]
    public int DeadLetterQueueDepth { get; init; }
    /// <summary>
    /// Metrics for recent operations; null when metrics were not collected.
    /// </summary>
    [JsonPropertyName("metrics")]
    public TransparencyMetrics? Metrics { get; init; }
    /// <summary>
    /// Health check details for each backend; empty when no backend was checked.
    /// </summary>
    [JsonPropertyName("backends")]
    public IReadOnlyList<TransparencyBackendStatus> Backends { get; init; } = [];
    /// <summary>
    /// Whether the status indicates healthy operation.
    /// Degraded still counts as healthy; Offline does not (NOTE(review): confirm this is intended).
    /// </summary>
    [JsonIgnore]
    public bool IsHealthy => Status is TransparencyStatusLevel.Healthy or TransparencyStatusLevel.Degraded;
    /// <summary>
    /// Whether the checkpoint is considered fresh (last sync age within the configured maximum).
    /// </summary>
    [JsonIgnore]
    public bool IsCheckpointFresh =>
        LastSyncAgeHours.HasValue && LastSyncAgeHours.Value <= MaxCheckpointAgeHours;
}
/// <summary>
/// Transparency status level for health indicators.
/// NOTE(review): serialized as a number by System.Text.Json unless a
/// JsonStringEnumConverter is registered — confirm API consumers expect that.
/// </summary>
public enum TransparencyStatusLevel
{
    /// <summary>
    /// All transparency backends are healthy and synced.
    /// </summary>
    Healthy,
    /// <summary>
    /// Operating with stale checkpoint or fallback backend.
    /// </summary>
    Degraded,
    /// <summary>
    /// Operating in offline mode with acceptable checkpoint age.
    /// </summary>
    Offline,
    /// <summary>
    /// Transparency verification is unavailable or severely degraded.
    /// </summary>
    Unhealthy,
    /// <summary>
    /// Transparency status is unknown (not yet initialized).
    /// </summary>
    Unknown
}
/// <summary>
/// Metrics for transparency operations over a rolling one-hour window.
/// </summary>
public sealed record TransparencyMetrics
{
    /// <summary>
    /// Total submissions in the last hour.
    /// </summary>
    [JsonPropertyName("submissionsLastHour")]
    public int SubmissionsLastHour { get; init; }
    /// <summary>
    /// Successful submissions in the last hour.
    /// </summary>
    [JsonPropertyName("successfulSubmissionsLastHour")]
    public int SuccessfulSubmissionsLastHour { get; init; }
    /// <summary>
    /// Failed submissions in the last hour.
    /// </summary>
    [JsonPropertyName("failedSubmissionsLastHour")]
    public int FailedSubmissionsLastHour { get; init; }
    /// <summary>
    /// Total verifications in the last hour.
    /// </summary>
    [JsonPropertyName("verificationsLastHour")]
    public int VerificationsLastHour { get; init; }
    /// <summary>
    /// Successful verifications in the last hour.
    /// </summary>
    [JsonPropertyName("successfulVerificationsLastHour")]
    public int SuccessfulVerificationsLastHour { get; init; }
    /// <summary>
    /// Average submission latency in milliseconds (successful submissions only; 0 when none).
    /// </summary>
    [JsonPropertyName("avgSubmissionLatencyMs")]
    public double AvgSubmissionLatencyMs { get; init; }
    /// <summary>
    /// P95 submission latency in milliseconds (successful submissions only; 0 when none).
    /// </summary>
    [JsonPropertyName("p95SubmissionLatencyMs")]
    public double P95SubmissionLatencyMs { get; init; }
    /// <summary>
    /// Offline verifications in the last hour.
    /// </summary>
    [JsonPropertyName("offlineVerificationsLastHour")]
    public int OfflineVerificationsLastHour { get; init; }
}
/// <summary>
/// Status of a single transparency backend.
/// </summary>
public sealed record TransparencyBackendStatus
{
    /// <summary>
    /// Backend identifier (e.g., "rekor.sigstore.dev", "rekor-mirror.internal").
    /// </summary>
    [JsonPropertyName("id")]
    public required string Id { get; init; }
    /// <summary>
    /// Backend URL.
    /// </summary>
    [JsonPropertyName("url")]
    public required string Url { get; init; }
    /// <summary>
    /// Whether this is the primary backend (vs. a mirror/fallback).
    /// </summary>
    [JsonPropertyName("primary")]
    public bool Primary { get; init; }
    /// <summary>
    /// Backend health status.
    /// </summary>
    [JsonPropertyName("status")]
    public required BackendHealthStatus Status { get; init; }
    /// <summary>
    /// When the backend was last checked; null if never checked.
    /// </summary>
    [JsonPropertyName("lastCheckedAt")]
    public DateTimeOffset? LastCheckedAt { get; init; }
    /// <summary>
    /// Latest response latency in milliseconds; null if never measured.
    /// </summary>
    [JsonPropertyName("latencyMs")]
    public double? LatencyMs { get; init; }
    /// <summary>
    /// Error message if unhealthy; null otherwise.
    /// </summary>
    [JsonPropertyName("error")]
    public string? Error { get; init; }
    /// <summary>
    /// Latest checkpoint tree size from this backend.
    /// </summary>
    [JsonPropertyName("treeSize")]
    public long? TreeSize { get; init; }
}
/// <summary>
/// Health status of a backend.
/// </summary>
public enum BackendHealthStatus
{
    /// <summary>
    /// Backend is healthy and responding within the latency threshold.
    /// </summary>
    Healthy,
    /// <summary>
    /// Backend is responding, but slower than the configured latency threshold.
    /// </summary>
    Slow,
    /// <summary>
    /// Backend is unreachable or erroring.
    /// </summary>
    Unhealthy,
    /// <summary>
    /// Backend status is unknown (not yet checked, or no health checker available).
    /// </summary>
    Unknown
}
/// <summary>
/// Service for retrieving transparency status.
/// </summary>
public interface ITransparencyStatusProvider
{
    /// <summary>
    /// Gets the current transparency status (may refresh backend health if stale).
    /// </summary>
    Task<TransparencyStatus> GetStatusAsync(CancellationToken cancellationToken = default);
    /// <summary>
    /// Forces a refresh of the transparency status (e.g., recheck backends).
    /// </summary>
    Task<TransparencyStatus> RefreshAsync(CancellationToken cancellationToken = default);
    /// <summary>
    /// Records a successful submission for metrics.
    /// </summary>
    /// <param name="success">Whether the submission succeeded.</param>
    /// <param name="latency">Observed end-to-end submission latency.</param>
    void RecordSubmission(bool success, TimeSpan latency);
    /// <summary>
    /// Records a verification attempt for metrics.
    /// </summary>
    /// <param name="success">Whether the verification succeeded.</param>
    /// <param name="offline">Whether the verification ran without network access.</param>
    void RecordVerification(bool success, bool offline);
    /// <summary>
    /// Updates the last sync timestamp.
    /// </summary>
    /// <param name="syncTime">When the sync completed.</param>
    /// <param name="treeSize">Tree size of the synced checkpoint.</param>
    void RecordSync(DateTimeOffset syncTime, long treeSize);
}
/// <summary>
/// Configuration for transparency status provider.
/// </summary>
public sealed record TransparencyStatusOptions
{
    /// <summary>
    /// Maximum checkpoint age in hours before status becomes degraded (default: 24).
    /// </summary>
    public int MaxCheckpointAgeHours { get; init; } = 24;
    /// <summary>
    /// Maximum checkpoint age in hours before status becomes unhealthy (default: 72).
    /// </summary>
    public int CriticalCheckpointAgeHours { get; init; } = 72;
    /// <summary>
    /// Backend health check interval in seconds (default: 60).
    /// </summary>
    public int HealthCheckIntervalSeconds { get; init; } = 60;
    /// <summary>
    /// Backend timeout in seconds (default: 10).
    /// </summary>
    public int BackendTimeoutSeconds { get; init; } = 10;
    /// <summary>
    /// Latency threshold for "slow" status in milliseconds (default: 2000).
    /// </summary>
    public int SlowLatencyThresholdMs { get; init; } = 2000;
    /// <summary>
    /// Whether to enable enforcement mode (fail operations without transparency). Default: false.
    /// </summary>
    public bool EnforcementEnabled { get; init; } = false;
    /// <summary>
    /// Primary Rekor backend URL. When null/empty the provider reports offline mode.
    /// </summary>
    public string? RekorBackendUrl { get; init; }
    /// <summary>
    /// Mirror Rekor backend URL.
    /// </summary>
    public string? RekorMirrorUrl { get; init; }
}
/// <summary>
/// Interface for checking transparency backend health.
/// Implemented in infrastructure layer with HTTP client support.
/// </summary>
public interface ITransparencyBackendHealthChecker
{
    /// <summary>
    /// Checks the health of a transparency backend.
    /// </summary>
    /// <param name="url">The backend URL to check.</param>
    /// <param name="timeoutSeconds">Timeout in seconds.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Health check result with latency and, when unhealthy, an error message.</returns>
    Task<BackendHealthCheckResult> CheckHealthAsync(
        string url,
        int timeoutSeconds,
        CancellationToken cancellationToken = default);
}
/// <summary>
/// Result of a backend health check.
/// </summary>
public sealed record BackendHealthCheckResult
{
    /// <summary>
    /// Whether the backend is healthy.
    /// </summary>
    public required bool IsHealthy { get; init; }
    /// <summary>
    /// Response latency in milliseconds.
    /// </summary>
    public required double LatencyMs { get; init; }
    /// <summary>
    /// Error message if unhealthy; null for healthy results.
    /// </summary>
    public string? Error { get; init; }
    /// <summary>
    /// Creates a healthy result with the observed latency.
    /// </summary>
    public static BackendHealthCheckResult Healthy(double latencyMs)
    {
        return new BackendHealthCheckResult
        {
            IsHealthy = true,
            LatencyMs = latencyMs
        };
    }
    /// <summary>
    /// Creates an unhealthy result carrying the failure reason.
    /// </summary>
    public static BackendHealthCheckResult Unhealthy(string error, double latencyMs = 0)
    {
        return new BackendHealthCheckResult
        {
            IsHealthy = false,
            LatencyMs = latencyMs,
            Error = error
        };
    }
}

View File

@@ -0,0 +1,347 @@
// -----------------------------------------------------------------------------
// TransparencyStatusProvider.cs
// Description: Default implementation of transparency status provider.
// Tracks sync times, metrics, and backend health for freshness indicators.
// -----------------------------------------------------------------------------
using System.Collections.Concurrent;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
namespace StellaOps.Attestor.Core.Transparency;
/// <summary>
/// Default implementation of <see cref="ITransparencyStatusProvider"/>.
/// Tracks last-sync state, rolling one-hour metrics, and backend health to
/// build the "last sync" freshness status surfaced by health endpoints.
/// </summary>
public sealed class TransparencyStatusProvider : ITransparencyStatusProvider, IDisposable
{
    private readonly ILogger<TransparencyStatusProvider> _logger;
    private readonly TransparencyStatusOptions _options;
    private readonly TimeProvider _timeProvider;
    private readonly ITransparencyBackendHealthChecker? _healthChecker;
    // Guards sync/checkpoint state and _lastHealthCheck. DateTimeOffset is a
    // multi-word struct, so unsynchronized reads could observe a torn value.
    private readonly object _lock = new();
    private DateTimeOffset? _lastSyncAt;
    private long _lastTreeSize;
    private DateTimeOffset? _lastCheckpointVerifiedAt;
    private DateTimeOffset _lastHealthCheck = DateTimeOffset.MinValue;
    // Metrics tracking (thread-safe queues, pruned to a two-hour window).
    private readonly ConcurrentQueue<MetricEntry> _submissionMetrics = new();
    private readonly ConcurrentQueue<MetricEntry> _verificationMetrics = new();
    // Backend status cache keyed by backend id ("primary" / "mirror").
    private readonly ConcurrentDictionary<string, TransparencyBackendStatus> _backendStatuses = new();
    // Queue depth tracking (written via Interlocked, read via Volatile).
    private int _submissionQueueDepth;
    private int _deadLetterQueueDepth;

    /// <summary>
    /// Creates a new provider.
    /// </summary>
    /// <param name="logger">Logger for diagnostics.</param>
    /// <param name="options">Status thresholds and backend configuration.</param>
    /// <param name="timeProvider">Clock abstraction (injectable for tests).</param>
    /// <param name="healthChecker">Optional backend health checker; when null, backend status stays Unknown.</param>
    public TransparencyStatusProvider(
        ILogger<TransparencyStatusProvider> logger,
        IOptions<TransparencyStatusOptions> options,
        TimeProvider timeProvider,
        ITransparencyBackendHealthChecker? healthChecker = null)
    {
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
        _options = options?.Value ?? throw new ArgumentNullException(nameof(options));
        _timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
        _healthChecker = healthChecker;
    }

    /// <inheritdoc />
    public async Task<TransparencyStatus> GetStatusAsync(CancellationToken cancellationToken = default)
    {
        var now = _timeProvider.GetUtcNow();
        // Decide under the lock whether the backend health cache is stale;
        // the unsynchronized read in the original could tear on 32-bit targets.
        bool refreshDue;
        lock (_lock)
        {
            refreshDue = now - _lastHealthCheck > TimeSpan.FromSeconds(_options.HealthCheckIntervalSeconds);
        }
        if (refreshDue)
        {
            await RefreshBackendHealthAsync(cancellationToken);
        }
        return BuildStatus(now);
    }

    /// <inheritdoc />
    public async Task<TransparencyStatus> RefreshAsync(CancellationToken cancellationToken = default)
    {
        await RefreshBackendHealthAsync(cancellationToken);
        return BuildStatus(_timeProvider.GetUtcNow());
    }

    /// <inheritdoc />
    public void RecordSubmission(bool success, TimeSpan latency)
    {
        var entry = new MetricEntry(
            _timeProvider.GetUtcNow(),
            success,
            false,
            latency.TotalMilliseconds);
        _submissionMetrics.Enqueue(entry);
        PruneOldMetrics(_submissionMetrics);
        _logger.LogDebug(
            "Recorded transparency submission: success={Success}, latency={LatencyMs}ms",
            success,
            latency.TotalMilliseconds);
    }

    /// <inheritdoc />
    public void RecordVerification(bool success, bool offline)
    {
        var entry = new MetricEntry(
            _timeProvider.GetUtcNow(),
            success,
            offline,
            0);
        _verificationMetrics.Enqueue(entry);
        PruneOldMetrics(_verificationMetrics);
        _logger.LogDebug(
            "Recorded transparency verification: success={Success}, offline={Offline}",
            success,
            offline);
    }

    /// <inheritdoc />
    public void RecordSync(DateTimeOffset syncTime, long treeSize)
    {
        lock (_lock)
        {
            _lastSyncAt = syncTime;
            _lastTreeSize = treeSize;
            _lastCheckpointVerifiedAt = _timeProvider.GetUtcNow();
        }
        _logger.LogInformation(
            "Recorded transparency sync: time={SyncTime}, treeSize={TreeSize}",
            syncTime,
            treeSize);
    }

    /// <summary>
    /// Updates the queue depths for status reporting.
    /// </summary>
    public void UpdateQueueDepths(int submissionQueue, int deadLetterQueue)
    {
        Interlocked.Exchange(ref _submissionQueueDepth, submissionQueue);
        Interlocked.Exchange(ref _deadLetterQueueDepth, deadLetterQueue);
    }

    // Assembles an immutable status snapshot from the currently tracked state.
    private TransparencyStatus BuildStatus(DateTimeOffset now)
    {
        double? lastSyncAgeHours = null;
        DateTimeOffset? lastSync;
        long lastTreeSize;
        DateTimeOffset? lastCheckpointVerified;
        lock (_lock)
        {
            lastSync = _lastSyncAt;
            lastTreeSize = _lastTreeSize;
            lastCheckpointVerified = _lastCheckpointVerifiedAt;
            if (_lastSyncAt.HasValue)
            {
                lastSyncAgeHours = (now - _lastSyncAt.Value).TotalHours;
            }
        }
        var (status, message) = DetermineStatus(lastSyncAgeHours);
        var metrics = CalculateMetrics(now);
        var backends = _backendStatuses.Values.ToList();
        return new TransparencyStatus
        {
            Status = status,
            Message = message,
            LastSyncAt = lastSync,
            LastSyncAgeHours = lastSyncAgeHours,
            LastCheckpointVerifiedAt = lastCheckpointVerified,
            LastCheckpointTreeSize = lastTreeSize > 0 ? lastTreeSize : null,
            OfflineMode = string.IsNullOrEmpty(_options.RekorBackendUrl),
            EnforcementEnabled = _options.EnforcementEnabled,
            MaxCheckpointAgeHours = _options.MaxCheckpointAgeHours,
            RekorBackend = _options.RekorBackendUrl,
            RekorMirror = _options.RekorMirrorUrl,
            // Match the Interlocked writers in UpdateQueueDepths with volatile reads.
            SubmissionQueueDepth = Volatile.Read(ref _submissionQueueDepth),
            DeadLetterQueueDepth = Volatile.Read(ref _deadLetterQueueDepth),
            Metrics = metrics,
            Backends = backends
        };
    }

    // Maps last-sync age and configuration onto a status level plus message.
    private (TransparencyStatusLevel, string) DetermineStatus(double? lastSyncAgeHours)
    {
        // No backend configured - offline mode
        if (string.IsNullOrEmpty(_options.RekorBackendUrl))
        {
            if (lastSyncAgeHours is null)
            {
                return (TransparencyStatusLevel.Offline, "Operating in offline mode - no checkpoint synced");
            }
            if (lastSyncAgeHours <= _options.MaxCheckpointAgeHours)
            {
                return (TransparencyStatusLevel.Offline, $"Operating in offline mode - checkpoint is {lastSyncAgeHours:F1}h old");
            }
            return (TransparencyStatusLevel.Unhealthy, $"Offline mode with stale checkpoint ({lastSyncAgeHours:F1}h old)");
        }
        // No sync ever
        if (lastSyncAgeHours is null)
        {
            return (TransparencyStatusLevel.Unknown, "Transparency log never synced");
        }
        // Fresh checkpoint
        if (lastSyncAgeHours <= _options.MaxCheckpointAgeHours)
        {
            return (TransparencyStatusLevel.Healthy, $"Transparency log synced {lastSyncAgeHours:F1}h ago");
        }
        // Stale but acceptable
        if (lastSyncAgeHours <= _options.CriticalCheckpointAgeHours)
        {
            return (TransparencyStatusLevel.Degraded, $"Transparency log checkpoint is stale ({lastSyncAgeHours:F1}h old)");
        }
        // Critical staleness
        return (TransparencyStatusLevel.Unhealthy, $"Transparency log checkpoint is critically stale ({lastSyncAgeHours:F1}h old)");
    }

    // Aggregates the rolling one-hour metrics window from the recorded entries.
    private TransparencyMetrics CalculateMetrics(DateTimeOffset now)
    {
        var oneHourAgo = now.AddHours(-1);
        var recentSubmissions = _submissionMetrics
            .Where(m => m.Timestamp >= oneHourAgo)
            .ToList();
        var recentVerifications = _verificationMetrics
            .Where(m => m.Timestamp >= oneHourAgo)
            .ToList();
        // Latency statistics consider successful submissions only.
        var successfulSubmissions = recentSubmissions.Where(m => m.Success).ToList();
        var latencies = successfulSubmissions.Select(m => m.LatencyMs).OrderBy(l => l).ToList();
        return new TransparencyMetrics
        {
            SubmissionsLastHour = recentSubmissions.Count,
            SuccessfulSubmissionsLastHour = successfulSubmissions.Count,
            FailedSubmissionsLastHour = recentSubmissions.Count - successfulSubmissions.Count,
            VerificationsLastHour = recentVerifications.Count,
            SuccessfulVerificationsLastHour = recentVerifications.Count(m => m.Success),
            AvgSubmissionLatencyMs = latencies.Count > 0 ? latencies.Average() : 0,
            P95SubmissionLatencyMs = latencies.Count > 0 ? Percentile(latencies, 95) : 0,
            OfflineVerificationsLastHour = recentVerifications.Count(m => m.Offline)
        };
    }

    // Checks configured backends in parallel and refreshes the status cache.
    private async Task RefreshBackendHealthAsync(CancellationToken cancellationToken)
    {
        // Stamp the check time under the lock (see _lastHealthCheck comment).
        lock (_lock)
        {
            _lastHealthCheck = _timeProvider.GetUtcNow();
        }
        var tasks = new List<Task>();
        if (!string.IsNullOrEmpty(_options.RekorBackendUrl))
        {
            tasks.Add(CheckBackendHealthAsync("primary", _options.RekorBackendUrl, true, cancellationToken));
        }
        if (!string.IsNullOrEmpty(_options.RekorMirrorUrl))
        {
            tasks.Add(CheckBackendHealthAsync("mirror", _options.RekorMirrorUrl, false, cancellationToken));
        }
        if (tasks.Count > 0)
        {
            await Task.WhenAll(tasks);
        }
    }

    // Probes a single backend and stores its status; never throws.
    private async Task CheckBackendHealthAsync(
        string id,
        string url,
        bool primary,
        CancellationToken cancellationToken)
    {
        var status = new TransparencyBackendStatus
        {
            Id = id,
            Url = url,
            Primary = primary,
            Status = BackendHealthStatus.Unknown,
            LastCheckedAt = _timeProvider.GetUtcNow()
        };
        if (_healthChecker is null)
        {
            // No checker wired in: record an Unknown status so the backend still appears.
            _backendStatuses[id] = status;
            return;
        }
        try
        {
            var result = await _healthChecker.CheckHealthAsync(
                url,
                _options.BackendTimeoutSeconds,
                cancellationToken);
            var healthStatus = result.IsHealthy
                ? (result.LatencyMs > _options.SlowLatencyThresholdMs ? BackendHealthStatus.Slow : BackendHealthStatus.Healthy)
                : BackendHealthStatus.Unhealthy;
            status = status with
            {
                Status = healthStatus,
                LatencyMs = result.LatencyMs,
                Error = result.Error
            };
        }
        catch (Exception ex)
        {
            _logger.LogWarning(ex, "Failed to check transparency backend health: {Url}", url);
            status = status with
            {
                Status = BackendHealthStatus.Unhealthy,
                Error = ex.Message
            };
        }
        _backendStatuses[id] = status;
    }

    // Drops entries older than two hours (window is 1h; the slack avoids
    // pruning entries that BuildStatus may still be filtering on).
    private void PruneOldMetrics(ConcurrentQueue<MetricEntry> queue)
    {
        var cutoff = _timeProvider.GetUtcNow().AddHours(-2);
        while (queue.TryPeek(out var entry) && entry.Timestamp < cutoff)
        {
            queue.TryDequeue(out _);
        }
    }

    // Nearest-rank percentile over an already-sorted list; 0 for empty input.
    private static double Percentile(List<double> values, int percentile)
    {
        if (values.Count == 0) return 0;
        var index = (int)Math.Ceiling(percentile / 100.0 * values.Count) - 1;
        return values[Math.Max(0, Math.Min(index, values.Count - 1))];
    }

    public void Dispose()
    {
        // No unmanaged resources to dispose; kept for interface compatibility.
    }

    // One recorded submission or verification sample.
    private sealed record MetricEntry(
        DateTimeOffset Timestamp,
        bool Success,
        bool Offline,
        double LatencyMs);
}

View File

@@ -0,0 +1,469 @@
using System;
using System.Collections.Generic;
using System.Globalization;
using System.Net;
using System.Net.Http;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
using StellaOps.Attestor.Core.Rekor;
namespace StellaOps.Attestor.Infrastructure.Rekor;
/// <summary>
/// HTTP client for fetching proofs from Rekor v2 tile-based logs.
/// Implements the Sunlight/C2SP tlog-tiles specification.
/// </summary>
internal sealed class HttpRekorTileClient : IRekorTileClient
{
    private const int TileHeight = 8; // Standard tile height (2^8 = 256 entries per tile)
    private const int TileWidth = 1 << TileHeight; // 256 entries per full tile
    private const int HashSize = 32; // SHA-256 digest length in bytes
    private readonly HttpClient _httpClient;
    private readonly ILogger<HttpRekorTileClient> _logger;
public HttpRekorTileClient(HttpClient httpClient, ILogger<HttpRekorTileClient> logger)
{
_httpClient = httpClient;
_logger = logger;
}
/// <inheritdoc />
public async Task<RekorTileCheckpoint?> GetCheckpointAsync(
RekorBackend backend,
CancellationToken cancellationToken = default)
{
ArgumentNullException.ThrowIfNull(backend);
var checkpointUrl = new Uri(backend.GetEffectiveTileBaseUrl(), "../checkpoint");
_logger.LogDebug("Fetching checkpoint from {Url}", checkpointUrl);
try
{
using var request = new HttpRequestMessage(HttpMethod.Get, checkpointUrl);
using var response = await _httpClient.SendAsync(request, cancellationToken).ConfigureAwait(false);
if (response.StatusCode == HttpStatusCode.NotFound)
{
_logger.LogDebug("Checkpoint not found at {Url}", checkpointUrl);
return null;
}
response.EnsureSuccessStatusCode();
var content = await response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false);
return ParseCheckpoint(content);
}
catch (HttpRequestException ex)
{
_logger.LogWarning(ex, "Failed to fetch checkpoint from {Url}", checkpointUrl);
return null;
}
}
/// <inheritdoc />
public async Task<RekorTileData?> GetTileAsync(
RekorBackend backend,
int level,
long index,
CancellationToken cancellationToken = default)
{
ArgumentNullException.ThrowIfNull(backend);
// Tile path format: tile/{level}/{index...} where index is split into directories
var tilePath = FormatTilePath(level, index);
var tileUrl = new Uri(backend.GetEffectiveTileBaseUrl(), tilePath);
_logger.LogDebug("Fetching tile at level {Level} index {Index} from {Url}", level, index, tileUrl);
try
{
using var request = new HttpRequestMessage(HttpMethod.Get, tileUrl);
using var response = await _httpClient.SendAsync(request, cancellationToken).ConfigureAwait(false);
if (response.StatusCode == HttpStatusCode.NotFound)
{
_logger.LogDebug("Tile not found at {Url}", tileUrl);
return null;
}
response.EnsureSuccessStatusCode();
var data = await response.Content.ReadAsByteArrayAsync(cancellationToken).ConfigureAwait(false);
var width = data.Length / HashSize;
return new RekorTileData
{
Level = level,
Index = index,
Width = width,
Hashes = data
};
}
catch (HttpRequestException ex)
{
_logger.LogWarning(ex, "Failed to fetch tile from {Url}", tileUrl);
return null;
}
}
/// <inheritdoc />
public async Task<RekorTileEntry?> GetEntryAsync(
RekorBackend backend,
long logIndex,
CancellationToken cancellationToken = default)
{
ArgumentNullException.ThrowIfNull(backend);
// Entry path format: tile/entries/{index...}
var entryPath = FormatEntryPath(logIndex);
var entryUrl = new Uri(backend.GetEffectiveTileBaseUrl(), entryPath);
_logger.LogDebug("Fetching entry at index {Index} from {Url}", logIndex, entryUrl);
try
{
using var request = new HttpRequestMessage(HttpMethod.Get, entryUrl);
using var response = await _httpClient.SendAsync(request, cancellationToken).ConfigureAwait(false);
if (response.StatusCode == HttpStatusCode.NotFound)
{
_logger.LogDebug("Entry not found at {Url}", entryUrl);
return null;
}
response.EnsureSuccessStatusCode();
var data = await response.Content.ReadAsByteArrayAsync(cancellationToken).ConfigureAwait(false);
return new RekorTileEntry
{
LogIndex = logIndex,
Data = data,
IntegratedTime = null // Would need to parse from entry format
};
}
catch (HttpRequestException ex)
{
_logger.LogWarning(ex, "Failed to fetch entry from {Url}", entryUrl);
return null;
}
}
    /// <inheritdoc />
    /// <remarks>
    /// Fetches the leaf tile to obtain the leaf hash, collects sibling hashes
    /// level by level via <c>ComputeProofPathAsync</c>, and recomputes the root
    /// from that path. Returns null (never throws) when the index is out of
    /// range, a required tile is unavailable, or any error occurs.
    /// </remarks>
    public async Task<RekorTileInclusionProof?> ComputeInclusionProofAsync(
        RekorBackend backend,
        long logIndex,
        long treeSize,
        CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(backend);
        if (logIndex < 0 || logIndex >= treeSize)
        {
            _logger.LogWarning("Invalid log index {Index} for tree size {Size}", logIndex, treeSize);
            return null;
        }
        _logger.LogDebug("Computing inclusion proof for index {Index} in tree of size {Size}", logIndex, treeSize);
        try
        {
            // Fetch the leaf tile to get the leaf hash (level 0 holds leaf hashes).
            var leafTileIndex = logIndex / TileWidth;
            var leafTile = await GetTileAsync(backend, 0, leafTileIndex, cancellationToken).ConfigureAwait(false);
            if (leafTile is null)
            {
                _logger.LogWarning("Failed to fetch leaf tile for index {Index}", logIndex);
                return null;
            }
            var positionInTile = (int)(logIndex % TileWidth);
            if (positionInTile >= leafTile.Width)
            {
                // The fetched (possibly partial) tile does not contain this position.
                _logger.LogWarning("Position {Position} exceeds tile width {Width}", positionInTile, leafTile.Width);
                return null;
            }
            var leafHash = leafTile.GetHash(positionInTile);
            // Compute the proof path by fetching required tiles
            var path = await ComputeProofPathAsync(backend, logIndex, treeSize, cancellationToken).ConfigureAwait(false);
            if (path is null)
            {
                return null;
            }
            // Compute expected root hash from path
            var rootHash = ComputeRootFromPath(leafHash, logIndex, treeSize, path);
            return new RekorTileInclusionProof
            {
                LogIndex = logIndex,
                TreeSize = treeSize,
                LeafHash = leafHash,
                Path = path,
                RootHash = rootHash
            };
        }
        catch (Exception ex)
        {
            // Broad catch: any failure downgrades to "proof unavailable" rather than surfacing.
            _logger.LogWarning(ex, "Failed to compute inclusion proof for index {Index}", logIndex);
            return null;
        }
    }
    /// <summary>
    /// Collects the sibling hashes needed to recompute the root for
    /// <paramref name="logIndex"/> by walking the tree level by level,
    /// fetching one tile per sibling. Returns null when a required sibling
    /// cannot be obtained (e.g. partial tiles in a non-power-of-two tree).
    /// </summary>
    /// <remarks>
    /// NOTE(review): the level-by-level walk with size = (size + 1) / 2 assumes a
    /// "promote last node" tree shape; RFC 6962/9162 Merkle trees split at the
    /// largest power of two instead, which differs for non-power-of-two sizes.
    /// Confirm against the C2SP tlog-tiles tile layout before relying on this
    /// for partial trees. It is at least internally consistent with
    /// ComputeRootFromPath, which uses the same scheme.
    /// </remarks>
    private async Task<IReadOnlyList<byte[]>?> ComputeProofPathAsync(
        RekorBackend backend,
        long logIndex,
        long treeSize,
        CancellationToken cancellationToken)
    {
        var path = new List<byte[]>();
        var index = logIndex;
        var size = treeSize;
        var level = 0;
        while (size > 1)
        {
            var siblingIndex = index ^ 1; // XOR to get sibling
            var tileIndex = siblingIndex / TileWidth;
            var positionInTile = (int)(siblingIndex % TileWidth);
            // Only add sibling if it exists in the tree
            if (siblingIndex < size)
            {
                var tile = await GetTileAsync(backend, level, tileIndex, cancellationToken).ConfigureAwait(false);
                if (tile is null || positionInTile >= tile.Width)
                {
                    // For partial trees, compute ephemeral hash if needed
                    _logger.LogDebug("Sibling at level {Level} index {Index} not in tile, tree may be partial", level, siblingIndex);
                    // For now, return null if we can't get the sibling
                    // A full implementation would handle partial tiles
                    return null;
                }
                path.Add(tile.GetHash(positionInTile));
            }
            index /= 2;
            size = (size + 1) / 2;
            level++;
        }
        return path;
    }
    /// <summary>
    /// Recomputes the root hash by folding the leaf hash with the sibling
    /// hashes in <paramref name="path"/>, choosing left/right order from the
    /// node's parity at each level. A rightmost node without a sibling is
    /// promoted unchanged to the next level.
    /// </summary>
    /// <remarks>
    /// NOTE(review): uses the same promote-last, level-by-level scheme as
    /// ComputeProofPathAsync; verify it matches RFC 6962/9162 tree hashing
    /// for non-power-of-two tree sizes.
    /// </remarks>
    private static byte[] ComputeRootFromPath(byte[] leafHash, long logIndex, long treeSize, IReadOnlyList<byte[]> path)
    {
        var current = leafHash;
        var index = logIndex;
        var size = treeSize;
        var pathIndex = 0;
        while (size > 1 && pathIndex < path.Count)
        {
            var siblingIndex = index ^ 1;
            if (siblingIndex < size)
            {
                var sibling = path[pathIndex++];
                // Hash order depends on position: even index is the left child.
                current = (index & 1) == 0
                    ? HashPair(current, sibling)
                    : HashPair(sibling, current);
            }
            index /= 2;
            size = (size + 1) / 2;
        }
        return current;
    }
private static byte[] HashPair(byte[] left, byte[] right)
{
using var sha256 = System.Security.Cryptography.SHA256.Create();
// RFC 6962: H(0x01 || left || right)
var input = new byte[1 + left.Length + right.Length];
input[0] = 0x01;
Array.Copy(left, 0, input, 1, left.Length);
Array.Copy(right, 0, input, 1 + left.Length, right.Length);
return sha256.ComputeHash(input);
}
/// <summary>
/// Parses a checkpoint in the Go signed-note format:
///   line 0: origin, line 1: tree size, line 2: base64 root hash,
///   optional extension lines, a blank separator, then signature lines.
/// Signature lines are "\u2014 &lt;key_name&gt; &lt;base64(key_hint || sig)&gt;" per the
/// C2SP signed-note spec; a bare "&lt;key_hint&gt; &lt;base64_sig&gt;" form is also accepted.
/// </summary>
/// <returns>The parsed checkpoint, or <c>null</c> when the content is malformed.</returns>
private RekorTileCheckpoint? ParseCheckpoint(string content)
{
    var lines = content.Split('\n', StringSplitOptions.None);
    if (lines.Length < 4)
    {
        _logger.LogWarning("Checkpoint has too few lines: {Count}", lines.Length);
        return null;
    }

    var origin = lines[0];

    if (!long.TryParse(lines[1], NumberStyles.None, CultureInfo.InvariantCulture, out var treeSize))
    {
        _logger.LogWarning("Invalid tree size in checkpoint: {Line}", lines[1]);
        return null;
    }

    byte[] rootHash;
    try
    {
        rootHash = Convert.FromBase64String(lines[2]);
    }
    catch (FormatException)
    {
        _logger.LogWarning("Invalid root hash base64 in checkpoint: {Line}", lines[2]);
        return null;
    }

    // The blank line after the checkpoint body separates it from the signatures.
    var signatureStartIndex = -1;
    for (var i = 3; i < lines.Length; i++)
    {
        if (string.IsNullOrWhiteSpace(lines[i]))
        {
            signatureStartIndex = i + 1;
            break;
        }
    }

    var signatures = new List<RekorCheckpointSignature>();
    if (signatureStartIndex > 0)
    {
        for (var i = signatureStartIndex; i < lines.Length; i++)
        {
            var sigLine = lines[i];
            if (string.IsNullOrWhiteSpace(sigLine))
            {
                continue;
            }

            // Go's note package emits "\u2014 <name> <base64>"; strip the
            // em-dash marker before splitting, otherwise the base64 field would
            // contain "<name> <base64>" and always fail to decode.
            var payload = sigLine.StartsWith("\u2014 ", StringComparison.Ordinal)
                ? sigLine[2..]
                : sigLine;
            var parts = payload.Split(' ', 2);
            if (parts.Length >= 2)
            {
                try
                {
                    signatures.Add(new RekorCheckpointSignature
                    {
                        KeyHint = parts[0],
                        Signature = Convert.FromBase64String(parts[1])
                    });
                }
                catch (FormatException)
                {
                    // Best-effort: keep any well-formed signatures.
                    _logger.LogDebug("Skipping invalid signature line: {Line}", sigLine);
                }
            }
        }
    }

    // Raw checkpoint = everything before the blank separator line.
    // NOTE(review): signed-note verification hashes the body INCLUDING its
    // trailing '\n'; callers verifying Signatures against RawCheckpoint may
    // need to append it — confirm against the verifier's expectations.
    var rawCheckpointEnd = signatureStartIndex > 0 ? signatureStartIndex - 1 : lines.Length;
    var rawCheckpoint = string.Join('\n', lines[..rawCheckpointEnd]);

    return new RekorTileCheckpoint
    {
        Origin = origin,
        TreeSize = treeSize,
        RootHash = rootHash,
        RawCheckpoint = rawCheckpoint,
        Signatures = signatures
    };
}
/// <summary>
/// Formats the tile path suffix for a given level and tile index, e.g.
/// "0/x001/234" for level 0, index 1234.
/// </summary>
private static string FormatTilePath(int level, long index)
{
    var sb = new StringBuilder();
    sb.Append(level.ToString(CultureInfo.InvariantCulture));
    sb.Append('/');
    AppendPathIndex(sb, index);
    return sb.ToString();
}

/// <summary>
/// Formats the entry path for a log index, e.g. "entries/x001/234" for 1234.
/// Uses the same base-1000 index encoding as tile paths.
/// </summary>
private static string FormatEntryPath(long index)
{
    var sb = new StringBuilder("entries/");
    AppendPathIndex(sb, index);
    return sb.ToString();
}

/// <summary>
/// Appends an index encoded per c2sp.org/tlog-tiles: the index is split
/// big-endian into base-1000 groups, each rendered as exactly three digits,
/// and every path element except the LAST is prefixed with 'x'.
/// Examples: 0 -> "000", 1234 -> "x001/234", 1234067 -> "x001/x234/067".
/// (The previous implementation trimmed leading zeros and omitted the 'x'
/// prefixes, contradicting both the spec and the example above.)
/// </summary>
private static void AppendPathIndex(StringBuilder sb, long index)
{
    var parts = new List<string>();
    var remaining = index;
    do
    {
        parts.Add((remaining % 1000).ToString("D3", CultureInfo.InvariantCulture));
        remaining /= 1000;
    }
    while (remaining > 0); // do-while so index 0 still yields "000"
    parts.Reverse();

    for (var i = 0; i < parts.Count; i++)
    {
        if (i > 0)
        {
            sb.Append('/');
        }
        if (i < parts.Count - 1)
        {
            sb.Append('x'); // non-final elements carry the 'x' marker
        }
        sb.Append(parts[i]);
    }
}
}

View File

@@ -47,9 +47,43 @@ internal static class RekorBackendResolver
{
Name = name,
Url = new Uri(options.Url, UriKind.Absolute),
Version = ParseLogVersion(options.Version),
TileBaseUrl = string.IsNullOrWhiteSpace(options.TileBaseUrl)
? null
: new Uri(options.TileBaseUrl, UriKind.Absolute),
LogId = options.LogId,
PreferTileProofs = options.PreferTileProofs,
ProofTimeout = TimeSpan.FromMilliseconds(options.ProofTimeoutMs),
PollInterval = TimeSpan.FromMilliseconds(options.PollIntervalMs),
MaxAttempts = options.MaxAttempts
};
}
/// <summary>
/// Maps a configured version string ("auto", "v1"/"1", "v2"/"2", any casing)
/// to <see cref="RekorLogVersion"/>; blank or unrecognized input yields Auto.
/// </summary>
private static RekorLogVersion ParseLogVersion(string? version)
{
    var normalized = version?.Trim().ToUpperInvariant();

    if (normalized is "V1" or "1")
    {
        return RekorLogVersion.V1;
    }

    if (normalized is "V2" or "2")
    {
        return RekorLogVersion.V2;
    }

    // Covers null/blank, "AUTO", and anything unrecognized.
    return RekorLogVersion.Auto;
}
/// <summary>
/// True when the backend should use tile-based verification: the version is
/// pinned to V2, or it is Auto and the backend prefers tile proofs.
/// </summary>
public static bool ShouldUseTileProofs(RekorBackend backend)
{
    return backend.Version switch
    {
        RekorLogVersion.V2 => true,
        RekorLogVersion.Auto => backend.PreferTileProofs,
        _ => false
    };
}
}

View File

@@ -96,6 +96,20 @@ public static class ServiceCollectionExtensions
});
services.AddSingleton<IRekorClient>(sp => sp.GetRequiredService<HttpRekorClient>());
// Rekor v2 tile-based client for Sunlight/tile log format
services.AddHttpClient<HttpRekorTileClient>((sp, client) =>
{
var options = sp.GetRequiredService<IOptions<AttestorOptions>>().Value;
var timeoutMs = options.Rekor.Primary.ProofTimeoutMs;
if (timeoutMs <= 0)
{
timeoutMs = 15_000;
}
client.Timeout = TimeSpan.FromMilliseconds(timeoutMs);
});
services.AddSingleton<IRekorTileClient>(sp => sp.GetRequiredService<HttpRekorTileClient>());
services.AddHttpClient<HttpTransparencyWitnessClient>((sp, client) =>
{
var options = sp.GetRequiredService<IOptions<AttestorOptions>>().Value;