Add unit tests for SBOM ingestion and transformation
Some checks failed
Docs CI / lint-and-preview (push) Has been cancelled
Some checks failed
Docs CI / lint-and-preview (push) Has been cancelled
- Implement `SbomIngestServiceCollectionExtensionsTests` to verify the SBOM ingestion pipeline exports snapshots correctly. - Create `SbomIngestTransformerTests` to ensure the transformation produces expected nodes and edges, including deduplication of license nodes and normalization of timestamps. - Add `SbomSnapshotExporterTests` to test the export functionality for manifest, adjacency, nodes, and edges. - Introduce `VexOverlayTransformerTests` to validate the transformation of VEX nodes and edges. - Set up project file for the test project with necessary dependencies and configurations. - Include JSON fixture files for testing purposes.
This commit is contained in:
@@ -0,0 +1,70 @@
|
||||
using System.Text.Json.Nodes;
|
||||
using System.Text.Json.Serialization;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.WebService.Contracts;
|
||||
|
||||
/// <summary>Request body for appending a single event to the findings ledger.</summary>
public sealed record LedgerEventRequest
{
    /// <summary>Tenant the event belongs to.</summary>
    [JsonPropertyName("tenantId")]
    public required string TenantId { get; init; }

    /// <summary>Hash chain the event is appended to.</summary>
    [JsonPropertyName("chainId")]
    public required Guid ChainId { get; init; }

    /// <summary>Caller-assigned position within the chain.</summary>
    [JsonPropertyName("sequence")]
    public required long Sequence { get; init; }

    /// <summary>Unique event identifier; presumably also the idempotency key — confirm against the write service.</summary>
    [JsonPropertyName("eventId")]
    public required Guid EventId { get; init; }

    /// <summary>Event type discriminator (e.g. "finding.created"; see LedgerEventConstants).</summary>
    [JsonPropertyName("eventType")]
    public required string EventType { get; init; }

    /// <summary>Policy version the event was evaluated under.</summary>
    [JsonPropertyName("policyVersion")]
    public required string PolicyVersion { get; init; }

    /// <summary>Finding the event refers to.</summary>
    [JsonPropertyName("finding")]
    public required LedgerFindingRequest Finding { get; init; }

    /// <summary>Artifact associated with the event.</summary>
    [JsonPropertyName("artifactId")]
    public required string ArtifactId { get; init; }

    /// <summary>Optional originating run; omitted from the canonical envelope when null or empty.</summary>
    [JsonPropertyName("sourceRunId")]
    public Guid? SourceRunId { get; init; }

    /// <summary>Who produced the event.</summary>
    [JsonPropertyName("actor")]
    public required LedgerActorRequest Actor { get; init; }

    /// <summary>When the underlying action happened (normalized to UTC during mapping).</summary>
    [JsonPropertyName("occurredAt")]
    public required DateTimeOffset OccurredAt { get; init; }

    /// <summary>When the event was recorded; defaults to "now" during mapping when omitted.</summary>
    [JsonPropertyName("recordedAt")]
    public DateTimeOffset? RecordedAt { get; init; }

    /// <summary>Free-form event payload; mapped to an empty object when null.</summary>
    [JsonPropertyName("payload")]
    public JsonObject? Payload { get; init; }

    /// <summary>Optional caller-supplied hash of the previous chain entry, for optimistic chain checks.</summary>
    [JsonPropertyName("previousHash")]
    public string? PreviousHash { get; init; }
}
|
||||
|
||||
/// <summary>Finding reference carried inside a ledger event request.</summary>
public sealed record LedgerFindingRequest
{
    /// <summary>Finding identifier.</summary>
    [JsonPropertyName("id")]
    public required string Id { get; init; }

    /// <summary>Optional artifact override; mapping falls back to the event-level artifactId when null.</summary>
    [JsonPropertyName("artifactId")]
    public string? ArtifactId { get; init; }

    /// <summary>Vulnerability identifier the finding concerns.</summary>
    [JsonPropertyName("vulnId")]
    public required string VulnId { get; init; }
}
|
||||
|
||||
/// <summary>Actor reference carried inside a ledger event request.</summary>
public sealed record LedgerActorRequest
{
    /// <summary>Actor identifier.</summary>
    [JsonPropertyName("id")]
    public required string Id { get; init; }

    /// <summary>Actor kind (supported values listed in LedgerEventConstants.SupportedActorTypes).</summary>
    [JsonPropertyName("type")]
    public required string Type { get; init; }
}
|
||||
@@ -0,0 +1,30 @@
|
||||
using System.Text.Json.Serialization;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.WebService.Contracts;
|
||||
|
||||
/// <summary>Response body returned after a ledger event append.</summary>
public sealed record LedgerEventResponse
{
    /// <summary>Identifier of the appended (or pre-existing) event.</summary>
    [JsonPropertyName("eventId")]
    public Guid EventId { get; init; }

    /// <summary>Chain the event lives on.</summary>
    [JsonPropertyName("chainId")]
    public Guid ChainId { get; init; }

    /// <summary>Position of the event within the chain.</summary>
    [JsonPropertyName("sequence")]
    public long Sequence { get; init; }

    /// <summary>Outcome marker: "created" for new appends, "idempotent" for replays.</summary>
    [JsonPropertyName("status")]
    public string Status { get; init; } = "created";

    /// <summary>Hash of the canonical event envelope.</summary>
    [JsonPropertyName("eventHash")]
    public string EventHash { get; init; } = string.Empty;

    /// <summary>Hash of the preceding chain entry.</summary>
    [JsonPropertyName("previousHash")]
    public string PreviousHash { get; init; } = string.Empty;

    /// <summary>Merkle leaf hash derived from the event hash and sequence.</summary>
    [JsonPropertyName("merkleLeafHash")]
    public string MerkleLeafHash { get; init; } = string.Empty;

    /// <summary>Server-side timestamp of when the event was recorded.</summary>
    [JsonPropertyName("recordedAt")]
    public DateTimeOffset RecordedAt { get; init; }
}
|
||||
@@ -0,0 +1,72 @@
|
||||
using System.Text.Json.Nodes;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
using StellaOps.Findings.Ledger.WebService.Contracts;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.WebService.Mappings;
|
||||
|
||||
/// <summary>Maps incoming web contracts onto ledger domain drafts.</summary>
public static class LedgerEventMapping
{
    /// <summary>
    /// Converts an API request into a <see cref="LedgerEventDraft"/>, building the
    /// canonical event envelope that will later be serialized and hashed.
    /// </summary>
    /// <exception cref="ArgumentNullException">When <paramref name="request"/> is null.</exception>
    public static LedgerEventDraft ToDraft(this LedgerEventRequest request)
    {
        ArgumentNullException.ThrowIfNull(request);

        // Timestamps are normalized to UTC; recordedAt defaults to "now" when omitted.
        var recordedAt = (request.RecordedAt ?? DateTimeOffset.UtcNow).ToUniversalTime();
        // Deep-clone the payload so later mutation of the request cannot affect the draft.
        var payload = request.Payload is null ? new JsonObject() : (JsonObject)request.Payload.DeepClone();

        var eventObject = new JsonObject
        {
            ["id"] = request.EventId.ToString(),
            ["type"] = request.EventType,
            ["tenant"] = request.TenantId,
            ["chainId"] = request.ChainId.ToString(),
            ["sequence"] = request.Sequence,
            ["policyVersion"] = request.PolicyVersion,
            ["artifactId"] = request.ArtifactId,
            ["finding"] = new JsonObject
            {
                ["id"] = request.Finding.Id,
                // Finding-level artifactId falls back to the event-level one.
                ["artifactId"] = request.Finding.ArtifactId ?? request.ArtifactId,
                ["vulnId"] = request.Finding.VulnId
            },
            ["actor"] = new JsonObject
            {
                ["id"] = request.Actor.Id,
                ["type"] = request.Actor.Type
            },
            ["occurredAt"] = FormatTimestamp(request.OccurredAt),
            ["recordedAt"] = FormatTimestamp(recordedAt),
            ["payload"] = payload
        };

        // Omit sourceRunId entirely when absent or empty so the canonical JSON stays stable.
        if (request.SourceRunId is Guid sourceRunId && sourceRunId != Guid.Empty)
        {
            eventObject["sourceRunId"] = sourceRunId.ToString();
        }

        var envelope = new JsonObject
        {
            ["event"] = eventObject
        };

        return new LedgerEventDraft(
            request.TenantId,
            request.ChainId,
            request.Sequence,
            request.EventId,
            request.EventType,
            request.PolicyVersion,
            request.Finding.Id,
            request.ArtifactId,
            request.SourceRunId,
            request.Actor.Id,
            request.Actor.Type,
            request.OccurredAt.ToUniversalTime(),
            recordedAt,
            payload,
            envelope,
            request.PreviousHash);
    }

    // Canonical millisecond-precision UTC timestamp, e.g. "2024-01-02T03:04:05.006Z".
    // InvariantCulture is required (CA1305): this string feeds the canonical envelope
    // that gets hashed, so host-culture calendars/digits must never leak into it.
    private static string FormatTimestamp(DateTimeOffset value)
        => value.ToUniversalTime().ToString("yyyy-MM-dd'T'HH:mm:ss.fff'Z'", System.Globalization.CultureInfo.InvariantCulture);
}
|
||||
206
src/Findings/StellaOps.Findings.Ledger.WebService/Program.cs
Normal file
206
src/Findings/StellaOps.Findings.Ledger.WebService/Program.cs
Normal file
@@ -0,0 +1,206 @@
|
||||
using Microsoft.AspNetCore.Diagnostics;
|
||||
using Microsoft.AspNetCore.Http.HttpResults;
|
||||
using Microsoft.AspNetCore.Mvc;
|
||||
using Microsoft.Extensions.Options;
|
||||
using Serilog;
|
||||
using Serilog.Events;
|
||||
using StellaOps.Auth.Abstractions;
|
||||
using StellaOps.Auth.ServerIntegration;
|
||||
using StellaOps.Configuration;
|
||||
using StellaOps.DependencyInjection;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
using StellaOps.Findings.Ledger.Infrastructure;
|
||||
using StellaOps.Findings.Ledger.Infrastructure.Merkle;
|
||||
using StellaOps.Findings.Ledger.Infrastructure.Postgres;
|
||||
using StellaOps.Findings.Ledger.Infrastructure.Projection;
|
||||
using StellaOps.Findings.Ledger.Infrastructure.Policy;
|
||||
using StellaOps.Findings.Ledger.Options;
|
||||
using StellaOps.Findings.Ledger.Services;
|
||||
using StellaOps.Findings.Ledger.WebService.Contracts;
|
||||
using StellaOps.Findings.Ledger.WebService.Mappings;
|
||||
using StellaOps.Telemetry.Core;
|
||||
|
||||
// Authorization policy name gating the ledger write endpoint.
const string LedgerWritePolicy = "ledger.events.write";

var builder = WebApplication.CreateBuilder(args);

// Layer StellaOps configuration defaults: optional YAML file plus FINDINGS_LEDGER_* env vars.
builder.Configuration.AddStellaOpsDefaults(options =>
{
    options.BasePath = builder.Environment.ContentRootPath;
    options.EnvironmentPrefix = "FINDINGS_LEDGER_";
    options.ConfigureBuilder = configurationBuilder =>
    {
        configurationBuilder.AddYamlFile("../etc/findings-ledger.yaml", optional: true, reloadOnChange: true);
    };
});

// Eagerly bound copy of the options so authority settings are available while composing services below.
var bootstrapOptions = builder.Configuration.BindOptions<LedgerServiceOptions>(
    LedgerServiceOptions.SectionName,
    (opts, _) => opts.Validate());

// Serilog console logging; ASP.NET Core framework noise capped at Warning.
builder.Host.UseSerilog((context, services, loggerConfiguration) =>
{
    loggerConfiguration
        .MinimumLevel.Information()
        .MinimumLevel.Override("Microsoft.AspNetCore", LogEventLevel.Warning)
        .Enrich.FromLogContext()
        .WriteTo.Console();
});

// Re-register the same options through the options pipeline so validation also runs at startup.
builder.Services.AddOptions<LedgerServiceOptions>()
    .Bind(builder.Configuration.GetSection(LedgerServiceOptions.SectionName))
    .PostConfigure(options => options.Validate())
    .ValidateOnStart();

builder.Services.AddSingleton(TimeProvider.System);
builder.Services.AddProblemDetails();
builder.Services.AddEndpointsApiExplorer();
builder.Services.AddHealthChecks();

// Telemetry: metrics and traces for inbound ASP.NET Core and outbound HttpClient calls.
builder.Services.AddStellaOpsTelemetry(
    builder.Configuration,
    configureMetering: meterBuilder =>
    {
        meterBuilder.AddAspNetCoreInstrumentation();
        meterBuilder.AddHttpClientInstrumentation();
    },
    configureTracing: tracerBuilder =>
    {
        tracerBuilder.AddAspNetCoreInstrumentation();
        tracerBuilder.AddHttpClientInstrumentation();
    });

// Resource-server token validation against the StellaOps authority configured above.
builder.Services.AddStellaOpsResourceServerAuthentication(
    builder.Configuration,
    configurationSection: null,
    configure: resourceOptions =>
    {
        resourceOptions.Authority = bootstrapOptions.Authority.Issuer;
        resourceOptions.RequireHttpsMetadata = bootstrapOptions.Authority.RequireHttpsMetadata;
        resourceOptions.MetadataAddress = bootstrapOptions.Authority.MetadataAddress;
        resourceOptions.BackchannelTimeout = bootstrapOptions.Authority.BackchannelTimeout;
        resourceOptions.TokenClockSkew = bootstrapOptions.Authority.TokenClockSkew;

        // Replace any defaults wholesale with the configured audiences and scopes.
        resourceOptions.Audiences.Clear();
        foreach (var audience in bootstrapOptions.Authority.Audiences)
        {
            resourceOptions.Audiences.Add(audience);
        }

        resourceOptions.RequiredScopes.Clear();
        foreach (var scope in bootstrapOptions.Authority.RequiredScopes)
        {
            resourceOptions.RequiredScopes.Add(scope);
        }

        // Bypass networks are additive (defaults are intentionally not cleared here).
        foreach (var network in bootstrapOptions.Authority.BypassNetworks)
        {
            resourceOptions.BypassNetworks.Add(network);
        }
    });

builder.Services.AddAuthorization(options =>
{
    // Fall back to the vuln-operate scope when no scopes are configured.
    var scopes = bootstrapOptions.Authority.RequiredScopes.Count > 0
        ? bootstrapOptions.Authority.RequiredScopes.ToArray()
        : new[] { StellaOpsScopes.VulnOperate };

    options.AddPolicy(LedgerWritePolicy, policy =>
    {
        policy.RequireAuthenticatedUser();
        policy.Requirements.Add(new StellaOpsScopeRequirement(scopes));
        policy.AddAuthenticationSchemes(StellaOpsAuthenticationDefaults.AuthenticationScheme);
    });
});

// Ledger persistence, projection, and Merkle-anchoring services (Postgres-backed implementations).
builder.Services.AddSingleton<LedgerAnchorQueue>();
builder.Services.AddSingleton<LedgerDataSource>();
builder.Services.AddSingleton<IMerkleAnchorRepository, PostgresMerkleAnchorRepository>();
builder.Services.AddSingleton<ILedgerEventRepository, PostgresLedgerEventRepository>();
builder.Services.AddSingleton<IMerkleAnchorScheduler, PostgresMerkleAnchorScheduler>();
builder.Services.AddSingleton<ILedgerEventStream, PostgresLedgerEventStream>();
builder.Services.AddSingleton<IFindingProjectionRepository, PostgresFindingProjectionRepository>();
builder.Services.AddSingleton<IPolicyEvaluationService, InlinePolicyEvaluationService>();
builder.Services.AddSingleton<ILedgerEventWriteService, LedgerEventWriteService>();
builder.Services.AddHostedService<LedgerMerkleAnchorWorker>();
builder.Services.AddHostedService<LedgerProjectionWorker>();

var app = builder.Build();

app.UseSerilogRequestLogging();
// Convert unhandled exceptions into RFC 7807 problem responses.
// NOTE(review): this echoes the raw exception message to clients — confirm that is acceptable.
app.UseExceptionHandler(exceptionApp =>
{
    exceptionApp.Run(async context =>
    {
        var feature = context.Features.Get<IExceptionHandlerFeature>();
        if (feature?.Error is null)
        {
            return;
        }

        var problem = Results.Problem(
            statusCode: StatusCodes.Status500InternalServerError,
            title: "ledger_internal_error",
            detail: feature.Error.Message);
        await problem.ExecuteAsync(context);
    });
});

app.UseAuthentication();
app.UseAuthorization();

app.MapHealthChecks("/healthz");

// Append a single event to a tenant's ledger chain.
// 201 on a new append, 200 when the write service reports an idempotent replay,
// 400/409/500 problems otherwise.
app.MapPost("/vuln/ledger/events", async Task<Results<Created<LedgerEventResponse>, Ok<LedgerEventResponse>, ProblemHttpResult>> (
    LedgerEventRequest request,
    ILedgerEventWriteService writeService,
    CancellationToken cancellationToken) =>
    {
        var draft = request.ToDraft();
        var result = await writeService.AppendAsync(draft, cancellationToken).ConfigureAwait(false);
        return result.Status switch
        {
            LedgerWriteStatus.Success => CreateCreatedResponse(result.Record!),
            LedgerWriteStatus.Idempotent => TypedResults.Ok(CreateResponse(result.Record!, "idempotent")),
            LedgerWriteStatus.ValidationFailed => TypedResults.Problem(
                statusCode: StatusCodes.Status400BadRequest,
                title: "validation_failed",
                detail: string.Join(";", result.Errors)),
            LedgerWriteStatus.Conflict => TypedResults.Problem(
                statusCode: StatusCodes.Status409Conflict,
                title: result.ConflictCode ?? "conflict",
                detail: string.Join(";", result.Errors)),
            _ => TypedResults.Problem(
                statusCode: StatusCodes.Status500InternalServerError,
                title: "ledger_internal_error",
                detail: "Unexpected ledger status.")
        };
    })
    .WithName("LedgerEventAppend")
    .RequireAuthorization(LedgerWritePolicy)
    .Produces(StatusCodes.Status201Created)
    .Produces(StatusCodes.Status200OK)
    .ProducesProblem(StatusCodes.Status400BadRequest)
    .ProducesProblem(StatusCodes.Status409Conflict)
    .ProducesProblem(StatusCodes.Status500InternalServerError);

app.Run();
|
||||
|
||||
// Wraps a freshly appended record in a 201 Created result pointing at its canonical resource URI.
static Created<LedgerEventResponse> CreateCreatedResponse(LedgerEventRecord record)
    => TypedResults.Created($"/vuln/ledger/events/{record.EventId}", CreateResponse(record, "created"));
|
||||
|
||||
// Projects a persisted ledger record into the API response contract; `status` is
// "created" for new appends and "idempotent" for replays.
static LedgerEventResponse CreateResponse(LedgerEventRecord record, string status)
    => new()
    {
        EventId = record.EventId,
        ChainId = record.ChainId,
        Sequence = record.SequenceNumber,
        Status = status,
        EventHash = record.EventHash,
        PreviousHash = record.PreviousHash,
        MerkleLeafHash = record.MerkleLeafHash,
        RecordedAt = record.RecordedAt
    };
|
||||
@@ -0,0 +1,24 @@
|
||||
<Project Sdk="Microsoft.NET.Sdk.Web">

  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <Nullable>enable</Nullable>
    <ImplicitUsings>enable</ImplicitUsings>
  </PropertyGroup>

  <!-- Structured console logging for the web host. -->
  <ItemGroup>
    <PackageReference Include="Serilog.AspNetCore" Version="8.0.1" />
    <PackageReference Include="Serilog.Sinks.Console" Version="5.0.1" />
  </ItemGroup>

  <!-- In-repo dependencies: ledger domain/infrastructure, auth, air-gap policy, config, DI, telemetry. -->
  <ItemGroup>
    <ProjectReference Include="..\StellaOps.Findings.Ledger\StellaOps.Findings.Ledger.csproj" />
    <ProjectReference Include="..\..\Authority\StellaOps.Authority\StellaOps.Auth.Abstractions\StellaOps.Auth.Abstractions.csproj" />
    <ProjectReference Include="..\..\Authority\StellaOps.Authority\StellaOps.Auth.ServerIntegration\StellaOps.Auth.ServerIntegration.csproj" />
    <ProjectReference Include="..\..\AirGap\StellaOps.AirGap.Policy\StellaOps.AirGap.Policy\StellaOps.AirGap.Policy.csproj" />
    <ProjectReference Include="..\..\__Libraries\StellaOps.Configuration\StellaOps.Configuration.csproj" />
    <ProjectReference Include="..\..\__Libraries\StellaOps.DependencyInjection\StellaOps.DependencyInjection.csproj" />
    <ProjectReference Include="..\..\Telemetry\StellaOps.Telemetry.Core\StellaOps.Telemetry.Core\StellaOps.Telemetry.Core.csproj" />
  </ItemGroup>

</Project>
|
||||
@@ -0,0 +1,36 @@
|
||||
using System.Collections.Immutable;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
/// <summary>Well-known event types, actor types, and hash sentinels for the findings ledger.</summary>
public static class LedgerEventConstants
{
    // Finding lifecycle event type discriminators, as they appear in canonical envelopes.
    public const string EventFindingCreated = "finding.created";
    public const string EventFindingStatusChanged = "finding.status_changed";
    public const string EventFindingSeverityChanged = "finding.severity_changed";
    public const string EventFindingTagUpdated = "finding.tag_updated";
    public const string EventFindingCommentAdded = "finding.comment_added";
    public const string EventFindingAssignmentChanged = "finding.assignment_changed";
    public const string EventFindingAcceptedRisk = "finding.accepted_risk";
    public const string EventFindingRemediationPlanAdded = "finding.remediation_plan_added";
    public const string EventFindingAttachmentAdded = "finding.attachment_added";
    public const string EventFindingClosed = "finding.closed";

    /// <summary>All event types the ledger accepts (ordinal, case-sensitive comparison).</summary>
    public static readonly ImmutableHashSet<string> SupportedEventTypes = ImmutableHashSet.Create(StringComparer.Ordinal,
        EventFindingCreated,
        EventFindingStatusChanged,
        EventFindingSeverityChanged,
        EventFindingTagUpdated,
        EventFindingCommentAdded,
        EventFindingAssignmentChanged,
        EventFindingAcceptedRisk,
        EventFindingRemediationPlanAdded,
        EventFindingAttachmentAdded,
        EventFindingClosed);

    /// <summary>Accepted actor type values (ordinal, case-sensitive comparison).</summary>
    public static readonly ImmutableHashSet<string> SupportedActorTypes = ImmutableHashSet.Create(StringComparer.Ordinal,
        "system",
        "operator",
        "integration");

    /// <summary>All-zero SHA-256 hex sentinel used where no previous hash exists (chain genesis).</summary>
    public const string EmptyHash = "0000000000000000000000000000000000000000000000000000000000000000";
}
|
||||
@@ -0,0 +1,85 @@
|
||||
using System.Text.Json.Nodes;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
/// <summary>
/// A validated, not-yet-persisted ledger event produced from an API request.
/// Carries the canonical envelope that will be serialized and hashed on write.
/// </summary>
public sealed record LedgerEventDraft(
    string TenantId,
    Guid ChainId,
    long SequenceNumber,
    Guid EventId,
    string EventType,
    string PolicyVersion,
    string FindingId,
    string ArtifactId,
    Guid? SourceRunId,
    string ActorId,
    string ActorType,
    DateTimeOffset OccurredAt,
    DateTimeOffset RecordedAt,
    JsonObject Payload,
    JsonObject CanonicalEnvelope,     // {"event": {...}} wrapper built by LedgerEventMapping
    string? ProvidedPreviousHash);    // caller's optimistic expectation of the chain head hash
|
||||
|
||||
/// <summary>A persisted ledger event, including the hashes computed at write time.</summary>
public sealed record LedgerEventRecord(
    string TenantId,
    Guid ChainId,
    long SequenceNumber,
    Guid EventId,
    string EventType,
    string PolicyVersion,
    string FindingId,
    string ArtifactId,
    Guid? SourceRunId,
    string ActorId,
    string ActorType,
    DateTimeOffset OccurredAt,
    DateTimeOffset RecordedAt,
    JsonObject EventBody,
    string EventHash,        // SHA-256 hex of CanonicalJson
    string PreviousHash,     // hash of the preceding chain entry
    string MerkleLeafHash,   // leaf hash derived from EventHash + sequence
    string CanonicalJson);   // the exact serialized form that was hashed
|
||||
|
||||
/// <summary>Snapshot of the last entry of a chain: its sequence, hash, and recording time.</summary>
public sealed record LedgerChainHead(
    long SequenceNumber,
    string EventHash,
    DateTimeOffset RecordedAt);
|
||||
|
||||
/// <summary>Outcome classification of a ledger append attempt.</summary>
public enum LedgerWriteStatus
{
    /// <summary>Event was appended as a new chain entry.</summary>
    Success,
    /// <summary>An identical event already existed; the stored record is returned.</summary>
    Idempotent,
    /// <summary>The draft failed validation; see Errors.</summary>
    ValidationFailed,
    /// <summary>The append conflicted with existing chain state; see ConflictCode.</summary>
    Conflict
}
|
||||
|
||||
/// <summary>
/// Result of a ledger append. Use the static factories rather than the constructor so
/// the Status/Record/Errors combinations stay consistent.
/// </summary>
public sealed record LedgerWriteResult(
    LedgerWriteStatus Status,
    LedgerEventRecord? Record,          // populated for Success and Idempotent
    IReadOnlyList<string> Errors,       // populated for ValidationFailed and Conflict
    LedgerEventRecord? ExistingRecord,  // the pre-existing record on Idempotent
    string? ConflictCode)               // machine-readable conflict reason
{
    /// <summary>Append rejected by validation.</summary>
    public static LedgerWriteResult ValidationFailed(params string[] errors)
        => new(LedgerWriteStatus.ValidationFailed, null, errors, null, null);

    /// <summary>Append conflicted with existing chain state.</summary>
    public static LedgerWriteResult Conflict(string code, params string[] errors)
        => new(LedgerWriteStatus.Conflict, null, errors, null, code);

    /// <summary>Event already existed; returns the stored record as both Record and ExistingRecord.</summary>
    public static LedgerWriteResult Idempotent(LedgerEventRecord record)
        => new(LedgerWriteStatus.Idempotent, record, Array.Empty<string>(), record, null);

    /// <summary>Event appended successfully.</summary>
    public static LedgerWriteResult Success(LedgerEventRecord record)
        => new(LedgerWriteStatus.Success, record, Array.Empty<string>(), null, null);
}
|
||||
|
||||
/// <summary>Thrown when an append hits an event id that already exists in storage.</summary>
public sealed class LedgerDuplicateEventException : Exception
{
    /// <param name="eventId">Identifier of the duplicate event.</param>
    /// <param name="innerException">The underlying storage error (e.g. a unique-constraint violation).</param>
    public LedgerDuplicateEventException(Guid eventId, Exception innerException)
        : base($"Ledger event {eventId} already exists.", innerException)
    {
        EventId = eventId;
    }

    /// <summary>Identifier of the duplicate event.</summary>
    public Guid EventId { get; }
}
|
||||
@@ -0,0 +1,57 @@
|
||||
using System.Text.Json.Nodes;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
/// <summary>
/// Materialized current state of a finding under a specific policy version,
/// derived by replaying ledger events.
/// </summary>
public sealed record FindingProjection(
    string TenantId,
    string FindingId,
    string PolicyVersion,
    string Status,
    decimal? Severity,
    JsonObject Labels,
    Guid CurrentEventId,       // last ledger event applied to this projection
    string? ExplainRef,
    JsonArray PolicyRationale,
    DateTimeOffset UpdatedAt,
    string CycleHash);         // deterministic hash of the projection state (see ProjectionHashing)
|
||||
|
||||
/// <summary>One historical state transition of a finding, keyed by the ledger event that caused it.</summary>
public sealed record FindingHistoryEntry(
    string TenantId,
    string FindingId,
    string PolicyVersion,
    Guid EventId,
    string Status,
    decimal? Severity,
    string ActorId,
    string? Comment,
    DateTimeOffset OccurredAt);
|
||||
|
||||
/// <summary>A triage action recorded against a finding, linked back to its source ledger event.</summary>
public sealed record TriageActionEntry(
    string TenantId,
    Guid ActionId,
    Guid EventId,
    string FindingId,
    string ActionType,
    JsonObject Payload,
    DateTimeOffset CreatedAt,
    string CreatedBy);
|
||||
|
||||
/// <summary>
/// Output of reducing one ledger event into projection state: the updated projection,
/// its history entry, and an optional triage action.
/// </summary>
public sealed record ProjectionReduceResult(
    FindingProjection Projection,
    FindingHistoryEntry History,
    TriageActionEntry? Action);
|
||||
|
||||
/// <summary>
/// Resume marker for the projection worker: the recordedAt/eventId pair of the last
/// ledger event applied, plus when the checkpoint itself was last written.
/// </summary>
public sealed record ProjectionCheckpoint(
    DateTimeOffset LastRecordedAt,
    Guid LastEventId,
    DateTimeOffset UpdatedAt)
{
    /// <summary>
    /// Checkpoint used before any event has been processed: positioned at the Unix
    /// epoch with an empty event id, stamped with the injected clock's current time.
    /// </summary>
    /// <exception cref="ArgumentNullException">When <paramref name="timeProvider"/> is null.</exception>
    public static ProjectionCheckpoint Initial(TimeProvider timeProvider)
    {
        ArgumentNullException.ThrowIfNull(timeProvider);

        // DateTimeOffset.UnixEpoch is the idiomatic form of the hand-built 1970-01-01T00:00:00Z constant.
        return new ProjectionCheckpoint(DateTimeOffset.UnixEpoch, Guid.Empty, timeProvider.GetUtcNow());
    }
}
|
||||
@@ -0,0 +1,16 @@
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Hashing;
|
||||
|
||||
/// <summary>Shared hashing helpers for ledger canonicalization.</summary>
internal static class HashUtilities
{
    /// <summary>
    /// Returns the lowercase hexadecimal SHA-256 digest of the UTF-8 encoding of
    /// <paramref name="input"/>.
    /// </summary>
    /// <exception cref="ArgumentNullException">When <paramref name="input"/> is null.</exception>
    /// <exception cref="ArgumentException">When <paramref name="input"/> is empty.</exception>
    public static string ComputeSha256Hex(string input)
    {
        ArgumentException.ThrowIfNullOrEmpty(input);

        var digest = SHA256.HashData(Encoding.UTF8.GetBytes(input));
        return Convert.ToHexString(digest).ToLowerInvariant();
    }
}
|
||||
@@ -0,0 +1,78 @@
|
||||
using System.Text.Encodings.Web;
|
||||
using System.Text.Json;
|
||||
using System.Text.Json.Nodes;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Hashing;
|
||||
|
||||
/// <summary>
/// Produces canonical JSON for hashing: object properties sorted by ordinal key,
/// arrays preserved in order, compact output with relaxed escaping.
/// </summary>
public static class LedgerCanonicalJsonSerializer
{
    private static readonly JsonSerializerOptions SerializerOptions = new()
    {
        Encoder = JavaScriptEncoder.UnsafeRelaxedJsonEscaping,
        WriteIndented = false
    };

    /// <summary>Canonicalizes the envelope and serializes it to a compact JSON string.</summary>
    public static string Serialize(JsonObject envelope)
    {
        ArgumentNullException.ThrowIfNull(envelope);

        var canonical = (JsonObject)CanonicalizeNode(envelope)!;
        return canonical.ToJsonString(SerializerOptions);
    }

    /// <summary>Returns a deep copy of the object with all nested keys sorted ordinally.</summary>
    public static JsonObject Canonicalize(JsonObject envelope)
    {
        ArgumentNullException.ThrowIfNull(envelope);

        return (JsonObject)CanonicalizeNode(envelope)!;
    }

    /// <summary>Returns a deep copy of the array with all nested object keys sorted ordinally.</summary>
    public static JsonArray Canonicalize(JsonArray array)
    {
        ArgumentNullException.ThrowIfNull(array);

        return (JsonArray?)CanonicalizeNode(array) ?? new JsonArray();
    }

    // Recursive worker: clones values, rebuilds arrays element-wise, and rebuilds
    // objects with ordinally sorted keys.
    private static JsonNode? CanonicalizeNode(JsonNode? node) => node switch
    {
        null => null,
        JsonValue value => value.DeepClone(),
        JsonArray array => CanonicalizeArrayNode(array),
        JsonObject obj => CanonicalizeObjectNode(obj),
        _ => node.DeepClone(),
    };

    private static JsonArray CanonicalizeArrayNode(JsonArray array)
    {
        var rebuilt = new JsonArray();
        foreach (var element in array)
        {
            rebuilt.Add(CanonicalizeNode(element));
        }

        return rebuilt;
    }

    private static JsonObject CanonicalizeObjectNode(JsonObject obj)
    {
        var ordered = new JsonObject();
        foreach (var (key, value) in obj.OrderBy(static p => p.Key, StringComparer.Ordinal))
        {
            ordered[key] = CanonicalizeNode(value);
        }

        return ordered;
    }
}
|
||||
@@ -0,0 +1,20 @@
|
||||
using System.Text.Json.Nodes;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Hashing;
|
||||
|
||||
/// <summary>Computes the event and Merkle-leaf hashes for a canonical envelope.</summary>
public static class LedgerHashing
{
    /// <summary>
    /// Serializes the envelope to canonical JSON, hashes it to get the event hash,
    /// then derives the Merkle leaf hash from "{eventHash}-{sequenceNumber}".
    /// </summary>
    public static LedgerHashResult ComputeHashes(JsonObject canonicalEnvelope, long sequenceNumber)
    {
        var canonicalJson = LedgerCanonicalJsonSerializer.Serialize(canonicalEnvelope);
        var eventHash = HashUtilities.ComputeSha256Hex(canonicalJson);
        var merkleLeafHash = HashUtilities.ComputeSha256Hex($"{eventHash}-{sequenceNumber}");

        return new LedgerHashResult(eventHash, merkleLeafHash, canonicalJson);
    }
}

/// <summary>Hash triple: event hash, Merkle leaf hash, and the canonical JSON that was hashed.</summary>
public sealed record LedgerHashResult(
    string EventHash,
    string MerkleLeafHash,
    string CanonicalJson);
|
||||
@@ -0,0 +1,98 @@
|
||||
using System.Text.Json.Nodes;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Hashing;
|
||||
|
||||
/// <summary>
/// Computes the deterministic "cycle hash" of a finding projection: a SHA-256 over a
/// canonicalized JSON envelope of the projection's fields. Timestamp formatting is
/// hand-rolled so the output is byte-stable regardless of host culture.
/// </summary>
public static class ProjectionHashing
{
    // JSON keys reuse the property names via nameof so renames are caught at compile time.
    private const string TenantIdProperty = nameof(FindingProjection.TenantId);
    private const string FindingIdProperty = nameof(FindingProjection.FindingId);
    private const string PolicyVersionProperty = nameof(FindingProjection.PolicyVersion);
    private const string StatusProperty = nameof(FindingProjection.Status);
    private const string SeverityProperty = nameof(FindingProjection.Severity);
    private const string LabelsProperty = nameof(FindingProjection.Labels);
    private const string CurrentEventIdProperty = nameof(FindingProjection.CurrentEventId);
    private const string ExplainRefProperty = nameof(FindingProjection.ExplainRef);
    private const string PolicyRationaleProperty = nameof(FindingProjection.PolicyRationale);
    private const string UpdatedAtProperty = nameof(FindingProjection.UpdatedAt);

    /// <summary>
    /// Builds the hash envelope from the projection (excluding CycleHash itself),
    /// canonicalizes it, and returns the lowercase SHA-256 hex of the canonical JSON.
    /// </summary>
    /// <exception cref="ArgumentNullException">When <paramref name="projection"/> is null.</exception>
    public static string ComputeCycleHash(FindingProjection projection)
    {
        ArgumentNullException.ThrowIfNull(projection);

        var envelope = new JsonObject
        {
            [TenantIdProperty] = projection.TenantId,
            [FindingIdProperty] = projection.FindingId,
            [PolicyVersionProperty] = projection.PolicyVersion,
            [StatusProperty] = projection.Status,
            [SeverityProperty] = projection.Severity,
            // Clone nested nodes so building the envelope never mutates the projection.
            [LabelsProperty] = projection.Labels.DeepClone(),
            [CurrentEventIdProperty] = projection.CurrentEventId.ToString(),
            [ExplainRefProperty] = projection.ExplainRef,
            [PolicyRationaleProperty] = CloneArray(projection.PolicyRationale),
            [UpdatedAtProperty] = FormatTimestamp(projection.UpdatedAt)
        };

        var canonical = LedgerCanonicalJsonSerializer.Canonicalize(envelope);
        var canonicalJson = LedgerCanonicalJsonSerializer.Serialize(canonical);
        return HashUtilities.ComputeSha256Hex(canonicalJson);
    }

    // Writes "yyyy-MM-ddTHH:mm:ss.fffZ" (24 chars, always UTC) digit by digit.
    // Deliberately avoids DateTimeOffset.ToString so host culture/calendar can never
    // influence the hashed bytes.
    private static string FormatTimestamp(DateTimeOffset value)
    {
        var utc = value.ToUniversalTime();
        Span<char> buffer = stackalloc char[24];

        WriteFourDigits(buffer, 0, utc.Year);
        buffer[4] = '-';
        WriteTwoDigits(buffer, 5, utc.Month);
        buffer[7] = '-';
        WriteTwoDigits(buffer, 8, utc.Day);
        buffer[10] = 'T';
        WriteTwoDigits(buffer, 11, utc.Hour);
        buffer[13] = ':';
        WriteTwoDigits(buffer, 14, utc.Minute);
        buffer[16] = ':';
        WriteTwoDigits(buffer, 17, utc.Second);
        buffer[19] = '.';
        WriteThreeDigits(buffer, 20, utc.Millisecond);
        buffer[23] = 'Z';

        return new string(buffer);
    }

    // Emits the low four decimal digits of value at buffer[offset..offset+4].
    private static void WriteFourDigits(Span<char> buffer, int offset, int value)
    {
        buffer[offset] = (char)('0' + (value / 1000) % 10);
        buffer[offset + 1] = (char)('0' + (value / 100) % 10);
        buffer[offset + 2] = (char)('0' + (value / 10) % 10);
        buffer[offset + 3] = (char)('0' + value % 10);
    }

    // Emits the low two decimal digits of value at buffer[offset..offset+2].
    private static void WriteTwoDigits(Span<char> buffer, int offset, int value)
    {
        buffer[offset] = (char)('0' + (value / 10) % 10);
        buffer[offset + 1] = (char)('0' + value % 10);
    }

    // Emits the low three decimal digits of value at buffer[offset..offset+3].
    private static void WriteThreeDigits(Span<char> buffer, int offset, int value)
    {
        buffer[offset] = (char)('0' + (value / 100) % 10);
        buffer[offset + 1] = (char)('0' + (value / 10) % 10);
        buffer[offset + 2] = (char)('0' + value % 10);
    }

    // Element-wise deep clone; JsonNode instances cannot be attached to two parents.
    private static JsonArray CloneArray(JsonArray array)
    {
        ArgumentNullException.ThrowIfNull(array);

        var clone = new JsonArray();
        foreach (var item in array)
        {
            clone.Add(item?.DeepClone());
        }

        return clone;
    }
}
|
||||
@@ -0,0 +1,18 @@
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure;
|
||||
|
||||
/// <summary>Persistence for finding projections, their history, triage actions, and the worker checkpoint.</summary>
public interface IFindingProjectionRepository
{
    /// <summary>Loads the projection for a finding under a policy version, or null when absent.</summary>
    Task<FindingProjection?> GetAsync(string tenantId, string findingId, string policyVersion, CancellationToken cancellationToken);

    /// <summary>Inserts or replaces the projection row.</summary>
    Task UpsertAsync(FindingProjection projection, CancellationToken cancellationToken);

    /// <summary>Appends a history entry for a state transition.</summary>
    Task InsertHistoryAsync(FindingHistoryEntry entry, CancellationToken cancellationToken);

    /// <summary>Appends a triage action entry.</summary>
    Task InsertActionAsync(TriageActionEntry entry, CancellationToken cancellationToken);

    /// <summary>Reads the projection worker's resume checkpoint.</summary>
    Task<ProjectionCheckpoint> GetCheckpointAsync(CancellationToken cancellationToken);

    /// <summary>Persists the projection worker's resume checkpoint.</summary>
    Task SaveCheckpointAsync(ProjectionCheckpoint checkpoint, CancellationToken cancellationToken);
}
|
||||
@@ -0,0 +1,13 @@
|
||||
using System.Text.Json.Nodes;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure;
|
||||
|
||||
/// <summary>Append-only storage for ledger events.</summary>
public interface ILedgerEventRepository
{
    /// <summary>Fetches an event by tenant and event id, or null when absent (used for idempotency checks).</summary>
    Task<LedgerEventRecord?> GetByEventIdAsync(string tenantId, Guid eventId, CancellationToken cancellationToken);

    /// <summary>Returns the current head (last entry) of a chain, or null for an empty chain.</summary>
    Task<LedgerChainHead?> GetChainHeadAsync(string tenantId, Guid chainId, CancellationToken cancellationToken);

    /// <summary>Appends a record; implementations signal duplicates (e.g. LedgerDuplicateEventException — confirm per implementation).</summary>
    Task AppendAsync(LedgerEventRecord record, CancellationToken cancellationToken);
}
|
||||
@@ -0,0 +1,11 @@
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure;
|
||||
|
||||
/// <summary>Ordered read access to ledger events for the projection worker.</summary>
public interface ILedgerEventStream
{
    /// <summary>
    /// Reads up to <paramref name="batchSize"/> events recorded after the position
    /// described by <paramref name="checkpoint"/>.
    /// </summary>
    Task<IReadOnlyList<LedgerEventRecord>> ReadNextBatchAsync(
        ProjectionCheckpoint checkpoint,
        int batchSize,
        CancellationToken cancellationToken);
}
|
||||
@@ -0,0 +1,8 @@
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure;
|
||||
|
||||
/// <summary>
/// Schedules a ledger event for inclusion in a future Merkle anchor batch.
/// </summary>
public interface IMerkleAnchorScheduler
{
    /// <summary>Enqueues the event for anchoring; implementations may be no-ops (see NullMerkleAnchorScheduler).</summary>
    Task EnqueueAsync(LedgerEventRecord record, CancellationToken cancellationToken);
}
|
||||
@@ -0,0 +1,50 @@
|
||||
using System.Collections.Concurrent;
|
||||
using System.Text.Json.Nodes;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure.InMemory;
|
||||
|
||||
/// <summary>
/// Thread-safe in-memory implementation of <see cref="ILedgerEventRepository"/>,
/// intended for tests and local development. Records are defensively cloned on
/// write and on read so callers can never mutate stored state through the shared
/// <see cref="JsonObject"/> event body.
/// </summary>
public sealed class InMemoryLedgerEventRepository : ILedgerEventRepository
{
    private readonly ConcurrentDictionary<(string TenantId, Guid EventId), LedgerEventRecord> _events = new();
    private readonly ConcurrentDictionary<(string TenantId, Guid ChainId), SortedList<long, LedgerEventRecord>> _chains = new();

    /// <summary>Returns a defensive clone of the stored event, or null when not found.</summary>
    public Task<LedgerEventRecord?> GetByEventIdAsync(string tenantId, Guid eventId, CancellationToken cancellationToken)
    {
        if (_events.TryGetValue((tenantId, eventId), out var record))
        {
            // Clone on read as well as on write: otherwise the caller could mutate
            // the stored record's mutable JsonObject body in place.
            return Task.FromResult<LedgerEventRecord?>(Clone(record));
        }

        return Task.FromResult<LedgerEventRecord?>(null);
    }

    /// <summary>Returns the head (highest sequence number) of the chain, or null if the chain is empty.</summary>
    public Task<LedgerChainHead?> GetChainHeadAsync(string tenantId, Guid chainId, CancellationToken cancellationToken)
    {
        if (_chains.TryGetValue((tenantId, chainId), out var list))
        {
            // SortedList<TKey,TValue> is not thread-safe. AppendAsync mutates this
            // list under lock(list), so reads must take the same lock to avoid
            // observing torn internal state during a concurrent append.
            lock (list)
            {
                if (list.Count > 0)
                {
                    var last = list.Values[^1];
                    return Task.FromResult<LedgerChainHead?>(new LedgerChainHead(last.SequenceNumber, last.EventHash, last.RecordedAt));
                }
            }
        }

        return Task.FromResult<LedgerChainHead?>(null);
    }

    /// <summary>
    /// Appends a record; throws <see cref="InvalidOperationException"/> when the
    /// event id already exists for the tenant.
    /// </summary>
    public Task AppendAsync(LedgerEventRecord record, CancellationToken cancellationToken)
    {
        if (!_events.TryAdd((record.TenantId, record.EventId), Clone(record)))
        {
            throw new InvalidOperationException("Event already exists.");
        }

        var chain = _chains.GetOrAdd((record.TenantId, record.ChainId), _ => new SortedList<long, LedgerEventRecord>());
        lock (chain)
        {
            chain[record.SequenceNumber] = Clone(record);
        }

        return Task.CompletedTask;
    }

    // Deep-copies the mutable JSON body so the stored record shares no state
    // with what the caller handed in (or is handed back).
    private static LedgerEventRecord Clone(LedgerEventRecord record)
    {
        var clonedBody = (JsonObject)record.EventBody.DeepClone();
        return record with { EventBody = clonedBody };
    }
}
|
||||
@@ -0,0 +1,17 @@
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure.Merkle;
|
||||
|
||||
/// <summary>
/// Persistence for Merkle anchor rows. One row summarizes a batch ("window") of
/// ledger events whose leaf hashes were folded into a single Merkle root.
/// </summary>
public interface IMerkleAnchorRepository
{
    /// <summary>Inserts one anchor row.</summary>
    /// <param name="tenantId">Owning tenant.</param>
    /// <param name="anchorId">Unique id of the anchor row.</param>
    /// <param name="windowStart">Recorded-at timestamp of the first event in the batch.</param>
    /// <param name="windowEnd">Recorded-at timestamp of the last event in the batch.</param>
    /// <param name="sequenceStart">Lowest chain sequence number covered by the batch.</param>
    /// <param name="sequenceEnd">Highest chain sequence number covered by the batch.</param>
    /// <param name="rootHash">Merkle root over the batch's leaf hashes (lowercase hex).</param>
    /// <param name="leafCount">Number of leaves folded into the root.</param>
    /// <param name="anchoredAt">Wall-clock time the anchor was computed.</param>
    /// <param name="anchorReference">External anchoring reference; the worker currently passes null.</param>
    Task InsertAsync(
        string tenantId,
        Guid anchorId,
        DateTimeOffset windowStart,
        DateTimeOffset windowEnd,
        long sequenceStart,
        long sequenceEnd,
        string rootHash,
        int leafCount,
        DateTimeOffset anchoredAt,
        string? anchorReference,
        CancellationToken cancellationToken);
}
|
||||
@@ -0,0 +1,25 @@
|
||||
using System.Threading.Channels;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure.Merkle;
|
||||
|
||||
/// <summary>
/// Unbounded in-process queue of ledger events awaiting Merkle anchoring.
/// Exactly one background worker drains it; any number of producers may write.
/// </summary>
public sealed class LedgerAnchorQueue
{
    // Single consumer (the anchor worker), many producers; continuations are
    // forced onto the thread pool rather than running inline on the writer.
    private readonly Channel<LedgerEventRecord> _channel =
        Channel.CreateUnbounded<LedgerEventRecord>(new UnboundedChannelOptions
        {
            SingleReader = true,
            SingleWriter = false,
            AllowSynchronousContinuations = false
        });

    /// <summary>Publishes one event to the queue.</summary>
    public ValueTask EnqueueAsync(LedgerEventRecord record, CancellationToken cancellationToken)
    {
        return _channel.Writer.WriteAsync(record, cancellationToken);
    }

    /// <summary>Streams queued events until the token is cancelled.</summary>
    public IAsyncEnumerable<LedgerEventRecord> ReadAllAsync(CancellationToken cancellationToken)
    {
        return _channel.Reader.ReadAllAsync(cancellationToken);
    }
}
|
||||
@@ -0,0 +1,150 @@
|
||||
using System.Collections.Concurrent;
|
||||
using Microsoft.Extensions.Hosting;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Microsoft.Extensions.Options;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
using StellaOps.Findings.Ledger.Options;
|
||||
using TimeProvider = System.TimeProvider;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure.Merkle;
|
||||
|
||||
/// <summary>
/// Background worker that drains <see cref="LedgerAnchorQueue"/>, buffers events
/// per (tenant, chain), and when a batch is full (by count or elapsed window)
/// computes a Merkle root over the batch's leaf hashes and persists an anchor row.
/// Remaining partial batches are flushed on shutdown.
/// </summary>
/// <remarks>
/// The queue is created with SingleReader = true, so <see cref="HandleEventAsync"/>
/// runs serially; _buffers is concurrent mainly for the StopAsync flush path.
/// NOTE(review): a window only flushes when a *new* event arrives for that chain
/// (there is no timer), so a quiet chain's partial batch waits until shutdown —
/// confirm this is intended.
/// </remarks>
public sealed class LedgerMerkleAnchorWorker : BackgroundService
{
    private readonly LedgerAnchorQueue _queue;
    private readonly IMerkleAnchorRepository _repository;
    private readonly TimeProvider _timeProvider;
    private readonly LedgerServiceOptions.MerkleOptions _options;
    private readonly ILogger<LedgerMerkleAnchorWorker> _logger;
    // Pending (not yet flushed) batches keyed by tenant + chain.
    private readonly ConcurrentDictionary<(string TenantId, Guid ChainId), MerkleBatch> _buffers = new();

    public LedgerMerkleAnchorWorker(
        LedgerAnchorQueue queue,
        IMerkleAnchorRepository repository,
        IOptions<LedgerServiceOptions> options,
        TimeProvider timeProvider,
        ILogger<LedgerMerkleAnchorWorker> logger)
    {
        _queue = queue ?? throw new ArgumentNullException(nameof(queue));
        _repository = repository ?? throw new ArgumentNullException(nameof(repository));
        _timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
        _options = options?.Value.Merkle ?? throw new ArgumentNullException(nameof(options));
    }

    /// <summary>Main loop: consume queued events until cancellation.</summary>
    protected override async Task ExecuteAsync(CancellationToken stoppingToken)
    {
        await foreach (var record in _queue.ReadAllAsync(stoppingToken))
        {
            await HandleEventAsync(record, stoppingToken).ConfigureAwait(false);
        }
    }

    /// <summary>Flushes all partial batches before base shutdown.</summary>
    public override async Task StopAsync(CancellationToken cancellationToken)
    {
        await FlushAllAsync(cancellationToken).ConfigureAwait(false);
        await base.StopAsync(cancellationToken).ConfigureAwait(false);
    }

    // Adds the event to its chain's batch and flushes the batch if it is full.
    private async Task HandleEventAsync(LedgerEventRecord record, CancellationToken cancellationToken)
    {
        var key = (record.TenantId, record.ChainId);
        var batch = _buffers.GetOrAdd(key, _ => new MerkleBatch(record.RecordedAt));
        batch.Add(record);

        if (batch.ShouldFlush(_options))
        {
            // Remove before flushing so new events start a fresh batch.
            if (_buffers.TryRemove(key, out var readyBatch))
            {
                await FlushBatchAsync(record.TenantId, readyBatch, cancellationToken).ConfigureAwait(false);
            }
        }
    }

    // Drains every pending batch (used on shutdown).
    private async Task FlushAllAsync(CancellationToken cancellationToken)
    {
        foreach (var key in _buffers.Keys)
        {
            if (_buffers.TryRemove(key, out var batch) && batch.Events.Count > 0)
            {
                await FlushBatchAsync(key.TenantId, batch, cancellationToken).ConfigureAwait(false);
            }
        }
    }

    // Computes the Merkle root over the batch and persists one anchor row.
    // Persistence failures are logged and swallowed (the batch is lost) unless
    // the token was cancelled, in which case the exception propagates.
    private async Task FlushBatchAsync(string tenantId, MerkleBatch batch, CancellationToken cancellationToken)
    {
        if (batch.Events.Count == 0)
        {
            return;
        }

        try
        {
            // Deterministic leaf order: sequence number, then recorded time.
            var orderedEvents = batch.Events
                .OrderBy(e => e.SequenceNumber)
                .ThenBy(e => e.RecordedAt)
                .ToList();

            var rootHash = MerkleTreeBuilder.ComputeRoot(orderedEvents.Select(e => e.MerkleLeafHash).ToArray());
            var anchorId = Guid.NewGuid();
            var windowStart = orderedEvents.First().RecordedAt;
            var windowEnd = orderedEvents.Last().RecordedAt;
            var sequenceStart = orderedEvents.First().SequenceNumber;
            var sequenceEnd = orderedEvents.Last().SequenceNumber;
            var leafCount = orderedEvents.Count;
            // UtcDateTime has Kind=Utc, so the implicit DateTimeOffset conversion
            // at the call site keeps a zero offset.
            var anchoredAt = _timeProvider.GetUtcNow().UtcDateTime;

            await _repository.InsertAsync(
                tenantId,
                anchorId,
                windowStart,
                windowEnd,
                sequenceStart,
                sequenceEnd,
                rootHash,
                leafCount,
                anchoredAt,
                anchorReference: null,
                cancellationToken).ConfigureAwait(false);
        }
        catch (Exception ex) when (!cancellationToken.IsCancellationRequested)
        {
            _logger.LogError(ex, "Failed to persist Merkle anchor for tenant {TenantId}.", tenantId);
        }
    }

    // Mutable accumulator for one (tenant, chain) anchor window.
    private sealed class MerkleBatch
    {
        public MerkleBatch(DateTimeOffset windowStart)
        {
            WindowStart = windowStart;
        }

        // Events collected so far, in arrival order (sorted at flush time).
        public List<LedgerEventRecord> Events { get; } = new();

        // Recorded-at of the first event (re-set when the first event is added).
        public DateTimeOffset WindowStart { get; private set; }

        // Recorded-at of the most recently added event.
        public DateTimeOffset LastRecordedAt { get; private set; }

        public void Add(LedgerEventRecord record)
        {
            Events.Add(record);
            LastRecordedAt = record.RecordedAt;
            if (Events.Count == 1)
            {
                WindowStart = record.RecordedAt;
            }
        }

        // Flush when the batch reached the configured size, or when the span
        // between first and last event exceeds the configured window.
        public bool ShouldFlush(LedgerServiceOptions.MerkleOptions options)
        {
            if (Events.Count >= options.BatchSize)
            {
                return true;
            }

            var windowDuration = LastRecordedAt - WindowStart;
            return windowDuration >= options.WindowDuration;
        }
    }
}
|
||||
@@ -0,0 +1,48 @@
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure.Merkle;
|
||||
|
||||
/// <summary>
/// Computes a binary Merkle root over an ordered list of leaf hashes. Parent
/// nodes are SHA-256 over the UTF-8 bytes of the two children's hex strings
/// concatenated; an odd trailing node is paired with itself. A single leaf is
/// its own root.
/// </summary>
internal static class MerkleTreeBuilder
{
    /// <summary>Folds the leaves level by level until one root remains.</summary>
    /// <exception cref="ArgumentException">Empty input or a null leaf.</exception>
    public static string ComputeRoot(IReadOnlyList<string> leafHashes)
    {
        if (leafHashes.Count == 0)
        {
            throw new ArgumentException("At least one leaf hash is required to compute a Merkle root.", nameof(leafHashes));
        }

        // Copy and validate the leaves up front so later levels never see null.
        var level = new string[leafHashes.Count];
        for (var i = 0; i < level.Length; i++)
        {
            level[i] = leafHashes[i] ?? throw new ArgumentException("Leaf hash cannot be null.", nameof(leafHashes));
        }

        while (level.Length > 1)
        {
            level = Reduce(level);
        }

        return level[0];
    }

    // Collapses one level into its parents; an unpaired last node is hashed
    // against a copy of itself.
    private static string[] Reduce(string[] nodes)
    {
        var parents = new string[(nodes.Length + 1) / 2];

        for (var p = 0; p < parents.Length; p++)
        {
            var left = nodes[2 * p];
            var right = 2 * p + 1 < nodes.Length ? nodes[2 * p + 1] : left;
            parents[p] = Combine(left, right);
        }

        return parents;
    }

    // SHA-256 over the concatenated hex strings, rendered as lowercase hex.
    private static string Combine(string left, string right)
    {
        var digest = SHA256.HashData(Encoding.UTF8.GetBytes(string.Concat(left, right)));
        return Convert.ToHexString(digest).ToLowerInvariant();
    }
}
|
||||
@@ -0,0 +1,9 @@
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure.Merkle;
|
||||
|
||||
/// <summary>
/// No-op <see cref="IMerkleAnchorScheduler"/> for deployments where Merkle
/// anchoring is disabled.
/// </summary>
public sealed class NullMerkleAnchorScheduler : IMerkleAnchorScheduler
{
    /// <summary>Discards the event and completes immediately.</summary>
    public Task EnqueueAsync(LedgerEventRecord record, CancellationToken cancellationToken)
    {
        return Task.CompletedTask;
    }
}
|
||||
@@ -0,0 +1,16 @@
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure.Merkle;
|
||||
|
||||
/// <summary>
/// <see cref="IMerkleAnchorScheduler"/> that forwards events to the in-process
/// <see cref="LedgerAnchorQueue"/> drained by the anchor worker.
/// </summary>
public sealed class PostgresMerkleAnchorScheduler : IMerkleAnchorScheduler
{
    private readonly LedgerAnchorQueue _queue;

    public PostgresMerkleAnchorScheduler(LedgerAnchorQueue queue)
    {
        ArgumentNullException.ThrowIfNull(queue);
        _queue = queue;
    }

    /// <summary>Enqueues the record for batching into a future Merkle anchor.</summary>
    public Task EnqueueAsync(LedgerEventRecord record, CancellationToken cancellationToken)
    {
        var pending = _queue.EnqueueAsync(record, cancellationToken);
        return pending.AsTask();
    }
}
|
||||
@@ -0,0 +1,19 @@
|
||||
using System.Text.Json.Nodes;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure.Policy;
|
||||
|
||||
/// <summary>
/// Derives policy outcome (status, severity, labels, rationale) for a ledger
/// event, given the previously materialized projection if one exists.
/// </summary>
public interface IPolicyEvaluationService
{
    /// <summary>
    /// Evaluates <paramref name="record"/>; <paramref name="existingProjection"/>
    /// supplies prior state (e.g. labels to merge into) and may be null for a
    /// first-seen finding.
    /// </summary>
    Task<PolicyEvaluationResult> EvaluateAsync(
        LedgerEventRecord record,
        FindingProjection? existingProjection,
        CancellationToken cancellationToken);
}

/// <summary>
/// Outcome of one policy evaluation. Null <see cref="Status"/>/<see cref="Severity"/>/
/// <see cref="ExplainRef"/> mean no value was derived; <see cref="Labels"/> and
/// <see cref="Rationale"/> are always non-null (possibly empty) JSON nodes.
/// </summary>
public sealed record PolicyEvaluationResult(
    string? Status,
    decimal? Severity,
    JsonObject Labels,
    string? ExplainRef,
    JsonArray Rationale);
|
||||
@@ -0,0 +1,189 @@
|
||||
using System.Text.Json.Nodes;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure.Policy;
|
||||
|
||||
/// <summary>
/// In-process <see cref="IPolicyEvaluationService"/> that reads the policy
/// outcome directly from the event's canonical payload
/// (EventBody["event"]["payload"]). When the payload is missing, the previous
/// projection's values are carried forward unchanged.
/// </summary>
public sealed class InlinePolicyEvaluationService : IPolicyEvaluationService
{
    private readonly ILogger<InlinePolicyEvaluationService> _logger;

    public InlinePolicyEvaluationService(ILogger<InlinePolicyEvaluationService> logger)
    {
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <summary>
    /// Extracts status/severity/explainRef/labels/rationale from the event payload,
    /// merging label updates on top of the existing projection's labels.
    /// </summary>
    public Task<PolicyEvaluationResult> EvaluateAsync(
        LedgerEventRecord record,
        FindingProjection? existingProjection,
        CancellationToken cancellationToken)
    {
        if (record is null)
        {
            throw new ArgumentNullException(nameof(record));
        }

        // NOTE: AsObject() throws if "event" exists but is not an object —
        // assumes upstream validation guarantees the canonical shape; confirm.
        var eventObject = record.EventBody["event"]?.AsObject();
        if (eventObject is null)
        {
            _logger.LogWarning("Ledger event {EventId} missing canonical event payload; falling back to existing projection.", record.EventId);
            return Task.FromResult(CreateFallback(existingProjection));
        }

        var payload = eventObject["payload"] as JsonObject;
        var status = ExtractString(payload, "status");
        var severity = ExtractDecimal(payload, "severity");
        // Accept both camelCase and snake_case spellings of the explain reference.
        var explainRef = ExtractString(payload, "explainRef") ?? ExtractString(payload, "explain_ref");

        var labels = ExtractLabels(payload, existingProjection);
        var rationale = ExtractRationale(payload, explainRef);

        var result = new PolicyEvaluationResult(
            status,
            severity,
            labels,
            explainRef,
            rationale);

        return Task.FromResult(result);
    }

    // Carries the previous projection's values forward (deep-cloned so the
    // result never aliases stored state); empty defaults when no projection.
    private static PolicyEvaluationResult CreateFallback(FindingProjection? existingProjection)
    {
        var labels = existingProjection?.Labels is not null
            ? (JsonObject)existingProjection.Labels.DeepClone()
            : new JsonObject();

        var rationale = existingProjection?.PolicyRationale is not null
            ? CloneArray(existingProjection.PolicyRationale)
            : new JsonArray();

        return new PolicyEvaluationResult(
            existingProjection?.Status,
            existingProjection?.Severity,
            labels,
            existingProjection?.ExplainRef,
            rationale);
    }

    // Starts from a clone of the existing labels, then applies payload "labels"
    // (null value = remove) and payload "labelsRemove" (list of keys to drop).
    private static JsonObject ExtractLabels(JsonObject? payload, FindingProjection? existingProjection)
    {
        var labels = existingProjection?.Labels is not null
            ? (JsonObject)existingProjection.Labels.DeepClone()
            : new JsonObject();

        if (payload is null)
        {
            return labels;
        }

        if (payload.TryGetPropertyValue("labels", out var labelsNode) && labelsNode is JsonObject labelUpdates)
        {
            foreach (var property in labelUpdates)
            {
                // Fully qualified: only System.Text.Json.Nodes is imported at the
                // top of this file, so the bare name JsonValueKind would not resolve.
                if (property.Value is null || property.Value.GetValueKind() == System.Text.Json.JsonValueKind.Null)
                {
                    labels.Remove(property.Key);
                }
                else
                {
                    labels[property.Key] = property.Value.DeepClone();
                }
            }
        }

        if (payload.TryGetPropertyValue("labelsRemove", out var removeNode) && removeNode is JsonArray removeArray)
        {
            foreach (var item in removeArray)
            {
                if (item is JsonValue value && value.TryGetValue(out string? key) && !string.IsNullOrWhiteSpace(key))
                {
                    labels.Remove(key);
                }
            }
        }

        return labels;
    }

    // Prefers explicit "rationaleRefs" from the payload; otherwise falls back
    // to a single-element array containing the explain reference, if present.
    private static JsonArray ExtractRationale(JsonObject? payload, string? explainRef)
    {
        if (payload?.TryGetPropertyValue("rationaleRefs", out var rationaleNode) == true &&
            rationaleNode is JsonArray rationaleRefs)
        {
            return CloneArray(rationaleRefs);
        }

        var rationale = new JsonArray();
        if (!string.IsNullOrWhiteSpace(explainRef))
        {
            rationale.Add(explainRef);
        }

        return rationale;
    }

    // Reads a string property; whitespace-only strings are treated as absent.
    // Non-string JSON values are stringified rather than rejected.
    private static string? ExtractString(JsonObject? obj, string propertyName)
    {
        if (obj is null)
        {
            return null;
        }

        if (!obj.TryGetPropertyValue(propertyName, out var value) || value is null)
        {
            return null;
        }

        if (value is JsonValue jsonValue && jsonValue.TryGetValue(out string? text))
        {
            return string.IsNullOrWhiteSpace(text) ? null : text;
        }

        return value.ToString();
    }

    // Reads a numeric property as decimal, accepting decimal, double, or a
    // parseable string representation; null when absent or unparseable.
    private static decimal? ExtractDecimal(JsonObject? obj, string propertyName)
    {
        if (obj is null)
        {
            return null;
        }

        if (!obj.TryGetPropertyValue(propertyName, out var value) || value is null)
        {
            return null;
        }

        if (value is JsonValue jsonValue)
        {
            if (jsonValue.TryGetValue(out decimal decimalValue))
            {
                return decimalValue;
            }

            if (jsonValue.TryGetValue(out double doubleValue))
            {
                return Convert.ToDecimal(doubleValue);
            }
        }

        if (decimal.TryParse(value.ToString(), out var parsed))
        {
            return parsed;
        }

        return null;
    }

    // Element-wise deep clone; JsonNode instances cannot be re-parented, so a
    // fresh array is required.
    private static JsonArray CloneArray(JsonArray array)
    {
        var clone = new JsonArray();
        foreach (var item in array)
        {
            clone.Add(item?.DeepClone());
        }

        return clone;
    }
}
|
||||
@@ -0,0 +1,81 @@
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Microsoft.Extensions.Options;
|
||||
using Npgsql;
|
||||
using StellaOps.Findings.Ledger.Options;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure.Postgres;
|
||||
|
||||
/// <summary>
/// Owns the process-wide <see cref="NpgsqlDataSource"/> and hands out connections
/// whose session is configured for the ledger: UTC time zone and the tenant id
/// exposed via the <c>app.current_tenant</c> setting (presumably consumed by
/// row-level security policies — confirm against the schema).
/// </summary>
public sealed class LedgerDataSource : IAsyncDisposable
{
    private readonly NpgsqlDataSource _dataSource;
    private readonly LedgerServiceOptions.DatabaseOptions _options;
    private readonly ILogger<LedgerDataSource> _logger;

    public LedgerDataSource(
        IOptions<LedgerServiceOptions> options,
        ILogger<LedgerDataSource> logger)
    {
        ArgumentNullException.ThrowIfNull(options);
        _options = options.Value.Database;
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));

        var builder = new NpgsqlDataSourceBuilder(_options.ConnectionString);
        _dataSource = builder.Build();
    }

    /// <summary>Command timeout applied by callers to every NpgsqlCommand.</summary>
    public int CommandTimeoutSeconds => _options.CommandTimeoutSeconds;

    public async ValueTask DisposeAsync()
    {
        await _dataSource.DisposeAsync().ConfigureAwait(false);
    }

    /// <summary>
    /// Opens a pooled connection with the session configured for
    /// <paramref name="tenantId"/>. Callers pass an empty string for
    /// tenant-agnostic work (e.g. checkpoints), which skips the tenant setting.
    /// </summary>
    public Task<NpgsqlConnection> OpenConnectionAsync(string tenantId, CancellationToken cancellationToken)
        => OpenConnectionInternalAsync(tenantId, cancellationToken);

    private async Task<NpgsqlConnection> OpenConnectionInternalAsync(string tenantId, CancellationToken cancellationToken)
    {
        var connection = await _dataSource.OpenConnectionAsync(cancellationToken).ConfigureAwait(false);

        try
        {
            await ConfigureSessionAsync(connection, tenantId, cancellationToken).ConfigureAwait(false);
        }
        catch
        {
            // Never leak a half-configured connection back to the pool's caller.
            await connection.DisposeAsync().ConfigureAwait(false);
            throw;
        }

        return connection;
    }

    // Sets the session time zone to UTC and, when a tenant is supplied, stores
    // it in app.current_tenant via set_config (is_local = false, i.e. for the
    // whole session). Failures are logged and rethrown.
    private async Task ConfigureSessionAsync(NpgsqlConnection connection, string tenantId, CancellationToken cancellationToken)
    {
        try
        {
            await using (var command = new NpgsqlCommand("SET TIME ZONE 'UTC';", connection))
            {
                command.CommandTimeout = _options.CommandTimeoutSeconds;
                await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
            }

            if (!string.IsNullOrWhiteSpace(tenantId))
            {
                await using var tenantCommand = new NpgsqlCommand("SELECT set_config('app.current_tenant', @tenant, false);", connection);
                tenantCommand.CommandTimeout = _options.CommandTimeoutSeconds;
                tenantCommand.Parameters.AddWithValue("tenant", tenantId);
                await tenantCommand.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
            }
        }
        catch (Exception ex)
        {
            if (_logger.IsEnabled(LogLevel.Error))
            {
                _logger.LogError(ex, "Failed to configure PostgreSQL session for tenant {TenantId}.", tenantId);
            }

            throw;
        }
    }
}
|
||||
@@ -0,0 +1,318 @@
|
||||
using System.Text.Json.Nodes;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Npgsql;
|
||||
using NpgsqlTypes;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
using StellaOps.Findings.Ledger.Hashing;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure.Postgres;
|
||||
|
||||
public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepository
|
||||
{
|
||||
private const string GetProjectionSql = """
|
||||
SELECT status,
|
||||
severity,
|
||||
labels,
|
||||
current_event_id,
|
||||
explain_ref,
|
||||
policy_rationale,
|
||||
updated_at,
|
||||
cycle_hash
|
||||
FROM findings_projection
|
||||
WHERE tenant_id = @tenant_id
|
||||
AND finding_id = @finding_id
|
||||
AND policy_version = @policy_version
|
||||
""";
|
||||
|
||||
private const string UpsertProjectionSql = """
|
||||
INSERT INTO findings_projection (
|
||||
tenant_id,
|
||||
finding_id,
|
||||
policy_version,
|
||||
status,
|
||||
severity,
|
||||
labels,
|
||||
current_event_id,
|
||||
explain_ref,
|
||||
policy_rationale,
|
||||
updated_at,
|
||||
cycle_hash)
|
||||
VALUES (
|
||||
@tenant_id,
|
||||
@finding_id,
|
||||
@policy_version,
|
||||
@status,
|
||||
@severity,
|
||||
@labels,
|
||||
@current_event_id,
|
||||
@explain_ref,
|
||||
@policy_rationale,
|
||||
@updated_at,
|
||||
@cycle_hash)
|
||||
ON CONFLICT (tenant_id, finding_id, policy_version)
|
||||
DO UPDATE SET
|
||||
status = EXCLUDED.status,
|
||||
severity = EXCLUDED.severity,
|
||||
labels = EXCLUDED.labels,
|
||||
current_event_id = EXCLUDED.current_event_id,
|
||||
explain_ref = EXCLUDED.explain_ref,
|
||||
policy_rationale = EXCLUDED.policy_rationale,
|
||||
updated_at = EXCLUDED.updated_at,
|
||||
cycle_hash = EXCLUDED.cycle_hash;
|
||||
""";
|
||||
|
||||
private const string InsertHistorySql = """
|
||||
INSERT INTO finding_history (
|
||||
tenant_id,
|
||||
finding_id,
|
||||
policy_version,
|
||||
event_id,
|
||||
status,
|
||||
severity,
|
||||
actor_id,
|
||||
comment,
|
||||
occurred_at)
|
||||
VALUES (
|
||||
@tenant_id,
|
||||
@finding_id,
|
||||
@policy_version,
|
||||
@event_id,
|
||||
@status,
|
||||
@severity,
|
||||
@actor_id,
|
||||
@comment,
|
||||
@occurred_at)
|
||||
ON CONFLICT (tenant_id, finding_id, event_id)
|
||||
DO NOTHING;
|
||||
""";
|
||||
|
||||
private const string InsertActionSql = """
|
||||
INSERT INTO triage_actions (
|
||||
tenant_id,
|
||||
action_id,
|
||||
event_id,
|
||||
finding_id,
|
||||
action_type,
|
||||
payload,
|
||||
created_at,
|
||||
created_by)
|
||||
VALUES (
|
||||
@tenant_id,
|
||||
@action_id,
|
||||
@event_id,
|
||||
@finding_id,
|
||||
@action_type,
|
||||
@payload,
|
||||
@created_at,
|
||||
@created_by)
|
||||
ON CONFLICT (tenant_id, action_id)
|
||||
DO NOTHING;
|
||||
""";
|
||||
|
||||
private const string SelectCheckpointSql = """
|
||||
SELECT last_recorded_at,
|
||||
last_event_id,
|
||||
updated_at
|
||||
FROM ledger_projection_offsets
|
||||
WHERE worker_id = @worker_id
|
||||
""";
|
||||
|
||||
private const string UpsertCheckpointSql = """
|
||||
INSERT INTO ledger_projection_offsets (
|
||||
worker_id,
|
||||
last_recorded_at,
|
||||
last_event_id,
|
||||
updated_at)
|
||||
VALUES (
|
||||
@worker_id,
|
||||
@last_recorded_at,
|
||||
@last_event_id,
|
||||
@updated_at)
|
||||
ON CONFLICT (worker_id)
|
||||
DO UPDATE SET
|
||||
last_recorded_at = EXCLUDED.last_recorded_at,
|
||||
last_event_id = EXCLUDED.last_event_id,
|
||||
updated_at = EXCLUDED.updated_at;
|
||||
""";
|
||||
|
||||
private const string DefaultWorkerId = "default";
|
||||
|
||||
private readonly LedgerDataSource _dataSource;
|
||||
private readonly TimeProvider _timeProvider;
|
||||
private readonly ILogger<PostgresFindingProjectionRepository> _logger;
|
||||
|
||||
public PostgresFindingProjectionRepository(
|
||||
LedgerDataSource dataSource,
|
||||
TimeProvider timeProvider,
|
||||
ILogger<PostgresFindingProjectionRepository> logger)
|
||||
{
|
||||
_dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
|
||||
_timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
|
||||
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
|
||||
}
|
||||
|
||||
public async Task<FindingProjection?> GetAsync(string tenantId, string findingId, string policyVersion, CancellationToken cancellationToken)
|
||||
{
|
||||
await using var connection = await _dataSource.OpenConnectionAsync(tenantId, cancellationToken).ConfigureAwait(false);
|
||||
await using var command = new NpgsqlCommand(GetProjectionSql, connection);
|
||||
command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
|
||||
command.Parameters.AddWithValue("tenant_id", tenantId);
|
||||
command.Parameters.AddWithValue("finding_id", findingId);
|
||||
command.Parameters.AddWithValue("policy_version", policyVersion);
|
||||
|
||||
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
|
||||
if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
var status = reader.GetString(0);
|
||||
var severity = reader.IsDBNull(1) ? (decimal?)null : reader.GetDecimal(1);
|
||||
var labelsJson = reader.GetFieldValue<string>(2);
|
||||
var labels = JsonNode.Parse(labelsJson)?.AsObject() ?? new JsonObject();
|
||||
var currentEventId = reader.GetGuid(3);
|
||||
var explainRef = reader.IsDBNull(4) ? null : reader.GetString(4);
|
||||
var rationaleJson = reader.IsDBNull(5) ? string.Empty : reader.GetFieldValue<string>(5);
|
||||
JsonArray rationale;
|
||||
if (string.IsNullOrWhiteSpace(rationaleJson))
|
||||
{
|
||||
rationale = new JsonArray();
|
||||
}
|
||||
else
|
||||
{
|
||||
rationale = JsonNode.Parse(rationaleJson) as JsonArray ?? new JsonArray();
|
||||
}
|
||||
var updatedAt = reader.GetFieldValue<DateTimeOffset>(6);
|
||||
var cycleHash = reader.GetString(7);
|
||||
|
||||
return new FindingProjection(
|
||||
tenantId,
|
||||
findingId,
|
||||
policyVersion,
|
||||
status,
|
||||
severity,
|
||||
labels,
|
||||
currentEventId,
|
||||
explainRef,
|
||||
rationale,
|
||||
updatedAt,
|
||||
cycleHash);
|
||||
}
|
||||
|
||||
public async Task UpsertAsync(FindingProjection projection, CancellationToken cancellationToken)
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(projection);
|
||||
|
||||
await using var connection = await _dataSource.OpenConnectionAsync(projection.TenantId, cancellationToken).ConfigureAwait(false);
|
||||
await using var command = new NpgsqlCommand(UpsertProjectionSql, connection);
|
||||
command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
|
||||
|
||||
command.Parameters.AddWithValue("tenant_id", projection.TenantId);
|
||||
command.Parameters.AddWithValue("finding_id", projection.FindingId);
|
||||
command.Parameters.AddWithValue("policy_version", projection.PolicyVersion);
|
||||
command.Parameters.AddWithValue("status", projection.Status);
|
||||
command.Parameters.AddWithValue("severity", projection.Severity.HasValue ? projection.Severity.Value : (object)DBNull.Value);
|
||||
|
||||
var labelsCanonical = LedgerCanonicalJsonSerializer.Canonicalize(projection.Labels);
|
||||
var labelsJson = labelsCanonical.ToJsonString();
|
||||
command.Parameters.Add(new NpgsqlParameter<string>("labels", NpgsqlDbType.Jsonb) { TypedValue = labelsJson });
|
||||
|
||||
command.Parameters.AddWithValue("current_event_id", projection.CurrentEventId);
|
||||
command.Parameters.AddWithValue("explain_ref", projection.ExplainRef ?? (object)DBNull.Value);
|
||||
var rationaleCanonical = LedgerCanonicalJsonSerializer.Canonicalize(projection.PolicyRationale);
|
||||
var rationaleJson = rationaleCanonical.ToJsonString();
|
||||
command.Parameters.Add(new NpgsqlParameter<string>("policy_rationale", NpgsqlDbType.Jsonb) { TypedValue = rationaleJson });
|
||||
|
||||
command.Parameters.AddWithValue("updated_at", projection.UpdatedAt);
|
||||
command.Parameters.AddWithValue("cycle_hash", projection.CycleHash);
|
||||
|
||||
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
public async Task InsertHistoryAsync(FindingHistoryEntry entry, CancellationToken cancellationToken)
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(entry);
|
||||
|
||||
await using var connection = await _dataSource.OpenConnectionAsync(entry.TenantId, cancellationToken).ConfigureAwait(false);
|
||||
await using var command = new NpgsqlCommand(InsertHistorySql, connection);
|
||||
command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
|
||||
|
||||
command.Parameters.AddWithValue("tenant_id", entry.TenantId);
|
||||
command.Parameters.AddWithValue("finding_id", entry.FindingId);
|
||||
command.Parameters.AddWithValue("policy_version", entry.PolicyVersion);
|
||||
command.Parameters.AddWithValue("event_id", entry.EventId);
|
||||
command.Parameters.AddWithValue("status", entry.Status);
|
||||
command.Parameters.AddWithValue("severity", entry.Severity.HasValue ? entry.Severity.Value : (object)DBNull.Value);
|
||||
command.Parameters.AddWithValue("actor_id", entry.ActorId);
|
||||
command.Parameters.AddWithValue("comment", entry.Comment ?? (object)DBNull.Value);
|
||||
command.Parameters.AddWithValue("occurred_at", entry.OccurredAt);
|
||||
|
||||
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
public async Task InsertActionAsync(TriageActionEntry entry, CancellationToken cancellationToken)
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(entry);
|
||||
|
||||
await using var connection = await _dataSource.OpenConnectionAsync(entry.TenantId, cancellationToken).ConfigureAwait(false);
|
||||
await using var command = new NpgsqlCommand(InsertActionSql, connection);
|
||||
command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
|
||||
|
||||
command.Parameters.AddWithValue("tenant_id", entry.TenantId);
|
||||
command.Parameters.AddWithValue("action_id", entry.ActionId);
|
||||
command.Parameters.AddWithValue("event_id", entry.EventId);
|
||||
command.Parameters.AddWithValue("finding_id", entry.FindingId);
|
||||
command.Parameters.AddWithValue("action_type", entry.ActionType);
|
||||
|
||||
var payloadJson = entry.Payload.ToJsonString();
|
||||
command.Parameters.Add(new NpgsqlParameter<string>("payload", NpgsqlDbType.Jsonb) { TypedValue = payloadJson });
|
||||
|
||||
command.Parameters.AddWithValue("created_at", entry.CreatedAt);
|
||||
command.Parameters.AddWithValue("created_by", entry.CreatedBy);
|
||||
|
||||
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
    /// <summary>
    /// Loads the projection worker's replay checkpoint, or a fresh initial
    /// checkpoint when none has been persisted yet.
    /// </summary>
    public async Task<ProjectionCheckpoint> GetCheckpointAsync(CancellationToken cancellationToken)
    {
        // The checkpoint is keyed by worker id, not tenant; an empty tenant id
        // presumably yields an unscoped connection — confirm against
        // LedgerDataSource.OpenConnectionAsync.
        await using var connection = await _dataSource.OpenConnectionAsync(string.Empty, cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(SelectCheckpointSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("worker_id", DefaultWorkerId);

        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            // No checkpoint stored yet: start replay from the beginning.
            return ProjectionCheckpoint.Initial(_timeProvider);
        }

        // Column order matches SelectCheckpointSql:
        // 0 = last_recorded_at, 1 = last_event_id, 2 = updated_at.
        var lastRecordedAt = reader.GetFieldValue<DateTimeOffset>(0);
        var lastEventId = reader.GetGuid(1);
        var updatedAt = reader.GetFieldValue<DateTimeOffset>(2);
        return new ProjectionCheckpoint(lastRecordedAt, lastEventId, updatedAt);
    }
|
||||
|
||||
    /// <summary>
    /// Upserts the projection worker's replay checkpoint so replay resumes
    /// after the last successfully projected event.
    /// </summary>
    /// <param name="checkpoint">The checkpoint position to persist.</param>
    /// <param name="cancellationToken">Token used to cancel the database call.</param>
    public async Task SaveCheckpointAsync(ProjectionCheckpoint checkpoint, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(checkpoint);

        // Checkpoint storage is worker-scoped rather than tenant-scoped; an
        // empty tenant id presumably opens an unscoped connection — confirm
        // against LedgerDataSource.
        await using var connection = await _dataSource.OpenConnectionAsync(string.Empty, cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(UpsertCheckpointSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;

        command.Parameters.AddWithValue("worker_id", DefaultWorkerId);
        command.Parameters.AddWithValue("last_recorded_at", checkpoint.LastRecordedAt);
        command.Parameters.AddWithValue("last_event_id", checkpoint.LastEventId);
        command.Parameters.AddWithValue("updated_at", checkpoint.UpdatedAt);

        try
        {
            await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }
        catch (PostgresException ex)
        {
            // Log and rethrow: the caller decides whether a failed checkpoint
            // write aborts or retries the projection loop.
            _logger.LogError(ex, "Failed to persist projection checkpoint.");
            throw;
        }
    }
|
||||
}
|
||||
@@ -0,0 +1,221 @@
|
||||
using System.Text.Json.Nodes;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Npgsql;
|
||||
using NpgsqlTypes;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
using StellaOps.Findings.Ledger.Hashing;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure.Postgres;
|
||||
|
||||
/// <summary>
/// PostgreSQL-backed repository for the append-only <c>ledger_events</c> table.
/// Supports idempotency lookups by event id, chain-head reads for hash
/// chaining, and single-event appends with duplicate detection.
/// </summary>
public sealed class PostgresLedgerEventRepository : ILedgerEventRepository
{
    // Fetches one event by (tenant_id, event_id); used for idempotent-write checks.
    private const string SelectByEventIdSql = """
        SELECT chain_id,
               sequence_no,
               event_type,
               policy_version,
               finding_id,
               artifact_id,
               source_run_id,
               actor_id,
               actor_type,
               occurred_at,
               recorded_at,
               event_body,
               event_hash,
               previous_hash,
               merkle_leaf_hash
        FROM ledger_events
        WHERE tenant_id = @tenant_id
          AND event_id = @event_id
        """;

    // Returns the newest event of a chain (highest sequence_no) so the next
    // append can validate its sequence number and previous hash.
    private const string SelectChainHeadSql = """
        SELECT sequence_no,
               event_hash,
               recorded_at
        FROM ledger_events
        WHERE tenant_id = @tenant_id
          AND chain_id = @chain_id
        ORDER BY sequence_no DESC
        LIMIT 1
        """;

    // Plain insert; uniqueness is enforced by the table's constraints and
    // surfaced as LedgerDuplicateEventException in AppendAsync.
    private const string InsertEventSql = """
        INSERT INTO ledger_events (
            tenant_id,
            chain_id,
            sequence_no,
            event_id,
            event_type,
            policy_version,
            finding_id,
            artifact_id,
            source_run_id,
            actor_id,
            actor_type,
            occurred_at,
            recorded_at,
            event_body,
            event_hash,
            previous_hash,
            merkle_leaf_hash)
        VALUES (
            @tenant_id,
            @chain_id,
            @sequence_no,
            @event_id,
            @event_type,
            @policy_version,
            @finding_id,
            @artifact_id,
            @source_run_id,
            @actor_id,
            @actor_type,
            @occurred_at,
            @recorded_at,
            @event_body,
            @event_hash,
            @previous_hash,
            @merkle_leaf_hash)
        """;

    private readonly LedgerDataSource _dataSource;
    private readonly ILogger<PostgresLedgerEventRepository> _logger;

    public PostgresLedgerEventRepository(
        LedgerDataSource dataSource,
        ILogger<PostgresLedgerEventRepository> logger)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <summary>
    /// Returns the stored event with the given id for the tenant, or
    /// <c>null</c> when no such event exists.
    /// </summary>
    public async Task<LedgerEventRecord?> GetByEventIdAsync(string tenantId, Guid eventId, CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(SelectByEventIdSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        command.Parameters.AddWithValue("event_id", eventId);

        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            return null;
        }

        return MapLedgerEventRecord(tenantId, eventId, reader);
    }

    /// <summary>
    /// Returns the head (latest sequence number, hash, and recorded-at time)
    /// of the given chain, or <c>null</c> when the chain has no events.
    /// </summary>
    public async Task<LedgerChainHead?> GetChainHeadAsync(string tenantId, Guid chainId, CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(SelectChainHeadSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("tenant_id", tenantId);
        command.Parameters.AddWithValue("chain_id", chainId);

        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            return null;
        }

        var sequenceNumber = reader.GetInt64(0);
        var eventHash = reader.GetString(1);
        var recordedAt = reader.GetFieldValue<DateTimeOffset>(2);
        return new LedgerChainHead(sequenceNumber, eventHash, recordedAt);
    }

    /// <summary>
    /// Appends one event row. A unique-constraint violation is translated into
    /// <see cref="LedgerDuplicateEventException"/> so callers can resolve the
    /// race idempotently.
    /// </summary>
    public async Task AppendAsync(LedgerEventRecord record, CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(record.TenantId, cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(InsertEventSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;

        command.Parameters.AddWithValue("tenant_id", record.TenantId);
        command.Parameters.AddWithValue("chain_id", record.ChainId);
        command.Parameters.AddWithValue("sequence_no", record.SequenceNumber);
        command.Parameters.AddWithValue("event_id", record.EventId);
        command.Parameters.AddWithValue("event_type", record.EventType);
        command.Parameters.AddWithValue("policy_version", record.PolicyVersion);
        command.Parameters.AddWithValue("finding_id", record.FindingId);
        command.Parameters.AddWithValue("artifact_id", record.ArtifactId);

        // source_run_id is optional; bind DBNull when absent.
        if (record.SourceRunId.HasValue)
        {
            command.Parameters.AddWithValue("source_run_id", record.SourceRunId.Value);
        }
        else
        {
            command.Parameters.AddWithValue("source_run_id", DBNull.Value);
        }

        command.Parameters.AddWithValue("actor_id", record.ActorId);
        command.Parameters.AddWithValue("actor_type", record.ActorType);
        command.Parameters.AddWithValue("occurred_at", record.OccurredAt);
        command.Parameters.AddWithValue("recorded_at", record.RecordedAt);

        // The event body is bound explicitly as jsonb.
        var eventBody = record.EventBody.ToJsonString();
        command.Parameters.Add(new NpgsqlParameter<string>("event_body", NpgsqlDbType.Jsonb) { TypedValue = eventBody });
        command.Parameters.AddWithValue("event_hash", record.EventHash);
        command.Parameters.AddWithValue("previous_hash", record.PreviousHash);
        command.Parameters.AddWithValue("merkle_leaf_hash", record.MerkleLeafHash);

        try
        {
            await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }
        catch (PostgresException ex) when (string.Equals(ex.SqlState, PostgresErrorCodes.UniqueViolation, StringComparison.Ordinal))
        {
            // Concurrent append of the same event id (or other uniqueness
            // conflict): surface as a domain-specific exception.
            throw new LedgerDuplicateEventException(record.EventId, ex);
        }
    }

    /// <summary>
    /// Maps the current row of <paramref name="reader"/> (in
    /// <see cref="SelectByEventIdSql"/> column order) into a
    /// <see cref="LedgerEventRecord"/>, recomputing the canonical JSON from the
    /// stored body.
    /// </summary>
    internal static LedgerEventRecord MapLedgerEventRecord(string tenantId, Guid eventId, NpgsqlDataReader reader)
    {
        var chainId = reader.GetFieldValue<Guid>(0);
        var sequenceNumber = reader.GetInt64(1);
        var eventType = reader.GetString(2);
        var policyVersion = reader.GetString(3);
        var findingId = reader.GetString(4);
        var artifactId = reader.GetString(5);
        var sourceRunId = reader.IsDBNull(6) ? (Guid?)null : reader.GetGuid(6);
        var actorId = reader.GetString(7);
        var actorType = reader.GetString(8);
        var occurredAt = reader.GetFieldValue<DateTimeOffset>(9);
        var recordedAt = reader.GetFieldValue<DateTimeOffset>(10);

        var eventBodyJson = reader.GetFieldValue<string>(11);
        var eventBody = JsonNode.Parse(eventBodyJson)?.AsObject()
            ?? throw new InvalidOperationException("Failed to parse ledger event body.");

        var eventHash = reader.GetString(12);
        var previousHash = reader.GetString(13);
        var merkleLeafHash = reader.GetString(14);

        // Canonical JSON is recomputed from the stored body so idempotency
        // comparisons use a stable serialization.
        var canonicalEnvelope = LedgerCanonicalJsonSerializer.Canonicalize(eventBody);
        var canonicalJson = LedgerCanonicalJsonSerializer.Serialize(canonicalEnvelope);

        // NOTE(review): this mapper stores the RAW parsed body in the record,
        // while PostgresLedgerEventStream.MapLedgerEvent stores the
        // canonicalized envelope — confirm which form downstream consumers of
        // EventBody expect.
        return new LedgerEventRecord(
            tenantId,
            chainId,
            sequenceNumber,
            eventId,
            eventType,
            policyVersion,
            findingId,
            artifactId,
            sourceRunId,
            actorId,
            actorType,
            occurredAt,
            recordedAt,
            eventBody,
            eventHash,
            previousHash,
            merkleLeafHash,
            canonicalJson);
    }
}
|
||||
@@ -0,0 +1,130 @@
|
||||
using System.Text.Json.Nodes;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Npgsql;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
using StellaOps.Findings.Ledger.Hashing;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure.Postgres;
|
||||
|
||||
/// <summary>
/// Reads ledger events in stable (recorded_at, event_id) order for projection
/// replay. Paging resumes from a <see cref="ProjectionCheckpoint"/> so each
/// event is processed exactly once per checkpoint advance.
/// </summary>
public sealed class PostgresLedgerEventStream : ILedgerEventStream
{
    // Keyset pagination: events strictly after the checkpoint position,
    // ordered by (recorded_at, event_id) — the same tuple stored in the
    // checkpoint. Note: no tenant filter, so replay spans all tenants.
    private const string ReadEventsSql = """
        SELECT tenant_id,
               chain_id,
               sequence_no,
               event_id,
               event_type,
               policy_version,
               finding_id,
               artifact_id,
               source_run_id,
               actor_id,
               actor_type,
               occurred_at,
               recorded_at,
               event_body,
               event_hash,
               previous_hash,
               merkle_leaf_hash
        FROM ledger_events
        WHERE recorded_at > @last_recorded_at
           OR (recorded_at = @last_recorded_at AND event_id > @last_event_id)
        ORDER BY recorded_at, event_id
        LIMIT @page_size
        """;

    private readonly LedgerDataSource _dataSource;
    private readonly ILogger<PostgresLedgerEventStream> _logger;

    public PostgresLedgerEventStream(
        LedgerDataSource dataSource,
        ILogger<PostgresLedgerEventStream> logger)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <summary>
    /// Reads up to <paramref name="batchSize"/> events that occur after the
    /// given checkpoint position.
    /// </summary>
    /// <exception cref="ArgumentOutOfRangeException">When <paramref name="batchSize"/> is not positive.</exception>
    public async Task<IReadOnlyList<LedgerEventRecord>> ReadNextBatchAsync(
        ProjectionCheckpoint checkpoint,
        int batchSize,
        CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(checkpoint);
        if (batchSize <= 0)
        {
            throw new ArgumentOutOfRangeException(nameof(batchSize), "Batch size must be greater than zero.");
        }

        var records = new List<LedgerEventRecord>(batchSize);

        // Replay is cross-tenant; an empty tenant id presumably yields an
        // unscoped connection — confirm against LedgerDataSource.
        await using var connection = await _dataSource.OpenConnectionAsync(string.Empty, cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(ReadEventsSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
        command.Parameters.AddWithValue("last_recorded_at", checkpoint.LastRecordedAt);
        command.Parameters.AddWithValue("last_event_id", checkpoint.LastEventId);
        command.Parameters.AddWithValue("page_size", batchSize);

        try
        {
            await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
            while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
            {
                records.Add(MapLedgerEvent(reader));
            }
        }
        catch (PostgresException ex)
        {
            _logger.LogError(ex, "Failed to read ledger event batch for projection replay.");
            throw;
        }

        return records;
    }

    /// <summary>
    /// Maps the current row of <paramref name="reader"/> (in
    /// <see cref="ReadEventsSql"/> column order) into a
    /// <see cref="LedgerEventRecord"/>.
    /// </summary>
    private static LedgerEventRecord MapLedgerEvent(NpgsqlDataReader reader)
    {
        var tenantId = reader.GetString(0);
        var chainId = reader.GetFieldValue<Guid>(1);
        var sequenceNumber = reader.GetInt64(2);
        var eventId = reader.GetGuid(3);
        var eventType = reader.GetString(4);
        var policyVersion = reader.GetString(5);
        var findingId = reader.GetString(6);
        var artifactId = reader.GetString(7);
        var sourceRunId = reader.IsDBNull(8) ? (Guid?)null : reader.GetGuid(8);
        var actorId = reader.GetString(9);
        var actorType = reader.GetString(10);
        var occurredAt = reader.GetFieldValue<DateTimeOffset>(11);
        var recordedAt = reader.GetFieldValue<DateTimeOffset>(12);

        var eventBodyJson = reader.GetFieldValue<string>(13);
        var eventBodyParsed = JsonNode.Parse(eventBodyJson)?.AsObject()
            ?? throw new InvalidOperationException("Failed to parse ledger event payload.");
        var canonicalEnvelope = LedgerCanonicalJsonSerializer.Canonicalize(eventBodyParsed);
        var canonicalJson = LedgerCanonicalJsonSerializer.Serialize(canonicalEnvelope);

        var eventHash = reader.GetString(14);
        var previousHash = reader.GetString(15);
        var merkleLeafHash = reader.GetString(16);

        // NOTE(review): this mapper stores the CANONICALIZED envelope as the
        // record's event body, whereas
        // PostgresLedgerEventRepository.MapLedgerEventRecord stores the raw
        // parsed body — confirm the intended form is the same in both paths.
        return new LedgerEventRecord(
            tenantId,
            chainId,
            sequenceNumber,
            eventId,
            eventType,
            policyVersion,
            findingId,
            artifactId,
            sourceRunId,
            actorId,
            actorType,
            occurredAt,
            recordedAt,
            canonicalEnvelope,
            eventHash,
            previousHash,
            merkleLeafHash,
            canonicalJson);
    }
}
|
||||
@@ -0,0 +1,83 @@
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Npgsql;
|
||||
using StellaOps.Findings.Ledger.Infrastructure.Merkle;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure.Postgres;
|
||||
|
||||
/// <summary>
/// Writes Merkle anchor rows (one per anchored window of ledger events) to the
/// <c>ledger_merkle_roots</c> table.
/// </summary>
public sealed class PostgresMerkleAnchorRepository : IMerkleAnchorRepository
{
    private const string InsertAnchorSql = """
        INSERT INTO ledger_merkle_roots (
            tenant_id,
            anchor_id,
            window_start,
            window_end,
            sequence_start,
            sequence_end,
            root_hash,
            leaf_count,
            anchored_at,
            anchor_reference)
        VALUES (
            @tenant_id,
            @anchor_id,
            @window_start,
            @window_end,
            @sequence_start,
            @sequence_end,
            @root_hash,
            @leaf_count,
            @anchored_at,
            @anchor_reference)
        """;

    private readonly LedgerDataSource _dataSource;
    private readonly ILogger<PostgresMerkleAnchorRepository> _logger;

    public PostgresMerkleAnchorRepository(
        LedgerDataSource dataSource,
        ILogger<PostgresMerkleAnchorRepository> logger)
    {
        ArgumentNullException.ThrowIfNull(dataSource);
        ArgumentNullException.ThrowIfNull(logger);
        _dataSource = dataSource;
        _logger = logger;
    }

    /// <summary>
    /// Inserts one anchor row describing the Merkle root computed over a
    /// window of ledger events for the given tenant.
    /// </summary>
    /// <param name="anchorReference">Optional external anchoring reference; stored as NULL when absent.</param>
    public async Task InsertAsync(
        string tenantId,
        Guid anchorId,
        DateTimeOffset windowStart,
        DateTimeOffset windowEnd,
        long sequenceStart,
        long sequenceEnd,
        string rootHash,
        int leafCount,
        DateTimeOffset anchoredAt,
        string? anchorReference,
        CancellationToken cancellationToken)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, cancellationToken).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(InsertAnchorSql, connection);
        command.CommandTimeout = _dataSource.CommandTimeoutSeconds;

        // Bind every placeholder named in InsertAnchorSql.
        var bindings = new (string Name, object Value)[]
        {
            ("tenant_id", tenantId),
            ("anchor_id", anchorId),
            ("window_start", windowStart),
            ("window_end", windowEnd),
            ("sequence_start", sequenceStart),
            ("sequence_end", sequenceEnd),
            ("root_hash", rootHash),
            ("leaf_count", leafCount),
            ("anchored_at", anchoredAt),
            ("anchor_reference", anchorReference ?? (object)DBNull.Value),
        };

        foreach (var (name, value) in bindings)
        {
            command.Parameters.AddWithValue(name, value);
        }

        try
        {
            await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }
        catch (PostgresException ex)
        {
            // Log with tenant context, then let the caller handle the failure.
            _logger.LogError(ex, "Failed to insert Merkle root for tenant {TenantId}.", tenantId);
            throw;
        }
    }
}
|
||||
@@ -0,0 +1,129 @@
|
||||
using Microsoft.Extensions.Hosting;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Microsoft.Extensions.Options;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
using StellaOps.Findings.Ledger.Infrastructure;
|
||||
using StellaOps.Findings.Ledger.Infrastructure.Policy;
|
||||
using StellaOps.Findings.Ledger.Options;
|
||||
using StellaOps.Findings.Ledger.Services;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Infrastructure.Projection;
|
||||
|
||||
/// <summary>
/// Background worker that replays ledger events into the findings projection.
/// It resumes from a persisted checkpoint, processes events in batches, and
/// advances the checkpoint only after each event has been projected.
/// </summary>
public sealed class LedgerProjectionWorker : BackgroundService
{
    private readonly ILedgerEventStream _eventStream;
    private readonly IFindingProjectionRepository _repository;
    private readonly IPolicyEvaluationService _policyEvaluationService;
    private readonly TimeProvider _timeProvider;
    private readonly LedgerServiceOptions.ProjectionOptions _options;
    private readonly ILogger<LedgerProjectionWorker> _logger;

    public LedgerProjectionWorker(
        ILedgerEventStream eventStream,
        IFindingProjectionRepository repository,
        IPolicyEvaluationService policyEvaluationService,
        IOptions<LedgerServiceOptions> options,
        TimeProvider timeProvider,
        ILogger<LedgerProjectionWorker> logger)
    {
        _eventStream = eventStream ?? throw new ArgumentNullException(nameof(eventStream));
        _repository = repository ?? throw new ArgumentNullException(nameof(repository));
        _policyEvaluationService = policyEvaluationService ?? throw new ArgumentNullException(nameof(policyEvaluationService));
        _timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
        _options = (options ?? throw new ArgumentNullException(nameof(options))).Value.Projection;
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <summary>
    /// Main loop: load the checkpoint once, then repeatedly read a batch of
    /// events after it and project them one at a time, saving the checkpoint
    /// after each successful event.
    /// </summary>
    protected override async Task ExecuteAsync(CancellationToken stoppingToken)
    {
        ProjectionCheckpoint checkpoint;
        try
        {
            checkpoint = await _repository.GetCheckpointAsync(stoppingToken).ConfigureAwait(false);
        }
        catch (Exception ex) when (!stoppingToken.IsCancellationRequested)
        {
            // Without a checkpoint we cannot safely replay; fail the worker.
            _logger.LogError(ex, "Failed to load ledger projection checkpoint.");
            throw;
        }

        while (!stoppingToken.IsCancellationRequested)
        {
            IReadOnlyList<LedgerEventRecord> batch;

            try
            {
                batch = await _eventStream.ReadNextBatchAsync(checkpoint, _options.BatchSize, stoppingToken).ConfigureAwait(false);
            }
            catch (OperationCanceledException) when (stoppingToken.IsCancellationRequested)
            {
                break;
            }
            catch (Exception ex)
            {
                // Read failure: back off and retry the same checkpoint.
                _logger.LogError(ex, "Failed to read ledger events for projection replay.");
                await DelayAsync(stoppingToken).ConfigureAwait(false);
                continue;
            }

            if (batch.Count == 0)
            {
                // Caught up: idle before polling again.
                await DelayAsync(stoppingToken).ConfigureAwait(false);
                continue;
            }

            foreach (var record in batch)
            {
                try
                {
                    await ApplyAsync(record, stoppingToken).ConfigureAwait(false);

                    // Advance the checkpoint only after the event was projected,
                    // so a crash replays (rather than skips) the failing event.
                    checkpoint = checkpoint with
                    {
                        LastRecordedAt = record.RecordedAt,
                        LastEventId = record.EventId,
                        UpdatedAt = _timeProvider.GetUtcNow()
                    };

                    await _repository.SaveCheckpointAsync(checkpoint, stoppingToken).ConfigureAwait(false);
                }
                catch (OperationCanceledException) when (stoppingToken.IsCancellationRequested)
                {
                    return;
                }
                catch (Exception ex)
                {
                    // Abandon the rest of the batch; the unadvanced checkpoint
                    // makes the next read retry from the failed event.
                    _logger.LogError(ex, "Failed to project ledger event {EventId} for tenant {TenantId}.", record.EventId, record.TenantId);
                    await DelayAsync(stoppingToken).ConfigureAwait(false);
                    break;
                }
            }
        }
    }

    /// <summary>
    /// Projects a single ledger event: evaluates policy, reduces into the
    /// projection, and persists the projection, its history entry, and any
    /// resulting triage action.
    /// </summary>
    private async Task ApplyAsync(LedgerEventRecord record, CancellationToken cancellationToken)
    {
        var current = await _repository.GetAsync(record.TenantId, record.FindingId, record.PolicyVersion, cancellationToken).ConfigureAwait(false);
        var evaluation = await _policyEvaluationService.EvaluateAsync(record, current, cancellationToken).ConfigureAwait(false);
        var result = LedgerProjectionReducer.Reduce(record, current, evaluation);

        await _repository.UpsertAsync(result.Projection, cancellationToken).ConfigureAwait(false);
        await _repository.InsertHistoryAsync(result.History, cancellationToken).ConfigureAwait(false);

        if (result.Action is not null)
        {
            await _repository.InsertActionAsync(result.Action, cancellationToken).ConfigureAwait(false);
        }
    }

    /// <summary>
    /// Waits the configured idle delay, swallowing the cancellation that
    /// occurs on shutdown so the loop can exit cleanly.
    /// </summary>
    private async Task DelayAsync(CancellationToken cancellationToken)
    {
        try
        {
            await Task.Delay(_options.IdleDelay, cancellationToken).ConfigureAwait(false);
        }
        catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested)
        {
        }
    }
}
|
||||
@@ -0,0 +1,93 @@
|
||||
namespace StellaOps.Findings.Ledger.Options;
|
||||
|
||||
/// <summary>
/// Root configuration for the Findings Ledger service, bound from the
/// <c>findings:ledger</c> section. Call <see cref="Validate"/> at startup to
/// fail fast on invalid settings.
/// </summary>
public sealed class LedgerServiceOptions
{
    /// <summary>Configuration section this options class binds to.</summary>
    public const string SectionName = "findings:ledger";

    /// <summary>Database connectivity settings.</summary>
    public DatabaseOptions Database { get; init; } = new();

    /// <summary>Authority (authentication) settings.</summary>
    public AuthorityOptions Authority { get; init; } = new();

    /// <summary>Merkle anchoring settings.</summary>
    public MerkleOptions Merkle { get; init; } = new();

    /// <summary>Projection replay worker settings.</summary>
    public ProjectionOptions Projection { get; init; } = new();

    /// <summary>
    /// Throws <see cref="InvalidOperationException"/> when any required
    /// setting is missing or out of range.
    /// </summary>
    public void Validate()
    {
        static void FailIf(bool invalid, string message)
        {
            if (invalid)
            {
                throw new InvalidOperationException(message);
            }
        }

        FailIf(string.IsNullOrWhiteSpace(Database.ConnectionString), "Findings Ledger database connection string is required.");
        FailIf(Database.CommandTimeoutSeconds <= 0, "Database command timeout must be greater than zero seconds.");
        FailIf(Merkle.BatchSize <= 0, "Merkle anchor batch size must be greater than zero.");
        FailIf(Merkle.WindowDuration <= TimeSpan.Zero, "Merkle anchor window duration must be greater than zero.");
        FailIf(Projection.BatchSize <= 0, "Projection batch size must be greater than zero.");
        FailIf(Projection.IdleDelay <= TimeSpan.Zero, "Projection idle delay must be greater than zero.");
    }

    /// <summary>PostgreSQL connection settings.</summary>
    public sealed class DatabaseOptions
    {
        /// <summary>Npgsql connection string; required.</summary>
        public string ConnectionString { get; set; } = string.Empty;

        /// <summary>Per-command timeout in seconds; must be positive.</summary>
        public int CommandTimeoutSeconds { get; set; } = 30;
    }

    /// <summary>Settings for the token authority used to authenticate callers.</summary>
    public sealed class AuthorityOptions
    {
        /// <summary>Expected token issuer.</summary>
        public string Issuer { get; set; } = string.Empty;

        /// <summary>Whether discovery metadata must be served over HTTPS.</summary>
        public bool RequireHttpsMetadata { get; set; } = true;

        /// <summary>Optional explicit discovery-document URL.</summary>
        public string? MetadataAddress { get; set; }

        /// <summary>Accepted token audiences.</summary>
        public IList<string> Audiences { get; } = new List<string>();

        /// <summary>Scopes a caller must present.</summary>
        public IList<string> RequiredScopes { get; } = new List<string>();

        /// <summary>Networks allowed to bypass authentication.</summary>
        public IList<string> BypassNetworks { get; } = new List<string>();

        /// <summary>Timeout for back-channel calls to the authority.</summary>
        public TimeSpan BackchannelTimeout { get; set; } = TimeSpan.FromSeconds(10);

        /// <summary>Allowed clock skew when validating token lifetimes.</summary>
        public TimeSpan TokenClockSkew { get; set; } = TimeSpan.FromMinutes(5);
    }

    /// <summary>Settings for Merkle anchor batching.</summary>
    public sealed class MerkleOptions
    {
        /// <summary>Maximum leaves per anchor batch; must be positive.</summary>
        public int BatchSize { get; set; } = 1000;

        /// <summary>Duration of each anchoring window; must be positive.</summary>
        public TimeSpan WindowDuration { get; set; } = TimeSpan.FromMinutes(15);
    }

    /// <summary>Settings for the projection replay worker.</summary>
    public sealed class ProjectionOptions
    {
        /// <summary>Events read per replay batch; must be positive.</summary>
        public int BatchSize { get; set; } = 200;

        /// <summary>Delay between polls when the stream is caught up; must be positive.</summary>
        public TimeSpan IdleDelay { get; set; } = TimeSpan.FromSeconds(5);
    }
}
|
||||
@@ -0,0 +1,210 @@
|
||||
using System.Text.Json.Nodes;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using StellaOps.Findings.Ledger.Domain;
|
||||
using StellaOps.Findings.Ledger.Hashing;
|
||||
using StellaOps.Findings.Ledger.Infrastructure;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Services;
|
||||
|
||||
/// <summary>
/// Appends ledger events with validation, idempotency, and hash-chain checks.
/// </summary>
public interface ILedgerEventWriteService
{
    /// <summary>
    /// Validates and appends <paramref name="draft"/> to its chain, returning
    /// a result that indicates success, idempotent replay, validation failure,
    /// or a conflict.
    /// </summary>
    Task<LedgerWriteResult> AppendAsync(LedgerEventDraft draft, CancellationToken cancellationToken);
}
|
||||
|
||||
/// <summary>
/// Default implementation of <see cref="ILedgerEventWriteService"/>: validates
/// the draft, enforces idempotency by event id, checks chain sequencing and
/// previous-hash linkage, persists the event, and schedules Merkle anchoring.
/// </summary>
public sealed class LedgerEventWriteService : ILedgerEventWriteService
{
    private readonly ILedgerEventRepository _repository;
    private readonly IMerkleAnchorScheduler _merkleAnchorScheduler;
    private readonly ILogger<LedgerEventWriteService> _logger;

    public LedgerEventWriteService(
        ILedgerEventRepository repository,
        IMerkleAnchorScheduler merkleAnchorScheduler,
        ILogger<LedgerEventWriteService> logger)
    {
        _repository = repository ?? throw new ArgumentNullException(nameof(repository));
        _merkleAnchorScheduler = merkleAnchorScheduler ?? throw new ArgumentNullException(nameof(merkleAnchorScheduler));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <summary>
    /// Appends <paramref name="draft"/> to its chain. Returns a validation
    /// failure, an idempotent result when the identical event already exists,
    /// a conflict when the event id / sequence / previous hash disagree with
    /// stored state, or success with the persisted record.
    /// </summary>
    public async Task<LedgerWriteResult> AppendAsync(LedgerEventDraft draft, CancellationToken cancellationToken)
    {
        var validationErrors = ValidateDraft(draft);
        if (validationErrors.Count > 0)
        {
            return LedgerWriteResult.ValidationFailed([.. validationErrors]);
        }

        // Canonicalize once up front so the idempotency comparison and the
        // hash computation use the exact same serialized form. (Fix: the
        // idempotency check previously serialized the RAW envelope while the
        // stored CanonicalJson and the duplicate-race path used the
        // canonicalized one, so a semantically identical re-submission whose
        // envelope was not already canonical was misreported as a conflict.)
        var canonicalEnvelope = LedgerCanonicalJsonSerializer.Canonicalize(draft.CanonicalEnvelope);
        var canonicalJson = LedgerCanonicalJsonSerializer.Serialize(canonicalEnvelope);

        // Idempotency: an existing event with the same id and identical
        // canonical payload is a replay, not an error.
        var existing = await _repository.GetByEventIdAsync(draft.TenantId, draft.EventId, cancellationToken).ConfigureAwait(false);
        if (existing is not null)
        {
            if (!string.Equals(existing.CanonicalJson, canonicalJson, StringComparison.Ordinal))
            {
                return LedgerWriteResult.Conflict(
                    "event_id_conflict",
                    $"Event '{draft.EventId}' already exists with a different payload.");
            }

            return LedgerWriteResult.Idempotent(existing);
        }

        var chainHead = await _repository.GetChainHeadAsync(draft.TenantId, draft.ChainId, cancellationToken).ConfigureAwait(false);

        // Sequence must be exactly head + 1 (or 1 for a fresh chain).
        var expectedSequence = chainHead is null ? 1 : chainHead.SequenceNumber + 1;
        if (draft.SequenceNumber != expectedSequence)
        {
            return LedgerWriteResult.Conflict(
                "sequence_mismatch",
                $"Sequence number '{draft.SequenceNumber}' does not match expected '{expectedSequence}'.");
        }

        // Optional client-supplied previous hash must match the chain head
        // (hex strings, so case-insensitive comparison).
        var previousHash = chainHead?.EventHash ?? LedgerEventConstants.EmptyHash;
        if (draft.ProvidedPreviousHash is not null && !string.Equals(draft.ProvidedPreviousHash, previousHash, StringComparison.OrdinalIgnoreCase))
        {
            return LedgerWriteResult.Conflict(
                "previous_hash_mismatch",
                $"Provided previous hash '{draft.ProvidedPreviousHash}' does not match chain head hash '{previousHash}'.");
        }

        var hashResult = LedgerHashing.ComputeHashes(canonicalEnvelope, draft.SequenceNumber);

        // Clone so later mutations of the draft envelope cannot alter the
        // persisted record.
        var eventBody = (JsonObject)canonicalEnvelope.DeepClone();
        var record = new LedgerEventRecord(
            draft.TenantId,
            draft.ChainId,
            draft.SequenceNumber,
            draft.EventId,
            draft.EventType,
            draft.PolicyVersion,
            draft.FindingId,
            draft.ArtifactId,
            draft.SourceRunId,
            draft.ActorId,
            draft.ActorType,
            draft.OccurredAt,
            draft.RecordedAt,
            eventBody,
            hashResult.EventHash,
            previousHash,
            hashResult.MerkleLeafHash,
            hashResult.CanonicalJson);

        try
        {
            await _repository.AppendAsync(record, cancellationToken).ConfigureAwait(false);
            await _merkleAnchorScheduler.EnqueueAsync(record, cancellationToken).ConfigureAwait(false);
        }
        catch (Exception ex) when (IsDuplicateKeyException(ex))
        {
            // Lost a race with a concurrent writer of the same event id:
            // re-read and resolve idempotently when payloads match.
            _logger.LogWarning(ex, "Ledger append detected concurrent duplicate for {EventId}", draft.EventId);
            var persisted = await _repository.GetByEventIdAsync(draft.TenantId, draft.EventId, cancellationToken).ConfigureAwait(false);
            if (persisted is null)
            {
                return LedgerWriteResult.Conflict("append_failed", "Ledger append failed due to concurrent write.");
            }

            if (!string.Equals(persisted.CanonicalJson, record.CanonicalJson, StringComparison.Ordinal))
            {
                return LedgerWriteResult.Conflict("event_id_conflict", "Ledger append raced with conflicting payload.");
            }

            return LedgerWriteResult.Idempotent(persisted);
        }

        return LedgerWriteResult.Success(record);
    }

    /// <summary>
    /// Heuristically detects duplicate-key failures: the domain exception, any
    /// exception type whose name contains "Unique", or either nested in an
    /// inner exception.
    /// </summary>
    private static bool IsDuplicateKeyException(Exception exception)
    {
        if (exception is null)
        {
            return false;
        }

        if (exception is LedgerDuplicateEventException)
        {
            return true;
        }

        // NOTE: name-based check is a fallback for provider-specific
        // unique-violation exception types.
        if (exception.GetType().Name.Contains("Unique", StringComparison.OrdinalIgnoreCase))
        {
            return true;
        }

        if (exception.InnerException is not null)
        {
            return IsDuplicateKeyException(exception.InnerException);
        }

        return false;
    }

    /// <summary>
    /// Collects machine-readable validation error codes for the draft; an
    /// empty list means the draft is structurally valid.
    /// </summary>
    private static List<string> ValidateDraft(LedgerEventDraft draft)
    {
        var errors = new List<string>();
        if (draft is null)
        {
            // Nothing else can be checked on a null draft.
            errors.Add("draft_required");
            return errors;
        }

        if (string.IsNullOrWhiteSpace(draft.TenantId))
        {
            errors.Add("tenant_id_required");
        }

        if (draft.SequenceNumber < 1)
        {
            errors.Add("sequence_must_be_positive");
        }

        if (draft.EventId == Guid.Empty)
        {
            errors.Add("event_id_required");
        }

        if (draft.ChainId == Guid.Empty)
        {
            errors.Add("chain_id_required");
        }

        if (!LedgerEventConstants.SupportedEventTypes.Contains(draft.EventType))
        {
            errors.Add($"event_type_invalid:{draft.EventType}");
        }

        if (!LedgerEventConstants.SupportedActorTypes.Contains(draft.ActorType))
        {
            errors.Add($"actor_type_invalid:{draft.ActorType}");
        }

        if (string.IsNullOrWhiteSpace(draft.PolicyVersion))
        {
            errors.Add("policy_version_required");
        }

        if (string.IsNullOrWhiteSpace(draft.FindingId))
        {
            errors.Add("finding_id_required");
        }

        if (string.IsNullOrWhiteSpace(draft.ArtifactId))
        {
            errors.Add("artifact_id_required");
        }

        if (draft.Payload is null)
        {
            errors.Add("payload_required");
        }

        if (draft.CanonicalEnvelope is null)
        {
            errors.Add("canonical_envelope_required");
        }

        return errors;
    }
}
|
||||
@@ -0,0 +1,247 @@
|
||||
using System.Globalization;
using System.Text.Json;
using System.Text.Json.Nodes;
using StellaOps.Findings.Ledger.Domain;
using StellaOps.Findings.Ledger.Hashing;
using StellaOps.Findings.Ledger.Infrastructure.Policy;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Services;
|
||||
|
||||
/// <summary>
/// Pure reducer that folds a single ledger event into the findings projection,
/// producing the updated projection row, a history entry, and (for workflow
/// events) an optional triage action entry.
/// </summary>
public static class LedgerProjectionReducer
{
    /// <summary>
    /// Applies <paramref name="record"/> on top of the current projection state.
    /// Policy evaluation output always takes precedence over values derived from
    /// the event payload or the prior projection.
    /// </summary>
    /// <param name="record">Persisted ledger event being applied or replayed.</param>
    /// <param name="current">Existing projection for the finding, or null for the first event.</param>
    /// <param name="evaluation">Policy Engine determination for this finding/policy version.</param>
    /// <returns>The reduced projection plus audit entries.</returns>
    /// <exception cref="InvalidOperationException">The event body lacks the 'event' object.</exception>
    public static ProjectionReduceResult Reduce(
        LedgerEventRecord record,
        FindingProjection? current,
        PolicyEvaluationResult evaluation)
    {
        ArgumentNullException.ThrowIfNull(record);
        ArgumentNullException.ThrowIfNull(evaluation);

        var eventObject = record.EventBody["event"]?.AsObject()
            ?? throw new InvalidOperationException("Ledger event payload is missing 'event' object.");
        var payload = eventObject["payload"] as JsonObject;

        // Evaluation wins; payload / prior projection are fallbacks only.
        var status = evaluation.Status ?? DetermineStatus(record.EventType, payload, current?.Status);
        var severity = evaluation.Severity ?? DetermineSeverity(payload, current?.Severity);

        var labels = CloneLabels(evaluation.Labels);
        MergeLabels(labels, payload);

        var explainRef = evaluation.ExplainRef ?? DetermineExplainRef(payload, current?.ExplainRef);
        var rationale = CloneArray(evaluation.Rationale);
        if (rationale.Count == 0 && !string.IsNullOrWhiteSpace(explainRef))
        {
            // Guarantee at least one rationale pointer for the UI when an explain ref exists.
            rationale.Add(explainRef);
        }

        var updatedAt = record.RecordedAt;

        // The cycle hash is computed over the projection with an empty CycleHash
        // placeholder, then stamped back in via a non-destructive 'with' copy.
        var provisional = new FindingProjection(
            record.TenantId,
            record.FindingId,
            record.PolicyVersion,
            status,
            severity,
            labels,
            record.EventId,
            explainRef,
            rationale,
            updatedAt,
            string.Empty);

        var cycleHash = ProjectionHashing.ComputeCycleHash(provisional);
        var projection = provisional with { CycleHash = cycleHash };

        var historyEntry = new FindingHistoryEntry(
            record.TenantId,
            record.FindingId,
            record.PolicyVersion,
            record.EventId,
            projection.Status,
            projection.Severity,
            record.ActorId,
            DetermineComment(payload),
            record.OccurredAt);

        var actionEntry = CreateActionEntry(record, payload);
        return new ProjectionReduceResult(projection, historyEntry, actionEntry);
    }

    // Resolves projection status: an explicit payload 'status' wins, then the prior
    // status, then an event-type specific default.
    private static string DetermineStatus(string eventType, JsonObject? payload, string? currentStatus)
    {
        // 'candidate' already falls back to currentStatus, so each switch arm only
        // needs to supply the per-event-type default.
        var candidate = ExtractString(payload, "status") ?? currentStatus;

        return eventType switch
        {
            LedgerEventConstants.EventFindingCreated => candidate ?? "affected",
            LedgerEventConstants.EventFindingStatusChanged => candidate ?? "affected",
            LedgerEventConstants.EventFindingClosed => candidate ?? "closed",
            LedgerEventConstants.EventFindingAcceptedRisk => candidate ?? "accepted_risk",
            _ => candidate ?? "affected"
        };
    }

    // Reads a numeric 'severity' from the payload (JSON number or numeric string),
    // falling back to the current projection severity when absent/unparseable.
    private static decimal? DetermineSeverity(JsonObject? payload, decimal? current)
    {
        if (payload is null)
        {
            return current;
        }

        if (payload.TryGetPropertyValue("severity", out var severityNode))
        {
            if (TryConvertDecimal(severityNode, out var severity))
            {
                return severity;
            }

            // Severity may arrive as a JSON string; parse with the invariant culture
            // so results do not depend on the host locale (e.g. ',' decimal separators).
            if (severityNode is JsonValue value && value.TryGetValue(out string? severityString)
                && decimal.TryParse(severityString, NumberStyles.Number, CultureInfo.InvariantCulture, out var severityFromString))
            {
                return severityFromString;
            }
        }

        return current;
    }

    // Applies payload label updates onto 'target': 'labels' entries upsert (null value
    // removes the key), then 'labelsRemove' entries delete by key.
    private static void MergeLabels(JsonObject target, JsonObject? payload)
    {
        if (payload is null)
        {
            return;
        }

        if (payload.TryGetPropertyValue("labels", out var labelsNode) && labelsNode is JsonObject labelUpdates)
        {
            foreach (var property in labelUpdates)
            {
                if (property.Value is null || property.Value.GetValueKind() == JsonValueKind.Null)
                {
                    // Explicit JSON null means "remove this label".
                    target.Remove(property.Key);
                }
                else
                {
                    // DeepClone: nodes cannot be attached to two parents.
                    target[property.Key] = property.Value.DeepClone();
                }
            }
        }

        if (payload.TryGetPropertyValue("labelsRemove", out var removeNode) && removeNode is JsonArray removeArray)
        {
            foreach (var item in removeArray)
            {
                if (item is JsonValue value && value.TryGetValue(out string? key) && !string.IsNullOrWhiteSpace(key))
                {
                    target.Remove(key);
                }
            }
        }
    }

    // Accepts both camelCase and snake_case spellings of the explain reference.
    private static string? DetermineExplainRef(JsonObject? payload, string? current)
    {
        var explainRef = ExtractString(payload, "explainRef") ?? ExtractString(payload, "explain_ref");
        return explainRef ?? current;
    }

    // First non-empty of comment/justification/note, used as the history entry comment.
    private static string? DetermineComment(JsonObject? payload)
    {
        return ExtractString(payload, "comment")
            ?? ExtractString(payload, "justification")
            ?? ExtractString(payload, "note");
    }

    // Maps workflow event types to triage actions; non-workflow events yield no entry.
    private static TriageActionEntry? CreateActionEntry(LedgerEventRecord record, JsonObject? payload)
    {
        var actionType = record.EventType switch
        {
            LedgerEventConstants.EventFindingStatusChanged => "status_change",
            LedgerEventConstants.EventFindingCommentAdded => "comment",
            LedgerEventConstants.EventFindingAssignmentChanged => "assign",
            LedgerEventConstants.EventFindingRemediationPlanAdded => "remediation_plan",
            LedgerEventConstants.EventFindingAcceptedRisk => "accept_risk",
            LedgerEventConstants.EventFindingAttachmentAdded => "attach_evidence",
            LedgerEventConstants.EventFindingClosed => "close",
            _ => null
        };

        if (actionType is null)
        {
            return null;
        }

        var payloadClone = payload?.DeepClone()?.AsObject() ?? new JsonObject();
        // NOTE(review): record.EventId is passed for two consecutive constructor
        // arguments — presumably both the action id and the originating event id
        // derive from the event; confirm against TriageActionEntry's signature.
        return new TriageActionEntry(
            record.TenantId,
            record.EventId,
            record.EventId,
            record.FindingId,
            actionType,
            payloadClone,
            record.RecordedAt,
            record.ActorId);
    }

    // Defensive copy so reducer output never aliases evaluation-owned nodes.
    private static JsonObject CloneLabels(JsonObject? source)
    {
        return source is null ? new JsonObject() : (JsonObject)source.DeepClone();
    }

    // Element-wise deep clone of a rationale array (arrays cannot share children).
    private static JsonArray CloneArray(JsonArray source)
    {
        ArgumentNullException.ThrowIfNull(source);

        var clone = new JsonArray();
        foreach (var item in source)
        {
            clone.Add(item?.DeepClone());
        }

        return clone;
    }

    // Returns the named property as a non-blank string, or null. Non-string nodes
    // fall back to their JSON text representation.
    private static string? ExtractString(JsonObject? obj, string propertyName)
    {
        if (obj is null)
        {
            return null;
        }

        if (!obj.TryGetPropertyValue(propertyName, out var node) || node is null)
        {
            return null;
        }

        if (node is JsonValue value && value.TryGetValue(out string? result))
        {
            return string.IsNullOrWhiteSpace(result) ? null : result;
        }

        return node.ToString();
    }

    // Converts a JSON node holding a decimal, double, or numeric text into a decimal.
    private static bool TryConvertDecimal(JsonNode? node, out decimal value)
    {
        switch (node)
        {
            case null:
                value = default;
                return false;
            case JsonValue jsonValue when jsonValue.TryGetValue(out decimal decimalValue):
                value = decimalValue;
                return true;
            case JsonValue jsonValue when jsonValue.TryGetValue(out double doubleValue):
                value = Convert.ToDecimal(doubleValue);
                return true;
            default:
                // Invariant culture: JSON numeric text is machine-readable and must not
                // be interpreted under the host locale's separators.
                if (decimal.TryParse(node.ToString(), NumberStyles.Number, CultureInfo.InvariantCulture, out var parsed))
                {
                    value = parsed;
                    return true;
                }

                value = default;
                return false;
        }
    }
}
|
||||
@@ -0,0 +1,20 @@
|
||||
<Project Sdk="Microsoft.NET.Sdk">

  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <Nullable>enable</Nullable>
    <ImplicitUsings>enable</ImplicitUsings>
  </PropertyGroup>

  <ItemGroup>
    <!-- SQL migration scripts live in the repo for tooling but are applied
         out-of-band; never packed or copied to the build output. -->
    <None Include="migrations\**\*" Pack="false" CopyToOutputDirectory="Never" />
  </ItemGroup>

  <ItemGroup>
    <!-- NOTE(review): Npgsql 7.0.7 predates the net10.0 RC-level
         Microsoft.Extensions packages above — confirm the version pin is
         intentional and compatible with the target framework. -->
    <PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0-rc.2.25502.107" />
    <PackageReference Include="Microsoft.Extensions.Hosting.Abstractions" Version="10.0.0-rc.2.25502.107" />
    <PackageReference Include="Microsoft.Extensions.Options" Version="10.0.0-rc.2.25502.107" />
    <PackageReference Include="Npgsql" Version="7.0.7" />
  </ItemGroup>

</Project>
|
||||
@@ -1,73 +1,73 @@
|
||||
# Findings Ledger Task Board — Epic 6: Vulnerability Explorer
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-29-001 | TODO | Findings Ledger Guild | AUTH-POLICY-27-001 | Design ledger & projection schemas (tables/indexes), canonical JSON format, hashing strategy, and migrations. Publish schema doc + fixtures. | Schemas committed; migrations generated; hashing documented; fixtures seeded for CI. |
|
||||
| LEDGER-29-002 | TODO | Findings Ledger Guild | LEDGER-29-001 | Implement ledger write API (`POST /vuln/ledger/events`) with validation, idempotency, hash chaining, and Merkle root computation job. | Events persisted with chained hashes; Merkle job emits anchors; unit/integration tests cover happy/pathological cases. |
|
||||
| LEDGER-29-003 | TODO | Findings Ledger Guild, Scheduler Guild | LEDGER-29-001 | Build projector worker that derives `findings_projection` rows from ledger events + policy determinations; ensure idempotent replay keyed by `(tenant,finding_id,policy_version)`. | Projector processes sample streams deterministically; replay tests pass; metrics exported. |
|
||||
| LEDGER-29-004 | TODO | Findings Ledger Guild, Policy Guild | LEDGER-29-003, POLICY-ENGINE-27-001 | Integrate Policy Engine batch evaluation (baseline + simulate) with projector; cache rationale references. | Projector fetches determinations efficiently; rationale stored for UI; regression tests cover version switches. |
|
||||
| LEDGER-29-005 | TODO | Findings Ledger Guild | LEDGER-29-003 | Implement workflow mutation handlers (assign, comment, accept-risk, target-fix, verify-fix, reopen) producing ledger events with validation and attachments metadata. | API endpoints enforce business rules; attachments metadata stored; tests cover state machine transitions. |
|
||||
| LEDGER-29-006 | TODO | Findings Ledger Guild, Security Guild | LEDGER-29-002 | Integrate attachment encryption (KMS envelope), signed URL issuance, CSRF protection hooks for Console. | Attachments encrypted and accessible via signed URLs; security tests verify expiry + scope. |
|
||||
| LEDGER-29-007 | TODO | Findings Ledger Guild, Observability Guild | LEDGER-29-002..005 | Instrument metrics (`ledger_write_latency`, `projection_lag_seconds`, `ledger_events_total`), structured logs, and Merkle anchoring alerts; publish dashboards. | Metrics/traces emitted; dashboards live; alert thresholds documented. |
|
||||
| LEDGER-29-008 | TODO | Findings Ledger Guild, QA Guild | LEDGER-29-002..005 | Develop unit/property/integration tests, replay/restore tooling, determinism harness, and load tests at 5M findings/tenant. | CI suite green; load tests documented; determinism harness proves stable projections. |
|
||||
| LEDGER-29-009 | TODO | Findings Ledger Guild, DevOps Guild | LEDGER-29-002..008 | Provide deployment manifests (Helm/Compose), backup/restore guidance, Merkle anchor externalization (optional), and offline kit instructions. | Deployment docs merged; smoke deploy validated; backup/restore scripts recorded; offline kit includes seed data. |
|
||||
|
||||
## Export Center
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-EXPORT-35-001 | TODO | Findings Ledger Guild | LEDGER-29-003, EXPORT-SVC-35-002 | Provide paginated streaming endpoints for advisories, VEX, SBOMs, and findings aligned with export filters, including deterministic ordering and provenance metadata. | Streaming APIs deployed; integration tests with exporter planner; metrics/logs instrumented; docs updated. |
|
||||
|
||||
## Orchestrator Dashboard
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-34-101 | TODO | Findings Ledger Guild | ORCH-SVC-34-002, LEDGER-29-002 | Link orchestrator run ledger exports into Findings Ledger provenance chain, index by artifact hash, and expose audit queries. | Ledger ingestion job consumes orchestrator exports; provenance queries return artifact chain; tests cover multi-tenant isolation; docs updated. |
|
||||
|
||||
## CLI Parity & Task Packs
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-PACKS-42-001 | TODO | Findings Ledger Guild | LEDGER-29-003, TASKRUN-41-001 | Provide snapshot/time-travel APIs and digestible exports for task pack simulation and CLI offline mode. | Snapshot API deployed; simulation validated; docs updated; imposed rule noted. |
|
||||
|
||||
## Authority-Backed Scopes & Tenancy (Epic 14)
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-TEN-48-001 | TODO | Findings Ledger Guild | AUTH-TEN-47-001 | Partition ledger tables by tenant/project, enable RLS, update queries/events, and stamp audit metadata. | Ledger queries respect tenant context; RLS tests pass; events include tenant metadata. |
|
||||
|
||||
## Observability & Forensics (Epic 15)
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-OBS-50-001 | TODO | Findings Ledger Guild, Observability Guild | TELEMETRY-OBS-50-001, TELEMETRY-OBS-50-002 | Integrate telemetry core within ledger writer/projector services, emitting structured logs and trace spans for ledger append, projector replay, and query APIs with tenant context. | Telemetry present for append + replay flows; integration tests assert trace propagation; log schema validated. |
|
||||
| LEDGER-OBS-51-001 | TODO | Findings Ledger Guild, DevOps Guild | LEDGER-OBS-50-001, TELEMETRY-OBS-51-001 | Publish metrics for ledger latency, projector lag, event throughput, and policy evaluation linkage. Define SLOs (ledger append P95 < 1s, replay lag < 30s) with burn-rate alerts and dashboards. | Metrics surfaced in dashboards; SLO alerts configured/tested; documentation updated. |
|
||||
| LEDGER-OBS-52-001 | TODO | Findings Ledger Guild | LEDGER-29-002, TIMELINE-OBS-52-002 | Emit timeline events for ledger writes and projector commits (`ledger.event.appended`, `ledger.projection.updated`) with trace ID, policy version, evidence bundle reference placeholders. | Timeline events validated with fixtures; duplicates suppressed; docs note schema. |
|
||||
| LEDGER-OBS-53-001 | TODO | Findings Ledger Guild, Evidence Locker Guild | LEDGER-OBS-52-001, EVID-OBS-53-002 | Persist evidence bundle references (evaluation/job capsules) alongside ledger entries, exposing lookup API linking findings to evidence manifests and timeline. | Evidence references stored/retrievable; API returns deterministic payload; integration tests pass. |
|
||||
| LEDGER-OBS-54-001 | TODO | Findings Ledger Guild, Provenance Guild | LEDGER-OBS-53-001, PROV-OBS-54-001 | Verify attestation references for ledger-derived exports; expose `/ledger/attestations` endpoint returning DSSE verification state and chain-of-custody summary. | Endpoint returns verification results; negative cases handled; docs updated. |
|
||||
| LEDGER-OBS-55-001 | TODO | Findings Ledger Guild, DevOps Guild | LEDGER-OBS-51-001, DEVOPS-OBS-55-001 | Enhance incident mode to record additional replay diagnostics (lag traces, conflict snapshots) and extend retention while active. Emit activation events to timeline + notifier. | Incident mode captures diagnostics; retention adjustments revert post-incident; timeline/notifications validated. |
|
||||
|
||||
## Air-Gapped Mode (Epic 16)
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-AIRGAP-56-001 | TODO | Findings Ledger Guild | AIRGAP-IMP-57-001, CONCELIER-AIRGAP-56-002 | Record bundle provenance (`bundle_id`, `merkle_root`, `time_anchor`) on ledger events for advisories/VEX/policies imported via Mirror Bundles. | Ledger entries include bundle metadata; queries expose provenance; tests cover import + replay. |
|
||||
| LEDGER-AIRGAP-56-002 | TODO | Findings Ledger Guild, AirGap Time Guild | LEDGER-AIRGAP-56-001, AIRGAP-TIME-58-001 | Surface staleness metrics for findings and block risk-critical exports when stale beyond thresholds; provide remediation messaging. | Staleness thresholds enforced; exports blocked when stale; notifications triggered. |
|
||||
| LEDGER-AIRGAP-57-001 | TODO | Findings Ledger Guild, Evidence Locker Guild | LEDGER-AIRGAP-56-001, EVID-OBS-54-001 | Link findings evidence snapshots to portable evidence bundles and ensure cross-enclave verification works. | Evidence references validated; portable bundles verify across environments; integration tests updated. |
|
||||
| LEDGER-AIRGAP-58-001 | TODO | Findings Ledger Guild, AirGap Controller Guild | LEDGER-AIRGAP-56-001, AIRGAP-CTL-56-002 | Emit timeline events for bundle import impacts (new findings, remediation changes) with sealed-mode context. | Timeline events emitted with bundle IDs; duplicates suppressed; docs updated. |
|
||||
|
||||
## SDKs & OpenAPI (Epic 17)
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-OAS-61-001 | TODO | Findings Ledger Guild, API Contracts Guild | OAS-61-001 | Expand Findings Ledger OAS to include projections, evidence lookups, and filter parameters with examples. | Spec covers all ledger endpoints; lint/compat checks pass. |
|
||||
| LEDGER-OAS-61-002 | TODO | Findings Ledger Guild | LEDGER-OAS-61-001 | Implement `/.well-known/openapi` endpoint and ensure version metadata matches release. | Discovery endpoint live; contract tests added. |
|
||||
| LEDGER-OAS-62-001 | TODO | Findings Ledger Guild, SDK Generator Guild | LEDGER-OAS-61-001, SDKGEN-63-001 | Provide SDK test cases for findings pagination, filtering, evidence links; ensure typed models expose provenance. | SDK smoke tests cover ledger flows; documentation embeds examples. |
|
||||
| LEDGER-OAS-63-001 | TODO | Findings Ledger Guild, API Governance Guild | APIGOV-63-001 | Support deprecation headers and Notifications for retiring finding endpoints. | Headers emitted; notifications validated; docs updated. |
|
||||
|
||||
## Risk Profiles (Epic 18)
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-RISK-66-001 | TODO | Findings Ledger Guild, Risk Engine Guild | RISK-ENGINE-66-001 | Add schema migrations for `risk_score`, `risk_severity`, `profile_version`, `explanation_id`, and supporting indexes. | Migrations applied; indexes created; schema docs updated. |
|
||||
| LEDGER-RISK-66-002 | TODO | Findings Ledger Guild | LEDGER-RISK-66-001 | Implement deterministic upsert of scoring results keyed by finding hash/profile version with history audit. | Upsert path tested; duplicate suppression verified; audit records stored. |
|
||||
| LEDGER-RISK-67-001 | TODO | Findings Ledger Guild, Risk Engine Guild | LEDGER-RISK-66-002, RISK-ENGINE-68-001 | Expose query APIs for scored findings with score/severity filters, pagination, and explainability links. | API documented; contract tests pass; latency within targets. |
|
||||
| LEDGER-RISK-68-001 | TODO | Findings Ledger Guild, Export Guild | LEDGER-RISK-66-002 | Enable export of scored findings and simulation results via Export Center integration. | Export job functional; CLI/Console consume bundle; verification tests pass. |
|
||||
| LEDGER-RISK-69-001 | TODO | Findings Ledger Guild, Observability Guild | LEDGER-RISK-66-001 | Emit metrics/dashboards for scoring latency, result freshness, severity distribution, provider gaps. | Dashboards live; alerts configured; documentation updated. |
|
||||
|
||||
## Attestor Console (Epic 19)
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-ATTEST-73-001 | TODO | Findings Ledger Guild, Attestor Service Guild | ATTESTOR-73-002 | Persist pointers from findings to verification reports and attestation envelopes for explainability. | Ledger schema extended; queries return linked evidence; tests cover joins. |
|
||||
| LEDGER-ATTEST-73-002 | TODO | Findings Ledger Guild | LEDGER-ATTEST-73-001 | Enable search/filter in findings projections by verification result and attestation status. | API filters by verification result; UI integration ready; tests updated. |
|
||||
# Findings Ledger Task Board — Epic 6: Vulnerability Explorer
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-29-001 | DONE (2025-11-03) | Findings Ledger Guild | AUTH-POLICY-27-001 | Design ledger & projection schemas (tables/indexes), canonical JSON format, hashing strategy, and migrations. Publish schema doc + fixtures.<br>2025-11-03: Initial PostgreSQL migration added with partitions/enums, fixtures seeded with canonical hashes, schema doc aligned. | Schemas committed; migrations generated; hashing documented; fixtures seeded for CI. |
|
||||
| LEDGER-29-002 | DONE (2025-11-03) | Findings Ledger Guild | LEDGER-29-001 | Implement ledger write API (`POST /vuln/ledger/events`) with validation, idempotency, hash chaining, and Merkle root computation job.<br>2025-11-03: Minimal web service scaffolded with canonical hashing, in-memory repository, Merkle scheduler stub, request/response contracts, and unit tests for hashing + conflict flows. | Events persisted with chained hashes; Merkle job emits anchors; unit/integration tests cover happy/pathological cases. |
|
||||
| LEDGER-29-003 | DONE (2025-11-03) | Findings Ledger Guild, Scheduler Guild | LEDGER-29-001 | Build projector worker that derives `findings_projection` rows from ledger events + policy determinations; ensure idempotent replay keyed by `(tenant,finding_id,policy_version)`.<br>2025-11-03: Postgres-backed projector worker and reducers landed with replay checkpointing, fixtures, and tests. | Projector processes sample streams deterministically; replay tests pass; metrics exported. |
|
||||
| LEDGER-29-004 | DOING (2025-11-03) | Findings Ledger Guild, Policy Guild | LEDGER-29-003, POLICY-ENGINE-27-001 | Integrate Policy Engine batch evaluation (baseline + simulate) with projector; cache rationale references.<br>2025-11-04: Projection reducer now consumes policy evaluation output with rationale arrays; Postgres migration + fixtures/tests updated, awaiting Policy Engine API wiring for batch fetch. | Projector fetches determinations efficiently; rationale stored for UI; regression tests cover version switches. |
|
||||
| LEDGER-29-005 | TODO | Findings Ledger Guild | LEDGER-29-003 | Implement workflow mutation handlers (assign, comment, accept-risk, target-fix, verify-fix, reopen) producing ledger events with validation and attachments metadata. | API endpoints enforce business rules; attachments metadata stored; tests cover state machine transitions. |
|
||||
| LEDGER-29-006 | TODO | Findings Ledger Guild, Security Guild | LEDGER-29-002 | Integrate attachment encryption (KMS envelope), signed URL issuance, CSRF protection hooks for Console. | Attachments encrypted and accessible via signed URLs; security tests verify expiry + scope. |
|
||||
| LEDGER-29-007 | TODO | Findings Ledger Guild, Observability Guild | LEDGER-29-002..005 | Instrument metrics (`ledger_write_latency`, `projection_lag_seconds`, `ledger_events_total`), structured logs, and Merkle anchoring alerts; publish dashboards. | Metrics/traces emitted; dashboards live; alert thresholds documented. |
|
||||
| LEDGER-29-008 | TODO | Findings Ledger Guild, QA Guild | LEDGER-29-002..005 | Develop unit/property/integration tests, replay/restore tooling, determinism harness, and load tests at 5M findings/tenant. | CI suite green; load tests documented; determinism harness proves stable projections. |
|
||||
| LEDGER-29-009 | TODO | Findings Ledger Guild, DevOps Guild | LEDGER-29-002..008 | Provide deployment manifests (Helm/Compose), backup/restore guidance, Merkle anchor externalization (optional), and offline kit instructions. | Deployment docs merged; smoke deploy validated; backup/restore scripts recorded; offline kit includes seed data. |
|
||||
|
||||
## Export Center
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-EXPORT-35-001 | TODO | Findings Ledger Guild | LEDGER-29-003, EXPORT-SVC-35-002 | Provide paginated streaming endpoints for advisories, VEX, SBOMs, and findings aligned with export filters, including deterministic ordering and provenance metadata. | Streaming APIs deployed; integration tests with exporter planner; metrics/logs instrumented; docs updated. |
|
||||
|
||||
## Orchestrator Dashboard
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-34-101 | TODO | Findings Ledger Guild | ORCH-SVC-34-002, LEDGER-29-002 | Link orchestrator run ledger exports into Findings Ledger provenance chain, index by artifact hash, and expose audit queries. | Ledger ingestion job consumes orchestrator exports; provenance queries return artifact chain; tests cover multi-tenant isolation; docs updated. |
|
||||
|
||||
## CLI Parity & Task Packs
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-PACKS-42-001 | TODO | Findings Ledger Guild | LEDGER-29-003, TASKRUN-41-001 | Provide snapshot/time-travel APIs and digestible exports for task pack simulation and CLI offline mode. | Snapshot API deployed; simulation validated; docs updated; imposed rule noted. |
|
||||
|
||||
## Authority-Backed Scopes & Tenancy (Epic 14)
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-TEN-48-001 | TODO | Findings Ledger Guild | AUTH-TEN-47-001 | Partition ledger tables by tenant/project, enable RLS, update queries/events, and stamp audit metadata. | Ledger queries respect tenant context; RLS tests pass; events include tenant metadata. |
|
||||
|
||||
## Observability & Forensics (Epic 15)
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-OBS-50-001 | TODO | Findings Ledger Guild, Observability Guild | TELEMETRY-OBS-50-001, TELEMETRY-OBS-50-002 | Integrate telemetry core within ledger writer/projector services, emitting structured logs and trace spans for ledger append, projector replay, and query APIs with tenant context. | Telemetry present for append + replay flows; integration tests assert trace propagation; log schema validated. |
|
||||
| LEDGER-OBS-51-001 | TODO | Findings Ledger Guild, DevOps Guild | LEDGER-OBS-50-001, TELEMETRY-OBS-51-001 | Publish metrics for ledger latency, projector lag, event throughput, and policy evaluation linkage. Define SLOs (ledger append P95 < 1s, replay lag < 30s) with burn-rate alerts and dashboards. | Metrics surfaced in dashboards; SLO alerts configured/tested; documentation updated. |
|
||||
| LEDGER-OBS-52-001 | TODO | Findings Ledger Guild | LEDGER-29-002, TIMELINE-OBS-52-002 | Emit timeline events for ledger writes and projector commits (`ledger.event.appended`, `ledger.projection.updated`) with trace ID, policy version, evidence bundle reference placeholders. | Timeline events validated with fixtures; duplicates suppressed; docs note schema. |
|
||||
| LEDGER-OBS-53-001 | TODO | Findings Ledger Guild, Evidence Locker Guild | LEDGER-OBS-52-001, EVID-OBS-53-002 | Persist evidence bundle references (evaluation/job capsules) alongside ledger entries, exposing lookup API linking findings to evidence manifests and timeline. | Evidence references stored/retrievable; API returns deterministic payload; integration tests pass. |
|
||||
| LEDGER-OBS-54-001 | TODO | Findings Ledger Guild, Provenance Guild | LEDGER-OBS-53-001, PROV-OBS-54-001 | Verify attestation references for ledger-derived exports; expose `/ledger/attestations` endpoint returning DSSE verification state and chain-of-custody summary. | Endpoint returns verification results; negative cases handled; docs updated. |
|
||||
| LEDGER-OBS-55-001 | TODO | Findings Ledger Guild, DevOps Guild | LEDGER-OBS-51-001, DEVOPS-OBS-55-001 | Enhance incident mode to record additional replay diagnostics (lag traces, conflict snapshots) and extend retention while active. Emit activation events to timeline + notifier. | Incident mode captures diagnostics; retention adjustments revert post-incident; timeline/notifications validated. |
|
||||
|
||||
## Air-Gapped Mode (Epic 16)
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-AIRGAP-56-001 | TODO | Findings Ledger Guild | AIRGAP-IMP-57-001, CONCELIER-AIRGAP-56-002 | Record bundle provenance (`bundle_id`, `merkle_root`, `time_anchor`) on ledger events for advisories/VEX/policies imported via Mirror Bundles. | Ledger entries include bundle metadata; queries expose provenance; tests cover import + replay. |
|
||||
| LEDGER-AIRGAP-56-002 | TODO | Findings Ledger Guild, AirGap Time Guild | LEDGER-AIRGAP-56-001, AIRGAP-TIME-58-001 | Surface staleness metrics for findings and block risk-critical exports when stale beyond thresholds; provide remediation messaging. | Staleness thresholds enforced; exports blocked when stale; notifications triggered. |
|
||||
| LEDGER-AIRGAP-57-001 | TODO | Findings Ledger Guild, Evidence Locker Guild | LEDGER-AIRGAP-56-001, EVID-OBS-54-001 | Link findings evidence snapshots to portable evidence bundles and ensure cross-enclave verification works. | Evidence references validated; portable bundles verify across environments; integration tests updated. |
|
||||
| LEDGER-AIRGAP-58-001 | TODO | Findings Ledger Guild, AirGap Controller Guild | LEDGER-AIRGAP-56-001, AIRGAP-CTL-56-002 | Emit timeline events for bundle import impacts (new findings, remediation changes) with sealed-mode context. | Timeline events emitted with bundle IDs; duplicates suppressed; docs updated. |
|
||||
|
||||
## SDKs & OpenAPI (Epic 17)
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-OAS-61-001 | TODO | Findings Ledger Guild, API Contracts Guild | OAS-61-001 | Expand Findings Ledger OAS to include projections, evidence lookups, and filter parameters with examples. | Spec covers all ledger endpoints; lint/compat checks pass. |
|
||||
| LEDGER-OAS-61-002 | TODO | Findings Ledger Guild | LEDGER-OAS-61-001 | Implement `/.well-known/openapi` endpoint and ensure version metadata matches release. | Discovery endpoint live; contract tests added. |
|
||||
| LEDGER-OAS-62-001 | TODO | Findings Ledger Guild, SDK Generator Guild | LEDGER-OAS-61-001, SDKGEN-63-001 | Provide SDK test cases for findings pagination, filtering, evidence links; ensure typed models expose provenance. | SDK smoke tests cover ledger flows; documentation embeds examples. |
|
||||
| LEDGER-OAS-63-001 | TODO | Findings Ledger Guild, API Governance Guild | APIGOV-63-001 | Support deprecation headers and Notifications for retiring finding endpoints. | Headers emitted; notifications validated; docs updated. |
|
||||
|
||||
## Risk Profiles (Epic 18)
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-RISK-66-001 | TODO | Findings Ledger Guild, Risk Engine Guild | RISK-ENGINE-66-001 | Add schema migrations for `risk_score`, `risk_severity`, `profile_version`, `explanation_id`, and supporting indexes. | Migrations applied; indexes created; schema docs updated. |
|
||||
| LEDGER-RISK-66-002 | TODO | Findings Ledger Guild | LEDGER-RISK-66-001 | Implement deterministic upsert of scoring results keyed by finding hash/profile version with history audit. | Upsert path tested; duplicate suppression verified; audit records stored. |
|
||||
| LEDGER-RISK-67-001 | TODO | Findings Ledger Guild, Risk Engine Guild | LEDGER-RISK-66-002, RISK-ENGINE-68-001 | Expose query APIs for scored findings with score/severity filters, pagination, and explainability links. | API documented; contract tests pass; latency within targets. |
|
||||
| LEDGER-RISK-68-001 | TODO | Findings Ledger Guild, Export Guild | LEDGER-RISK-66-002 | Enable export of scored findings and simulation results via Export Center integration. | Export job functional; CLI/Console consume bundle; verification tests pass. |
|
||||
| LEDGER-RISK-69-001 | TODO | Findings Ledger Guild, Observability Guild | LEDGER-RISK-66-001 | Emit metrics/dashboards for scoring latency, result freshness, severity distribution, provider gaps. | Dashboards live; alerts configured; documentation updated. |
|
||||
|
||||
## Attestor Console (Epic 19)
|
||||
| ID | Status | Owner(s) | Depends on | Description | Exit Criteria |
|
||||
|----|--------|----------|------------|-------------|---------------|
|
||||
| LEDGER-ATTEST-73-001 | TODO | Findings Ledger Guild, Attestor Service Guild | ATTESTOR-73-002 | Persist pointers from findings to verification reports and attestation envelopes for explainability. | Ledger schema extended; queries return linked evidence; tests cover joins. |
|
||||
| LEDGER-ATTEST-73-002 | TODO | Findings Ledger Guild | LEDGER-ATTEST-73-001 | Enable search/filter in findings projections by verification result and attestation status. | API filters by verification result; UI integration ready; tests updated. |
|
||||
|
||||
@@ -0,0 +1,138 @@
|
||||
-- 001_initial.sql
-- Findings Ledger bootstrap schema (LEDGER-29-001).
-- Creates the append-only event ledger, Merkle anchoring state, and the
-- read-model projections. All row-bearing tables are LIST-partitioned by
-- tenant_id with a DEFAULT partition so writes work before per-tenant
-- partitions are provisioned.

BEGIN;

-- Closed set of event types the ledger accepts.
CREATE TYPE ledger_event_type AS ENUM (
    'finding.created',
    'finding.status_changed',
    'finding.severity_changed',
    'finding.tag_updated',
    'finding.comment_added',
    'finding.assignment_changed',
    'finding.accepted_risk',
    'finding.remediation_plan_added',
    'finding.attachment_added',
    'finding.closed'
);

-- Closed set of triage actions derived from ledger events.
CREATE TYPE ledger_action_type AS ENUM (
    'assign',
    'comment',
    'attach_evidence',
    'link_ticket',
    'remediation_plan',
    'status_change',
    'accept_risk',
    'reopen',
    'close'
);

-- Append-only event log. Each event is hash-chained per (tenant, chain)
-- via previous_hash and contributes a Merkle leaf for anchoring.
CREATE TABLE ledger_events (
    tenant_id        TEXT NOT NULL,
    chain_id         UUID NOT NULL,
    sequence_no      BIGINT NOT NULL,
    event_id         UUID NOT NULL,
    event_type       ledger_event_type NOT NULL,
    policy_version   TEXT NOT NULL,
    finding_id       TEXT NOT NULL,
    artifact_id      TEXT NOT NULL,
    source_run_id    UUID,
    actor_id         TEXT NOT NULL,
    actor_type       TEXT NOT NULL,
    occurred_at      TIMESTAMPTZ NOT NULL,
    recorded_at      TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    event_body       JSONB NOT NULL,
    -- Hashes are lowercase hex SHA-256 digests (64 chars), enforced below.
    event_hash       CHAR(64) NOT NULL,
    previous_hash    CHAR(64) NOT NULL,
    merkle_leaf_hash CHAR(64) NOT NULL,
    CONSTRAINT pk_ledger_events PRIMARY KEY (tenant_id, chain_id, sequence_no),
    CONSTRAINT uq_ledger_events_event_id UNIQUE (tenant_id, event_id),
    CONSTRAINT uq_ledger_events_chain_hash UNIQUE (tenant_id, chain_id, event_hash),
    CONSTRAINT ck_ledger_events_event_hash_hex CHECK (event_hash ~ '^[0-9a-f]{64}$'),
    CONSTRAINT ck_ledger_events_previous_hash_hex CHECK (previous_hash ~ '^[0-9a-f]{64}$'),
    CONSTRAINT ck_ledger_events_leaf_hash_hex CHECK (merkle_leaf_hash ~ '^[0-9a-f]{64}$'),
    CONSTRAINT ck_ledger_events_actor_type CHECK (actor_type IN ('system', 'operator', 'integration'))
) PARTITION BY LIST (tenant_id);

CREATE TABLE ledger_events_default PARTITION OF ledger_events DEFAULT;

CREATE INDEX ix_ledger_events_finding ON ledger_events (tenant_id, finding_id, policy_version);
CREATE INDEX ix_ledger_events_type ON ledger_events (tenant_id, event_type, recorded_at DESC);
CREATE INDEX ix_ledger_events_recorded_at ON ledger_events (tenant_id, recorded_at DESC);

-- Anchored Merkle roots covering contiguous windows of ledger events.
CREATE TABLE ledger_merkle_roots (
    tenant_id        TEXT NOT NULL,
    anchor_id        UUID NOT NULL,
    window_start     TIMESTAMPTZ NOT NULL,
    window_end       TIMESTAMPTZ NOT NULL,
    sequence_start   BIGINT NOT NULL,
    sequence_end     BIGINT NOT NULL,
    root_hash        CHAR(64) NOT NULL,
    leaf_count       INTEGER NOT NULL,
    anchored_at      TIMESTAMPTZ NOT NULL,
    anchor_reference TEXT,
    CONSTRAINT pk_ledger_merkle_roots PRIMARY KEY (tenant_id, anchor_id),
    CONSTRAINT uq_ledger_merkle_root_hash UNIQUE (tenant_id, root_hash),
    CONSTRAINT ck_ledger_merkle_root_hash_hex CHECK (root_hash ~ '^[0-9a-f]{64}$')
) PARTITION BY LIST (tenant_id);

CREATE TABLE ledger_merkle_roots_default PARTITION OF ledger_merkle_roots DEFAULT;

CREATE INDEX ix_merkle_sequences ON ledger_merkle_roots (tenant_id, sequence_end DESC);

-- Current-state projection, one row per (finding, policy version).
CREATE TABLE findings_projection (
    tenant_id        TEXT NOT NULL,
    finding_id       TEXT NOT NULL,
    policy_version   TEXT NOT NULL,
    status           TEXT NOT NULL,
    severity         NUMERIC(6,3),
    labels           JSONB NOT NULL DEFAULT '{}'::JSONB,
    current_event_id UUID NOT NULL,
    explain_ref      TEXT,
    updated_at       TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    cycle_hash       CHAR(64) NOT NULL,
    CONSTRAINT pk_findings_projection PRIMARY KEY (tenant_id, finding_id, policy_version),
    CONSTRAINT ck_findings_projection_cycle_hash_hex CHECK (cycle_hash ~ '^[0-9a-f]{64}$')
) PARTITION BY LIST (tenant_id);

CREATE TABLE findings_projection_default PARTITION OF findings_projection DEFAULT;

CREATE INDEX ix_projection_status ON findings_projection (tenant_id, status, severity DESC);
-- jsonb_path_ops keeps the GIN index small; supports @> containment queries.
CREATE INDEX ix_projection_labels_gin ON findings_projection USING GIN (labels JSONB_PATH_OPS);

-- Per-finding history timeline, one row per applied event.
CREATE TABLE finding_history (
    tenant_id      TEXT NOT NULL,
    finding_id     TEXT NOT NULL,
    policy_version TEXT NOT NULL,
    event_id       UUID NOT NULL,
    status         TEXT NOT NULL,
    severity       NUMERIC(6,3),
    actor_id       TEXT NOT NULL,
    comment        TEXT,
    occurred_at    TIMESTAMPTZ NOT NULL,
    CONSTRAINT pk_finding_history PRIMARY KEY (tenant_id, finding_id, event_id)
) PARTITION BY LIST (tenant_id);

CREATE TABLE finding_history_default PARTITION OF finding_history DEFAULT;

CREATE INDEX ix_finding_history_timeline ON finding_history (tenant_id, finding_id, occurred_at DESC);

-- Triage actions derived from ledger events (assignments, comments, etc.).
CREATE TABLE triage_actions (
    tenant_id   TEXT NOT NULL,
    action_id   UUID NOT NULL,
    event_id    UUID NOT NULL,
    finding_id  TEXT NOT NULL,
    action_type ledger_action_type NOT NULL,
    payload     JSONB NOT NULL DEFAULT '{}'::JSONB,
    created_at  TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    created_by  TEXT NOT NULL,
    CONSTRAINT pk_triage_actions PRIMARY KEY (tenant_id, action_id)
) PARTITION BY LIST (tenant_id);

CREATE TABLE triage_actions_default PARTITION OF triage_actions DEFAULT;

CREATE INDEX ix_triage_actions_event ON triage_actions (tenant_id, event_id);
CREATE INDEX ix_triage_actions_created_at ON triage_actions (tenant_id, created_at DESC);

COMMIT;
|
||||
@@ -0,0 +1,21 @@
|
||||
-- 002_projection_offsets.sql
-- Projection worker checkpoint storage (LEDGER-29-003).
-- Each projection worker records the last event it has applied so that it
-- can resume from that point after a restart.

BEGIN;

CREATE TABLE IF NOT EXISTS ledger_projection_offsets (
    worker_id        TEXT NOT NULL PRIMARY KEY,
    last_recorded_at TIMESTAMPTZ NOT NULL,
    last_event_id    UUID NOT NULL,
    updated_at       TIMESTAMPTZ NOT NULL
);

-- Seed the default worker at the epoch / nil-UUID sentinel so the first
-- run processes the ledger from the beginning. Idempotent via DO NOTHING.
INSERT INTO ledger_projection_offsets (worker_id, last_recorded_at, last_event_id, updated_at)
VALUES (
    'default',
    '1970-01-01T00:00:00Z',
    '00000000-0000-0000-0000-000000000000',
    NOW())
ON CONFLICT (worker_id) DO NOTHING;

COMMIT;
|
||||
@@ -0,0 +1,16 @@
|
||||
-- 003_policy_rationale.sql
-- Add policy rationale column to findings_projection (LEDGER-29-004).

BEGIN;

ALTER TABLE findings_projection
    ADD COLUMN IF NOT EXISTS policy_rationale JSONB NOT NULL DEFAULT '[]'::JSONB;

-- NOTE(review): the following two statements are no-ops on a fresh install
-- (the ADD COLUMN above already sets the default and backfills NOT NULL
-- rows); presumably kept for idempotent re-runs against databases where the
-- column pre-existed without the default — confirm before removing.
ALTER TABLE findings_projection
    ALTER COLUMN policy_rationale SET DEFAULT '[]'::JSONB;

UPDATE findings_projection
SET policy_rationale = '[]'::JSONB
WHERE policy_rationale IS NULL;

COMMIT;
|
||||
@@ -0,0 +1,164 @@
|
||||
using System.Globalization;
using System.Text.Json.Nodes;
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using StellaOps.Findings.Ledger.Domain;
using StellaOps.Findings.Ledger.Hashing;
using StellaOps.Findings.Ledger.Infrastructure.Policy;
using StellaOps.Findings.Ledger.Services;
using Xunit;

namespace StellaOps.Findings.Ledger.Tests;

/// <summary>
/// Tests for <see cref="InlinePolicyEvaluationService"/>: evaluation results are
/// driven by the event payload when present, and fall back to the existing
/// projection values when the payload is empty.
/// </summary>
public sealed class InlinePolicyEvaluationServiceTests
{
    private readonly InlinePolicyEvaluationService _service = new(NullLogger<InlinePolicyEvaluationService>.Instance);

    [Fact]
    public async Task EvaluateAsync_UsesPayloadValues_WhenPresent()
    {
        // Payload supplies status/severity, label adds + removals, and rationale refs.
        var payload = new JsonObject
        {
            ["status"] = "triaged",
            ["severity"] = 5.2,
            ["labels"] = new JsonObject
            {
                ["kev"] = true,
                ["runtime"] = "exposed"
            },
            ["labelsRemove"] = new JsonArray("deprecated"),
            ["explainRef"] = "explain://tenant/findings/1",
            ["rationaleRefs"] = new JsonArray("explain://tenant/findings/1", "policy://tenant/pol/version/rationale")
        };

        // Existing projection carries the "deprecated" label that the payload removes.
        var existingProjection = new FindingProjection(
            "tenant",
            "finding",
            "policy-sha",
            "affected",
            7.1m,
            new JsonObject { ["deprecated"] = "true" },
            Guid.NewGuid(),
            null,
            new JsonArray("explain://existing"),
            DateTimeOffset.UtcNow,
            string.Empty);

        var record = CreateRecord(payload);

        var result = await _service.EvaluateAsync(record, existingProjection, default);

        result.Status.Should().Be("triaged");
        result.Severity.Should().Be(5.2m);
        result.Labels["kev"]!.GetValue<bool>().Should().BeTrue();
        result.Labels.ContainsKey("deprecated").Should().BeFalse();
        result.Labels["runtime"]!.GetValue<string>().Should().Be("exposed");
        result.ExplainRef.Should().Be("explain://tenant/findings/1");
        result.Rationale.Should().HaveCount(2);
        result.Rationale[0]!.GetValue<string>().Should().Be("explain://tenant/findings/1");
        result.Rationale[1]!.GetValue<string>().Should().Be("policy://tenant/pol/version/rationale");
    }

    [Fact]
    public async Task EvaluateAsync_FallsBack_WhenEventMissing()
    {
        var existingRationale = new JsonArray("explain://existing/rationale");
        var existingProjection = new FindingProjection(
            "tenant",
            "finding",
            "policy-sha",
            "accepted_risk",
            3.4m,
            new JsonObject { ["runtime"] = "contained" },
            Guid.NewGuid(),
            "explain://existing",
            existingRationale,
            DateTimeOffset.UtcNow,
            string.Empty);

        // Record with an empty event body: every value must come from the projection.
        var record = new LedgerEventRecord(
            "tenant",
            Guid.NewGuid(),
            1,
            Guid.NewGuid(),
            "finding.status_changed",
            "policy-sha",
            "finding",
            "artifact",
            null,
            "user:alice",
            "operator",
            DateTimeOffset.UtcNow,
            DateTimeOffset.UtcNow,
            new JsonObject(),
            "hash",
            "prev",
            "leaf",
            "{}"
        );

        var result = await _service.EvaluateAsync(record, existingProjection, default);

        result.Status.Should().Be("accepted_risk");
        result.Severity.Should().Be(3.4m);
        result.Labels["runtime"]!.GetValue<string>().Should().Be("contained");
        result.ExplainRef.Should().Be("explain://existing");
        result.Rationale.Should().HaveCount(1);
        result.Rationale[0]!.GetValue<string>().Should().Be("explain://existing/rationale");
    }

    /// <summary>
    /// Builds a <see cref="LedgerEventRecord"/> whose event body wraps
    /// <paramref name="payload"/> in a canonicalized envelope, mirroring what the
    /// ingestion path produces.
    /// </summary>
    private static LedgerEventRecord CreateRecord(JsonObject payload)
    {
        var eventObject = new JsonObject
        {
            ["id"] = Guid.NewGuid().ToString(),
            ["type"] = "finding.status_changed",
            ["tenant"] = "tenant",
            ["chainId"] = Guid.NewGuid().ToString(),
            ["sequence"] = 10,
            ["policyVersion"] = "policy-sha",
            ["artifactId"] = "artifact",
            ["finding"] = new JsonObject
            {
                ["id"] = "finding",
                ["artifactId"] = "artifact",
                ["vulnId"] = "CVE-0000-0001"
            },
            ["actor"] = new JsonObject
            {
                ["id"] = "user:alice",
                ["type"] = "operator"
            },
            ["occurredAt"] = "2025-11-04T12:00:00.000Z",
            ["recordedAt"] = "2025-11-04T12:00:01.000Z",
            ["payload"] = payload.DeepClone()
        };

        var envelope = new JsonObject
        {
            ["event"] = eventObject
        };

        var canonical = LedgerCanonicalJsonSerializer.Canonicalize(envelope);
        var canonicalJson = LedgerCanonicalJsonSerializer.Serialize(canonical);

        return new LedgerEventRecord(
            "tenant",
            Guid.Parse(eventObject["chainId"]!.GetValue<string>()),
            10,
            Guid.Parse(eventObject["id"]!.GetValue<string>()),
            eventObject["type"]!.GetValue<string>(),
            eventObject["policyVersion"]!.GetValue<string>(),
            eventObject["finding"]!["id"]!.GetValue<string>(),
            eventObject["artifactId"]!.GetValue<string>(),
            null,
            eventObject["actor"]!["id"]!.GetValue<string>(),
            eventObject["actor"]!["type"]!.GetValue<string>(),
            // Invariant culture: timestamps are machine-generated ISO-8601 (CA1305).
            DateTimeOffset.Parse(eventObject["occurredAt"]!.GetValue<string>(), CultureInfo.InvariantCulture),
            DateTimeOffset.Parse(eventObject["recordedAt"]!.GetValue<string>(), CultureInfo.InvariantCulture),
            canonical,
            "hash",
            "prev",
            "leaf",
            canonicalJson);
    }
}
|
||||
@@ -0,0 +1,204 @@
|
||||
using System.Globalization;
using System.Text.Json.Nodes;
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using StellaOps.Findings.Ledger.Domain;
using StellaOps.Findings.Ledger.Hashing;
using StellaOps.Findings.Ledger.Infrastructure;
using StellaOps.Findings.Ledger.Infrastructure.InMemory;
using StellaOps.Findings.Ledger.Infrastructure.Merkle;
using StellaOps.Findings.Ledger.Services;
using Xunit;

namespace StellaOps.Findings.Ledger.Tests;

/// <summary>
/// Tests for <see cref="LedgerEventWriteService.AppendAsync"/>: hash computation,
/// sequence-conflict detection, and idempotent replay of an already-stored event.
/// </summary>
public sealed class LedgerEventWriteServiceTests
{
    private readonly InMemoryLedgerEventRepository _repository = new();
    private readonly NullMerkleAnchorScheduler _scheduler = new();
    private readonly LedgerEventWriteService _service;

    public LedgerEventWriteServiceTests()
    {
        _service = new LedgerEventWriteService(_repository, _scheduler, NullLogger<LedgerEventWriteService>.Instance);
    }

    [Fact]
    public async Task AppendAsync_ComputesExpectedHashes()
    {
        var draft = CreateDraft();
        var result = await _service.AppendAsync(draft, CancellationToken.None);

        result.Status.Should().Be(LedgerWriteStatus.Success);
        result.Record.Should().NotBeNull();
        // Recompute the expected hashes independently from the canonical envelope.
        var canonicalEnvelope = LedgerCanonicalJsonSerializer.Canonicalize(draft.CanonicalEnvelope);
        var expectedHashes = LedgerHashing.ComputeHashes(canonicalEnvelope, draft.SequenceNumber);

        result.Record!.EventHash.Should().Be(expectedHashes.EventHash);
        result.Record.MerkleLeafHash.Should().Be(expectedHashes.MerkleLeafHash);
        // First event in the chain links back to the empty-hash sentinel.
        result.Record.PreviousHash.Should().Be(LedgerEventConstants.EmptyHash);
    }

    [Fact]
    public async Task AppendAsync_ReturnsConflict_WhenSequenceOutOfOrder()
    {
        var initial = CreateDraft();
        await _service.AppendAsync(initial, CancellationToken.None);

        // Sequence 44 does not follow sequence 1 on the same chain.
        var second = CreateDraft(sequenceNumber: 44, eventId: Guid.NewGuid());
        Assert.NotEqual(initial.EventId, second.EventId);
        var result = await _service.AppendAsync(second, CancellationToken.None);

        result.Status.Should().Be(LedgerWriteStatus.Conflict);
        result.Errors.Should().NotBeEmpty();
    }

    [Fact]
    public async Task AppendAsync_ReturnsIdempotent_WhenExistingRecordMatches()
    {
        var draft = CreateDraft();
        var existingRecord = CreateRecordFromDraft(draft, LedgerEventConstants.EmptyHash);
        var repository = new StubLedgerEventRepository(existingRecord);
        var scheduler = new CapturingMerkleScheduler();
        var service = new LedgerEventWriteService(repository, scheduler, NullLogger<LedgerEventWriteService>.Instance);

        var result = await service.AppendAsync(draft, CancellationToken.None);

        result.Status.Should().Be(LedgerWriteStatus.Idempotent);
        // Idempotent replay must neither re-append nor re-enqueue Merkle anchoring.
        scheduler.Enqueued.Should().BeFalse();
        repository.AppendWasCalled.Should().BeFalse();
    }

    /// <summary>
    /// Builds a fully-populated status-change draft. <paramref name="eventId"/>
    /// defaults to a fixed GUID so repeated calls produce the same event unless a
    /// caller explicitly requests a distinct one.
    /// </summary>
    private static LedgerEventDraft CreateDraft(long sequenceNumber = 1, Guid? eventId = null)
    {
        var eventGuid = eventId ?? Guid.Parse("3ac1f4ef-3c26-4b0d-91d4-6a6d3a5bde10");
        var payload = new JsonObject
        {
            ["previousStatus"] = "affected",
            ["status"] = "triaged",
            ["justification"] = "Ticket SEC-1234 created",
            ["ticket"] = new JsonObject
            {
                ["id"] = "SEC-1234",
                ["url"] = "https://tracker.example/sec-1234"
            }
        };

        var occurredAt = DateTimeOffset.Parse("2025-11-03T15:12:05.123Z", CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal);
        var recordedAt = DateTimeOffset.Parse("2025-11-03T15:12:06.001Z", CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal);

        var eventObject = new JsonObject
        {
            ["id"] = eventGuid.ToString(),
            ["type"] = "finding.status_changed",
            ["tenant"] = "tenant-a",
            ["chainId"] = "5fa2b970-9da2-4ef4-9a63-463c5d98d3cc",
            ["sequence"] = sequenceNumber,
            ["policyVersion"] = "sha256:5f38c7887d4a4bb887ce89c393c7a2e23e6e708fda310f9f3ff2a2a0b4dffbdf",
            ["finding"] = new JsonObject
            {
                ["id"] = "artifact:sha256:3f1e2d9c7b1a0f6534d1b6f998d7a5c3ef9e0ab92f4c1d2e3f5a6b7c8d9e0f1a|pkg:cpe:/o:vendor:product",
                ["artifactId"] = "sha256:3f1e2d9c7b1a0f6534d1b6f998d7a5c3ef9e0ab92f4c1d2e3f5a6b7c8d9e0f1a",
                ["vulnId"] = "CVE-2025-1234"
            },
            ["artifactId"] = "sha256:3f1e2d9c7b1a0f6534d1b6f998d7a5c3ef9e0ab92f4c1d2e3f5a6b7c8d9e0f1a",
            ["actor"] = new JsonObject
            {
                ["id"] = "user:alice@tenant",
                ["type"] = "operator"
            },
            // Millisecond-precision UTC, matching the canonical envelope format.
            ["occurredAt"] = occurredAt.ToUniversalTime().ToString("yyyy-MM-dd'T'HH:mm:ss.fff'Z'"),
            ["recordedAt"] = recordedAt.ToUniversalTime().ToString("yyyy-MM-dd'T'HH:mm:ss.fff'Z'"),
            ["payload"] = payload
        };

        eventObject["sourceRunId"] = "8f89a703-94cd-4e9d-8a75-2f407c4bee7f";

        var envelope = new JsonObject
        {
            ["event"] = eventObject
        };

        // EventId uses eventGuid directly; the original hardcoded the default GUID
        // here and patched it afterwards via a `with` expression, which would have
        // silently dropped the eventId parameter if the patch were ever removed.
        return new LedgerEventDraft(
            TenantId: "tenant-a",
            ChainId: Guid.Parse("5fa2b970-9da2-4ef4-9a63-463c5d98d3cc"),
            SequenceNumber: sequenceNumber,
            EventId: eventGuid,
            EventType: "finding.status_changed",
            PolicyVersion: "sha256:5f38c7887d4a4bb887ce89c393c7a2e23e6e708fda310f9f3ff2a2a0b4dffbdf",
            FindingId: "artifact:sha256:3f1e2d9c7b1a0f6534d1b6f998d7a5c3ef9e0ab92f4c1d2e3f5a6b7c8d9e0f1a|pkg:cpe:/o:vendor:product",
            ArtifactId: "sha256:3f1e2d9c7b1a0f6534d1b6f998d7a5c3ef9e0ab92f4c1d2e3f5a6b7c8d9e0f1a",
            SourceRunId: Guid.Parse("8f89a703-94cd-4e9d-8a75-2f407c4bee7f"),
            ActorId: "user:alice@tenant",
            ActorType: "operator",
            OccurredAt: occurredAt,
            RecordedAt: recordedAt,
            Payload: payload,
            CanonicalEnvelope: envelope,
            ProvidedPreviousHash: null);
    }

    /// <summary>
    /// Materializes the record the repository would hold after persisting
    /// <paramref name="draft"/>, with <paramref name="previousHash"/> as chain link.
    /// </summary>
    private static LedgerEventRecord CreateRecordFromDraft(LedgerEventDraft draft, string previousHash)
    {
        var canonicalEnvelope = LedgerCanonicalJsonSerializer.Canonicalize(draft.CanonicalEnvelope);
        var hashResult = LedgerHashing.ComputeHashes(canonicalEnvelope, draft.SequenceNumber);
        var eventBody = (JsonObject)canonicalEnvelope.DeepClone();

        return new LedgerEventRecord(
            draft.TenantId,
            draft.ChainId,
            draft.SequenceNumber,
            draft.EventId,
            draft.EventType,
            draft.PolicyVersion,
            draft.FindingId,
            draft.ArtifactId,
            draft.SourceRunId,
            draft.ActorId,
            draft.ActorType,
            draft.OccurredAt,
            draft.RecordedAt,
            eventBody,
            hashResult.EventHash,
            previousHash,
            hashResult.MerkleLeafHash,
            hashResult.CanonicalJson);
    }

    /// <summary>Repository stub that pretends a record already exists and flags appends.</summary>
    private sealed class StubLedgerEventRepository : ILedgerEventRepository
    {
        private readonly LedgerEventRecord? _existing;

        public StubLedgerEventRepository(LedgerEventRecord? existing)
        {
            _existing = existing;
        }

        public bool AppendWasCalled { get; private set; }

        public Task AppendAsync(LedgerEventRecord record, CancellationToken cancellationToken)
        {
            AppendWasCalled = true;
            return Task.CompletedTask;
        }

        public Task<LedgerEventRecord?> GetByEventIdAsync(string tenantId, Guid eventId, CancellationToken cancellationToken)
            => Task.FromResult(_existing);

        public Task<LedgerChainHead?> GetChainHeadAsync(string tenantId, Guid chainId, CancellationToken cancellationToken)
            => Task.FromResult<LedgerChainHead?>(null);
    }

    /// <summary>Scheduler stub that records whether anchoring was enqueued.</summary>
    private sealed class CapturingMerkleScheduler : IMerkleAnchorScheduler
    {
        public bool Enqueued { get; private set; }

        public Task EnqueueAsync(LedgerEventRecord record, CancellationToken cancellationToken)
        {
            Enqueued = true;
            return Task.CompletedTask;
        }
    }
}
|
||||
@@ -0,0 +1,205 @@
|
||||
using System.Globalization;
using System.Security.Cryptography;
using System.Text;
using System.Text.Json.Nodes;
using FluentAssertions;
using StellaOps.Findings.Ledger.Domain;
using StellaOps.Findings.Ledger.Hashing;
using StellaOps.Findings.Ledger.Infrastructure.Policy;
using StellaOps.Findings.Ledger.Services;
using Xunit;

namespace StellaOps.Findings.Ledger.Tests;

/// <summary>
/// Tests for <see cref="LedgerProjectionReducer.Reduce"/>: projection bootstrap
/// on creation, history/action emission on status change, and label add/remove
/// semantics on tag updates.
/// </summary>
public sealed class LedgerProjectionReducerTests
{
    [Fact]
    public void Reduce_WhenFindingCreated_InitialisesProjection()
    {
        var payload = new JsonObject
        {
            ["status"] = "triaged",
            ["severity"] = 6.5,
            ["labels"] = new JsonObject
            {
                ["kev"] = true,
                ["runtime"] = "exposed"
            },
            ["explainRef"] = "explain://tenant-a/finding/123"
        };

        var record = CreateRecord(LedgerEventConstants.EventFindingCreated, payload);

        var evaluation = new PolicyEvaluationResult(
            "triaged",
            6.5m,
            (JsonObject)payload["labels"]!.DeepClone(),
            payload["explainRef"]!.GetValue<string>(),
            new JsonArray(payload["explainRef"]!.GetValue<string>()));

        // No prior projection: the reducer must initialise one from scratch.
        var result = LedgerProjectionReducer.Reduce(record, current: null, evaluation);

        result.Projection.Status.Should().Be("triaged");
        result.Projection.Severity.Should().Be(6.5m);
        result.Projection.Labels["kev"]!.GetValue<bool>().Should().BeTrue();
        result.Projection.Labels["runtime"]!.GetValue<string>().Should().Be("exposed");
        result.Projection.ExplainRef.Should().Be("explain://tenant-a/finding/123");
        result.Projection.PolicyRationale.Should().ContainSingle()
            .Which!.GetValue<string>().Should().Be("explain://tenant-a/finding/123");
        // The stored cycle hash must match an independent recomputation.
        result.Projection.CycleHash.Should().NotBeNullOrWhiteSpace();
        ProjectionHashing.ComputeCycleHash(result.Projection).Should().Be(result.Projection.CycleHash);

        result.History.Status.Should().Be("triaged");
        result.History.Severity.Should().Be(6.5m);
        // Creation events do not yield a triage action.
        result.Action.Should().BeNull();
    }

    [Fact]
    public void Reduce_StatusChange_ProducesHistoryAndAction()
    {
        var existing = new FindingProjection(
            "tenant-a",
            "finding-1",
            "policy-v1",
            "affected",
            5.0m,
            new JsonObject(),
            Guid.NewGuid(),
            null,
            DateTimeOffset.UtcNow,
            string.Empty);
        var existingHash = ProjectionHashing.ComputeCycleHash(existing);
        existing = existing with { CycleHash = existingHash };

        var payload = new JsonObject
        {
            ["status"] = "accepted_risk",
            ["justification"] = "Approved by CISO"
        };

        var record = CreateRecord(LedgerEventConstants.EventFindingStatusChanged, payload);

        var evaluation = new PolicyEvaluationResult(
            "accepted_risk",
            existing.Severity,
            (JsonObject)existing.Labels.DeepClone(),
            null,
            new JsonArray());

        var result = LedgerProjectionReducer.Reduce(record, existing, evaluation);

        result.Projection.Status.Should().Be("accepted_risk");
        result.History.Status.Should().Be("accepted_risk");
        result.History.Comment.Should().Be("Approved by CISO");
        result.Action.Should().NotBeNull();
        result.Action!.ActionType.Should().Be("status_change");
        result.Action.Payload["justification"]!.GetValue<string>().Should().Be("Approved by CISO");
    }

    [Fact]
    public void Reduce_LabelUpdates_RemoveKeys()
    {
        var labels = new JsonObject
        {
            ["kev"] = true,
            ["runtime"] = "exposed"
        };
        var existing = new FindingProjection(
            "tenant-a",
            "finding-1",
            "policy-v1",
            "triaged",
            7.1m,
            labels,
            Guid.NewGuid(),
            null,
            DateTimeOffset.UtcNow,
            string.Empty);
        existing = existing with { CycleHash = ProjectionHashing.ComputeCycleHash(existing) };

        // Update "runtime", add "priority", and remove "kev".
        var payload = new JsonObject
        {
            ["labels"] = new JsonObject
            {
                ["runtime"] = "contained",
                ["priority"] = "p1"
            },
            ["labelsRemove"] = new JsonArray("kev")
        };

        var record = CreateRecord(LedgerEventConstants.EventFindingTagUpdated, payload);

        var evaluation = new PolicyEvaluationResult(
            "triaged",
            existing.Severity,
            (JsonObject)payload["labels"]!.DeepClone(),
            null,
            new JsonArray());

        var result = LedgerProjectionReducer.Reduce(record, existing, evaluation);

        result.Projection.Labels.ContainsKey("kev").Should().BeFalse();
        result.Projection.Labels["runtime"]!.GetValue<string>().Should().Be("contained");
        result.Projection.Labels["priority"]!.GetValue<string>().Should().Be("p1");
    }

    /// <summary>
    /// Builds a <see cref="LedgerEventRecord"/> for <paramref name="eventType"/>
    /// wrapping <paramref name="payload"/> in a canonicalized envelope. Hash fields
    /// are synthetic SHA-256 digests; the reducer does not verify chain linkage.
    /// </summary>
    private static LedgerEventRecord CreateRecord(string eventType, JsonObject payload)
    {
        var envelope = new JsonObject
        {
            ["event"] = new JsonObject
            {
                ["id"] = Guid.NewGuid().ToString(),
                ["type"] = eventType,
                ["tenant"] = "tenant-a",
                ["chainId"] = Guid.NewGuid().ToString(),
                ["sequence"] = 1,
                ["policyVersion"] = "policy-v1",
                ["artifactId"] = "artifact-1",
                ["finding"] = new JsonObject
                {
                    ["id"] = "finding-1",
                    ["artifactId"] = "artifact-1",
                    ["vulnId"] = "CVE-2025-0001"
                },
                ["actor"] = new JsonObject
                {
                    ["id"] = "user:alice",
                    ["type"] = "operator"
                },
                ["occurredAt"] = "2025-11-03T12:00:00.000Z",
                ["recordedAt"] = "2025-11-03T12:00:05.000Z",
                ["payload"] = payload.DeepClone()
            }
        };

        var canonical = LedgerCanonicalJsonSerializer.Canonicalize(envelope);
        var canonicalJson = LedgerCanonicalJsonSerializer.Serialize(canonical);

        return new LedgerEventRecord(
            "tenant-a",
            Guid.Parse(canonical["event"]!["chainId"]!.GetValue<string>()),
            1,
            Guid.Parse(canonical["event"]!["id"]!.GetValue<string>()),
            eventType,
            canonical["event"]!["policyVersion"]!.GetValue<string>(),
            canonical["event"]!["finding"]!["id"]!.GetValue<string>(),
            canonical["event"]!["artifactId"]!.GetValue<string>(),
            null,
            canonical["event"]!["actor"]!["id"]!.GetValue<string>(),
            canonical["event"]!["actor"]!["type"]!.GetValue<string>(),
            // Invariant culture: timestamps are machine-generated ISO-8601 (CA1305).
            DateTimeOffset.Parse(canonical["event"]!["occurredAt"]!.GetValue<string>(), CultureInfo.InvariantCulture),
            DateTimeOffset.Parse(canonical["event"]!["recordedAt"]!.GetValue<string>(), CultureInfo.InvariantCulture),
            canonical,
            ComputeSha256Hex(canonicalJson),
            LedgerEventConstants.EmptyHash,
            ComputeSha256Hex("placeholder-1"),
            canonicalJson);
    }

    /// <summary>Lowercase hex SHA-256 of the UTF-8 bytes of <paramref name="input"/>.</summary>
    private static string ComputeSha256Hex(string input)
    {
        var bytes = Encoding.UTF8.GetBytes(input);
        var hashBytes = SHA256.HashData(bytes);
        return Convert.ToHexString(hashBytes).ToLowerInvariant();
    }
}
|
||||
@@ -0,0 +1,21 @@
|
||||
<Project Sdk="Microsoft.NET.Sdk">

  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
    <LangVersion>preview</LangVersion>
    <IsPackable>false</IsPackable>
  </PropertyGroup>

  <ItemGroup>
    <ProjectReference Include="..\..\StellaOps.Findings.Ledger\StellaOps.Findings.Ledger.csproj" />
  </ItemGroup>

  <ItemGroup>
    <!-- NOTE(review): the `Update` items assume these test packages are declared
         centrally (e.g. a shared props file); confirm that is the case, otherwise
         they silently resolve to nothing. -->
    <PackageReference Update="Microsoft.NET.Test.Sdk" Version="17.14.0" />
    <PackageReference Update="xunit" Version="2.9.2" />
    <PackageReference Update="xunit.runner.visualstudio" Version="2.8.2">
      <PrivateAssets>all</PrivateAssets>
      <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
    </PackageReference>
    <PackageReference Include="FluentAssertions" Version="6.12.0" />
  </ItemGroup>

</Project>
|
||||
Reference in New Issue
Block a user