@@ -0,0 +1,502 @@
using System.CommandLine;
using System.Diagnostics;
using System.Diagnostics.Metrics;
using System.Runtime.CompilerServices;
using System.Text.Json;
using System.Text.Json.Nodes;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Findings.Ledger.Domain;
using StellaOps.Findings.Ledger.Hashing;
using StellaOps.Findings.Ledger.Infrastructure;
using StellaOps.Findings.Ledger.Infrastructure.Merkle;
using StellaOps.Findings.Ledger.Infrastructure.Postgres;
using StellaOps.Findings.Ledger.Infrastructure.Projection;
using StellaOps.Findings.Ledger.Options;
using StellaOps.Findings.Ledger.Observability;
using StellaOps.Findings.Ledger.Services;
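
// Replay harness flow: read canonical envelopes from NDJSON fixtures, append them through the
// ledger write service, give the projection worker time to drain, then re-verify the hash chain
// directly against PostgreSQL and emit a pass/fail report with latency percentiles.
//
// Example invocation (paths and connection string are illustrative):
//   dotnet run -- \
//     --fixture fixtures/chain-a.ndjson --fixture fixtures/chain-b.ndjson \
//     --connection "Host=localhost;Database=ledger;Username=ledger;Password=ledger" \
//     --tenant tenant-a --maxParallel 8 \
//     --report out/replay-report.json --metrics out/replay-metrics.json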

// Command-line options
var fixturesOption = new Option<FileInfo[]>(
    name: "--fixture",
    description: "NDJSON fixtures containing canonical ledger envelopes (sequence-ordered)")
{
    IsRequired = true
};
fixturesOption.AllowMultipleArgumentsPerToken = true;

var connectionOption = new Option<string>(
    name: "--connection",
    description: "PostgreSQL connection string for ledger DB")
{
    IsRequired = true
};

var tenantOption = new Option<string>(
    name: "--tenant",
    getDefaultValue: () => "tenant-a",
    description: "Tenant identifier for appended events");

var maxParallelOption = new Option<int>(
    name: "--maxParallel",
    getDefaultValue: () => 4,
    description: "Maximum concurrent append operations");

var reportOption = new Option<FileInfo?>(
    name: "--report",
    description: "Path to write harness report JSON (with DSSE placeholder)");

var metricsOption = new Option<FileInfo?>(
    name: "--metrics",
    description: "Optional path to write metrics snapshot JSON");

var root = new RootCommand("Findings Ledger Replay Harness (LEDGER-29-008)");
root.AddOption(fixturesOption);
root.AddOption(connectionOption);
root.AddOption(tenantOption);
root.AddOption(maxParallelOption);
root.AddOption(reportOption);
root.AddOption(metricsOption);

root.SetHandler(async (FileInfo[] fixtures, string connection, string tenant, int maxParallel, FileInfo? reportFile, FileInfo? metricsFile) =>
{
    await using var host = BuildHost(connection);
    using var scope = host.Services.CreateScope();

    var writeService = scope.ServiceProvider.GetRequiredService<ILedgerEventWriteService>();
    var projectionWorker = scope.ServiceProvider.GetRequiredService<LedgerProjectionWorker>();
    var anchorWorker = scope.ServiceProvider.GetRequiredService<LedgerMerkleAnchorWorker>();
    var logger = scope.ServiceProvider.GetRequiredService<ILoggerFactory>().CreateLogger("Harness");
    var timeProvider = scope.ServiceProvider.GetRequiredService<TimeProvider>();

    var cts = new CancellationTokenSource();
    var projectionTask = projectionWorker.StartAsync(cts.Token);
    var anchorTask = anchorWorker.StartAsync(cts.Token);

    var (meterListener, metrics) = CreateMeterListener();

    var sw = Stopwatch.StartNew();
    long eventsWritten = 0;
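
    // Replay fan-out: fixture files are processed concurrently (bounded by --maxParallel) while
    // each file is streamed sequentially, so sequence ordering inside a fixture is preserved.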

    await Parallel.ForEachAsync(fixtures, new ParallelOptions { MaxDegreeOfParallelism = maxParallel, CancellationToken = cts.Token }, async (file, token) =>
    {
        await foreach (var draft in ReadDraftsAsync(file, tenant, timeProvider, token))
        {
            var result = await writeService.AppendAsync(draft, token).ConfigureAwait(false);
            if (result.Status is LedgerWriteStatus.ValidationFailed or LedgerWriteStatus.Conflict)
            {
                throw new InvalidOperationException($"Append failed for {draft.EventId}: {string.Join(",", result.Errors)} ({result.ConflictCode})");
            }

            var written = Interlocked.Increment(ref eventsWritten);
            if (written % 50_000 == 0)
            {
                logger.LogInformation("Appended {Count} events...", written);
            }
        }
    }).ConfigureAwait(false);

    // Wait for the projector to catch up before sampling metrics
    await Task.Delay(TimeSpan.FromSeconds(2), cts.Token);
    sw.Stop();
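
    // Observable instruments (e.g. projection lag, backlog, active DB connections) only report
    // when polled, so take one final collection before computing the summary below.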

    meterListener.RecordObservableInstruments();

    var verification = await VerifyLedgerAsync(scope.ServiceProvider, tenant, eventsWritten, cts.Token).ConfigureAwait(false);

    var writeLatencyP95Ms = Percentile(metrics.HistDouble("ledger_write_latency_seconds"), 95) * 1000;
    var rebuildP95Ms = Percentile(metrics.HistDouble("ledger_projection_rebuild_seconds"), 95) * 1000;
    var projectionLagSeconds = metrics.GaugeDouble("ledger_projection_lag_seconds").DefaultIfEmpty(0).Max();
    var backlogEvents = metrics.GaugeLong("ledger_ingest_backlog_events").DefaultIfEmpty(0).Max();
    var dbConnections = metrics.GaugeLong("ledger_db_connections_active").DefaultIfEmpty(0).Sum();

    var report = new HarnessReport(
        tenant,
        fixtures.Select(f => f.FullName).ToArray(),
        eventsWritten,
        sw.Elapsed.TotalSeconds,
        Status: verification.Success ? "pass" : "fail",
        WriteLatencyP95Ms: writeLatencyP95Ms,
        ProjectionRebuildP95Ms: rebuildP95Ms,
        ProjectionLagSecondsMax: projectionLagSeconds,
        BacklogEventsMax: backlogEvents,
        DbConnectionsObserved: dbConnections,
        VerificationErrors: verification.Errors.ToArray());

    var jsonOptions = new JsonSerializerOptions { WriteIndented = true };
    var json = JsonSerializer.Serialize(report, jsonOptions);
    Console.WriteLine(json);

    if (reportFile is not null)
    {
        await File.WriteAllTextAsync(reportFile.FullName, json, cts.Token).ConfigureAwait(false);
        await WriteDssePlaceholderAsync(reportFile.FullName, json, cts.Token).ConfigureAwait(false);
    }

    if (metricsFile is not null)
    {
        var snapshot = metrics.ToSnapshot();
        var metricsJson = JsonSerializer.Serialize(snapshot, jsonOptions);
        await File.WriteAllTextAsync(metricsFile.FullName, metricsJson, cts.Token).ConfigureAwait(false);
    }

    cts.Cancel();
    await Task.WhenAll(projectionTask, anchorTask).WaitAsync(TimeSpan.FromSeconds(5));
}, fixturesOption, connectionOption, tenantOption, maxParallelOption, reportOption, metricsOption);

await root.InvokeAsync(args);
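
// Writes a "<report>.sig" sidecar next to the report. This is only a placeholder for a real
// DSSE envelope: it records the SHA-256 of the report payload and a local signer id, not a signature.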
static async Task WriteDssePlaceholderAsync(string reportPath, string json, CancellationToken cancellationToken)
{
    using var sha = System.Security.Cryptography.SHA256.Create();
    var digest = sha.ComputeHash(System.Text.Encoding.UTF8.GetBytes(json));
    var sig = new
    {
        payloadType = "application/vnd.stella-ledger-harness+json",
        sha256 = Convert.ToHexString(digest).ToLowerInvariant(),
        signedBy = "harness-local",
        createdAt = DateTimeOffset.UtcNow
    };

    var sigJson = JsonSerializer.Serialize(sig, new JsonSerializerOptions { WriteIndented = true });
    await File.WriteAllTextAsync(reportPath + ".sig", sigJson, cancellationToken).ConfigureAwait(false);
}
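
// Subscribes an in-process MeterListener to the "StellaOps.Findings.Ledger" meter so percentiles
// can be computed from raw measurements without wiring up an external metrics exporter.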
static (MeterListener Listener, MetricsBag Bag) CreateMeterListener()
{
    var bag = new MetricsBag();
    var listener = new MeterListener
    {
        InstrumentPublished = (instrument, meterListener) =>
        {
            if (instrument.Meter.Name == "StellaOps.Findings.Ledger")
            {
                meterListener.EnableMeasurementEvents(instrument);
            }
        }
    };

    listener.SetMeasurementEventCallback<double>((instrument, measurement, tags, _) =>
    {
        bag.Add(instrument, measurement, tags);
    });
    listener.SetMeasurementEventCallback<long>((instrument, measurement, tags, _) =>
    {
        bag.Add(instrument, measurement, tags);
    });

    listener.Start();
    return (listener, bag);
}
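
// Minimal host composition: the real Postgres event repository and event stream back the appends,
// while projection/merkle repositories are no-ops so replay throughput is not bounded by
// projection writes (see the no-op implementations at the bottom of the file).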
static IHost BuildHost(string connectionString)
{
    return Host.CreateDefaultBuilder()
        .ConfigureLogging(logging =>
        {
            logging.ClearProviders();
            logging.AddSimpleConsole(options =>
            {
                options.SingleLine = true;
                options.TimestampFormat = "HH:mm:ss ";
            });
        })
        .ConfigureServices(services =>
        {
            services.Configure<LedgerServiceOptions>(opts =>
            {
                opts.Database.ConnectionString = connectionString;
            });

            services.AddSingleton<TimeProvider>(_ => TimeProvider.System);
            services.AddSingleton<LedgerDataSource>();
            services.AddSingleton<ILedgerEventRepository, PostgresLedgerEventRepository>();
            services.AddSingleton<IFindingProjectionRepository, NoOpProjectionRepository>();
            services.AddSingleton<ILedgerEventStream, PostgresLedgerEventStream>();
            services.AddSingleton<IPolicyEvaluationService, NoOpPolicyEvaluationService>();
            services.AddSingleton<IMerkleAnchorRepository, NoOpMerkleAnchorRepository>();
            services.AddSingleton<LedgerAnchorQueue>();
            services.AddSingleton<IMerkleAnchorScheduler, QueueMerkleAnchorScheduler>();
            services.AddSingleton<LedgerMerkleAnchorWorker>();
            services.AddSingleton<LedgerProjectionWorker>();
            services.AddSingleton<ILedgerEventWriteService, LedgerEventWriteService>();
        })
        .Build();
}
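
// Streams one canonical ledger envelope per NDJSON line; blank lines and literal JSON nulls are skipped.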
static async IAsyncEnumerable<LedgerEventDraft> ReadDraftsAsync(FileInfo file, string tenant, TimeProvider timeProvider, [EnumeratorCancellation] CancellationToken cancellationToken)
{
    await using var stream = file.OpenRead();
    using var reader = new StreamReader(stream);
    var recordedAtBase = timeProvider.GetUtcNow();

    while (!reader.EndOfStream)
    {
        var line = await reader.ReadLineAsync().ConfigureAwait(false);
        if (string.IsNullOrWhiteSpace(line))
        {
            continue;
        }

        var node = JsonNode.Parse(line)?.AsObject();
        if (node is null)
        {
            continue;
        }

        yield return ToDraft(node, tenant, recordedAtBase);
        cancellationToken.ThrowIfCancellationRequested();
    }
}
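
// Maps a canonical envelope object onto a LedgerEventDraft. Optional fields (tenant, recorded_at,
// source_run_id, previous_hash) fall back to harness defaults or null; the remaining fields are required.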
static LedgerEventDraft ToDraft(JsonObject node, string defaultTenant, DateTimeOffset recordedAtBase)
{
    string required(string name) => node[name]?.GetValue<string>() ?? throw new InvalidOperationException($"{name} missing");

    var tenantId = node.TryGetPropertyValue("tenant", out var tenantNode)
        ? tenantNode!.GetValue<string>()
        : defaultTenant;

    var chainId = Guid.Parse(required("chain_id"));
    var sequence = node["sequence_no"]?.GetValue<long>() ?? node["sequence"]?.GetValue<long>() ?? throw new InvalidOperationException("sequence_no missing");
    var eventId = Guid.Parse(required("event_id"));
    var eventType = required("event_type");
    var policyVersion = required("policy_version");
    var findingId = required("finding_id");
    var artifactId = required("artifact_id");
    Guid? sourceRunId = node.TryGetPropertyValue("source_run_id", out var sourceRunNode) && sourceRunNode is not null && !string.IsNullOrWhiteSpace(sourceRunNode.GetValue<string>())
        ? Guid.Parse(sourceRunNode!.GetValue<string>())
        : null;
    var actorId = required("actor_id");
    var actorType = required("actor_type");
    var occurredAt = DateTimeOffset.Parse(required("occurred_at"));
    var recordedAt = node.TryGetPropertyValue("recorded_at", out var recordedAtNode) && recordedAtNode is not null
        ? DateTimeOffset.Parse(recordedAtNode.GetValue<string>())
        : recordedAtBase;

    var payload = node.TryGetPropertyValue("payload", out var payloadNode) && payloadNode is JsonObject payloadObj
        ? payloadObj
        : throw new InvalidOperationException("payload missing");

    var canonicalEnvelope = LedgerCanonicalJsonSerializer.Canonicalize(payload);
    var prev = node.TryGetPropertyValue("previous_hash", out var prevNode) ? prevNode?.GetValue<string>() : null;

    return new LedgerEventDraft(
        tenantId,
        chainId,
        sequence,
        eventId,
        eventType,
        policyVersion,
        findingId,
        artifactId,
        sourceRunId,
        actorId,
        actorType,
        occurredAt,
        recordedAt,
        payload,
        canonicalEnvelope,
        prev);
}
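
// Re-reads every event for the tenant in (chain_id, sequence_no) order and re-derives the chain:
// sequence gaps, previous_hash links, canonical event hashes, and Merkle leaf hashes must all match.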
static async Task<VerificationResult> VerifyLedgerAsync(IServiceProvider services, string tenant, long expectedEvents, CancellationToken cancellationToken)
{
    var errors = new List<string>();
    var dataSource = services.GetRequiredService<LedgerDataSource>();

    await using var connection = await dataSource.OpenConnectionAsync(tenant, "verify", cancellationToken).ConfigureAwait(false);

    // Count check
    await using (var countCommand = new Npgsql.NpgsqlCommand("select count(*) from ledger_events where tenant_id = @tenant", connection))
    {
        countCommand.Parameters.AddWithValue("tenant", tenant);
        var count = (long)await countCommand.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        if (count < expectedEvents)
        {
            errors.Add($"event_count_mismatch:{count}/{expectedEvents}");
        }
    }

    // Sequence and hash verification
    const string query = """
        select chain_id, sequence_no, event_id, event_body, event_hash, previous_hash, merkle_leaf_hash
        from ledger_events
        where tenant_id = @tenant
        order by chain_id, sequence_no
        """;

    await using var command = new Npgsql.NpgsqlCommand(query, connection);
    command.Parameters.AddWithValue("tenant", tenant);

    await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);

    Guid? currentChain = null;
    long expectedSequence = 1;
    string? prevHash = null;

    while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        var chainId = reader.GetGuid(0);
        var sequence = reader.GetInt64(1);
        var eventId = reader.GetGuid(2);
        var eventBodyJson = reader.GetString(3);
        var eventHash = reader.GetString(4);
        var previousHash = reader.GetString(5);
        var merkleLeafHash = reader.GetString(6);

        if (currentChain != chainId)
        {
            currentChain = chainId;
            expectedSequence = 1;
            prevHash = LedgerEventConstants.EmptyHash;
        }

        if (sequence != expectedSequence)
        {
            errors.Add($"sequence_gap:{chainId}:{sequence}");
        }

        if (!string.Equals(previousHash, prevHash, StringComparison.Ordinal))
        {
            errors.Add($"previous_hash_mismatch:{chainId}:{sequence}");
        }

        var node = JsonNode.Parse(eventBodyJson)?.AsObject() ?? new JsonObject();
        var canonical = LedgerCanonicalJsonSerializer.Canonicalize(node);
        var hashResult = LedgerHashing.ComputeHashes(canonical, sequence);

        if (!string.Equals(hashResult.EventHash, eventHash, StringComparison.Ordinal))
        {
            errors.Add($"event_hash_mismatch:{eventId}");
        }

        if (!string.Equals(hashResult.MerkleLeafHash, merkleLeafHash, StringComparison.Ordinal))
        {
            errors.Add($"merkle_leaf_mismatch:{eventId}");
        }

        prevHash = eventHash;
        expectedSequence++;
    }

    if (errors.Count == 0)
    {
        // Additional check: projector caught up (no lag > 0)
        var lagMax = LedgerMetricsSnapshot.LagMax;
        if (lagMax > 0)
        {
            errors.Add($"projection_lag_remaining:{lagMax}");
        }
    }

    return new VerificationResult(errors.Count == 0, errors);
}
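
// Percentile over raw samples with linear interpolation between the two closest ranks,
// e.g. P95 of [10, 20, 30, 40] -> rank 2.85 -> 30 + 0.85 * (40 - 30) = 38.5.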
static double Percentile(IEnumerable<double> values, double percentile)
{
    var data = values.Where(v => !double.IsNaN(v)).OrderBy(v => v).ToArray();
    if (data.Length == 0)
    {
        return 0;
    }

    var rank = (percentile / 100.0) * (data.Length - 1);
    var lowerIndex = (int)Math.Floor(rank);
    var upperIndex = (int)Math.Ceiling(rank);
    if (lowerIndex == upperIndex)
    {
        return data[lowerIndex];
    }

    var fraction = rank - lowerIndex;
    return data[lowerIndex] + (data[upperIndex] - data[lowerIndex]) * fraction;
}

internal sealed record HarnessReport(
    string Tenant,
    IReadOnlyList<string> Fixtures,
    long EventsWritten,
    double DurationSeconds,
    string Status,
    double WriteLatencyP95Ms,
    double ProjectionRebuildP95Ms,
    double ProjectionLagSecondsMax,
    double BacklogEventsMax,
    long DbConnectionsObserved,
    IReadOnlyList<string> VerificationErrors);

internal sealed record VerificationResult(bool Success, IReadOnlyList<string> Errors);
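
// Collects raw measurements from the MeterListener callbacks. Appends arrive from multiple writer
// threads, so adds are synchronised; reads only happen after the replay loop has completed.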
internal sealed class MetricsBag
{
    private readonly object sync = new();
    private readonly List<(string Name, double Value)> doubles = new();
    private readonly List<(string Name, long Value)> longs = new();

    public void Add(Instrument instrument, double value, ReadOnlySpan<KeyValuePair<string, object?>> _)
    {
        lock (sync)
        {
            doubles.Add((instrument.Name, value));
        }
    }

    public void Add(Instrument instrument, long value, ReadOnlySpan<KeyValuePair<string, object?>> _)
    {
        lock (sync)
        {
            longs.Add((instrument.Name, value));
        }
    }

    public IEnumerable<double> HistDouble(string name) => doubles.Where(d => d.Name == name).Select(d => d.Value);
    public IEnumerable<double> GaugeDouble(string name) => doubles.Where(d => d.Name == name).Select(d => d.Value);
    public IEnumerable<long> GaugeLong(string name) => longs.Where(l => l.Name == name).Select(l => l.Value);

    public object ToSnapshot() => new
    {
        doubles = doubles.GroupBy(x => x.Name).ToDictionary(g => g.Key, g => g.Select(v => v.Value).ToArray()),
        longs = longs.GroupBy(x => x.Name).ToDictionary(g => g.Key, g => g.Select(v => v.Value).ToArray())
    };
}

// Harness lightweight no-op implementations for projection/merkle to keep replay fast
internal sealed class NoOpPolicyEvaluationService : IPolicyEvaluationService
{
    public Task<PolicyEvaluationResult> EvaluateAsync(LedgerEventRecord record, FindingProjection? current, CancellationToken cancellationToken)
    {
        return Task.FromResult(new PolicyEvaluationResult("noop", record.OccurredAt, record.RecordedAt, current?.Status ?? "new"));
    }
}

internal sealed class NoOpProjectionRepository : IFindingProjectionRepository
{
    public Task<FindingProjection?> GetAsync(string tenantId, string findingId, string policyVersion, CancellationToken cancellationToken) =>
        Task.FromResult<FindingProjection?>(null);

    public Task InsertActionAsync(FindingAction action, CancellationToken cancellationToken) => Task.CompletedTask;

    public Task InsertHistoryAsync(FindingHistory history, CancellationToken cancellationToken) => Task.CompletedTask;

    public Task SaveCheckpointAsync(ProjectionCheckpoint checkpoint, CancellationToken cancellationToken) => Task.CompletedTask;

    public Task<ProjectionCheckpoint> GetCheckpointAsync(CancellationToken cancellationToken) =>
        Task.FromResult(new ProjectionCheckpoint(DateTimeOffset.MinValue, Guid.Empty, DateTimeOffset.MinValue));

    public Task UpsertAsync(FindingProjection projection, CancellationToken cancellationToken) => Task.CompletedTask;
    public Task EnsureIndexesAsync(CancellationToken cancellationToken) => Task.CompletedTask;
}

internal sealed class NoOpMerkleAnchorRepository : IMerkleAnchorRepository
{
    public Task InsertAsync(string tenantId, Guid anchorId, DateTimeOffset windowStart, DateTimeOffset windowEnd, long sequenceStart, long sequenceEnd, string rootHash, long leafCount, DateTime anchoredAt, string? anchorReference, CancellationToken cancellationToken)
        => Task.CompletedTask;

    public Task<MerkleAnchor?> GetLatestAsync(string tenantId, CancellationToken cancellationToken) =>
        Task.FromResult<MerkleAnchor?>(null);
}

internal sealed class QueueMerkleAnchorScheduler : IMerkleAnchorScheduler
{
    private readonly LedgerAnchorQueue _queue;

    public QueueMerkleAnchorScheduler(LedgerAnchorQueue queue)
    {
        _queue = queue ?? throw new ArgumentNullException(nameof(queue));
    }

    public Task EnqueueAsync(LedgerEventRecord record, CancellationToken cancellationToken)
        => _queue.EnqueueAsync(record, cancellationToken).AsTask();
}