feat(audit): wire AddAuditEmission into 9 services (AUDIT-002)

- Wire StellaOps.Audit.Emission DI in: Authority, Policy, Release-Orchestrator,
  EvidenceLocker, Notify, Scanner, Scheduler, Integrations, Platform
- Add AuditEmission__TimelineBaseUrl to compose defaults
- Endpoint filter annotation deferred to follow-up pass

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
master
2026-04-08 16:20:39 +03:00
parent 65106afe4c
commit f5a9f874d0
34 changed files with 1865 additions and 24 deletions

View File

@@ -68,6 +68,8 @@ x-router-microservice-defaults: &router-microservice-defaults
Router__Messaging__valkey__QueueWaitTimeoutSeconds: "${VALKEY_QUEUE_WAIT_TIMEOUT:-0}"
# Identity envelope verification (signed by gateway, verified by services)
Router__IdentityEnvelopeSigningKey: "${STELLAOPS_IDENTITY_ENVELOPE_SIGNING_KEY}"
# Unified audit emission — services POST audit events to Timeline ingest
AuditEmission__TimelineBaseUrl: "http://timeline.stella-ops.local"
x-healthcheck-tcp: &healthcheck-tcp
interval: ${HEALTHCHECK_INTERVAL:-60s}

View File

@@ -97,7 +97,7 @@
{ "Type": "Microservice", "Path": "^/api/v1/advisory-ai(.*)", "IsRegex": true, "TranslatesTo": "http://advisoryai.stella-ops.local/v1/advisory-ai$1" },
{ "Type": "Microservice", "Path": "^/api/v1/advisory(.*)", "IsRegex": true, "TranslatesTo": "http://advisoryai.stella-ops.local/api/v1/advisory$1" },
{ "Type": "Microservice", "Path": "^/api/v1/vex(.*)", "IsRegex": true, "TranslatesTo": "https://vexhub.stella-ops.local/api/v1/vex$1" },
{ "Type": "Microservice", "Path": "^/api/v1/doctor/scheduler(.*)", "IsRegex": true, "TranslatesTo": "http://doctor-scheduler.stella-ops.local/api/v1/doctor/scheduler$1" },
{ "Type": "Microservice", "Path": "^/api/v1/scheduler/doctor(.*)", "IsRegex": true, "TranslatesTo": "http://scheduler.stella-ops.local/api/v1/scheduler/doctor$1" },
{ "Type": "ReverseProxy", "Path": "^/api/v1/registries(.*)", "IsRegex": true, "TranslatesTo": "http://platform.stella-ops.local/api/v1/registries$1", "PreserveAuthHeaders": true },
{ "Type": "Microservice", "Path": "^/api/v2/context(.*)", "IsRegex": true, "TranslatesTo": "http://platform.stella-ops.local/api/v2/context$1" },

View File

@@ -59,7 +59,8 @@ timeline-web|devops/docker/Dockerfile.hardened.template|src/Timeline/StellaOps.T
findings-ledger-web|devops/docker/Dockerfile.hardened.template|src/Findings/StellaOps.Findings.Ledger.WebService/StellaOps.Findings.Ledger.WebService.csproj|StellaOps.Findings.Ledger.WebService|8080
# ── Slot 26: Doctor ─────────────────────────────────────────────────────────────
doctor-web|devops/docker/Dockerfile.hardened.template|src/Doctor/StellaOps.Doctor.WebService/StellaOps.Doctor.WebService.csproj|StellaOps.Doctor.WebService|8080
doctor-scheduler|devops/docker/Dockerfile.hardened.template|src/Doctor/StellaOps.Doctor.Scheduler/StellaOps.Doctor.Scheduler.csproj|StellaOps.Doctor.Scheduler|8080
# doctor-scheduler: DEPRECATED -- replaced by DoctorJobPlugin in the Scheduler service (plugin architecture).
# doctor-scheduler|devops/docker/Dockerfile.hardened.template|src/Doctor/StellaOps.Doctor.Scheduler/StellaOps.Doctor.Scheduler.csproj|StellaOps.Doctor.Scheduler|8080
# ── Slot 27: OpsMemory ──────────────────────────────────────────────────────────
opsmemory-web|devops/docker/Dockerfile.hardened.template|src/AdvisoryAI/StellaOps.OpsMemory.WebService/StellaOps.OpsMemory.WebService.csproj|StellaOps.OpsMemory.WebService|8080
# ── Slot 28: Notifier (web merged into notify-web; worker stays) ────────────────

View File

@@ -65,6 +65,7 @@ using StellaOps.Auth.Abstractions;
using StellaOps.Auth.ServerIntegration;
using StellaOps.Authority.Vulnerability.Workflow;
using StellaOps.Authority.Vulnerability.Attachments;
using StellaOps.Audit.Emission;
#if STELLAOPS_AUTH_SECURITY
using StellaOps.Auth.Security.Dpop;
using StackExchange.Redis;
@@ -463,6 +464,9 @@ builder.Services.Configure<OpenIddictServerOptions>(options =>
builder.Services.AddStellaOpsResourceServerAuthentication(builder.Configuration, configurationSection: null);
builder.Services.AddAuthorization();
// Unified audit emission (posts audit events to Timeline service)
builder.Services.AddAuditEmission(builder.Configuration);
// Stella Router integration
var routerEnabled = builder.Services.AddRouterMicroservice(
builder.Configuration,

View File

@@ -36,6 +36,7 @@
<ProjectReference Include="../../../__Libraries/StellaOps.DependencyInjection/StellaOps.DependencyInjection.csproj" />
<ProjectReference Include="../../../Attestor/StellaOps.Attestation/StellaOps.Attestation.csproj" />
<ProjectReference Include="../../../Router/__Libraries/StellaOps.Router.AspNet/StellaOps.Router.AspNet.csproj" />
<ProjectReference Include="../../../__Libraries/StellaOps.Audit.Emission/StellaOps.Audit.Emission.csproj" />
</ItemGroup>
<ItemGroup>
<EmbeddedResource Include="Pages\*.html" />

View File

@@ -18,6 +18,7 @@ using StellaOps.EvidenceLocker.Infrastructure.Services;
using StellaOps.EvidenceLocker.WebService.Audit;
using StellaOps.EvidenceLocker.WebService.Contracts;
using StellaOps.EvidenceLocker.WebService.Security;
using StellaOps.Audit.Emission;
using StellaOps.Localization;
using StellaOps.Router.AspNet;
using static StellaOps.Localization.T;
@@ -54,6 +55,9 @@ builder.Services.AddStellaOpsCors(builder.Environment, builder.Configuration);
builder.Services.AddStellaOpsLocalization(builder.Configuration);
builder.Services.AddTranslationBundle(System.Reflection.Assembly.GetExecutingAssembly());
// Unified audit emission (posts audit events to Timeline service)
builder.Services.AddAuditEmission(builder.Configuration);
// Stella Router integration
var routerEnabled = builder.Services.AddRouterMicroservice(
builder.Configuration,

View File

@@ -24,6 +24,7 @@
<ProjectReference Include="..\..\..\Authority\StellaOps.Authority\StellaOps.Auth.Abstractions\StellaOps.Auth.Abstractions.csproj" />
<ProjectReference Include="..\..\..\Router/__Libraries/StellaOps.Router.AspNet\StellaOps.Router.AspNet.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Localization\StellaOps.Localization.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Audit.Emission\StellaOps.Audit.Emission.csproj" />
</ItemGroup>
<ItemGroup>
<EmbeddedResource Include="Translations\*.json" />

View File

@@ -0,0 +1,419 @@
// Licensed under BUSL-1.1. Copyright (C) 2026 StellaOps Contributors.
// Postgres-backed repositories for VulnExplorer adapters (VXLM-005 gap fix).
// Replaces ConcurrentDictionary in-memory stores with durable persistence.
using Microsoft.Extensions.Logging;
using Npgsql;
using NpgsqlTypes;
using StellaOps.Findings.Ledger.Infrastructure.Postgres;
using StellaOps.Findings.Ledger.WebService.Contracts.VulnExplorer;
using System.Text.Json;
namespace StellaOps.Findings.Ledger.WebService.Services;
// ============================================================================
// Interfaces
// ============================================================================
/// <summary>
/// Durable store for VEX (Vulnerability Exploitability eXchange) decisions.
/// Replaces the previous in-memory ConcurrentDictionary store (see file header, VXLM-005).
/// All operations are tenant-scoped; NOTE(review): implementations appear to rely on
/// Postgres row-level security rather than explicit tenant_id predicates — confirm.
/// </summary>
public interface IVexDecisionRepository
{
    /// <summary>Persists a new decision and returns it unchanged.</summary>
    Task<VexDecisionDto> CreateAsync(string tenantId, VexDecisionDto decision, CancellationToken ct = default);

    /// <summary>
    /// Applies the non-null fields of <paramref name="request"/> to an existing decision.
    /// Returns the merged decision, or null when no decision with <paramref name="id"/> exists.
    /// </summary>
    Task<VexDecisionDto?> UpdateAsync(string tenantId, Guid id, UpdateVexDecisionRequest request, DateTimeOffset updatedAt, CancellationToken ct = default);

    /// <summary>Fetches a single decision by id, or null when not found.</summary>
    Task<VexDecisionDto?> GetAsync(string tenantId, Guid id, CancellationToken ct = default);

    /// <summary>
    /// Lists decisions with optional filters (exact vulnerability id, substring subject-name
    /// match, exact status) and skip/take paging, newest first.
    /// </summary>
    Task<IReadOnlyList<VexDecisionDto>> QueryAsync(string tenantId, string? vulnerabilityId, string? subjectName, VexStatus? status, int skip, int take, CancellationToken ct = default);

    /// <summary>Total number of decisions visible to the tenant.</summary>
    Task<int> CountAsync(string tenantId, CancellationToken ct = default);
}
/// <summary>
/// Durable store for fix-verification records, keyed by (tenant, CVE id).
/// </summary>
public interface IFixVerificationRepository
{
    /// <summary>Persists a new fix-verification record and returns it unchanged.</summary>
    Task<FixVerificationRecord> CreateAsync(string tenantId, FixVerificationRecord record, CancellationToken ct = default);

    /// <summary>
    /// Sets a new verdict and appends <paramref name="transition"/> to the record's history.
    /// Returns the updated record, or null when no record exists for <paramref name="cveId"/>.
    /// </summary>
    Task<FixVerificationRecord?> UpdateAsync(string tenantId, string cveId, string verdict, FixVerificationTransition transition, CancellationToken ct = default);
}
/// <summary>
/// Durable store for audit bundles (snapshots of VEX decisions plus evidence refs).
/// </summary>
public interface IAuditBundleRepository
{
    /// <summary>Persists a bundle together with the ids of the decisions it captures.</summary>
    Task<AuditBundleResponse> CreateAsync(string tenantId, AuditBundleResponse bundle, IReadOnlyList<Guid> decisionIds, CancellationToken ct = default);

    /// <summary>
    /// Allocates the next bundle identifier (e.g. "bundle-000001").
    /// NOTE(review): the Postgres implementation draws from a single sequence, so ids are
    /// monotonic across all tenants, not per tenant — confirm that is intended.
    /// </summary>
    Task<string> NextBundleIdAsync(string tenantId, CancellationToken ct = default);
}
// ============================================================================
// JSON serialization options (shared)
// ============================================================================
/// <summary>
/// Shared <see cref="JsonSerializerOptions"/> for VulnExplorer persistence: compact
/// camelCase JSON with enums serialized as camelCase strings, matching the content
/// written to the jsonb columns.
/// </summary>
internal static class VulnExplorerJsonOptions
{
    /// <summary>Singleton options instance used by all VulnExplorer repositories.</summary>
    internal static readonly JsonSerializerOptions Default = CreateDefault();

    // Built once via a factory so the converter registration reads as an explicit step.
    private static JsonSerializerOptions CreateDefault()
    {
        var options = new JsonSerializerOptions
        {
            PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
            WriteIndented = false
        };
        options.Converters.Add(new System.Text.Json.Serialization.JsonStringEnumConverter(JsonNamingPolicy.CamelCase));
        return options;
    }
}
// ============================================================================
// PostgresVexDecisionRepository
// ============================================================================
/// <summary>
/// Postgres-backed <see cref="IVexDecisionRepository"/> over findings.vex_decisions.
/// Complex sub-objects (subject, scope, evidence refs, attestations, actor) are stored
/// as jsonb serialized with <see cref="VulnExplorerJsonOptions.Default"/>.
/// NOTE(review): reads and updates filter by id only; tenant isolation is presumably
/// enforced by the RLS policies created in migration 010 via the tenant-scoped
/// connection from <c>LedgerDataSource.OpenConnectionAsync</c> — confirm.
/// </summary>
public sealed class PostgresVexDecisionRepository : IVexDecisionRepository
{
    private readonly LedgerDataSource _dataSource;
    private readonly ILogger<PostgresVexDecisionRepository> _logger;

    public PostgresVexDecisionRepository(LedgerDataSource dataSource, ILogger<PostgresVexDecisionRepository> logger)
    {
        _dataSource = dataSource;
        _logger = logger;
    }

    /// <summary>Inserts the decision as a new row; returns the input unchanged.</summary>
    public async Task<VexDecisionDto> CreateAsync(string tenantId, VexDecisionDto decision, CancellationToken ct = default)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "vex-write", ct).ConfigureAwait(false);
        const string sql = """
            INSERT INTO findings.vex_decisions
            (id, tenant_id, vulnerability_id, subject, status, justification_type,
            justification_text, evidence_refs, scope, valid_for, attestation_ref,
            signed_override, supersedes_decision_id, created_by, created_at, updated_at)
            VALUES
            (@id, @tenant_id, @vulnerability_id, @subject::jsonb, @status, @justification_type,
            @justification_text, @evidence_refs::jsonb, @scope::jsonb, @valid_for::jsonb,
            @attestation_ref::jsonb, @signed_override::jsonb, @supersedes_decision_id,
            @created_by::jsonb, @created_at, @updated_at)
            """;
        await using var cmd = new NpgsqlCommand(sql, connection);
        cmd.Parameters.AddWithValue("id", decision.Id);
        cmd.Parameters.AddWithValue("tenant_id", tenantId);
        cmd.Parameters.AddWithValue("vulnerability_id", decision.VulnerabilityId);
        // Required jsonb payloads are serialized unconditionally; optional ones fall back to DBNull.
        cmd.Parameters.Add(new NpgsqlParameter("subject", NpgsqlDbType.Jsonb) { Value = JsonSerializer.Serialize(decision.Subject, VulnExplorerJsonOptions.Default) });
        cmd.Parameters.AddWithValue("status", decision.Status.ToString());
        cmd.Parameters.AddWithValue("justification_type", decision.JustificationType.ToString());
        cmd.Parameters.AddWithValue("justification_text", (object?)decision.JustificationText ?? DBNull.Value);
        cmd.Parameters.Add(new NpgsqlParameter("evidence_refs", NpgsqlDbType.Jsonb) { Value = decision.EvidenceRefs is not null ? JsonSerializer.Serialize(decision.EvidenceRefs, VulnExplorerJsonOptions.Default) : DBNull.Value });
        cmd.Parameters.Add(new NpgsqlParameter("scope", NpgsqlDbType.Jsonb) { Value = decision.Scope is not null ? JsonSerializer.Serialize(decision.Scope, VulnExplorerJsonOptions.Default) : DBNull.Value });
        cmd.Parameters.Add(new NpgsqlParameter("valid_for", NpgsqlDbType.Jsonb) { Value = decision.ValidFor is not null ? JsonSerializer.Serialize(decision.ValidFor, VulnExplorerJsonOptions.Default) : DBNull.Value });
        cmd.Parameters.Add(new NpgsqlParameter("attestation_ref", NpgsqlDbType.Jsonb) { Value = decision.AttestationRef is not null ? JsonSerializer.Serialize(decision.AttestationRef, VulnExplorerJsonOptions.Default) : DBNull.Value });
        cmd.Parameters.Add(new NpgsqlParameter("signed_override", NpgsqlDbType.Jsonb) { Value = decision.SignedOverride is not null ? JsonSerializer.Serialize(decision.SignedOverride, VulnExplorerJsonOptions.Default) : DBNull.Value });
        cmd.Parameters.AddWithValue("supersedes_decision_id", (object?)decision.SupersedesDecisionId ?? DBNull.Value);
        cmd.Parameters.Add(new NpgsqlParameter("created_by", NpgsqlDbType.Jsonb) { Value = JsonSerializer.Serialize(decision.CreatedBy, VulnExplorerJsonOptions.Default) });
        cmd.Parameters.AddWithValue("created_at", decision.CreatedAt);
        cmd.Parameters.AddWithValue("updated_at", (object?)decision.UpdatedAt ?? DBNull.Value);
        await cmd.ExecuteNonQueryAsync(ct).ConfigureAwait(false);
        _logger.LogDebug("Created VEX decision {DecisionId} for tenant {TenantId}", decision.Id, tenantId);
        return decision;
    }

    /// <summary>
    /// Read-modify-write update: fetches the row, overlays the non-null request fields,
    /// then writes the merged record back. Returns null when the decision does not exist.
    /// NOTE(review): the fetch and the UPDATE are two statements on one connection with
    /// no transaction, so concurrent updates can lose writes — confirm acceptable.
    /// </summary>
    public async Task<VexDecisionDto?> UpdateAsync(string tenantId, Guid id, UpdateVexDecisionRequest request, DateTimeOffset updatedAt, CancellationToken ct = default)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "vex-write", ct).ConfigureAwait(false);
        // First fetch the existing record
        var existing = await GetInternalAsync(connection, id, ct).ConfigureAwait(false);
        if (existing is null)
        {
            return null;
        }
        // Null request fields mean "keep the current value".
        var updated = existing with
        {
            Status = request.Status ?? existing.Status,
            JustificationType = request.JustificationType ?? existing.JustificationType,
            JustificationText = request.JustificationText ?? existing.JustificationText,
            EvidenceRefs = request.EvidenceRefs ?? existing.EvidenceRefs,
            Scope = request.Scope ?? existing.Scope,
            ValidFor = request.ValidFor ?? existing.ValidFor,
            SupersedesDecisionId = request.SupersedesDecisionId ?? existing.SupersedesDecisionId,
            UpdatedAt = updatedAt
        };
        // attestation_ref, signed_override, created_by and created_at are intentionally
        // not part of the UPDATE column list.
        const string sql = """
            UPDATE findings.vex_decisions SET
            status = @status,
            justification_type = @justification_type,
            justification_text = @justification_text,
            evidence_refs = @evidence_refs::jsonb,
            scope = @scope::jsonb,
            valid_for = @valid_for::jsonb,
            supersedes_decision_id = @supersedes_decision_id,
            updated_at = @updated_at
            WHERE id = @id
            """;
        await using var cmd = new NpgsqlCommand(sql, connection);
        cmd.Parameters.AddWithValue("id", id);
        cmd.Parameters.AddWithValue("status", updated.Status.ToString());
        cmd.Parameters.AddWithValue("justification_type", updated.JustificationType.ToString());
        cmd.Parameters.AddWithValue("justification_text", (object?)updated.JustificationText ?? DBNull.Value);
        cmd.Parameters.Add(new NpgsqlParameter("evidence_refs", NpgsqlDbType.Jsonb) { Value = updated.EvidenceRefs is not null ? JsonSerializer.Serialize(updated.EvidenceRefs, VulnExplorerJsonOptions.Default) : DBNull.Value });
        cmd.Parameters.Add(new NpgsqlParameter("scope", NpgsqlDbType.Jsonb) { Value = updated.Scope is not null ? JsonSerializer.Serialize(updated.Scope, VulnExplorerJsonOptions.Default) : DBNull.Value });
        cmd.Parameters.Add(new NpgsqlParameter("valid_for", NpgsqlDbType.Jsonb) { Value = updated.ValidFor is not null ? JsonSerializer.Serialize(updated.ValidFor, VulnExplorerJsonOptions.Default) : DBNull.Value });
        cmd.Parameters.AddWithValue("supersedes_decision_id", (object?)updated.SupersedesDecisionId ?? DBNull.Value);
        cmd.Parameters.AddWithValue("updated_at", updatedAt);
        await cmd.ExecuteNonQueryAsync(ct).ConfigureAwait(false);
        _logger.LogDebug("Updated VEX decision {DecisionId} for tenant {TenantId}", id, tenantId);
        return updated;
    }

    /// <summary>Fetches one decision by id, or null when not found.</summary>
    public async Task<VexDecisionDto?> GetAsync(string tenantId, Guid id, CancellationToken ct = default)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "vex-read", ct).ConfigureAwait(false);
        return await GetInternalAsync(connection, id, ct).ConfigureAwait(false);
    }

    /// <summary>
    /// Filtered, paged listing ordered by created_at DESC (id ASC as tie-breaker).
    /// Filters are AND-combined; subjectName is a case-insensitive substring match on
    /// the jsonb subject's "name" field.
    /// </summary>
    public async Task<IReadOnlyList<VexDecisionDto>> QueryAsync(
        string tenantId, string? vulnerabilityId, string? subjectName, VexStatus? status,
        int skip, int take, CancellationToken ct = default)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "vex-read", ct).ConfigureAwait(false);
        // SQL is built from fixed fragments only; all user-supplied values go through parameters.
        var sql = "SELECT id, vulnerability_id, subject, status, justification_type, justification_text, evidence_refs, scope, valid_for, attestation_ref, signed_override, supersedes_decision_id, created_by, created_at, updated_at FROM findings.vex_decisions WHERE 1=1";
        var parameters = new List<NpgsqlParameter>();
        if (vulnerabilityId is not null)
        {
            sql += " AND vulnerability_id = @vulnerability_id";
            parameters.Add(new NpgsqlParameter("vulnerability_id", vulnerabilityId));
        }
        if (subjectName is not null)
        {
            sql += " AND subject->>'name' ILIKE @subject_name";
            parameters.Add(new NpgsqlParameter("subject_name", $"%{subjectName}%"));
        }
        if (status is not null)
        {
            sql += " AND status = @status";
            parameters.Add(new NpgsqlParameter("status", status.Value.ToString()));
        }
        sql += " ORDER BY created_at DESC, id ASC LIMIT @take OFFSET @skip";
        parameters.Add(new NpgsqlParameter("take", take));
        parameters.Add(new NpgsqlParameter("skip", skip));
        await using var cmd = new NpgsqlCommand(sql, connection);
        cmd.Parameters.AddRange(parameters.ToArray());
        var results = new List<VexDecisionDto>();
        await using var reader = await cmd.ExecuteReaderAsync(ct).ConfigureAwait(false);
        while (await reader.ReadAsync(ct).ConfigureAwait(false))
        {
            results.Add(MapDecision(reader));
        }
        return results;
    }

    /// <summary>
    /// Counts all rows visible on the tenant-scoped connection (no explicit WHERE;
    /// presumably RLS limits this to the tenant — TODO confirm).
    /// </summary>
    public async Task<int> CountAsync(string tenantId, CancellationToken ct = default)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "vex-read", ct).ConfigureAwait(false);
        const string sql = "SELECT COUNT(*) FROM findings.vex_decisions";
        await using var cmd = new NpgsqlCommand(sql, connection);
        var result = await cmd.ExecuteScalarAsync(ct).ConfigureAwait(false);
        // COUNT(*) arrives as Int64; Convert narrows it for the interface's int return.
        return Convert.ToInt32(result);
    }

    // Shared single-row fetch used by GetAsync and UpdateAsync; caller owns the connection.
    private static async Task<VexDecisionDto?> GetInternalAsync(NpgsqlConnection connection, Guid id, CancellationToken ct)
    {
        const string sql = """
            SELECT id, vulnerability_id, subject, status, justification_type, justification_text,
            evidence_refs, scope, valid_for, attestation_ref, signed_override,
            supersedes_decision_id, created_by, created_at, updated_at
            FROM findings.vex_decisions
            WHERE id = @id
            """;
        await using var cmd = new NpgsqlCommand(sql, connection);
        cmd.Parameters.AddWithValue("id", id);
        await using var reader = await cmd.ExecuteReaderAsync(ct).ConfigureAwait(false);
        return await reader.ReadAsync(ct).ConfigureAwait(false) ? MapDecision(reader) : null;
    }

    // Materializes the current reader row into a DTO. jsonb columns are deserialized with
    // the shared options; nullable columns are checked with IsDBNull first.
    private static VexDecisionDto MapDecision(NpgsqlDataReader reader)
    {
        var statusStr = reader.GetString(reader.GetOrdinal("status"));
        var justTypeStr = reader.GetString(reader.GetOrdinal("justification_type"));
        return new VexDecisionDto(
            Id: reader.GetGuid(reader.GetOrdinal("id")),
            VulnerabilityId: reader.GetString(reader.GetOrdinal("vulnerability_id")),
            Subject: JsonSerializer.Deserialize<SubjectRefDto>(reader.GetString(reader.GetOrdinal("subject")), VulnExplorerJsonOptions.Default)!,
            // NOTE(review): unparseable enum text silently falls back to NotAffected / Other,
            // which can mask corrupt rows — confirm this is preferred over throwing.
            Status: Enum.TryParse<VexStatus>(statusStr, ignoreCase: true, out var s) ? s : VexStatus.NotAffected,
            JustificationType: Enum.TryParse<VexJustificationType>(justTypeStr, ignoreCase: true, out var j) ? j : VexJustificationType.Other,
            JustificationText: reader.IsDBNull(reader.GetOrdinal("justification_text")) ? null : reader.GetString(reader.GetOrdinal("justification_text")),
            EvidenceRefs: reader.IsDBNull(reader.GetOrdinal("evidence_refs")) ? null : JsonSerializer.Deserialize<IReadOnlyList<EvidenceRefDto>>(reader.GetString(reader.GetOrdinal("evidence_refs")), VulnExplorerJsonOptions.Default),
            Scope: reader.IsDBNull(reader.GetOrdinal("scope")) ? null : JsonSerializer.Deserialize<VexScopeDto>(reader.GetString(reader.GetOrdinal("scope")), VulnExplorerJsonOptions.Default),
            ValidFor: reader.IsDBNull(reader.GetOrdinal("valid_for")) ? null : JsonSerializer.Deserialize<ValidForDto>(reader.GetString(reader.GetOrdinal("valid_for")), VulnExplorerJsonOptions.Default),
            AttestationRef: reader.IsDBNull(reader.GetOrdinal("attestation_ref")) ? null : JsonSerializer.Deserialize<AttestationRefDto>(reader.GetString(reader.GetOrdinal("attestation_ref")), VulnExplorerJsonOptions.Default),
            SignedOverride: reader.IsDBNull(reader.GetOrdinal("signed_override")) ? null : JsonSerializer.Deserialize<VexOverrideAttestationDto>(reader.GetString(reader.GetOrdinal("signed_override")), VulnExplorerJsonOptions.Default),
            SupersedesDecisionId: reader.IsDBNull(reader.GetOrdinal("supersedes_decision_id")) ? null : reader.GetGuid(reader.GetOrdinal("supersedes_decision_id")),
            CreatedBy: JsonSerializer.Deserialize<ActorRefDto>(reader.GetString(reader.GetOrdinal("created_by")), VulnExplorerJsonOptions.Default)!,
            CreatedAt: reader.GetFieldValue<DateTimeOffset>(reader.GetOrdinal("created_at")),
            UpdatedAt: reader.IsDBNull(reader.GetOrdinal("updated_at")) ? null : reader.GetFieldValue<DateTimeOffset>(reader.GetOrdinal("updated_at"))
        );
    }
}
// ============================================================================
// PostgresFixVerificationRepository
// ============================================================================
/// <summary>
/// Postgres-backed <see cref="IFixVerificationRepository"/> over findings.fix_verifications.
/// The transitions history is stored as a jsonb array. NOTE(review): reads/updates filter
/// by cve_id only; tenant isolation is presumably enforced by RLS via the tenant-scoped
/// connection — confirm.
/// </summary>
public sealed class PostgresFixVerificationRepository : IFixVerificationRepository
{
    private readonly LedgerDataSource _dataSource;
    private readonly ILogger<PostgresFixVerificationRepository> _logger;

    public PostgresFixVerificationRepository(LedgerDataSource dataSource, ILogger<PostgresFixVerificationRepository> logger)
    {
        _dataSource = dataSource;
        _logger = logger;
    }

    /// <summary>Inserts the record as a new row; returns the input unchanged.</summary>
    public async Task<FixVerificationRecord> CreateAsync(string tenantId, FixVerificationRecord record, CancellationToken ct = default)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "fix-write", ct).ConfigureAwait(false);
        const string sql = """
            INSERT INTO findings.fix_verifications
            (cve_id, tenant_id, component_purl, artifact_digest, verdict, transitions, created_at, updated_at)
            VALUES
            (@cve_id, @tenant_id, @component_purl, @artifact_digest, @verdict, @transitions::jsonb, @created_at, @updated_at)
            """;
        await using var cmd = new NpgsqlCommand(sql, connection);
        cmd.Parameters.AddWithValue("cve_id", record.CveId);
        cmd.Parameters.AddWithValue("tenant_id", tenantId);
        cmd.Parameters.AddWithValue("component_purl", record.ComponentPurl);
        cmd.Parameters.AddWithValue("artifact_digest", (object?)record.ArtifactDigest ?? DBNull.Value);
        cmd.Parameters.AddWithValue("verdict", record.Verdict);
        cmd.Parameters.Add(new NpgsqlParameter("transitions", NpgsqlDbType.Jsonb) { Value = JsonSerializer.Serialize(record.Transitions, VulnExplorerJsonOptions.Default) });
        cmd.Parameters.AddWithValue("created_at", record.CreatedAt);
        cmd.Parameters.AddWithValue("updated_at", record.UpdatedAt);
        await cmd.ExecuteNonQueryAsync(ct).ConfigureAwait(false);
        _logger.LogDebug("Created fix verification for CVE {CveId} in tenant {TenantId}", record.CveId, tenantId);
        return record;
    }

    /// <summary>
    /// Read-modify-write update: appends <paramref name="transition"/> to the stored history,
    /// sets the new verdict, and stamps updated_at from the transition's ChangedAt.
    /// Returns null when no record exists for <paramref name="cveId"/>.
    /// NOTE(review): fetch + UPDATE run without a transaction, so concurrent updates can
    /// lose history entries — confirm acceptable.
    /// </summary>
    public async Task<FixVerificationRecord?> UpdateAsync(string tenantId, string cveId, string verdict, FixVerificationTransition transition, CancellationToken ct = default)
    {
        await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "fix-write", ct).ConfigureAwait(false);
        // First fetch existing
        var existing = await GetInternalAsync(connection, cveId, ct).ConfigureAwait(false);
        if (existing is null)
        {
            return null;
        }
        var transitions = existing.Transitions.ToList();
        transitions.Add(transition);
        var updated = existing with
        {
            Verdict = verdict,
            Transitions = transitions.ToArray(),
            UpdatedAt = transition.ChangedAt
        };
        const string sql = """
            UPDATE findings.fix_verifications SET
            verdict = @verdict,
            transitions = @transitions::jsonb,
            updated_at = @updated_at
            WHERE cve_id = @cve_id
            """;
        await using var cmd = new NpgsqlCommand(sql, connection);
        cmd.Parameters.AddWithValue("cve_id", cveId);
        cmd.Parameters.AddWithValue("verdict", verdict);
        cmd.Parameters.Add(new NpgsqlParameter("transitions", NpgsqlDbType.Jsonb) { Value = JsonSerializer.Serialize(transitions, VulnExplorerJsonOptions.Default) });
        cmd.Parameters.AddWithValue("updated_at", transition.ChangedAt);
        await cmd.ExecuteNonQueryAsync(ct).ConfigureAwait(false);
        _logger.LogDebug("Updated fix verification {CveId} to verdict {Verdict} in tenant {TenantId}", cveId, verdict, tenantId);
        return updated;
    }

    // Single-row fetch by CVE id; caller owns the connection. Returns null when absent.
    private static async Task<FixVerificationRecord?> GetInternalAsync(NpgsqlConnection connection, string cveId, CancellationToken ct)
    {
        const string sql = """
            SELECT cve_id, component_purl, artifact_digest, verdict, transitions, created_at, updated_at
            FROM findings.fix_verifications
            WHERE cve_id = @cve_id
            """;
        await using var cmd = new NpgsqlCommand(sql, connection);
        cmd.Parameters.AddWithValue("cve_id", cveId);
        await using var reader = await cmd.ExecuteReaderAsync(ct).ConfigureAwait(false);
        if (!await reader.ReadAsync(ct).ConfigureAwait(false))
        {
            return null;
        }
        return new FixVerificationRecord(
            CveId: reader.GetString(reader.GetOrdinal("cve_id")),
            ComponentPurl: reader.GetString(reader.GetOrdinal("component_purl")),
            ArtifactDigest: reader.IsDBNull(reader.GetOrdinal("artifact_digest")) ? null : reader.GetString(reader.GetOrdinal("artifact_digest")),
            Verdict: reader.GetString(reader.GetOrdinal("verdict")),
            // A null/unparseable jsonb history degrades to an empty list rather than failing.
            Transitions: JsonSerializer.Deserialize<IReadOnlyList<FixVerificationTransition>>(reader.GetString(reader.GetOrdinal("transitions")), VulnExplorerJsonOptions.Default) ?? [],
            CreatedAt: reader.GetFieldValue<DateTimeOffset>(reader.GetOrdinal("created_at")),
            UpdatedAt: reader.GetFieldValue<DateTimeOffset>(reader.GetOrdinal("updated_at"))
        );
    }
}
// ============================================================================
// PostgresAuditBundleRepository
// ============================================================================
/// <summary>
/// Postgres-backed <see cref="IAuditBundleRepository"/> over findings.audit_bundles.
/// Bundle contents (decision ids, decision snapshots, evidence refs) are persisted as
/// jsonb serialized with <see cref="VulnExplorerJsonOptions.Default"/>.
/// </summary>
public sealed class PostgresAuditBundleRepository : IAuditBundleRepository
{
    private readonly LedgerDataSource _dataSource;
    private readonly ILogger<PostgresAuditBundleRepository> _logger;

    public PostgresAuditBundleRepository(LedgerDataSource dataSource, ILogger<PostgresAuditBundleRepository> logger)
    {
        _dataSource = dataSource;
        _logger = logger;
    }

    /// <summary>Inserts the bundle as a new row and returns the input unchanged.</summary>
    public async Task<AuditBundleResponse> CreateAsync(string tenantId, AuditBundleResponse bundle, IReadOnlyList<Guid> decisionIds, CancellationToken ct = default)
    {
        const string sql = """
            INSERT INTO findings.audit_bundles
            (bundle_id, tenant_id, decision_ids, decisions, evidence_refs, created_at)
            VALUES
            (@bundle_id, @tenant_id, @decision_ids::jsonb, @decisions::jsonb, @evidence_refs::jsonb, @created_at)
            """;

        await using var conn = await _dataSource.OpenConnectionAsync(tenantId, "audit-write", ct).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(sql, conn);

        var p = command.Parameters;
        p.AddWithValue("bundle_id", bundle.BundleId);
        p.AddWithValue("tenant_id", tenantId);
        // Collection payloads go into jsonb columns via the shared serializer options.
        p.Add(new NpgsqlParameter("decision_ids", NpgsqlDbType.Jsonb) { Value = JsonSerializer.Serialize(decisionIds, VulnExplorerJsonOptions.Default) });
        p.Add(new NpgsqlParameter("decisions", NpgsqlDbType.Jsonb) { Value = JsonSerializer.Serialize(bundle.Decisions, VulnExplorerJsonOptions.Default) });
        p.Add(new NpgsqlParameter("evidence_refs", NpgsqlDbType.Jsonb) { Value = JsonSerializer.Serialize(bundle.EvidenceRefs, VulnExplorerJsonOptions.Default) });
        p.AddWithValue("created_at", bundle.CreatedAt);

        await command.ExecuteNonQueryAsync(ct).ConfigureAwait(false);
        _logger.LogDebug("Created audit bundle {BundleId} for tenant {TenantId}", bundle.BundleId, tenantId);
        return bundle;
    }

    /// <summary>
    /// Allocates the next bundle id ("bundle-000001", …) from findings.audit_bundle_seq.
    /// The sequence is shared across tenants, so ids are globally monotonic.
    /// </summary>
    public async Task<string> NextBundleIdAsync(string tenantId, CancellationToken ct = default)
    {
        const string sql = "SELECT nextval('findings.audit_bundle_seq')";

        await using var conn = await _dataSource.OpenConnectionAsync(tenantId, "audit-write", ct).ConfigureAwait(false);
        await using var command = new NpgsqlCommand(sql, conn);

        var raw = await command.ExecuteScalarAsync(ct).ConfigureAwait(false);
        long sequenceValue = Convert.ToInt64(raw);
        return $"bundle-{sequenceValue:D6}";
    }
}

View File

@@ -0,0 +1,120 @@
-- 010_vex_fix_audit_tables.sql
-- Create Postgres-backed tables for VulnExplorer adapters merged into Findings Ledger.
-- Replaces ConcurrentDictionary in-memory stores (VXLM-005 gap fix).
-- All statements are idempotent (IF NOT EXISTS / DROP POLICY IF EXISTS) so the
-- migration is safe to re-run.
-- NOTE(review): SET search_path executes before BEGIN and therefore outside the
-- transaction; all object references below are schema-qualified anyway, so this is
-- belt-and-braces only.
SET search_path TO findings, public;
BEGIN;
-- ============================================
-- 1. VEX Decisions table
-- ============================================
-- One row per VEX decision; complex sub-objects (subject, scope, evidence refs,
-- attestations, actor) are stored as jsonb, matching the repository serializer.
-- NOTE(review): gen_random_uuid() is built in only on PostgreSQL 13+; older servers
-- need the pgcrypto extension — confirm the minimum supported server version.
CREATE TABLE IF NOT EXISTS findings.vex_decisions (
    id UUID NOT NULL PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    vulnerability_id TEXT NOT NULL,
    subject JSONB NOT NULL,
    status TEXT NOT NULL,
    justification_type TEXT NOT NULL,
    justification_text TEXT,
    evidence_refs JSONB,
    scope JSONB,
    valid_for JSONB,
    attestation_ref JSONB,
    signed_override JSONB,
    supersedes_decision_id UUID,
    created_by JSONB NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ
);
CREATE INDEX IF NOT EXISTS idx_vex_decisions_tenant
    ON findings.vex_decisions (tenant_id);
CREATE INDEX IF NOT EXISTS idx_vex_decisions_vuln
    ON findings.vex_decisions (tenant_id, vulnerability_id);
-- Supports the repository's ORDER BY created_at DESC listing.
CREATE INDEX IF NOT EXISTS idx_vex_decisions_created
    ON findings.vex_decisions (tenant_id, created_at DESC);
-- ============================================
-- 2. Fix Verifications table
-- ============================================
-- One row per (tenant, CVE); the transition history lives in the transitions jsonb array.
CREATE TABLE IF NOT EXISTS findings.fix_verifications (
    cve_id TEXT NOT NULL,
    tenant_id TEXT NOT NULL,
    component_purl TEXT NOT NULL,
    artifact_digest TEXT,
    verdict TEXT NOT NULL DEFAULT 'pending',
    transitions JSONB NOT NULL DEFAULT '[]'::jsonb,
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    PRIMARY KEY (tenant_id, cve_id)
);
CREATE INDEX IF NOT EXISTS idx_fix_verifications_tenant
    ON findings.fix_verifications (tenant_id);
-- ============================================
-- 3. Audit Bundles table
-- ============================================
-- Bundle ids come from findings.audit_bundle_seq (step 5); PK is per-tenant.
CREATE TABLE IF NOT EXISTS findings.audit_bundles (
    bundle_id TEXT NOT NULL,
    tenant_id TEXT NOT NULL,
    decision_ids JSONB NOT NULL DEFAULT '[]'::jsonb,
    decisions JSONB NOT NULL DEFAULT '[]'::jsonb,
    evidence_refs JSONB NOT NULL DEFAULT '[]'::jsonb,
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    PRIMARY KEY (tenant_id, bundle_id)
);
CREATE INDEX IF NOT EXISTS idx_audit_bundles_tenant
    ON findings.audit_bundles (tenant_id);
-- ============================================
-- 4. Enable RLS on new tables
-- ============================================
-- FORCE makes the policies apply even to the table owner. The repositories rely on
-- these policies instead of explicit tenant_id predicates in their SQL.
-- NOTE(review): findings_ledger_app.require_current_tenant() is assumed to exist from
-- an earlier migration — confirm ordering.
ALTER TABLE findings.vex_decisions ENABLE ROW LEVEL SECURITY;
ALTER TABLE findings.vex_decisions FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS vex_decisions_tenant_isolation ON findings.vex_decisions;
CREATE POLICY vex_decisions_tenant_isolation
    ON findings.vex_decisions
    FOR ALL
    USING (tenant_id = findings_ledger_app.require_current_tenant())
    WITH CHECK (tenant_id = findings_ledger_app.require_current_tenant());
ALTER TABLE findings.fix_verifications ENABLE ROW LEVEL SECURITY;
ALTER TABLE findings.fix_verifications FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS fix_verifications_tenant_isolation ON findings.fix_verifications;
CREATE POLICY fix_verifications_tenant_isolation
    ON findings.fix_verifications
    FOR ALL
    USING (tenant_id = findings_ledger_app.require_current_tenant())
    WITH CHECK (tenant_id = findings_ledger_app.require_current_tenant());
ALTER TABLE findings.audit_bundles ENABLE ROW LEVEL SECURITY;
ALTER TABLE findings.audit_bundles FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS audit_bundles_tenant_isolation ON findings.audit_bundles;
CREATE POLICY audit_bundles_tenant_isolation
    ON findings.audit_bundles
    FOR ALL
    USING (tenant_id = findings_ledger_app.require_current_tenant())
    WITH CHECK (tenant_id = findings_ledger_app.require_current_tenant());
-- ============================================
-- 5. Sequence for audit bundle IDs
-- ============================================
-- Single cluster-wide sequence: bundle numbers are monotonic across ALL tenants,
-- not per tenant (tenant scoping comes from the composite primary key above).
CREATE SEQUENCE IF NOT EXISTS findings.audit_bundle_seq
    START WITH 1
    INCREMENT BY 1
    NO CYCLE;
COMMIT;

View File

@@ -18,6 +18,7 @@ using StellaOps.Integrations.WebService.AiCodeGuard;
using StellaOps.Integrations.WebService.Infrastructure;
using StellaOps.Integrations.WebService.Security;
using StellaOps.Audit.Emission;
using StellaOps.Auth.ServerIntegration.Tenancy;
using StellaOps.Infrastructure.Postgres.Migrations;
using StellaOps.Infrastructure.Postgres.Options;
@@ -133,6 +134,9 @@ builder.Services.AddAuthorization(options =>
options.AddStellaOpsScopePolicy(IntegrationPolicies.Operate, StellaOpsScopes.IntegrationOperate);
});
// Unified audit emission (posts audit events to Timeline service)
builder.Services.AddAuditEmission(builder.Configuration);
// Stella Router integration
var routerEnabled = builder.Services.AddRouterMicroservice(
builder.Configuration,

View File

@@ -30,6 +30,7 @@
<ProjectReference Include="..\..\Router\__Libraries\StellaOps.Messaging\StellaOps.Messaging.csproj" />
<ProjectReference Include="..\..\Authority\StellaOps.Authority\StellaOps.Auth.ServerIntegration\StellaOps.Auth.ServerIntegration.csproj" />
<ProjectReference Include="..\..\__Libraries\StellaOps.Localization\StellaOps.Localization.csproj" />
<ProjectReference Include="..\..\__Libraries\StellaOps.Audit.Emission\StellaOps.Audit.Emission.csproj" />
</ItemGroup>
<ItemGroup>
<EmbeddedResource Include="Translations\*.json" />

View File

@@ -2,6 +2,7 @@
using Microsoft.AspNetCore.Authentication;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Options;
using StellaOps.Audit.Emission;
using StellaOps.Auth.Abstractions;
using StellaOps.Localization;
using StellaOps.Auth.ServerIntegration;
@@ -280,6 +281,9 @@ builder.Services.AddEndpointsApiExplorer();
builder.Services.AddStellaOpsLocalization(builder.Configuration);
builder.Services.AddTranslationBundle(System.Reflection.Assembly.GetExecutingAssembly());
// Unified audit emission (posts audit events to Timeline service)
builder.Services.AddAuditEmission(builder.Configuration);
// Stella Router integration
var routerEnabled = builder.Services.AddRouterMicroservice(
builder.Configuration,

View File

@@ -181,7 +181,10 @@ internal static class ScheduleEndpoints
SchedulerEndpointHelpers.ResolveActorId(httpContext),
now,
SchedulerEndpointHelpers.ResolveActorId(httpContext),
SchedulerSchemaVersions.Schedule);
SchedulerSchemaVersions.Schedule,
source: request.Source ?? "user",
jobKind: request.JobKind ?? "scan",
pluginConfig: request.PluginConfig);
await repository.UpsertAsync(schedule, cancellationToken: cancellationToken).ConfigureAwait(false);
await auditService.WriteAsync(
@@ -366,7 +369,7 @@ internal static class ScheduleEndpoints
enabled: false,
existing.CronExpression,
existing.Timezone,
existing.Mode,
existing.Mode,
existing.Selection,
existing.OnlyIf,
existing.Notify,
@@ -377,7 +380,9 @@ internal static class ScheduleEndpoints
now,
SchedulerEndpointHelpers.ResolveActorId(httpContext),
existing.SchemaVersion,
existing.Source);
existing.Source,
jobKind: existing.JobKind,
pluginConfig: existing.PluginConfig);
await repository.UpsertAsync(updated, cancellationToken: cancellationToken).ConfigureAwait(false);
await auditService.WriteAsync(
@@ -454,7 +459,9 @@ internal static class ScheduleEndpoints
now,
SchedulerEndpointHelpers.ResolveActorId(httpContext),
existing.SchemaVersion,
existing.Source);
existing.Source,
jobKind: existing.JobKind,
pluginConfig: existing.PluginConfig);
await repository.UpsertAsync(updated, cancellationToken: cancellationToken).ConfigureAwait(false);
await auditService.WriteAsync(
@@ -512,6 +519,8 @@ internal static class ScheduleEndpoints
var notify = request.Notify ?? existing.Notify;
var limits = request.Limits ?? existing.Limits;
var subscribers = request.Subscribers ?? existing.Subscribers;
var jobKind = request.JobKind ?? existing.JobKind;
var pluginConfig = request.PluginConfig ?? existing.PluginConfig;
return new Schedule(
existing.Id,
@@ -531,7 +540,9 @@ internal static class ScheduleEndpoints
updatedAt,
actor,
existing.SchemaVersion,
existing.Source);
existing.Source,
jobKind: jobKind,
pluginConfig: pluginConfig);
}
}

View File

@@ -24,6 +24,7 @@
<ProjectReference Include="../../Router/__Libraries/StellaOps.Messaging/StellaOps.Messaging.csproj" />
<ProjectReference Include="../../Router/__Libraries/StellaOps.Router.AspNet/StellaOps.Router.AspNet.csproj" />
<ProjectReference Include="../../__Libraries/StellaOps.Localization/StellaOps.Localization.csproj" />
<ProjectReference Include="../../__Libraries/StellaOps.Audit.Emission/StellaOps.Audit.Emission.csproj" />
</ItemGroup>
<ItemGroup>
<EmbeddedResource Include="Translations\*.json" />

View File

@@ -15,6 +15,7 @@ using Serilog;
using Serilog.Events;
using StellaOps.Auth.ServerIntegration;
using StellaOps.Auth.ServerIntegration.Tenancy;
using StellaOps.Audit.Emission;
using StellaOps.Configuration;
using StellaOps.Notify.Models;
using StellaOps.Notify.Persistence.Extensions;
@@ -228,6 +229,9 @@ ConfigureRateLimiting(builder, bootstrapOptions);
builder.Services.AddEndpointsApiExplorer();
// Unified audit emission (posts audit events to Timeline service)
builder.Services.AddAuditEmission(builder.Configuration);
// Stella Router integration
var routerEnabled = builder.Services.AddRouterMicroservice(
builder.Configuration,

View File

@@ -38,6 +38,7 @@
<ProjectReference Include="../../__Libraries/StellaOps.Localization/StellaOps.Localization.csproj" />
<!-- Notifier Worker: shared types for correlation, simulation, security, escalation, etc. -->
<ProjectReference Include="../../Notifier/StellaOps.Notifier/StellaOps.Notifier.Worker/StellaOps.Notifier.Worker.csproj" />
<ProjectReference Include="../../__Libraries/StellaOps.Audit.Emission/StellaOps.Audit.Emission.csproj" />
</ItemGroup>
<ItemGroup>
<EmbeddedResource Include="Translations\*.json" />

View File

@@ -3,6 +3,7 @@ using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StackExchange.Redis;
using StellaOps.Auth.Abstractions;
using StellaOps.Audit.Emission;
using StellaOps.Auth.ServerIntegration;
using StellaOps.Infrastructure.Postgres.Migrations;
using StellaOps.Auth.ServerIntegration.Tenancy;
@@ -370,6 +371,9 @@ builder.Services.AddSingleton<IFunctionMapService, FunctionMapService>();
// Script registry services (multi-language script editor)
builder.Services.AddSingleton<IScriptService, InMemoryScriptService>();
// Unified audit emission (posts audit events to Timeline service)
builder.Services.AddAuditEmission(builder.Configuration);
var routerEnabled = builder.Services.AddRouterMicroservice(
builder.Configuration,
serviceName: "platform",

View File

@@ -0,0 +1,126 @@
using Microsoft.Extensions.Logging;
using StellaOps.Cryptography;
using StellaOps.Platform.WebService.Contracts;
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
namespace StellaOps.Platform.WebService.Services;
/// <summary>
/// Bridges the Platform <see cref="ICryptoProviderPreferenceStore"/> to the crypto
/// library's <see cref="ITenantCryptoPreferenceProvider"/> contract.
/// <para>
/// Results are memoized per (tenant, scope) pair for a configurable TTL
/// (default: 5 minutes) so crypto operations do not hit the database on every call.
/// On a store failure the last cached value (even if expired) is returned;
/// otherwise an empty list is cached and returned.
/// </para>
/// </summary>
public sealed class PlatformCryptoPreferenceProvider : ITenantCryptoPreferenceProvider
{
    private readonly ICryptoProviderPreferenceStore _store;
    private readonly TimeProvider _clock;
    private readonly ILogger<PlatformCryptoPreferenceProvider> _logger;
    private readonly TimeSpan _ttl;

    // Keyed by "{tenantId}:{algorithmScope}", case-insensitively.
    private readonly ConcurrentDictionary<string, CachedEntry> _entries = new(StringComparer.OrdinalIgnoreCase);

    public PlatformCryptoPreferenceProvider(
        ICryptoProviderPreferenceStore store,
        TimeProvider timeProvider,
        ILogger<PlatformCryptoPreferenceProvider> logger,
        TimeSpan? cacheTtl = null)
    {
        _store = store ?? throw new ArgumentNullException(nameof(store));
        _clock = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
        _ttl = cacheTtl ?? TimeSpan.FromMinutes(5);
    }

    /// <summary>
    /// Returns the ordered provider IDs preferred by <paramref name="tenantId"/> for
    /// <paramref name="algorithmScope"/>, falling back to global ("*") preferences
    /// when no scope-specific entries exist. Blank tenant IDs yield an empty list.
    /// </summary>
    public async Task<IReadOnlyList<string>> GetPreferredProvidersAsync(
        string tenantId,
        string algorithmScope = "*",
        CancellationToken cancellationToken = default)
    {
        if (string.IsNullOrWhiteSpace(tenantId))
        {
            return Array.Empty<string>();
        }

        var key = $"{tenantId}:{algorithmScope}";
        var observedAt = _clock.GetUtcNow();

        // Serve from cache while the entry is still fresh.
        if (_entries.TryGetValue(key, out var entry) && observedAt - entry.FetchedAt < _ttl)
        {
            return entry.Providers;
        }

        try
        {
            // The store keys tenants by Guid while the accessor hands us a string;
            // a non-GUID tenant can never have stored preferences.
            if (!Guid.TryParse(tenantId, out var tenantGuid))
            {
                _logger.LogDebug(
                    "Tenant ID {TenantId} is not a valid GUID; returning empty preferences.",
                    tenantId);
                return Remember(key, Array.Empty<string>(), observedAt);
            }

            var preferences = await _store.GetByTenantAsync(tenantGuid, cancellationToken)
                .ConfigureAwait(false);

            // Active, scope-matching preferences ordered by priority.
            var scoped = preferences
                .Where(p => p.IsActive &&
                            string.Equals(p.AlgorithmScope, algorithmScope, StringComparison.OrdinalIgnoreCase))
                .OrderBy(p => p.Priority)
                .Select(p => p.ProviderId)
                .ToList();

            IReadOnlyList<string> resolved;
            if (scoped.Count > 0)
            {
                resolved = scoped;
            }
            else if (string.Equals(algorithmScope, "*", StringComparison.Ordinal))
            {
                // Global scope was requested and nothing matched.
                resolved = Array.Empty<string>();
            }
            else
            {
                // No scope-specific entries: fall back to the global ("*") set.
                resolved = preferences
                    .Where(p => p.IsActive &&
                                string.Equals(p.AlgorithmScope, "*", StringComparison.OrdinalIgnoreCase))
                    .OrderBy(p => p.Priority)
                    .Select(p => p.ProviderId)
                    .ToList();
            }

            return Remember(key, resolved, observedAt);
        }
        catch (Exception ex)
        {
            _logger.LogWarning(ex,
                "Failed to fetch crypto provider preferences for tenant {TenantId}; returning cached or empty.",
                tenantId);

            // Prefer any previously cached value (even a stale one) over nothing.
            if (entry is not null)
            {
                return entry.Providers;
            }

            return Remember(key, Array.Empty<string>(), observedAt);
        }
    }

    // Stores the resolved list in the cache and returns it unchanged.
    private IReadOnlyList<string> Remember(string key, IReadOnlyList<string> providers, DateTimeOffset fetchedAt)
    {
        _entries[key] = new CachedEntry(providers, fetchedAt);
        return providers;
    }

    private sealed record CachedEntry(IReadOnlyList<string> Providers, DateTimeOffset FetchedAt);
}

View File

@@ -39,6 +39,8 @@
<ProjectReference Include="..\..\Notify\__Libraries\StellaOps.Notify.Persistence\StellaOps.Notify.Persistence.csproj" />
<ProjectReference Include="..\..\Concelier\__Libraries\StellaOps.Excititor.Persistence\StellaOps.Excititor.Persistence.csproj" />
<ProjectReference Include="..\..\__Libraries\StellaOps.Localization\StellaOps.Localization.csproj" />
<ProjectReference Include="..\..\__Libraries\StellaOps.Audit.Emission\StellaOps.Audit.Emission.csproj" />
<ProjectReference Include="..\..\__Libraries\StellaOps.Cryptography\StellaOps.Cryptography.csproj" />
</ItemGroup>
<ItemGroup>

View File

@@ -39,6 +39,7 @@ using StellaOps.Policy.Engine.Contracts.Gateway;
using StellaOps.Policy.Deltas;
using StellaOps.Policy.Snapshots;
using StellaOps.Policy.ToolLattice;
using StellaOps.Audit.Emission;
using StellaOps.Router.AspNet;
var builder = WebApplication.CreateBuilder(args);
@@ -420,6 +421,9 @@ builder.Services.AddSingleton<IToolAccessEvaluator, ToolAccessEvaluator>();
builder.Services.AddMemoryCache();
// ── End merged gateway services ────────────────────────────────────────────
// Unified audit emission (posts audit events to Timeline service)
builder.Services.AddAuditEmission(builder.Configuration);
// Stella Router integration
var routerEnabled = builder.Services.AddRouterMicroservice(
builder.Configuration,

View File

@@ -51,6 +51,7 @@
<ProjectReference Include="../../Signals/StellaOps.Signals/StellaOps.Signals.csproj" />
<ProjectReference Include="../../SbomService/__Libraries/StellaOps.SbomService.Persistence/StellaOps.SbomService.Persistence.csproj" />
<ProjectReference Include="../../Concelier/__Libraries/StellaOps.Concelier.SbomIntegration/StellaOps.Concelier.SbomIntegration.csproj" />
<ProjectReference Include="../../__Libraries/StellaOps.Audit.Emission/StellaOps.Audit.Emission.csproj" />
</ItemGroup>
<ItemGroup>
<InternalsVisibleTo Include="StellaOps.Policy.Engine.Tests" />

View File

@@ -1,3 +1,4 @@
using StellaOps.Audit.Emission;
using StellaOps.Router.AspNet;
using StellaOps.Auth.ServerIntegration;
using StellaOps.Auth.ServerIntegration.Tenancy;
@@ -70,6 +71,9 @@ builder.Services.AddSingleton<ILanguageValidator, PythonScriptValidator>();
builder.Services.AddSingleton<ILanguageValidator, TypeScriptScriptValidator>();
builder.Services.AddSingleton<IScriptRegistry, ScriptRegistry>();
// Unified audit emission (posts audit events to Timeline service)
builder.Services.AddAuditEmission(builder.Configuration);
// Router integration
var routerEnabled = builder.Services.AddRouterMicroservice(
builder.Configuration,

View File

@@ -21,6 +21,7 @@
<ProjectReference Include="..\..\..\JobEngine\StellaOps.JobEngine\StellaOps.JobEngine.Infrastructure\StellaOps.JobEngine.Infrastructure.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Localization\StellaOps.Localization.csproj" />
<ProjectReference Include="..\..\__Libraries\StellaOps.ReleaseOrchestrator.Scripts\StellaOps.ReleaseOrchestrator.Scripts.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Audit.Emission\StellaOps.Audit.Emission.csproj" />
</ItemGroup>
<PropertyGroup Label="StellaOpsReleaseVersion">

View File

@@ -15,6 +15,7 @@ using StellaOps.Auth.ServerIntegration;
using StellaOps.Auth.ServerIntegration.Tenancy;
using StellaOps.Authority.Persistence.Postgres.Repositories;
using StellaOps.Concelier.Core.Linksets;
using StellaOps.Audit.Emission;
using StellaOps.Configuration;
using StellaOps.Cryptography.DependencyInjection;
using StellaOps.Cryptography.Plugin.BouncyCastle;
@@ -90,6 +91,9 @@ var bootstrapOptions = builder.Configuration.BindOptions<ScannerWebServiceOption
builder.Services.AddStellaOpsCrypto(bootstrapOptions.Crypto);
builder.Services.AddControllers();
// Unified audit emission (posts audit events to Timeline service)
builder.Services.AddAuditEmission(builder.Configuration);
// Stella Router integration - enables ASP.NET endpoints to be registered with the Router
var routerEnabled = builder.Services.AddRouterMicroservice(
builder.Configuration,

View File

@@ -61,6 +61,7 @@
<ProjectReference Include="../__Libraries/StellaOps.Scanner.Gate/StellaOps.Scanner.Gate.csproj" />
<ProjectReference Include="../../Router/__Libraries/StellaOps.Router.AspNet/StellaOps.Router.AspNet.csproj" />
<ProjectReference Include="../../__Libraries/StellaOps.Localization/StellaOps.Localization.csproj" />
<ProjectReference Include="../../__Libraries/StellaOps.Audit.Emission/StellaOps.Audit.Emission.csproj" />
</ItemGroup>
<ItemGroup>

View File

@@ -4,37 +4,90 @@ namespace StellaOps.Timeline.WebService.Audit;
/// <summary>
/// Combines events from the HTTP module polling provider with events
/// received via the ingest endpoint, producing a unified event stream.
/// persisted in the PostgreSQL audit store, producing a unified event stream.
/// The Postgres store is the primary source for ingested events;
/// HTTP polling provides a transitional fallback for services not yet pushing
/// via the Audit.Emission library.
/// </summary>
public sealed class CompositeUnifiedAuditEventProvider : IUnifiedAuditEventProvider
{
private readonly HttpUnifiedAuditEventProvider _httpProvider;
private readonly IngestAuditEventStore _ingestStore;
private readonly PostgresUnifiedAuditEventStore _postgresStore;
private readonly ILogger<CompositeUnifiedAuditEventProvider> _logger;
public CompositeUnifiedAuditEventProvider(
HttpUnifiedAuditEventProvider httpProvider,
IngestAuditEventStore ingestStore)
PostgresUnifiedAuditEventStore postgresStore,
ILogger<CompositeUnifiedAuditEventProvider> logger)
{
_httpProvider = httpProvider;
_ingestStore = ingestStore;
_postgresStore = postgresStore;
_logger = logger;
}
public async Task<IReadOnlyList<UnifiedAuditEvent>> GetEventsAsync(CancellationToken cancellationToken)
{
var httpEvents = await _httpProvider.GetEventsAsync(cancellationToken).ConfigureAwait(false);
var ingestedEvents = _ingestStore.GetAll();
// Fetch from both sources concurrently
var httpTask = GetHttpEventsAsync(cancellationToken);
var postgresTask = GetPostgresEventsAsync(cancellationToken);
if (ingestedEvents.Count == 0)
await Task.WhenAll(httpTask, postgresTask).ConfigureAwait(false);
var httpEvents = httpTask.Result;
var persistedEvents = postgresTask.Result;
if (httpEvents.Count == 0 && persistedEvents.Count == 0)
{
return Array.Empty<UnifiedAuditEvent>();
}
if (httpEvents.Count == 0)
{
return persistedEvents;
}
if (persistedEvents.Count == 0)
{
return httpEvents;
}
return httpEvents
.Concat(ingestedEvents)
// Merge and deduplicate by event ID, preferring persisted events
var persistedIds = new HashSet<string>(
persistedEvents.Select(e => e.Id),
StringComparer.OrdinalIgnoreCase);
return persistedEvents
.Concat(httpEvents.Where(e => !persistedIds.Contains(e.Id)))
.OrderByDescending(e => e.Timestamp)
.ThenBy(e => e.Id, StringComparer.Ordinal)
.ThenBy(e => e.Module, StringComparer.Ordinal)
.ThenBy(e => e.Action, StringComparer.Ordinal)
.ToList();
}
private async Task<IReadOnlyList<UnifiedAuditEvent>> GetHttpEventsAsync(CancellationToken cancellationToken)
{
try
{
return await _httpProvider.GetEventsAsync(cancellationToken).ConfigureAwait(false);
}
catch (Exception ex) when (ex is not OperationCanceledException)
{
_logger.LogWarning(ex, "Failed to fetch HTTP-polled audit events; using persisted events only");
return Array.Empty<UnifiedAuditEvent>();
}
}
private async Task<IReadOnlyList<UnifiedAuditEvent>> GetPostgresEventsAsync(CancellationToken cancellationToken)
{
try
{
return await _postgresStore.GetAllAsync(cancellationToken).ConfigureAwait(false);
}
catch (Exception ex) when (ex is not OperationCanceledException)
{
_logger.LogWarning(ex, "Failed to fetch persisted audit events; falling back to HTTP polling only");
return Array.Empty<UnifiedAuditEvent>();
}
}
}

View File

@@ -0,0 +1,483 @@
// Copyright (c) StellaOps. Licensed under the BUSL-1.1.
using System.Data;
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
using System.Text.Json.Nodes;
using System.Text.Json.Serialization;
using Npgsql;
using NpgsqlTypes;
using StellaOps.Timeline.Core.Postgres;
namespace StellaOps.Timeline.WebService.Audit;
/// <summary>
/// PostgreSQL-backed audit event store with SHA-256 hash chain integrity.
/// Replaces the in-memory <see cref="IngestAuditEventStore"/> to ensure audit events
/// survive service restarts and provide tamper-evident chain verification.
/// </summary>
/// <remarks>
/// Hash chain pattern follows the JobEngine audit_entries design:
/// - Each event gets a content_hash computed from canonical JSON of its fields.
/// - Each event links to the previous event's content_hash via previous_entry_hash.
/// - Sequence numbers are monotonically increasing per tenant.
/// - SERIALIZABLE isolation ensures chain integrity under concurrent writes.
/// </remarks>
public sealed class PostgresUnifiedAuditEventStore
{
private readonly TimelineCoreDataSource _dataSource;
private readonly ILogger<PostgresUnifiedAuditEventStore> _logger;
private static readonly JsonSerializerOptions CanonicalJsonOptions = new()
{
DefaultIgnoreCondition = JsonIgnoreCondition.Never,
WriteIndented = false,
PropertyNamingPolicy = null,
Converters = { new JsonStringEnumConverter(JsonNamingPolicy.CamelCase) }
};
public PostgresUnifiedAuditEventStore(
TimelineCoreDataSource dataSource,
ILogger<PostgresUnifiedAuditEventStore> logger)
{
_dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
}
/// <summary>
/// Persists an audit event with hash chain integrity.
/// Uses SERIALIZABLE isolation to prevent concurrent chain corruption.
/// </summary>
public async Task AddAsync(UnifiedAuditEvent auditEvent, CancellationToken cancellationToken = default)
{
ArgumentNullException.ThrowIfNull(auditEvent);
var tenantId = auditEvent.TenantId ?? "default";
await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken)
.ConfigureAwait(false);
await using var transaction = await connection.BeginTransactionAsync(
IsolationLevel.Serializable, cancellationToken).ConfigureAwait(false);
try
{
// Step 1: Get next sequence number and previous hash atomically
var (sequenceNumber, previousHash) = await GetNextSequenceAsync(
connection, transaction, tenantId, cancellationToken).ConfigureAwait(false);
// Step 2: Compute content hash from canonical JSON
var contentHash = ComputeContentHash(auditEvent, tenantId, sequenceNumber);
// Step 3: Insert the audit event
await InsertEventAsync(
connection, transaction, auditEvent, tenantId,
sequenceNumber, previousHash, contentHash, cancellationToken).ConfigureAwait(false);
// Step 4: Update the sequence tracker with the new hash
await UpdateSequenceHashAsync(
connection, transaction, tenantId, contentHash, cancellationToken).ConfigureAwait(false);
await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
_logger.LogDebug(
"Persisted audit event {EventId} for tenant {TenantId} (seq={Seq}, hash={Hash})",
auditEvent.Id, tenantId, sequenceNumber, contentHash[..16]);
}
catch (Exception ex) when (ex is not OperationCanceledException)
{
_logger.LogError(ex,
"Failed to persist audit event {EventId} for tenant {TenantId}",
auditEvent.Id, tenantId);
try
{
await transaction.RollbackAsync(cancellationToken).ConfigureAwait(false);
}
catch (Exception rollbackEx)
{
_logger.LogWarning(rollbackEx, "Rollback failed for audit event {EventId}", auditEvent.Id);
}
throw;
}
}
/// <summary>
/// Returns all persisted audit events, ordered by timestamp descending.
/// </summary>
public async Task<IReadOnlyList<UnifiedAuditEvent>> GetAllAsync(CancellationToken cancellationToken = default)
{
await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken)
.ConfigureAwait(false);
const string sql = """
SELECT id, tenant_id, timestamp, module, action, severity,
actor_id, actor_name, actor_email, actor_type, actor_ip, actor_user_agent,
resource_type, resource_id, resource_name,
description, details_jsonb, diff_jsonb,
correlation_id, parent_event_id, tags
FROM timeline.unified_audit_events
ORDER BY timestamp DESC, id ASC
LIMIT 10000
""";
await using var command = new NpgsqlCommand(sql, connection);
command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
var events = new List<UnifiedAuditEvent>();
while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
{
events.Add(MapReaderToEvent(reader));
}
return events;
}
/// <summary>
/// Returns persisted audit events for a specific tenant, ordered by timestamp descending.
/// </summary>
public async Task<IReadOnlyList<UnifiedAuditEvent>> GetByTenantAsync(
string tenantId, int limit = 1000, CancellationToken cancellationToken = default)
{
await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
.ConfigureAwait(false);
const string sql = """
SELECT id, tenant_id, timestamp, module, action, severity,
actor_id, actor_name, actor_email, actor_type, actor_ip, actor_user_agent,
resource_type, resource_id, resource_name,
description, details_jsonb, diff_jsonb,
correlation_id, parent_event_id, tags
FROM timeline.unified_audit_events
WHERE tenant_id = @tenantId
ORDER BY timestamp DESC, id ASC
LIMIT @limit
""";
await using var command = new NpgsqlCommand(sql, connection);
command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
command.Parameters.AddWithValue("tenantId", tenantId);
command.Parameters.AddWithValue("limit", Math.Clamp(limit, 1, 10000));
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
var events = new List<UnifiedAuditEvent>();
while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
{
events.Add(MapReaderToEvent(reader));
}
return events;
}
/// <summary>
/// Verifies the hash chain integrity for a tenant.
/// </summary>
public async Task<AuditChainVerificationResult> VerifyChainAsync(
string tenantId,
long? startSequence = null,
long? endSequence = null,
CancellationToken cancellationToken = default)
{
await using var connection = await _dataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
.ConfigureAwait(false);
const string sql = """
SELECT is_valid, invalid_event_id, invalid_sequence, error_message
FROM timeline.verify_unified_audit_chain(@tenantId, @startSeq, @endSeq)
""";
await using var command = new NpgsqlCommand(sql, connection);
command.CommandTimeout = _dataSource.CommandTimeoutSeconds;
command.Parameters.AddWithValue("tenantId", tenantId);
command.Parameters.AddWithValue("startSeq", startSequence ?? 1L);
command.Parameters.AddWithValue("endSeq", (object?)endSequence ?? DBNull.Value);
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
if (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
{
return new AuditChainVerificationResult
{
IsValid = reader.GetBoolean(0),
InvalidEventId = reader.IsDBNull(1) ? null : reader.GetString(1),
InvalidSequence = reader.IsDBNull(2) ? null : reader.GetInt64(2),
ErrorMessage = reader.IsDBNull(3) ? null : reader.GetString(3)
};
}
// No rows returned means empty chain, which is valid
return new AuditChainVerificationResult { IsValid = true };
}
// ── Private helpers ──────────────────────────────────────────────────────
private static async Task<(long SequenceNumber, string? PreviousHash)> GetNextSequenceAsync(
NpgsqlConnection connection,
NpgsqlTransaction transaction,
string tenantId,
CancellationToken cancellationToken)
{
const string sql = "SELECT next_seq, prev_hash FROM timeline.next_unified_audit_sequence(@tenantId)";
await using var command = new NpgsqlCommand(sql, connection, transaction);
command.Parameters.AddWithValue("tenantId", tenantId);
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
{
throw new InvalidOperationException($"Failed to acquire audit sequence for tenant {tenantId}");
}
var sequenceNumber = reader.GetInt64(0);
var previousHash = reader.IsDBNull(1) ? null : reader.GetString(1);
return (sequenceNumber, previousHash);
}
private static async Task InsertEventAsync(
NpgsqlConnection connection,
NpgsqlTransaction transaction,
UnifiedAuditEvent auditEvent,
string tenantId,
long sequenceNumber,
string? previousHash,
string contentHash,
CancellationToken cancellationToken)
{
const string sql = """
INSERT INTO timeline.unified_audit_events (
id, tenant_id, timestamp, module, action, severity,
actor_id, actor_name, actor_email, actor_type, actor_ip, actor_user_agent,
resource_type, resource_id, resource_name,
description, details_jsonb, diff_jsonb,
correlation_id, parent_event_id, tags,
content_hash, previous_entry_hash, sequence_number,
created_at
) VALUES (
@id, @tenantId, @timestamp, @module, @action, @severity,
@actorId, @actorName, @actorEmail, @actorType, @actorIp, @actorUserAgent,
@resourceType, @resourceId, @resourceName,
@description, @detailsJsonb::jsonb, @diffJsonb::jsonb,
@correlationId, @parentEventId, @tags,
@contentHash, @previousHash, @sequenceNumber,
NOW()
)
ON CONFLICT (id, tenant_id) DO NOTHING
""";
await using var command = new NpgsqlCommand(sql, connection, transaction);
command.Parameters.AddWithValue("id", auditEvent.Id);
command.Parameters.AddWithValue("tenantId", tenantId);
command.Parameters.AddWithValue("timestamp", auditEvent.Timestamp);
command.Parameters.AddWithValue("module", auditEvent.Module);
command.Parameters.AddWithValue("action", auditEvent.Action);
command.Parameters.AddWithValue("severity", auditEvent.Severity);
command.Parameters.AddWithValue("actorId", auditEvent.Actor.Id);
command.Parameters.AddWithValue("actorName", auditEvent.Actor.Name);
command.Parameters.AddWithValue("actorEmail", (object?)auditEvent.Actor.Email ?? DBNull.Value);
command.Parameters.AddWithValue("actorType", auditEvent.Actor.Type);
command.Parameters.AddWithValue("actorIp", (object?)auditEvent.Actor.IpAddress ?? DBNull.Value);
command.Parameters.AddWithValue("actorUserAgent", (object?)auditEvent.Actor.UserAgent ?? DBNull.Value);
command.Parameters.AddWithValue("resourceType", auditEvent.Resource.Type);
command.Parameters.AddWithValue("resourceId", auditEvent.Resource.Id);
command.Parameters.AddWithValue("resourceName", (object?)auditEvent.Resource.Name ?? DBNull.Value);
command.Parameters.AddWithValue("description", auditEvent.Description);
command.Parameters.AddWithValue("detailsJsonb", SerializeDetails(auditEvent.Details));
command.Parameters.AddWithValue("diffJsonb", (object?)SerializeDiff(auditEvent.Diff) ?? DBNull.Value);
command.Parameters.AddWithValue("correlationId", (object?)auditEvent.CorrelationId ?? DBNull.Value);
command.Parameters.AddWithValue("parentEventId", (object?)auditEvent.ParentEventId ?? DBNull.Value);
command.Parameters.Add(new NpgsqlParameter("tags", NpgsqlDbType.Array | NpgsqlDbType.Text)
{
Value = auditEvent.Tags.ToArray()
});
command.Parameters.AddWithValue("contentHash", contentHash);
command.Parameters.AddWithValue("previousHash", (object?)previousHash ?? DBNull.Value);
command.Parameters.AddWithValue("sequenceNumber", sequenceNumber);
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
}
private static async Task UpdateSequenceHashAsync(
NpgsqlConnection connection,
NpgsqlTransaction transaction,
string tenantId,
string contentHash,
CancellationToken cancellationToken)
{
const string sql = "SELECT timeline.update_unified_audit_sequence_hash(@tenantId, @contentHash)";
await using var command = new NpgsqlCommand(sql, connection, transaction);
command.Parameters.AddWithValue("tenantId", tenantId);
command.Parameters.AddWithValue("contentHash", contentHash);
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Computes a SHA-256 content hash from the canonical JSON representation of an audit event.
/// Fields are sorted lexicographically for deterministic hashing.
/// </summary>
internal static string ComputeContentHash(UnifiedAuditEvent auditEvent, string tenantId, long sequenceNumber)
{
// Build canonical representation with sorted keys
var canonical = new
{
Action = auditEvent.Action,
ActorId = auditEvent.Actor.Id,
ActorName = auditEvent.Actor.Name,
ActorType = auditEvent.Actor.Type,
CorrelationId = auditEvent.CorrelationId,
Description = auditEvent.Description,
Id = auditEvent.Id,
Module = auditEvent.Module,
ResourceId = auditEvent.Resource.Id,
ResourceType = auditEvent.Resource.Type,
SequenceNumber = sequenceNumber,
Severity = auditEvent.Severity,
TenantId = tenantId,
Timestamp = auditEvent.Timestamp
};
var canonicalJson = ToCanonicalJson(canonical);
var bytes = Encoding.UTF8.GetBytes(canonicalJson);
var hash = SHA256.HashData(bytes);
return Convert.ToHexStringLower(hash);
}
private static string ToCanonicalJson<T>(T value)
{
var node = JsonSerializer.SerializeToNode(value, CanonicalJsonOptions) ?? new JsonObject();
var ordered = OrderNode(node.DeepClone());
return ordered.ToJsonString(CanonicalJsonOptions);
}
private static JsonNode OrderNode(JsonNode node)
{
switch (node)
{
case JsonObject obj:
var orderedObj = new JsonObject();
foreach (var kvp in obj.OrderBy(x => x.Key, StringComparer.Ordinal))
{
orderedObj.Add(kvp.Key, kvp.Value is null ? null : OrderNode(kvp.Value.DeepClone()));
}
return orderedObj;
case JsonArray arr:
var orderedArr = new JsonArray();
foreach (var item in arr)
{
orderedArr.Add(item is null ? null : OrderNode(item.DeepClone()));
}
return orderedArr;
default:
return node.DeepClone();
}
}
private static string SerializeDetails(IReadOnlyDictionary<string, object?> details)
{
return JsonSerializer.Serialize(details, CanonicalJsonOptions);
}
private static string? SerializeDiff(UnifiedAuditDiff? diff)
{
if (diff is null)
{
return null;
}
return JsonSerializer.Serialize(diff, CanonicalJsonOptions);
}
/// <summary>
/// Hydrates a <see cref="UnifiedAuditEvent"/> from the current reader row.
/// Columns are read by ordinal, so the positions below are coupled to the SELECT
/// column list of the query methods in this store:
/// 0=id, 1=tenant_id, 2=timestamp, 3=module, 4=action, 5=severity,
/// 6=actor_id, 7=actor_name, 8=actor_email, 9=actor_type, 10=actor_ip,
/// 11=actor_user_agent, 12=resource_type, 13=resource_id, 14=resource_name,
/// 15=description, 16=details_jsonb, 17=diff_jsonb, 18=correlation_id,
/// 19=parent_event_id, 20=tags.
/// NOTE(review): the ordinal→column mapping above is inferred from the field usage
/// below — confirm against the SELECT statements elsewhere in this class when
/// changing either side.
/// </summary>
private static UnifiedAuditEvent MapReaderToEvent(NpgsqlDataReader reader)
{
    // details_jsonb defaults to an empty JSON object; diff_jsonb is genuinely optional.
    var detailsJson = reader.IsDBNull(16) ? "{}" : reader.GetString(16);
    var diffJson = reader.IsDBNull(17) ? null : reader.GetString(17);
    // tags is a Postgres TEXT[]; a NULL column normalises to an empty array.
    var tagsArray = reader.IsDBNull(20) ? Array.Empty<string>() : reader.GetFieldValue<string[]>(20);
    return new UnifiedAuditEvent
    {
        Id = reader.GetString(0),
        TenantId = reader.GetString(1),
        // TIMESTAMPTZ is surfaced as DateTimeOffset to preserve the UTC instant.
        Timestamp = reader.GetFieldValue<DateTimeOffset>(2),
        Module = reader.GetString(3),
        Action = reader.GetString(4),
        Severity = reader.GetString(5),
        Actor = new UnifiedAuditActor
        {
            Id = reader.GetString(6),
            Name = reader.GetString(7),
            Email = reader.IsDBNull(8) ? null : reader.GetString(8),
            Type = reader.GetString(9),
            IpAddress = reader.IsDBNull(10) ? null : reader.GetString(10),
            UserAgent = reader.IsDBNull(11) ? null : reader.GetString(11)
        },
        Resource = new UnifiedAuditResource
        {
            Type = reader.GetString(12),
            Id = reader.GetString(13),
            Name = reader.IsDBNull(14) ? null : reader.GetString(14)
        },
        Description = reader.GetString(15),
        // Malformed JSON degrades to an empty bag / null diff (see the helpers below).
        Details = DeserializeDetails(detailsJson),
        Diff = DeserializeDiff(diffJson),
        CorrelationId = reader.IsDBNull(18) ? null : reader.GetString(18),
        ParentEventId = reader.IsDBNull(19) ? null : reader.GetString(19),
        Tags = tagsArray
    };
}
/// <summary>
/// Parses the details_jsonb payload back into a dictionary. Malformed JSON, or a
/// payload that deserialises to null, degrades to an empty bag so a corrupt row
/// never breaks event reads.
/// </summary>
private static IReadOnlyDictionary<string, object?> DeserializeDetails(string json)
{
    Dictionary<string, object?>? parsed = null;
    try
    {
        parsed = JsonSerializer.Deserialize<Dictionary<string, object?>>(json);
    }
    catch (JsonException)
    {
        // Corrupt payload: fall through to the empty bag.
    }
    return parsed ?? new Dictionary<string, object?>();
}
/// <summary>
/// Parses the optional diff_jsonb payload. A missing/blank column or malformed JSON
/// degrades to null ("no diff") rather than failing the read.
/// </summary>
private static UnifiedAuditDiff? DeserializeDiff(string? json)
{
    if (!string.IsNullOrWhiteSpace(json))
    {
        try
        {
            return JsonSerializer.Deserialize<UnifiedAuditDiff>(json);
        }
        catch (JsonException)
        {
            // Corrupt payload: treat as absent.
        }
    }
    return null;
}
}
/// <summary>
/// Result of hash chain verification for a tenant's audit events.
/// NOTE(review): field shape appears to mirror the row returned by the
/// timeline.verify_unified_audit_chain SQL function — keep the two in sync.
/// </summary>
public sealed record AuditChainVerificationResult
{
    /// <summary>Whether the chain is intact.</summary>
    public required bool IsValid { get; init; }
    /// <summary>Event ID where the chain breaks, if any.</summary>
    public string? InvalidEventId { get; init; }
    /// <summary>Sequence number where the chain breaks, if any.</summary>
    public long? InvalidSequence { get; init; }
    /// <summary>Human-readable error message if the chain is broken.</summary>
    public string? ErrorMessage { get; init; }
}

View File

@@ -79,10 +79,11 @@ public static class UnifiedAuditEndpoints
.RequireAuthorization(TimelinePolicies.Write);
}
private static IResult IngestEventAsync(
private static async Task<IResult> IngestEventAsync(
UnifiedAuditIngestRequest request,
IngestAuditEventStore store,
ILoggerFactory loggerFactory)
PostgresUnifiedAuditEventStore store,
ILoggerFactory loggerFactory,
CancellationToken cancellationToken)
{
if (string.IsNullOrWhiteSpace(request.Module) || string.IsNullOrWhiteSpace(request.Action))
{
@@ -123,10 +124,22 @@ public static class UnifiedAuditEndpoints
Tags = request.Tags ?? [module, action]
};
store.Add(auditEvent);
try
{
await store.AddAsync(auditEvent, cancellationToken).ConfigureAwait(false);
}
catch (Exception ex) when (ex is not OperationCanceledException)
{
var logger = loggerFactory.CreateLogger("StellaOps.Timeline.AuditIngest");
logger.LogError(ex,
"Failed to persist audit event {EventId} ({Module}.{Action})",
auditEvent.Id, auditEvent.Module, auditEvent.Action);
var logger = loggerFactory.CreateLogger("StellaOps.Timeline.AuditIngest");
logger.LogDebug(
return Results.StatusCode(StatusCodes.Status503ServiceUnavailable);
}
var ingestLogger = loggerFactory.CreateLogger("StellaOps.Timeline.AuditIngest");
ingestLogger.LogDebug(
"Ingested audit event {EventId} ({Module}.{Action})",
auditEvent.Id,
auditEvent.Module,

View File

@@ -1,12 +1,15 @@
using Microsoft.AspNetCore.Mvc;
using StellaOps.Auth.Abstractions;
using StellaOps.Cryptography.Audit;
using StellaOps.Infrastructure.Postgres.Migrations;
using StellaOps.Infrastructure.Postgres.Options;
using StellaOps.Localization;
using StellaOps.Auth.ServerIntegration;
using StellaOps.Auth.ServerIntegration.Tenancy;
using StellaOps.Eventing;
using StellaOps.Router.AspNet;
using StellaOps.Timeline.Core;
using StellaOps.Timeline.Core.Postgres;
using StellaOps.Timeline.WebService.Audit;
using StellaOps.Timeline.WebService.Endpoints;
using StellaOps.Timeline.WebService.Security;
@@ -67,9 +70,22 @@ builder.Services.AddHttpClient(HttpUnifiedAuditEventProvider.ClientName, (provid
client.Timeout = TimeSpan.FromSeconds(Math.Max(1, options.RequestTimeoutSeconds));
});
// Audit event providers: HTTP polling from modules + in-memory ingest store
// ── Unified Audit persistence (Postgres + hash chain) ───────────────────────
// TimelineCoreDataSource provides connections to the timeline schema.
// The Postgres store replaces the old in-memory IngestAuditEventStore.
builder.Services.Configure<PostgresOptions>(builder.Configuration.GetSection("Postgres:Timeline"));
builder.Services.AddSingleton<TimelineCoreDataSource>();
builder.Services.AddSingleton<PostgresUnifiedAuditEventStore>();
// Auto-migrate: timeline schema including unified_audit_events + hash chain functions
builder.Services.AddStartupMigrations<PostgresOptions>(
schemaName: TimelineCoreDataSource.DefaultSchemaName,
moduleName: "TimelineAudit",
migrationsAssembly: typeof(TimelineCoreDataSource).Assembly,
connectionStringSelector: opts => opts.ConnectionString);
// Audit event providers: Postgres persistence (primary) + HTTP polling (transitional fallback)
builder.Services.AddSingleton<HttpUnifiedAuditEventProvider>();
builder.Services.AddSingleton<IngestAuditEventStore>();
builder.Services.AddSingleton<IUnifiedAuditEventProvider, CompositeUnifiedAuditEventProvider>();
builder.Services.AddSingleton<IUnifiedAuditAggregationService, UnifiedAuditAggregationService>();

View File

@@ -11,6 +11,7 @@
<ItemGroup>
<ProjectReference Include="..\__Libraries\StellaOps.Timeline.Core\StellaOps.Timeline.Core.csproj" />
<ProjectReference Include="..\..\__Libraries\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
<ProjectReference Include="..\__Libraries\StellaOps.TimelineIndexer.Core\StellaOps.TimelineIndexer.Core.csproj" />
<ProjectReference Include="..\__Libraries\StellaOps.TimelineIndexer.Infrastructure\StellaOps.TimelineIndexer.Infrastructure.csproj" />
<ProjectReference Include="..\..\__Libraries\StellaOps.Eventing\StellaOps.Eventing.csproj" />
@@ -26,6 +27,7 @@
<ItemGroup>
<PackageReference Include="Microsoft.AspNetCore.OpenApi" />
<PackageReference Include="Npgsql" />
<PackageReference Include="Swashbuckle.AspNetCore" />
</ItemGroup>

View File

@@ -0,0 +1,194 @@
-- Migration: 20260408_003_unified_audit_events
-- Purpose: Create persistent audit event store with SHA-256 hash chain integrity
-- Pattern: Follows JobEngine audit_entries hash chain (005_audit_ledger.sql)
-- ── Unified audit events table (immutable append-only log with hash chain) ──
CREATE TABLE IF NOT EXISTS timeline.unified_audit_events (
    id TEXT NOT NULL,
    tenant_id TEXT NOT NULL,
    timestamp TIMESTAMPTZ NOT NULL,
    module TEXT NOT NULL,
    action TEXT NOT NULL,
    severity TEXT NOT NULL DEFAULT 'info',
    -- Actor fields
    actor_id TEXT NOT NULL DEFAULT 'system',
    actor_name TEXT NOT NULL DEFAULT 'system',
    actor_email TEXT,
    actor_type TEXT NOT NULL DEFAULT 'system',
    actor_ip TEXT,
    actor_user_agent TEXT,
    -- Resource fields
    resource_type TEXT NOT NULL DEFAULT 'resource',
    resource_id TEXT NOT NULL DEFAULT 'unknown',
    resource_name TEXT,
    -- Event content
    description TEXT NOT NULL,
    details_jsonb JSONB NOT NULL DEFAULT '{}'::JSONB,
    diff_jsonb JSONB,
    -- Correlation and hierarchy
    correlation_id TEXT,
    parent_event_id TEXT,
    tags TEXT[] NOT NULL DEFAULT '{}',
    -- Hash chain integrity (SHA-256)
    -- content_hash covers the event fields + (tenant_id, sequence_number);
    -- previous_entry_hash links to the preceding event's content_hash (NULL for seq 1).
    content_hash TEXT NOT NULL,
    previous_entry_hash TEXT,
    -- NOTE(review): sequence_number has no UNIQUE (tenant_id, sequence_number)
    -- constraint; uniqueness is assumed to be enforced by
    -- timeline.next_unified_audit_sequence — confirm and consider adding one.
    sequence_number BIGINT NOT NULL,
    -- Metadata
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Composite key: event ids are only required to be unique per tenant.
    PRIMARY KEY (id, tenant_id)
);
-- ── Indexes for efficient querying ──
-- Tenant-scoped listings (time and chain order), plus per-facet filters.
CREATE INDEX IF NOT EXISTS idx_uae_tenant_time
    ON timeline.unified_audit_events (tenant_id, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_uae_tenant_seq
    ON timeline.unified_audit_events (tenant_id, sequence_number DESC);
CREATE INDEX IF NOT EXISTS idx_uae_tenant_module
    ON timeline.unified_audit_events (tenant_id, module);
CREATE INDEX IF NOT EXISTS idx_uae_tenant_action
    ON timeline.unified_audit_events (tenant_id, action);
CREATE INDEX IF NOT EXISTS idx_uae_tenant_severity
    ON timeline.unified_audit_events (tenant_id, severity);
CREATE INDEX IF NOT EXISTS idx_uae_tenant_actor
    ON timeline.unified_audit_events (tenant_id, actor_id);
CREATE INDEX IF NOT EXISTS idx_uae_tenant_resource
    ON timeline.unified_audit_events (tenant_id, resource_type, resource_id);
-- Partial index: correlation lookups only ever target non-null ids.
CREATE INDEX IF NOT EXISTS idx_uae_correlation
    ON timeline.unified_audit_events (correlation_id)
    WHERE correlation_id IS NOT NULL;
-- GIN for tag-membership queries (tags @> ARRAY[...]).
CREATE INDEX IF NOT EXISTS idx_uae_tags
    ON timeline.unified_audit_events USING GIN (tags);
CREATE INDEX IF NOT EXISTS idx_uae_content_hash
    ON timeline.unified_audit_events (content_hash);
-- Full-text search over descriptions (English stemming).
CREATE INDEX IF NOT EXISTS idx_uae_description_search
    ON timeline.unified_audit_events USING GIN (to_tsvector('english', description));
-- ── Per-tenant sequence tracking for hash chain ──
-- One row per tenant: the highest sequence handed out and the hash of the most
-- recently written event (the next event's previous_entry_hash).
CREATE TABLE IF NOT EXISTS timeline.unified_audit_sequences (
    tenant_id TEXT PRIMARY KEY,
    last_sequence_number BIGINT NOT NULL DEFAULT 0,
    last_entry_hash TEXT,
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- ── Function: get next sequence number and previous hash for a tenant ──
-- Atomically claims the next per-tenant sequence slot. The ON CONFLICT upsert takes a
-- row lock on the tenant's sequence row, so concurrent writers for the same tenant are
-- serialised. RETURNING yields the post-upsert row: last_sequence_number is the freshly
-- claimed sequence, while last_entry_hash is NOT touched by the DO UPDATE and therefore
-- still holds the hash of the PREVIOUS entry — exactly what the caller needs for chain
-- linking (it is updated afterwards via update_unified_audit_sequence_hash).
CREATE OR REPLACE FUNCTION timeline.next_unified_audit_sequence(
    p_tenant_id TEXT
) RETURNS TABLE (
    next_seq BIGINT,
    prev_hash TEXT
) AS $$
DECLARE
    v_next_seq BIGINT;
    v_prev_hash TEXT;
BEGIN
    -- First event for a tenant inserts (seq 1, no previous hash); later events
    -- increment the existing counter.
    INSERT INTO timeline.unified_audit_sequences (tenant_id, last_sequence_number, last_entry_hash, updated_at)
    VALUES (p_tenant_id, 1, NULL, NOW())
    ON CONFLICT (tenant_id)
    DO UPDATE SET
        last_sequence_number = timeline.unified_audit_sequences.last_sequence_number + 1,
        updated_at = NOW()
    RETURNING
        timeline.unified_audit_sequences.last_sequence_number,
        timeline.unified_audit_sequences.last_entry_hash
    INTO v_next_seq, v_prev_hash;
    RETURN QUERY SELECT v_next_seq, v_prev_hash;
END;
$$ LANGUAGE plpgsql;
-- ── Function: update sequence hash after event insertion ──
-- Records the just-inserted event's content_hash as the tenant's chain tail so the
-- NEXT call to next_unified_audit_sequence returns it as prev_hash. Must be called in
-- the same transaction as the event insert to keep the chain consistent.
CREATE OR REPLACE FUNCTION timeline.update_unified_audit_sequence_hash(
    p_tenant_id TEXT,
    p_content_hash TEXT
) RETURNS VOID AS $$
BEGIN
    UPDATE timeline.unified_audit_sequences
    SET last_entry_hash = p_content_hash,
        updated_at = NOW()
    WHERE tenant_id = p_tenant_id;
END;
$$ LANGUAGE plpgsql;
-- ── Function: verify hash chain integrity ──
-- Walks the tenant's events in sequence order and checks each row's
-- previous_entry_hash against the preceding row's content_hash.
-- Uses IS DISTINCT FROM for the link check: the previous "!=" comparison evaluated
-- to NULL (not TRUE) whenever previous_entry_hash was NULL, so a tampered non-first
-- row with a NULLed-out back-link silently passed verification.
-- When p_start_seq > 1 the first inspected row's back-link cannot be validated
-- (its predecessor is outside the range); this matches the original behaviour.
CREATE OR REPLACE FUNCTION timeline.verify_unified_audit_chain(
    p_tenant_id TEXT,
    p_start_seq BIGINT DEFAULT 1,
    p_end_seq BIGINT DEFAULT NULL
) RETURNS TABLE (
    is_valid BOOLEAN,
    invalid_event_id TEXT,
    invalid_sequence BIGINT,
    error_message TEXT
) AS $$
DECLARE
    v_prev_hash TEXT;
    v_entry RECORD;
BEGIN
    FOR v_entry IN
        SELECT e.id, e.sequence_number, e.previous_entry_hash, e.content_hash
        FROM timeline.unified_audit_events e
        WHERE e.tenant_id = p_tenant_id
          AND e.sequence_number >= p_start_seq
          AND (p_end_seq IS NULL OR e.sequence_number <= p_end_seq)
        ORDER BY e.sequence_number ASC
    LOOP
        -- First entry in the chain must not point at a predecessor
        IF v_entry.sequence_number = 1 AND v_entry.previous_entry_hash IS NOT NULL THEN
            RETURN QUERY SELECT FALSE, v_entry.id, v_entry.sequence_number,
                'First entry should have null previous_entry_hash'::TEXT;
            RETURN;
        END IF;
        -- Chain link check: NULL-safe comparison so a missing back-link is reported
        -- as a break instead of being skipped by SQL NULL semantics.
        IF v_prev_hash IS NOT NULL AND v_entry.previous_entry_hash IS DISTINCT FROM v_prev_hash THEN
            RETURN QUERY SELECT FALSE, v_entry.id, v_entry.sequence_number,
                format('Chain break at seq %s: expected previous_hash %s, got %s',
                       v_entry.sequence_number, v_prev_hash,
                       COALESCE(v_entry.previous_entry_hash, '<null>'));
            RETURN;
        END IF;
        v_prev_hash := v_entry.content_hash;
    END LOOP;
    RETURN QUERY SELECT TRUE, NULL::TEXT, NULL::BIGINT, NULL::TEXT;
END;
$$ LANGUAGE plpgsql;
-- ── Comments ──
-- Note: the adjacent string literals below are joined by PostgreSQL's implicit
-- newline-separated string-constant concatenation; this is intentional.
COMMENT ON TABLE timeline.unified_audit_events IS
    'Immutable unified audit log with SHA-256 hash chain for tamper evidence. '
    'Receives events from all services via the audit ingest endpoint.';
COMMENT ON TABLE timeline.unified_audit_sequences IS
    'Per-tenant sequence tracking for unified audit hash chain integrity.';
COMMENT ON FUNCTION timeline.next_unified_audit_sequence IS
    'Atomically increments the sequence counter for a tenant and returns the previous hash for chain linking.';
COMMENT ON FUNCTION timeline.verify_unified_audit_chain IS
    'Verifies the hash chain integrity of unified audit events for a tenant within an optional sequence range.';

View File

@@ -0,0 +1,108 @@
using System;
using System.Linq;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Logging;
using StellaOps.Cryptography;
namespace StellaOps.Cryptography.DependencyInjection;
/// <summary>
/// Extension methods for registering tenant-aware crypto provider resolution.
/// </summary>
public static class TenantAwareCryptoProviderRegistryExtensions
{
    /// <summary>
    /// Wraps the already-registered <see cref="ICryptoProviderRegistry"/> in a
    /// tenant-aware decorator.
    /// <para>
    /// When a tenant context exists and that tenant has configured crypto provider
    /// preferences, the decorator resolves providers in the tenant's preferred order;
    /// otherwise it falls back to the inner registry's default ordering.
    /// </para>
    /// <para>
    /// Prerequisites:
    /// <list type="bullet">
    /// <item><see cref="ICryptoProviderRegistry"/> must already be registered (via <c>AddStellaOpsCrypto</c>).</item>
    /// <item><see cref="ITenantCryptoPreferenceProvider"/> must be registered by the caller.</item>
    /// </list>
    /// </para>
    /// </summary>
    /// <param name="services">Service collection.</param>
    /// <param name="tenantIdAccessorFactory">
    /// Factory producing a function that returns the current tenant ID, or null when no
    /// tenant context exists.
    /// Example: <c>sp => () => sp.GetService&lt;IStellaOpsTenantAccessor&gt;()?.TenantId</c>
    /// </param>
    /// <param name="cacheTtl">
    /// How long tenant preferences are cached before refresh. Default: 5 minutes.
    /// </param>
    /// <returns>The service collection.</returns>
    public static IServiceCollection AddTenantAwareCryptoResolution(
        this IServiceCollection services,
        Func<IServiceProvider, Func<string?>> tenantIdAccessorFactory,
        TimeSpan? cacheTtl = null)
    {
        ArgumentNullException.ThrowIfNull(services);
        ArgumentNullException.ThrowIfNull(tenantIdAccessorFactory);

        // Manual decorator pattern: capture the most recent ICryptoProviderRegistry
        // registration, remove it, and re-register a factory that wraps it.
        var existing = services.LastOrDefault(d => d.ServiceType == typeof(ICryptoProviderRegistry))
            ?? throw new InvalidOperationException(
                "ICryptoProviderRegistry is not registered. Call AddStellaOpsCrypto() before AddTenantAwareCryptoResolution().");
        services.Remove(existing);

        services.AddSingleton<ICryptoProviderRegistry>(sp =>
        {
            // Materialise the original (inner) registry from its captured descriptor.
            var wrapped = ResolveInner(sp, existing);

            // No preference provider means the decorator would be a pure pass-through,
            // so skip wrapping entirely. Expected for CLI / background worker hosts.
            var preferences = sp.GetService<ITenantCryptoPreferenceProvider>();
            if (preferences is null)
            {
                return wrapped;
            }

            return new TenantAwareCryptoProviderRegistry(
                wrapped,
                preferences,
                tenantIdAccessorFactory(sp),
                sp.GetService<TimeProvider>() ?? TimeProvider.System,
                sp.GetRequiredService<ILoggerFactory>().CreateLogger<TenantAwareCryptoProviderRegistry>(),
                cacheTtl);
        });

        return services;
    }

    /// <summary>
    /// Resolves the inner registry from the captured <see cref="ServiceDescriptor"/>,
    /// whichever registration shape (instance, factory, or implementation type) was used.
    /// </summary>
    private static ICryptoProviderRegistry ResolveInner(IServiceProvider sp, ServiceDescriptor descriptor)
        => descriptor switch
        {
            { ImplementationInstance: ICryptoProviderRegistry instance } => instance,
            { ImplementationFactory: { } factory } => (ICryptoProviderRegistry)factory(sp),
            { ImplementationType: { } type } => (ICryptoProviderRegistry)ActivatorUtilities.CreateInstance(sp, type),
            _ => throw new InvalidOperationException(
                $"Cannot resolve inner ICryptoProviderRegistry from descriptor: {descriptor}"),
        };
}

View File

@@ -0,0 +1,29 @@
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
namespace StellaOps.Cryptography;
/// <summary>
/// Provides per-tenant crypto provider ordering.
/// Implementations are expected to cache results internally (recommended TTL: 60s-5min)
/// to avoid hitting persistence on every crypto operation.
/// Consumed by <see cref="TenantAwareCryptoProviderRegistry"/>.
/// </summary>
public interface ITenantCryptoPreferenceProvider
{
    /// <summary>
    /// Returns the tenant's preferred provider ordering, or an empty list if no preferences are set.
    /// Only active preferences should be returned, ordered by priority (ascending).
    /// Implementations should not throw for unknown tenants — return an empty list instead.
    /// </summary>
    /// <param name="tenantId">Tenant identifier (normalised, lower-case).</param>
    /// <param name="algorithmScope">
    /// Algorithm scope filter (e.g., "SM", "GOST", or "*" for global).
    /// Implementations should return global ("*") preferences when no scope-specific preferences exist.
    /// </param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Ordered list of provider names; empty list means "use default ordering".</returns>
    Task<IReadOnlyList<string>> GetPreferredProvidersAsync(
        string tenantId,
        string algorithmScope = "*",
        CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,218 @@
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
namespace StellaOps.Cryptography;
/// <summary>
/// Decorator over <see cref="ICryptoProviderRegistry"/> that consults tenant preferences
/// (via <see cref="ITenantCryptoPreferenceProvider"/>) to override the default provider ordering.
/// <para>
/// Falls back to the inner registry's default ordering when:
/// <list type="bullet">
/// <item>No tenant context is available (CLI, background workers)</item>
/// <item>No preferences are configured for the current tenant</item>
/// <item>The preference provider is unavailable or throws</item>
/// </list>
/// </para>
/// <para>
/// Preference lookups are cached per (tenant, scope) with a TTL; stale entries are
/// served while a background refresh runs, so only the very first lookup per key blocks.
/// </para>
/// </summary>
public sealed class TenantAwareCryptoProviderRegistry : ICryptoProviderRegistry
{
    private readonly ICryptoProviderRegistry inner;
    private readonly ITenantCryptoPreferenceProvider preferenceProvider;
    // Returns the current tenant ID, or null when no tenant context exists.
    private readonly Func<string?> tenantIdAccessor;
    private readonly TimeProvider timeProvider;
    private readonly ILogger logger;
    private readonly TimeSpan cacheTtl;
    // Keyed by "{tenantId}:{algorithmScope}". Entries are replaced, never evicted;
    // NOTE(review): unbounded growth is possible with many tenants — confirm acceptable.
    private readonly ConcurrentDictionary<string, CachedPreference> cache = new(StringComparer.OrdinalIgnoreCase);

    /// <summary>
    /// Creates the decorator.
    /// </summary>
    /// <param name="inner">Registry providing the default ordering and actual resolution.</param>
    /// <param name="preferenceProvider">Source of per-tenant provider preferences.</param>
    /// <param name="tenantIdAccessor">Returns the ambient tenant ID, or null.</param>
    /// <param name="timeProvider">Clock used for cache TTL checks (injectable for tests).</param>
    /// <param name="logger">Diagnostics logger.</param>
    /// <param name="cacheTtl">Preference cache TTL; defaults to 5 minutes.</param>
    public TenantAwareCryptoProviderRegistry(
        ICryptoProviderRegistry inner,
        ITenantCryptoPreferenceProvider preferenceProvider,
        Func<string?> tenantIdAccessor,
        TimeProvider timeProvider,
        ILogger logger,
        TimeSpan? cacheTtl = null)
    {
        this.inner = inner ?? throw new ArgumentNullException(nameof(inner));
        this.preferenceProvider = preferenceProvider ?? throw new ArgumentNullException(nameof(preferenceProvider));
        this.tenantIdAccessor = tenantIdAccessor ?? throw new ArgumentNullException(nameof(tenantIdAccessor));
        this.timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
        this.logger = logger ?? throw new ArgumentNullException(nameof(logger));
        this.cacheTtl = cacheTtl ?? TimeSpan.FromMinutes(5);
    }

    /// <summary>All providers known to the inner registry (tenant preferences do not filter this set).</summary>
    public IReadOnlyCollection<ICryptoProvider> Providers => inner.Providers;

    /// <summary>Pass-through by-name resolution; an explicitly named provider bypasses tenant preferences.</summary>
    public bool TryResolve(string preferredProvider, out ICryptoProvider provider)
        => inner.TryResolve(preferredProvider, out provider);

    /// <summary>
    /// Resolves a provider for the capability/algorithm, trying the tenant's preferred
    /// providers (global "*" scope) first and falling back to the inner registry's
    /// default ordering.
    /// </summary>
    public ICryptoProvider ResolveOrThrow(CryptoCapability capability, string algorithmId)
    {
        var tenantOrder = GetTenantPreferredOrder(algorithmScope: "*");
        if (tenantOrder is null || tenantOrder.Count == 0)
        {
            return inner.ResolveOrThrow(capability, algorithmId);
        }
        // Try tenant-preferred providers first; unknown names or providers that do not
        // support this capability/algorithm are skipped.
        foreach (var providerName in tenantOrder)
        {
            if (inner.TryResolve(providerName, out var provider) &&
                provider.Supports(capability, algorithmId))
            {
                CryptoProviderMetrics.RecordProviderResolution(provider.Name, capability, algorithmId);
                return provider;
            }
        }
        // Fall back to default ordering for providers not in the tenant preference list
        return inner.ResolveOrThrow(capability, algorithmId);
    }

    /// <summary>
    /// Resolves a signer. An explicit <paramref name="preferredProvider"/> always wins;
    /// otherwise the first tenant-preferred provider supporting the request is passed
    /// down as the preference hint.
    /// </summary>
    public CryptoSignerResolution ResolveSigner(
        CryptoCapability capability,
        string algorithmId,
        CryptoKeyReference keyReference,
        string? preferredProvider = null)
    {
        // If caller already specified a preferred provider, honour it (explicit > tenant preference)
        if (!string.IsNullOrWhiteSpace(preferredProvider))
        {
            return inner.ResolveSigner(capability, algorithmId, keyReference, preferredProvider);
        }
        var tenantPreferred = GetTenantPreferredProvider(capability, algorithmId);
        return inner.ResolveSigner(capability, algorithmId, keyReference, tenantPreferred);
    }

    /// <summary>
    /// Resolves a hasher; same precedence rules as <see cref="ResolveSigner"/>, using the
    /// ContentHashing capability for the tenant-preference lookup.
    /// </summary>
    public CryptoHasherResolution ResolveHasher(string algorithmId, string? preferredProvider = null)
    {
        // If caller already specified a preferred provider, honour it
        if (!string.IsNullOrWhiteSpace(preferredProvider))
        {
            return inner.ResolveHasher(algorithmId, preferredProvider);
        }
        var tenantPreferred = GetTenantPreferredProvider(CryptoCapability.ContentHashing, algorithmId);
        return inner.ResolveHasher(algorithmId, tenantPreferred);
    }

    /// <summary>
    /// Returns the first tenant-preferred provider that supports the given capability and algorithm,
    /// or null if no tenant preference applies.
    /// </summary>
    private string? GetTenantPreferredProvider(CryptoCapability capability, string algorithmId)
    {
        var tenantOrder = GetTenantPreferredOrder(algorithmScope: "*");
        if (tenantOrder is null || tenantOrder.Count == 0)
        {
            return null;
        }
        foreach (var providerName in tenantOrder)
        {
            if (inner.TryResolve(providerName, out var provider) &&
                provider.Supports(capability, algorithmId))
            {
                return providerName;
            }
        }
        return null;
    }

    /// <summary>
    /// Reads the tenant's preferred provider order from cache, refreshing asynchronously when stale.
    /// Returns null when no tenant context is available.
    /// Failure modes are soft: any error is logged and treated as "no preferences".
    /// </summary>
    private IReadOnlyList<string>? GetTenantPreferredOrder(string algorithmScope)
    {
        string? tenantId;
        try
        {
            tenantId = tenantIdAccessor();
        }
        catch (Exception ex)
        {
            logger.LogDebug(ex, "Failed to resolve tenant ID for crypto provider selection; using default ordering.");
            return null;
        }
        if (string.IsNullOrWhiteSpace(tenantId))
        {
            return null;
        }
        var cacheKey = $"{tenantId}:{algorithmScope}";
        var now = timeProvider.GetUtcNow();
        // Fresh cache hit: serve directly.
        if (cache.TryGetValue(cacheKey, out var cached) && now - cached.FetchedAt < cacheTtl)
        {
            return cached.Providers;
        }
        // Cache miss or stale: refresh synchronously on first call, then async on subsequent stale reads.
        // This avoids blocking on every request while still keeping the cache warm.
        if (cached is not null)
        {
            // Stale: return stale data and refresh in background.
            // NOTE(review): every stale read before the refresh lands fires another
            // fire-and-forget refresh (no in-flight guard) — confirm the provider
            // tolerates this burst, or add a refresh-in-progress flag.
            _ = RefreshCacheAsync(cacheKey, tenantId, algorithmScope);
            return cached.Providers;
        }
        // Cold miss: must block to get initial data.
        // NOTE(review): sync-over-async on the first lookup per key — acceptable only
        // because it happens once per (tenant, scope) per TTL window; confirm callers
        // are never on a context where this could deadlock.
        try
        {
            var providers = preferenceProvider.GetPreferredProvidersAsync(tenantId, algorithmScope, CancellationToken.None)
                .ConfigureAwait(false)
                .GetAwaiter()
                .GetResult();
            var entry = new CachedPreference(providers, now);
            cache[cacheKey] = entry;
            if (providers.Count > 0)
            {
                logger.LogDebug(
                    "Loaded crypto provider preferences for tenant {TenantId} scope {Scope}: [{Providers}]",
                    tenantId, algorithmScope, string.Join(", ", providers));
            }
            return providers;
        }
        catch (Exception ex)
        {
            logger.LogWarning(ex,
                "Failed to load crypto provider preferences for tenant {TenantId}; using default ordering.",
                tenantId);
            // Cache an empty result to avoid repeated failures (retried after TTL expiry).
            cache[cacheKey] = new CachedPreference(Array.Empty<string>(), now);
            return null;
        }
    }

    /// <summary>
    /// Background refresh of a stale cache entry. Never throws: failures are logged and
    /// the stale entry stays in place until the next attempt.
    /// </summary>
    private async Task RefreshCacheAsync(string cacheKey, string tenantId, string algorithmScope)
    {
        try
        {
            var providers = await preferenceProvider.GetPreferredProvidersAsync(
                tenantId, algorithmScope, CancellationToken.None).ConfigureAwait(false);
            cache[cacheKey] = new CachedPreference(providers, timeProvider.GetUtcNow());
        }
        catch (Exception ex)
        {
            logger.LogWarning(ex,
                "Background refresh of crypto provider preferences failed for tenant {TenantId}.",
                tenantId);
        }
    }

    // Immutable cache entry: the preference list plus when it was fetched (for TTL checks).
    private sealed record CachedPreference(IReadOnlyList<string> Providers, DateTimeOffset FetchedAt);
}