This commit is contained in:
StellaOps Bot
2025-12-09 00:20:52 +02:00
parent 3d01bf9edc
commit bc0762e97d
261 changed files with 14033 additions and 4427 deletions

View File

@@ -89,14 +89,9 @@ public class Sm2AttestorTests
new AttestorSigningKeyRegistry(options, TimeProvider.System, NullLogger<AttestorSigningKeyRegistry>.Instance));
}
// Restores the SM_SOFT_ALLOWED environment variable from the value captured
// in _gate at construction time.
// NOTE(review): diff context — the public Dispose() below also performs this
// restore after calling Dispose(true), so the write happens twice. The repeat
// is idempotent but looks unintended; confirm which overload the final
// revision keeps.
private void Dispose(bool disposing)
{
// 'disposing' is unused; only the environment restore happens here.
Environment.SetEnvironmentVariable("SM_SOFT_ALLOWED", _gate);
}
// Public dispose entry point for the test fixture.
// NOTE(review): Dispose(true) already restores SM_SOFT_ALLOWED, so the
// explicit restore on the next line repeats the same (idempotent) write —
// confirm intent; one of the two calls appears redundant.
public void Dispose()
{
Dispose(true);
Environment.SetEnvironmentVariable("SM_SOFT_ALLOWED", _gate);
}
}

View File

@@ -25,6 +25,7 @@ using StellaOps.Attestor.WebService.Contracts;
using StellaOps.Attestor.Core.Bulk;
using Microsoft.AspNetCore.Server.Kestrel.Https;
using Serilog.Context;
using StellaOps.Cryptography.DependencyInjection;
const string ConfigurationSection = "attestor";
@@ -52,6 +53,7 @@ var clientCertificateAuthorities = LoadClientCertificateAuthorities(attestorOpti
builder.Services.AddSingleton(TimeProvider.System);
builder.Services.AddSingleton(attestorOptions);
builder.Services.AddStellaOpsCryptoRu(builder.Configuration, CryptoProviderRegistryValidator.EnforceRuLinuxDefaults);
builder.Services.AddRateLimiter(options =>
{

View File

@@ -116,7 +116,8 @@ builder.Host.UseSerilog((context, _, loggerConfiguration) =>
});
var authorityOptions = authorityConfiguration.Options;
builder.Services.AddStellaOpsCrypto(authorityOptions.Crypto);
CryptoProviderRegistryValidator.EnforceRuLinuxDefaults(authorityOptions.Crypto.Registry);
builder.Services.AddStellaOpsCryptoRu(builder.Configuration, CryptoProviderRegistryValidator.EnforceRuLinuxDefaults);
builder.Services.AddHostedService<AuthoritySecretHasherInitializer>();
var issuerUri = authorityOptions.Issuer;
if (issuerUri is null)

View File

@@ -9,6 +9,7 @@
<!-- Concelier is migrating off MongoDB; strip implicit Mongo2Go/Mongo driver packages inherited from the repo root. -->
<PackageReference Remove="Mongo2Go" />
<PackageReference Remove="MongoDB.Driver" />
<PackageReference Remove="MongoDB.Bson" />
</ItemGroup>
<ItemGroup Condition="$([System.String]::Copy('$(MSBuildProjectName)').EndsWith('.Tests')) and '$(UseConcelierTestInfra)'=='true'">
<PackageReference Include="coverlet.collector" Version="6.0.4" />

View File

@@ -2,13 +2,13 @@
## Role
Minimal API host wiring configuration, storage, plugin routines, and job endpoints. Operational surface for health, readiness, and job control.
## Scope
- Configuration: appsettings.json + etc/concelier.yaml (yaml path = ../etc/concelier.yaml); bind into ConcelierOptions with validation (Only Mongo supported).
- Mongo: MongoUrl from options.Storage.Dsn; IMongoClient/IMongoDatabase singletons; default database name fallback (options -> URL -> "concelier").
- Services: AddMongoStorage(); AddSourceHttpClients(); RegisterPluginRoutines(configuration, PluginHostOptions).
- Bootstrap: MongoBootstrapper.InitializeAsync on startup.
- Configuration: appsettings.json + etc/concelier.yaml (yaml path = ../etc/concelier.yaml); bind into ConcelierOptions with PostgreSQL storage enabled by default.
- Storage: PostgreSQL only (`Concelier:PostgresStorage:*`). No MongoDB/Mongo2Go; readiness probes issue `SELECT 1` against ConcelierDataSource.
- Services: AddConcelierPostgresStorage(); AddSourceHttpClients(); RegisterPluginRoutines(configuration, PluginHostOptions).
- Bootstrap: PostgreSQL connectivity verified on startup.
- Endpoints (configuration & job control only; root path intentionally unbound):
- GET /health -> {status:"healthy"} after options validation binds.
- GET /ready -> MongoDB ping; 503 on MongoException/Timeout.
- GET /ready -> PostgreSQL connectivity check; degraded if connection fails.
- GET /jobs?kind=&limit= -> recent runs.
- GET /jobs/{id} -> run detail.
- GET /jobs/definitions -> definitions with lastRun.
@@ -18,7 +18,7 @@ Minimal API host wiring configuration, storage, plugin routines, and job endpoin
- POST /jobs/{*jobKind} with {trigger?,parameters?} -> 202 Accepted (Location:/jobs/{runId}) | 404 | 409 | 423.
- PluginHost defaults: BaseDirectory = solution root; PluginsDirectory = "StellaOps.Concelier.PluginBinaries"; SearchPatterns += "StellaOps.Concelier.Plugin.*.dll"; EnsureDirectoryExists = true.
## Participants
- Core job system; Storage.Mongo; Source.Common HTTP clients; Exporter and Connector plugin routines discover/register jobs.
- Core job system; Storage.Postgres; Source.Common HTTP clients; Exporter and Connector plugin routines discover/register jobs.
## Interfaces & contracts
- Dependency injection boundary for all connectors/exporters; IOptions<ConcelierOptions> validated on start.
- Cancellation: pass app.Lifetime.ApplicationStopping to bootstrapper.
@@ -30,7 +30,7 @@ Out: business logic of jobs, HTML UI, authn/z (future).
- Structured responses with status codes; no stack traces in HTTP bodies; errors mapped cleanly.
## Tests
- Author and review coverage in `../StellaOps.Concelier.WebService.Tests`.
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
- Shared fixtures (PostgreSQL-backed harnesses) live in `../StellaOps.Concelier.Testing`.
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.
## Required Reading

View File

@@ -1,10 +1,11 @@
namespace StellaOps.Concelier.WebService.Diagnostics;
// Legacy health payload describing storage bootstrap state (driver name plus
// bootstrap completion timestamp/duration).
// NOTE(review): this revision introduces StorageHealth as the replacement —
// confirm no remaining consumers before keeping both shapes.
internal sealed record StorageBootstrapHealth(
string Driver,
bool Completed,
DateTimeOffset? CompletedAt,
double? DurationMs);
// Storage readiness payload surfaced by the health/ready endpoints:
// backend identifier (e.g. "postgres"), readiness flag, timestamp and
// observed latency of the last check, and the last error (null when the
// most recent check succeeded).
internal sealed record StorageHealth(
string Backend,
bool Ready,
DateTimeOffset? CheckedAt,
double? LatencyMs,
string? Error);
internal sealed record TelemetryHealth(
bool Enabled,
@@ -16,17 +17,11 @@ internal sealed record HealthDocument(
string Status,
DateTimeOffset StartedAt,
double UptimeSeconds,
StorageBootstrapHealth Storage,
StorageHealth Storage,
TelemetryHealth Telemetry);
internal sealed record MongoReadyHealth(
string Status,
double? LatencyMs,
DateTimeOffset? CheckedAt,
string? Error);
internal sealed record ReadyDocument(
string Status,
DateTimeOffset StartedAt,
double UptimeSeconds,
MongoReadyHealth Mongo);
StorageHealth Storage);

View File

@@ -11,8 +11,8 @@ internal sealed class ServiceStatus
private DateTimeOffset? _bootstrapCompletedAt;
private TimeSpan? _bootstrapDuration;
private DateTimeOffset? _lastReadyCheckAt;
private TimeSpan? _lastMongoLatency;
private string? _lastMongoError;
private TimeSpan? _lastStorageLatency;
private string? _lastStorageError;
private bool _lastReadySucceeded;
public ServiceStatus(TimeProvider timeProvider)
@@ -31,8 +31,8 @@ internal sealed class ServiceStatus
BootstrapCompletedAt: _bootstrapCompletedAt,
BootstrapDuration: _bootstrapDuration,
LastReadyCheckAt: _lastReadyCheckAt,
LastMongoLatency: _lastMongoLatency,
LastMongoError: _lastMongoError,
LastStorageLatency: _lastStorageLatency,
LastStorageError: _lastStorageError,
LastReadySucceeded: _lastReadySucceeded);
}
}
@@ -45,19 +45,19 @@ internal sealed class ServiceStatus
_bootstrapCompletedAt = completedAt;
_bootstrapDuration = duration;
_lastReadySucceeded = true;
_lastMongoLatency = duration;
_lastMongoError = null;
_lastStorageLatency = duration;
_lastStorageError = null;
_lastReadyCheckAt = completedAt;
}
}
public void RecordMongoCheck(bool success, TimeSpan latency, string? error)
public void RecordStorageCheck(bool success, TimeSpan latency, string? error)
{
lock (_sync)
{
_lastReadySucceeded = success;
_lastMongoLatency = latency;
_lastMongoError = success ? null : error;
_lastStorageLatency = latency;
_lastStorageError = success ? null : error;
_lastReadyCheckAt = _timeProvider.GetUtcNow();
}
}
@@ -69,6 +69,6 @@ internal sealed record ServiceHealthSnapshot(
DateTimeOffset? BootstrapCompletedAt,
TimeSpan? BootstrapDuration,
DateTimeOffset? LastReadyCheckAt,
TimeSpan? LastMongoLatency,
string? LastMongoError,
TimeSpan? LastStorageLatency,
string? LastStorageError,
bool LastReadySucceeded);

View File

@@ -1,71 +1,71 @@
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Reflection;
using Microsoft.AspNetCore.Builder;
using Microsoft.Extensions.DependencyInjection;
using OpenTelemetry.Metrics;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
using Serilog;
using Serilog.Core;
using Serilog.Events;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Reflection;
using Microsoft.AspNetCore.Builder;
using Microsoft.Extensions.DependencyInjection;
using OpenTelemetry.Metrics;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
using Serilog;
using Serilog.Core;
using Serilog.Events;
using StellaOps.Concelier.Core.Jobs;
using StellaOps.Concelier.Connector.Common.Telemetry;
using StellaOps.Concelier.WebService.Diagnostics;
using StellaOps.Concelier.WebService.Options;
using StellaOps.Ingestion.Telemetry;
namespace StellaOps.Concelier.WebService.Extensions;
public static class TelemetryExtensions
{
public static void ConfigureConcelierTelemetry(this WebApplicationBuilder builder, ConcelierOptions options)
{
ArgumentNullException.ThrowIfNull(builder);
ArgumentNullException.ThrowIfNull(options);
var telemetry = options.Telemetry ?? new ConcelierOptions.TelemetryOptions();
if (telemetry.EnableLogging)
{
builder.Host.UseSerilog((context, services, configuration) =>
{
ConfigureSerilog(configuration, telemetry, builder.Environment.EnvironmentName, builder.Environment.ApplicationName);
});
}
if (!telemetry.Enabled || (!telemetry.EnableTracing && !telemetry.EnableMetrics))
{
return;
}
var openTelemetry = builder.Services.AddOpenTelemetry();
openTelemetry.ConfigureResource(resource =>
{
var serviceName = telemetry.ServiceName ?? builder.Environment.ApplicationName;
var version = Assembly.GetExecutingAssembly().GetName().Version?.ToString() ?? "unknown";
resource.AddService(serviceName, serviceVersion: version, serviceInstanceId: Environment.MachineName);
resource.AddAttributes(new[]
{
new KeyValuePair<string, object>("deployment.environment", builder.Environment.EnvironmentName),
});
foreach (var attribute in telemetry.ResourceAttributes)
{
if (string.IsNullOrWhiteSpace(attribute.Key) || attribute.Value is null)
{
continue;
}
resource.AddAttributes(new[] { new KeyValuePair<string, object>(attribute.Key, attribute.Value) });
}
});
if (telemetry.EnableTracing)
{
namespace StellaOps.Concelier.WebService.Extensions;
public static class TelemetryExtensions
{
public static void ConfigureConcelierTelemetry(this WebApplicationBuilder builder, ConcelierOptions options)
{
ArgumentNullException.ThrowIfNull(builder);
ArgumentNullException.ThrowIfNull(options);
var telemetry = options.Telemetry ?? new ConcelierOptions.TelemetryOptions();
if (telemetry.EnableLogging)
{
builder.Host.UseSerilog((context, services, configuration) =>
{
ConfigureSerilog(configuration, telemetry, builder.Environment.EnvironmentName, builder.Environment.ApplicationName);
});
}
if (!telemetry.Enabled || (!telemetry.EnableTracing && !telemetry.EnableMetrics))
{
return;
}
var openTelemetry = builder.Services.AddOpenTelemetry();
openTelemetry.ConfigureResource(resource =>
{
var serviceName = telemetry.ServiceName ?? builder.Environment.ApplicationName;
var version = Assembly.GetExecutingAssembly().GetName().Version?.ToString() ?? "unknown";
resource.AddService(serviceName, serviceVersion: version, serviceInstanceId: Environment.MachineName);
resource.AddAttributes(new[]
{
new KeyValuePair<string, object>("deployment.environment", builder.Environment.EnvironmentName),
});
foreach (var attribute in telemetry.ResourceAttributes)
{
if (string.IsNullOrWhiteSpace(attribute.Key) || attribute.Value is null)
{
continue;
}
resource.AddAttributes(new[] { new KeyValuePair<string, object>(attribute.Key, attribute.Value) });
}
});
if (telemetry.EnableTracing)
{
openTelemetry.WithTracing(tracing =>
{
tracing
@@ -74,15 +74,15 @@ public static class TelemetryExtensions
.AddSource(IngestionTelemetry.ActivitySourceName)
.AddAspNetCoreInstrumentation()
.AddHttpClientInstrumentation();
ConfigureExporters(telemetry, tracing);
});
}
if (telemetry.EnableMetrics)
{
openTelemetry.WithMetrics(metrics =>
{
ConfigureExporters(telemetry, tracing);
});
}
if (telemetry.EnableMetrics)
{
openTelemetry.WithMetrics(metrics =>
{
metrics
.AddMeter(JobDiagnostics.MeterName)
.AddMeter(SourceDiagnostics.MeterName)
@@ -92,131 +92,132 @@ public static class TelemetryExtensions
.AddMeter("StellaOps.Concelier.Connector.Vndr.Chromium")
.AddMeter("StellaOps.Concelier.Connector.Vndr.Apple")
.AddMeter("StellaOps.Concelier.Connector.Vndr.Adobe")
.AddMeter("StellaOps.Concelier.VulnExplorer")
.AddMeter(JobMetrics.MeterName)
.AddAspNetCoreInstrumentation()
.AddHttpClientInstrumentation()
.AddRuntimeInstrumentation();
ConfigureExporters(telemetry, metrics);
});
}
}
// Applies the Concelier Serilog baseline: minimum level parsed from
// telemetry.MinimumLogLevel (case-insensitive; falls back to Information
// when unparsable), "Microsoft" namespaces clamped to Warning with the
// hosting lifetime kept at Information, log-context + activity enrichment,
// service metadata properties, and a console sink with an ISO-8601 template.
private static void ConfigureSerilog(LoggerConfiguration configuration, ConcelierOptions.TelemetryOptions telemetry, string environmentName, string applicationName)
{
if (!Enum.TryParse(telemetry.MinimumLogLevel, ignoreCase: true, out LogEventLevel level))
{
// Unknown level strings degrade to Information rather than throwing.
level = LogEventLevel.Information;
}
configuration
.MinimumLevel.Is(level)
.MinimumLevel.Override("Microsoft", LogEventLevel.Warning)
.MinimumLevel.Override("Microsoft.Hosting.Lifetime", LogEventLevel.Information)
.Enrich.FromLogContext()
.Enrich.With<ActivityEnricher>()
.Enrich.WithProperty("service.name", telemetry.ServiceName ?? applicationName)
.Enrich.WithProperty("deployment.environment", environmentName)
.WriteTo.Console(outputTemplate: "[{Timestamp:O}] [{Level:u3}] {Message:lj} {Properties}{NewLine}{Exception}");
}
// Wires trace exporters. Without an OTLP endpoint only the optional console
// exporter is added; with one, an OTLP exporter is registered (headers from
// BuildHeaders when present) followed by the optional console exporter.
private static void ConfigureExporters(ConcelierOptions.TelemetryOptions telemetry, TracerProviderBuilder tracing)
{
if (string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint))
{
if (telemetry.ExportConsole)
{
tracing.AddConsoleExporter();
}
// No endpoint configured: nothing further to wire.
return;
}
tracing.AddOtlpExporter(options =>
{
options.Endpoint = new Uri(telemetry.OtlpEndpoint);
var headers = BuildHeaders(telemetry);
if (!string.IsNullOrEmpty(headers))
{
// Only set headers when at least one non-blank pair survived filtering.
options.Headers = headers;
}
});
if (telemetry.ExportConsole)
{
tracing.AddConsoleExporter();
}
}
// Metrics twin of the tracing overload above: console exporter only when no
// OTLP endpoint is configured; otherwise OTLP (with optional headers) plus
// the optional console exporter.
private static void ConfigureExporters(ConcelierOptions.TelemetryOptions telemetry, MeterProviderBuilder metrics)
{
if (string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint))
{
if (telemetry.ExportConsole)
{
metrics.AddConsoleExporter();
}
// No endpoint configured: nothing further to wire.
return;
}
metrics.AddOtlpExporter(options =>
{
options.Endpoint = new Uri(telemetry.OtlpEndpoint);
var headers = BuildHeaders(telemetry);
if (!string.IsNullOrEmpty(headers))
{
// Only set headers when at least one non-blank pair survived filtering.
options.Headers = headers;
}
});
if (telemetry.ExportConsole)
{
metrics.AddConsoleExporter();
}
}
// Builds the OTLP header string ("key=value,key=value") from the configured
// header map, skipping entries with blank keys or values. Returns null when
// no headers are configured at all (distinct from an empty string when every
// entry was filtered out).
private static string? BuildHeaders(ConcelierOptions.TelemetryOptions telemetry)
{
if (telemetry.OtlpHeaders.Count == 0)
{
return null;
}
return string.Join(",", telemetry.OtlpHeaders
.Where(static kvp => !string.IsNullOrWhiteSpace(kvp.Key) && !string.IsNullOrWhiteSpace(kvp.Value))
.Select(static kvp => $"{kvp.Key}={kvp.Value}"));
}
}
/// <summary>
/// Serilog enricher that stamps W3C trace correlation identifiers from the
/// ambient <see cref="Activity"/> onto each log event (trace_id, span_id,
/// parent_span_id, trace_state). Properties already present on the event are
/// preserved because only AddPropertyIfAbsent is used.
/// </summary>
internal sealed class ActivityEnricher : ILogEventEnricher
{
    public void Enrich(LogEvent logEvent, ILogEventPropertyFactory propertyFactory)
    {
        if (Activity.Current is not { } activity)
        {
            // No ambient activity: nothing to enrich.
            return;
        }

        // Local helper keeps the add-if-absent plumbing in one place.
        void Add(string name, string value)
            => logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty(name, value));

        if (activity.TraceId != default)
        {
            Add("trace_id", activity.TraceId.ToString());
        }

        if (activity.SpanId != default)
        {
            Add("span_id", activity.SpanId.ToString());
        }

        if (activity.ParentSpanId != default)
        {
            Add("parent_span_id", activity.ParentSpanId.ToString());
        }

        if (!string.IsNullOrEmpty(activity.TraceStateString))
        {
            Add("trace_state", activity.TraceStateString);
        }
    }
}
ConfigureExporters(telemetry, metrics);
});
}
}
/// <summary>
/// Applies the shared Serilog baseline: minimum level from
/// <c>telemetry.MinimumLogLevel</c> (Information when the string does not
/// parse), "Microsoft" clamped to Warning with hosting lifetime at
/// Information, log-context and activity enrichment, service metadata
/// properties, and a console sink with an ISO-8601 timestamp template.
/// </summary>
private static void ConfigureSerilog(LoggerConfiguration configuration, ConcelierOptions.TelemetryOptions telemetry, string environmentName, string applicationName)
{
    // Unknown level strings degrade to Information rather than throwing.
    var level = Enum.TryParse(telemetry.MinimumLogLevel, ignoreCase: true, out LogEventLevel parsed)
        ? parsed
        : LogEventLevel.Information;

    configuration
        .MinimumLevel.Is(level)
        .MinimumLevel.Override("Microsoft", LogEventLevel.Warning)
        .MinimumLevel.Override("Microsoft.Hosting.Lifetime", LogEventLevel.Information)
        .Enrich.FromLogContext()
        .Enrich.With<ActivityEnricher>()
        .Enrich.WithProperty("service.name", telemetry.ServiceName ?? applicationName)
        .Enrich.WithProperty("deployment.environment", environmentName)
        .WriteTo.Console(outputTemplate: "[{Timestamp:O}] [{Level:u3}] {Message:lj} {Properties}{NewLine}{Exception}");
}
/// <summary>
/// Wires trace exporters: an OTLP exporter when an endpoint is configured
/// (attaching headers from <see cref="BuildHeaders"/> when present), plus an
/// optional console exporter in either case.
/// </summary>
private static void ConfigureExporters(ConcelierOptions.TelemetryOptions telemetry, TracerProviderBuilder tracing)
{
    var hasEndpoint = !string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint);

    if (hasEndpoint)
    {
        tracing.AddOtlpExporter(options =>
        {
            options.Endpoint = new Uri(telemetry.OtlpEndpoint);

            // Only set headers when at least one non-blank pair survived filtering.
            var headers = BuildHeaders(telemetry);
            if (!string.IsNullOrEmpty(headers))
            {
                options.Headers = headers;
            }
        });
    }

    // The console exporter is additive: it applies with or without OTLP.
    if (telemetry.ExportConsole)
    {
        tracing.AddConsoleExporter();
    }
}
/// <summary>
/// Metrics twin of the tracing overload: an OTLP exporter when an endpoint is
/// configured (attaching headers from <see cref="BuildHeaders"/> when
/// present), plus an optional console exporter in either case.
/// </summary>
private static void ConfigureExporters(ConcelierOptions.TelemetryOptions telemetry, MeterProviderBuilder metrics)
{
    var hasEndpoint = !string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint);

    if (hasEndpoint)
    {
        metrics.AddOtlpExporter(options =>
        {
            options.Endpoint = new Uri(telemetry.OtlpEndpoint);

            // Only set headers when at least one non-blank pair survived filtering.
            var headers = BuildHeaders(telemetry);
            if (!string.IsNullOrEmpty(headers))
            {
                options.Headers = headers;
            }
        });
    }

    // The console exporter is additive: it applies with or without OTLP.
    if (telemetry.ExportConsole)
    {
        metrics.AddConsoleExporter();
    }
}
/// <summary>
/// Builds the OTLP header string ("key=value,key=value") from the configured
/// header map, skipping entries with blank keys or values. Returns
/// <c>null</c> when no headers are configured at all (distinct from an empty
/// string when every entry was filtered out).
/// </summary>
private static string? BuildHeaders(ConcelierOptions.TelemetryOptions telemetry)
{
    if (telemetry.OtlpHeaders.Count == 0)
    {
        return null;
    }

    var pairs = new List<string>();
    foreach (var header in telemetry.OtlpHeaders)
    {
        if (!string.IsNullOrWhiteSpace(header.Key) && !string.IsNullOrWhiteSpace(header.Value))
        {
            pairs.Add($"{header.Key}={header.Value}");
        }
    }

    return string.Join(",", pairs);
}
}
// Serilog enricher that copies W3C trace identifiers from Activity.Current
// onto each log event: trace_id, span_id, parent_span_id, and trace_state.
// Uses AddPropertyIfAbsent, so properties already on the event win.
internal sealed class ActivityEnricher : ILogEventEnricher
{
public void Enrich(LogEvent logEvent, ILogEventPropertyFactory propertyFactory)
{
var activity = Activity.Current;
if (activity is null)
{
// No ambient activity: nothing to enrich.
return;
}
// Default-valued ids (all zeros) are skipped rather than emitted.
if (activity.TraceId != default)
{
logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("trace_id", activity.TraceId.ToString()));
}
if (activity.SpanId != default)
{
logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("span_id", activity.SpanId.ToString()));
}
if (activity.ParentSpanId != default)
{
logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("parent_span_id", activity.ParentSpanId.ToString()));
}
if (!string.IsNullOrEmpty(activity.TraceStateString))
{
logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("trace_state", activity.TraceStateString));
}
}
}

View File

@@ -7,9 +7,13 @@ namespace StellaOps.Concelier.WebService.Options;
public sealed class ConcelierOptions
{
[Obsolete("Mongo storage has been removed; use PostgresStorage.")]
public StorageOptions Storage { get; set; } = new();
public PostgresStorageOptions? PostgresStorage { get; set; }
public PostgresStorageOptions? PostgresStorage { get; set; } = new PostgresStorageOptions
{
Enabled = true
};
public PluginOptions Plugins { get; set; } = new();
@@ -33,6 +37,7 @@ public sealed class ConcelierOptions
/// </summary>
public AirGapOptions AirGap { get; set; } = new();
[Obsolete("Mongo storage has been removed; use PostgresStorage.")]
public sealed class StorageOptions
{
public string Driver { get; set; } = "mongo";

View File

@@ -2,30 +2,17 @@ using System;
using System.Collections.Generic;
using Microsoft.Extensions.Logging;
using StellaOps.Auth.Abstractions;
namespace StellaOps.Concelier.WebService.Options;
public static class ConcelierOptionsValidator
{
public static void Validate(ConcelierOptions options)
{
ArgumentNullException.ThrowIfNull(options);
if (!string.Equals(options.Storage.Driver, "mongo", StringComparison.OrdinalIgnoreCase))
{
throw new InvalidOperationException("Only Mongo storage driver is supported (storage.driver == 'mongo').");
}
if (string.IsNullOrWhiteSpace(options.Storage.Dsn))
{
throw new InvalidOperationException("Storage DSN must be configured.");
}
if (options.Storage.CommandTimeoutSeconds <= 0)
{
throw new InvalidOperationException("Command timeout must be greater than zero seconds.");
}
namespace StellaOps.Concelier.WebService.Options;
public static class ConcelierOptionsValidator
{
public static void Validate(ConcelierOptions options)
{
ArgumentNullException.ThrowIfNull(options);
ValidatePostgres(options);
options.Telemetry ??= new ConcelierOptions.TelemetryOptions();
options.Authority ??= new ConcelierOptions.AuthorityOptions();
@@ -107,25 +94,25 @@ public static class ConcelierOptionsValidator
}
}
}
if (!Enum.TryParse(options.Telemetry.MinimumLogLevel, ignoreCase: true, out LogLevel _))
{
throw new InvalidOperationException($"Telemetry minimum log level '{options.Telemetry.MinimumLogLevel}' is invalid.");
}
if (!string.IsNullOrWhiteSpace(options.Telemetry.OtlpEndpoint) && !Uri.TryCreate(options.Telemetry.OtlpEndpoint, UriKind.Absolute, out _))
{
throw new InvalidOperationException("Telemetry OTLP endpoint must be an absolute URI.");
}
foreach (var attribute in options.Telemetry.ResourceAttributes)
{
if (string.IsNullOrWhiteSpace(attribute.Key))
{
throw new InvalidOperationException("Telemetry resource attribute keys must be non-empty.");
}
}
if (!Enum.TryParse(options.Telemetry.MinimumLogLevel, ignoreCase: true, out LogLevel _))
{
throw new InvalidOperationException($"Telemetry minimum log level '{options.Telemetry.MinimumLogLevel}' is invalid.");
}
if (!string.IsNullOrWhiteSpace(options.Telemetry.OtlpEndpoint) && !Uri.TryCreate(options.Telemetry.OtlpEndpoint, UriKind.Absolute, out _))
{
throw new InvalidOperationException("Telemetry OTLP endpoint must be an absolute URI.");
}
foreach (var attribute in options.Telemetry.ResourceAttributes)
{
if (string.IsNullOrWhiteSpace(attribute.Key))
{
throw new InvalidOperationException("Telemetry resource attribute keys must be non-empty.");
}
}
foreach (var header in options.Telemetry.OtlpHeaders)
{
if (string.IsNullOrWhiteSpace(header.Key))
@@ -333,4 +320,50 @@ public static class ConcelierOptionsValidator
throw new InvalidOperationException("Evidence bundle pipelineVersion must be provided.");
}
}
/// <summary>
/// Normalizes and validates the PostgreSQL storage options: materializes the
/// options object when absent, enforces enablement, connection string, and
/// numeric bounds, and defaults the schema name to "vuln" when blank.
/// Throws <see cref="InvalidOperationException"/> on any violation.
/// </summary>
private static void ValidatePostgres(ConcelierOptions options)
{
    // Materialize and write back so later consumers never observe null.
    var postgres = options.PostgresStorage ?? new ConcelierOptions.PostgresStorageOptions();
    options.PostgresStorage = postgres;

    // Single throw site keeps the guard clauses below uniform.
    static void Fail(string message) => throw new InvalidOperationException(message);

    if (!postgres.Enabled)
    {
        Fail("PostgreSQL storage must be enabled (postgresStorage.enabled).");
    }

    if (string.IsNullOrWhiteSpace(postgres.ConnectionString))
    {
        Fail("PostgreSQL connectionString must be configured (postgresStorage.connectionString).");
    }

    if (postgres.CommandTimeoutSeconds <= 0)
    {
        Fail("PostgreSQL commandTimeoutSeconds must be greater than zero.");
    }

    if (postgres.MaxPoolSize < 1)
    {
        Fail("PostgreSQL maxPoolSize must be greater than zero.");
    }

    if (postgres.MinPoolSize < 0 || postgres.MinPoolSize > postgres.MaxPoolSize)
    {
        Fail("PostgreSQL minPoolSize must be between 0 and maxPoolSize.");
    }

    if (postgres.ConnectionIdleLifetimeSeconds < 0)
    {
        Fail("PostgreSQL connectionIdleLifetimeSeconds must be zero or greater.");
    }

    if (postgres.AutoMigrate && string.IsNullOrWhiteSpace(postgres.MigrationsPath))
    {
        Fail("PostgreSQL migrationsPath must be configured when autoMigrate is enabled.");
    }

    if (string.IsNullOrWhiteSpace(postgres.SchemaName))
    {
        // Fallback default schema when none was configured.
        postgres.SchemaName = "vuln";
    }
}
}

View File

@@ -26,6 +26,7 @@ using StellaOps.Concelier.Core.Events;
using StellaOps.Concelier.Core.Jobs;
using StellaOps.Concelier.Core.Observations;
using StellaOps.Concelier.Core.Linksets;
using StellaOps.Concelier.Core.Diagnostics;
using StellaOps.Concelier.Models;
using StellaOps.Concelier.WebService.Diagnostics;
using ServiceStatus = StellaOps.Concelier.WebService.Diagnostics.ServiceStatus;
@@ -54,9 +55,6 @@ using StellaOps.Concelier.Core.Aoc;
using StellaOps.Concelier.Core.Raw;
using StellaOps.Concelier.RawModels;
using StellaOps.Concelier.Storage.Postgres;
using StellaOps.Concelier.Storage.Mongo;
using StellaOps.Concelier.Storage.Mongo.Advisories;
using StellaOps.Concelier.Storage.Mongo.Aliases;
using StellaOps.Concelier.Core.Attestation;
using StellaOps.Concelier.Core.Signals;
using AttestationClaims = StellaOps.Concelier.Core.Attestation.AttestationClaims;
@@ -64,8 +62,10 @@ using StellaOps.Concelier.Core.Orchestration;
using System.Diagnostics.Metrics;
using StellaOps.Concelier.Models.Observations;
using StellaOps.Aoc.AspNetCore.Results;
using StellaOps.Provenance.Mongo;
using HttpResults = Microsoft.AspNetCore.Http.Results;
using StellaOps.Concelier.Storage.Mongo.Advisories;
using StellaOps.Concelier.Storage.Mongo.Aliases;
using StellaOps.Provenance.Mongo;
namespace StellaOps.Concelier.WebService
{
@@ -91,9 +91,10 @@ builder.Host.ConfigureAppConfiguration((context, cfg) =>
{
cfg.AddInMemoryCollection(new Dictionary<string, string?>
{
{"Concelier:Storage:Dsn", Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "mongodb://localhost:27017/test-health"},
{"Concelier:Storage:Driver", "mongo"},
{"Concelier:Storage:CommandTimeoutSeconds", "30"},
{"Concelier:PostgresStorage:Enabled", "true"},
{"Concelier:PostgresStorage:ConnectionString", Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "Host=localhost;Port=5432;Database=concelier_test;Username=postgres;Password=postgres"},
{"Concelier:PostgresStorage:CommandTimeoutSeconds", "30"},
{"Concelier:PostgresStorage:SchemaName", "vuln"},
{"Concelier:Telemetry:Enabled", "false"}
});
}
@@ -125,11 +126,12 @@ if (builder.Environment.IsEnvironment("Testing"))
#pragma warning restore ASP0000
concelierOptions = tempProvider.GetService<IOptions<ConcelierOptions>>()?.Value ?? new ConcelierOptions
{
Storage = new ConcelierOptions.StorageOptions
PostgresStorage = new ConcelierOptions.PostgresStorageOptions
{
Dsn = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "mongodb://localhost:27017/test-health",
Driver = "mongo",
CommandTimeoutSeconds = 30
Enabled = true,
ConnectionString = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "Host=localhost;Port=5432;Database=concelier_test;Username=postgres;Password=postgres",
CommandTimeoutSeconds = 30,
SchemaName = "vuln"
},
Telemetry = new ConcelierOptions.TelemetryOptions
{
@@ -137,10 +139,18 @@ if (builder.Environment.IsEnvironment("Testing"))
}
};
concelierOptions.Storage ??= new ConcelierOptions.StorageOptions();
concelierOptions.Storage.Dsn = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "mongodb://localhost:27017/orch-tests";
concelierOptions.Storage.Driver = "mongo";
concelierOptions.Storage.CommandTimeoutSeconds = concelierOptions.Storage.CommandTimeoutSeconds <= 0 ? 30 : concelierOptions.Storage.CommandTimeoutSeconds;
concelierOptions.PostgresStorage ??= new ConcelierOptions.PostgresStorageOptions
{
Enabled = true,
ConnectionString = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "Host=localhost;Port=5432;Database=concelier_test;Username=postgres;Password=postgres",
CommandTimeoutSeconds = 30,
SchemaName = "vuln"
};
if (string.IsNullOrWhiteSpace(concelierOptions.PostgresStorage.ConnectionString))
{
concelierOptions.PostgresStorage.ConnectionString = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? string.Empty;
}
ConcelierOptionsPostConfigure.Apply(concelierOptions, contentRootPath);
// Skip validation in Testing to allow factory-provided wiring.
@@ -149,10 +159,21 @@ else
{
concelierOptions = builder.Configuration.BindOptions<ConcelierOptions>(postConfigure: (opts, _) =>
{
var testDsn = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN");
if (string.IsNullOrWhiteSpace(opts.Storage.Dsn) && !string.IsNullOrWhiteSpace(testDsn))
var testDsn = Environment.GetEnvironmentVariable("CONCELIER_POSTGRES_DSN")
?? Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN");
opts.PostgresStorage ??= new ConcelierOptions.PostgresStorageOptions
{
opts.Storage.Dsn = testDsn;
Enabled = !string.IsNullOrWhiteSpace(testDsn),
ConnectionString = testDsn ?? string.Empty,
SchemaName = "vuln",
CommandTimeoutSeconds = 30
};
if (string.IsNullOrWhiteSpace(opts.PostgresStorage.ConnectionString) && !string.IsNullOrWhiteSpace(testDsn))
{
opts.PostgresStorage.ConnectionString = testDsn;
opts.PostgresStorage.Enabled = true;
}
ConcelierOptionsPostConfigure.Apply(opts, contentRootPath);
@@ -179,24 +200,26 @@ builder.Services.AddSingleton<MirrorFileLocator>();
var isTesting = builder.Environment.IsEnvironment("Testing");
// Add PostgreSQL storage for LNM linkset cache if configured.
// This provides a PostgreSQL-backed implementation of IAdvisoryLinksetStore for the read-through cache.
if (concelierOptions.PostgresStorage is { Enabled: true } postgresOptions)
// Add PostgreSQL storage for all Concelier persistence.
var postgresOptions = concelierOptions.PostgresStorage ?? throw new InvalidOperationException("PostgreSQL storage must be configured.");
if (!postgresOptions.Enabled)
{
builder.Services.AddConcelierPostgresStorage(pgOptions =>
{
pgOptions.ConnectionString = postgresOptions.ConnectionString;
pgOptions.CommandTimeoutSeconds = postgresOptions.CommandTimeoutSeconds;
pgOptions.MaxPoolSize = postgresOptions.MaxPoolSize;
pgOptions.MinPoolSize = postgresOptions.MinPoolSize;
pgOptions.ConnectionIdleLifetimeSeconds = postgresOptions.ConnectionIdleLifetimeSeconds;
pgOptions.Pooling = postgresOptions.Pooling;
pgOptions.SchemaName = postgresOptions.SchemaName;
pgOptions.AutoMigrate = postgresOptions.AutoMigrate;
pgOptions.MigrationsPath = postgresOptions.MigrationsPath;
});
throw new InvalidOperationException("PostgreSQL storage must be enabled.");
}
builder.Services.AddConcelierPostgresStorage(pgOptions =>
{
pgOptions.ConnectionString = postgresOptions.ConnectionString;
pgOptions.CommandTimeoutSeconds = postgresOptions.CommandTimeoutSeconds;
pgOptions.MaxPoolSize = postgresOptions.MaxPoolSize;
pgOptions.MinPoolSize = postgresOptions.MinPoolSize;
pgOptions.ConnectionIdleLifetimeSeconds = postgresOptions.ConnectionIdleLifetimeSeconds;
pgOptions.Pooling = postgresOptions.Pooling;
pgOptions.SchemaName = postgresOptions.SchemaName;
pgOptions.AutoMigrate = postgresOptions.AutoMigrate;
pgOptions.MigrationsPath = postgresOptions.MigrationsPath;
});
builder.Services.AddOptions<AdvisoryObservationEventPublisherOptions>()
.Bind(builder.Configuration.GetSection("advisoryObservationEvents"))
.PostConfigure(options =>
@@ -1039,9 +1062,12 @@ var advisoryIngestEndpoint = app.MapPost("/ingest/advisory", async (
return Problem(context, "Invalid advisory payload", StatusCodes.Status400BadRequest, ProblemTypes.Validation, ex.Message);
}
var chunkStopwatch = Stopwatch.StartNew();
try
{
var result = await rawService.IngestAsync(document, cancellationToken).ConfigureAwait(false);
chunkStopwatch.Stop();
var response = new AdvisoryIngestResponse(
result.Record.Id,
@@ -1065,10 +1091,21 @@ var advisoryIngestEndpoint = app.MapPost("/ingest/advisory", async (
ingestRequest.Source.Vendor ?? "(unknown)",
result.Inserted ? "inserted" : "duplicate"));
var telemetrySource = ingestRequest.Source.Vendor ?? "(unknown)";
var (_, _, conflicts) = AdvisoryLinksetNormalization.FromRawLinksetWithConfidence(document.Linkset, providedConfidence: null);
var collisionCount = VulnExplorerTelemetry.CountAliasCollisions(conflicts);
VulnExplorerTelemetry.RecordIdentifierCollisions(tenant, telemetrySource, collisionCount);
VulnExplorerTelemetry.RecordChunkLatency(tenant, telemetrySource, chunkStopwatch.Elapsed);
if (VulnExplorerTelemetry.IsWithdrawn(document.Content.Raw))
{
VulnExplorerTelemetry.RecordWithdrawnStatement(tenant, telemetrySource);
}
return JsonResult(response, statusCode);
}
catch (ConcelierAocGuardException guardException)
{
chunkStopwatch.Stop();
logger.LogWarning(
guardException,
"AOC guard rejected advisory ingest tenant={Tenant} upstream={UpstreamId} requestHash={RequestHash} documentHash={DocumentHash} codes={Codes}",
@@ -2115,6 +2152,12 @@ var advisoryChunksEndpoint = app.MapGet("/advisories/{advisoryKey}/chunks", asyn
buildResult.Response.Entries.Count,
duration,
guardrailCounts));
VulnExplorerTelemetry.RecordChunkRequest(
tenant!,
result: "ok",
cacheHit,
buildResult.Response.Entries.Count,
duration.TotalMilliseconds);
return JsonResult(buildResult.Response);
});
@@ -3269,7 +3312,7 @@ void ApplyNoCache(HttpResponse response)
response.Headers["Expires"] = "0";
}
await InitializeMongoAsync(app);
await InitializePostgresAsync(app);
app.MapGet("/health", ([FromServices] IOptions<ConcelierOptions> opts, [FromServices] StellaOps.Concelier.WebService.Diagnostics.ServiceStatus status, HttpContext context) =>
{
@@ -3278,11 +3321,12 @@ app.MapGet("/health", ([FromServices] IOptions<ConcelierOptions> opts, [FromServ
var snapshot = status.CreateSnapshot();
var uptimeSeconds = Math.Max((snapshot.CapturedAt - snapshot.StartedAt).TotalSeconds, 0d);
var storage = new StorageBootstrapHealth(
Driver: opts.Value.Storage.Driver,
Completed: snapshot.BootstrapCompletedAt is not null,
CompletedAt: snapshot.BootstrapCompletedAt,
DurationMs: snapshot.BootstrapDuration?.TotalMilliseconds);
var storage = new StorageHealth(
Backend: "postgres",
Ready: snapshot.LastReadySucceeded,
CheckedAt: snapshot.LastReadyCheckAt,
LatencyMs: snapshot.LastStorageLatency?.TotalMilliseconds,
Error: snapshot.LastStorageError);
var telemetry = new TelemetryHealth(
Enabled: opts.Value.Telemetry.Enabled,
@@ -3300,24 +3344,32 @@ app.MapGet("/health", ([FromServices] IOptions<ConcelierOptions> opts, [FromServ
return JsonResult(response);
});
app.MapGet("/ready", ([FromServices] StellaOps.Concelier.WebService.Diagnostics.ServiceStatus status, HttpContext context) =>
app.MapGet("/ready", async (
[FromServices] StellaOps.Concelier.WebService.Diagnostics.ServiceStatus status,
[FromServices] ConcelierDataSource dataSource,
HttpContext context,
CancellationToken cancellationToken) =>
{
ApplyNoCache(context.Response);
var (ready, latency, error) = await CheckPostgresAsync(dataSource, cancellationToken).ConfigureAwait(false);
status.RecordStorageCheck(ready, latency, error);
var snapshot = status.CreateSnapshot();
var uptimeSeconds = Math.Max((snapshot.CapturedAt - snapshot.StartedAt).TotalSeconds, 0d);
var mongo = new MongoReadyHealth(
Status: "bypassed",
LatencyMs: null,
var storage = new StorageHealth(
Backend: "postgres",
Ready: ready,
CheckedAt: snapshot.LastReadyCheckAt,
Error: "mongo disabled");
LatencyMs: latency.TotalMilliseconds,
Error: error);
var response = new ReadyDocument(
Status: "ready",
Status: ready ? "ready" : "degraded",
StartedAt: snapshot.StartedAt,
UptimeSeconds: uptimeSeconds,
Mongo: mongo);
Storage: storage);
return JsonResult(response);
});
@@ -4019,9 +4071,54 @@ static SignalsSymbolSetResponse ToSymbolSetResponse(AffectedSymbolSet symbolSet)
return pluginOptions;
}
static async Task InitializePostgresAsync(WebApplication app)
{
    // Bootstrap probe: verify PostgreSQL connectivity once at startup and record
    // the outcome so /health can report bootstrap completion and duration.
    var dataSource = app.Services.GetService<ConcelierDataSource>();
    var status = app.Services.GetRequiredService<StellaOps.Concelier.WebService.Diagnostics.ServiceStatus>();
    if (dataSource is null)
    {
        // Storage was never registered; surface a readable status instead of throwing.
        status.RecordStorageCheck(false, TimeSpan.Zero, "PostgreSQL storage not configured");
        return;
    }
    var stopwatch = Stopwatch.StartNew();
    try
    {
        var (ready, latency, error) = await CheckPostgresAsync(dataSource, CancellationToken.None).ConfigureAwait(false);
        stopwatch.Stop();
        status.RecordStorageCheck(ready, latency, error);
        if (ready)
        {
            status.MarkBootstrapCompleted(latency);
        }
    }
    catch (Exception ex)
    {
        // CheckPostgresAsync handles its own connection failures; this guards
        // against failures in the status plumbing itself.
        stopwatch.Stop();
        status.RecordStorageCheck(false, stopwatch.Elapsed, ex.Message);
    }
}
static async Task<(bool Ready, TimeSpan Latency, string? Error)> CheckPostgresAsync(
    ConcelierDataSource dataSource,
    CancellationToken cancellationToken)
{
    // Liveness probe: issue a trivial "select 1" round-trip and report whether
    // it succeeded, how long it took, and (on failure) why.
    var timer = Stopwatch.StartNew();
    try
    {
        await using var connection = await dataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var probe = connection.CreateCommand();
        probe.CommandText = "select 1";
        _ = await probe.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        timer.Stop();
        return (Ready: true, Latency: timer.Elapsed, Error: null);
    }
    catch (Exception ex)
    {
        // Any failure (open, command, cancellation surfaced as exception) is
        // reported as "not ready" with the elapsed time and the message.
        timer.Stop();
        return (Ready: false, Latency: timer.Elapsed, Error: ex.Message);
    }
}
}

View File

@@ -41,4 +41,4 @@
OutputItemType="Analyzer"
ReferenceOutputAssembly="false" />
</ItemGroup>
</Project>
</Project>

View File

@@ -185,6 +185,22 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Concelier.Analyze
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Ingestion.Telemetry", "..\__Libraries\StellaOps.Ingestion.Telemetry\StellaOps.Ingestion.Telemetry.csproj", "{85D215EC-DCFE-4F7F-BB07-540DCF66BE8C}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.SmRemote", "..\__Libraries\StellaOps.Cryptography.Plugin.SmRemote\StellaOps.Cryptography.Plugin.SmRemote.csproj", "{FCA91451-5D4A-4E75-9268-B253A902A726}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.SmRemote.Service", "..\SmRemote\StellaOps.SmRemote.Service\StellaOps.SmRemote.Service.csproj", "{E823EB56-86F4-4989-9480-9F1D8DD780F8}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.SmSoft", "..\__Libraries\StellaOps.Cryptography.Plugin.SmSoft\StellaOps.Cryptography.Plugin.SmSoft.csproj", "{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.DependencyInjection", "..\__Libraries\StellaOps.Cryptography.DependencyInjection\StellaOps.Cryptography.DependencyInjection.csproj", "{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.Pkcs11Gost", "..\__Libraries\StellaOps.Cryptography.Plugin.Pkcs11Gost\StellaOps.Cryptography.Plugin.Pkcs11Gost.csproj", "{3CC87BD4-38B7-421B-9688-B2ED2B392646}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.OpenSslGost", "..\__Libraries\StellaOps.Cryptography.Plugin.OpenSslGost\StellaOps.Cryptography.Plugin.OpenSslGost.csproj", "{27052CD3-98B4-4D37-88F9-7D8B54363F74}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.PqSoft", "..\__Libraries\StellaOps.Cryptography.Plugin.PqSoft\StellaOps.Cryptography.Plugin.PqSoft.csproj", "{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.WineCsp", "..\__Libraries\StellaOps.Cryptography.Plugin.WineCsp\StellaOps.Cryptography.Plugin.WineCsp.csproj", "{98908D4F-1A48-4CED-B2CF-92C3179B44FD}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -1251,6 +1267,102 @@ Global
{85D215EC-DCFE-4F7F-BB07-540DCF66BE8C}.Release|x64.Build.0 = Release|Any CPU
{85D215EC-DCFE-4F7F-BB07-540DCF66BE8C}.Release|x86.ActiveCfg = Release|Any CPU
{85D215EC-DCFE-4F7F-BB07-540DCF66BE8C}.Release|x86.Build.0 = Release|Any CPU
{FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|Any CPU.Build.0 = Debug|Any CPU
{FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|x64.ActiveCfg = Debug|Any CPU
{FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|x64.Build.0 = Debug|Any CPU
{FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|x86.ActiveCfg = Debug|Any CPU
{FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|x86.Build.0 = Debug|Any CPU
{FCA91451-5D4A-4E75-9268-B253A902A726}.Release|Any CPU.ActiveCfg = Release|Any CPU
{FCA91451-5D4A-4E75-9268-B253A902A726}.Release|Any CPU.Build.0 = Release|Any CPU
{FCA91451-5D4A-4E75-9268-B253A902A726}.Release|x64.ActiveCfg = Release|Any CPU
{FCA91451-5D4A-4E75-9268-B253A902A726}.Release|x64.Build.0 = Release|Any CPU
{FCA91451-5D4A-4E75-9268-B253A902A726}.Release|x86.ActiveCfg = Release|Any CPU
{FCA91451-5D4A-4E75-9268-B253A902A726}.Release|x86.Build.0 = Release|Any CPU
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|Any CPU.Build.0 = Debug|Any CPU
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|x64.ActiveCfg = Debug|Any CPU
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|x64.Build.0 = Debug|Any CPU
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|x86.ActiveCfg = Debug|Any CPU
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|x86.Build.0 = Debug|Any CPU
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|Any CPU.ActiveCfg = Release|Any CPU
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|Any CPU.Build.0 = Release|Any CPU
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|x64.ActiveCfg = Release|Any CPU
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|x64.Build.0 = Release|Any CPU
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|x86.ActiveCfg = Release|Any CPU
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|x86.Build.0 = Release|Any CPU
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|Any CPU.Build.0 = Debug|Any CPU
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|x64.ActiveCfg = Debug|Any CPU
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|x64.Build.0 = Debug|Any CPU
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|x86.ActiveCfg = Debug|Any CPU
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|x86.Build.0 = Debug|Any CPU
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|Any CPU.ActiveCfg = Release|Any CPU
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|Any CPU.Build.0 = Release|Any CPU
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|x64.ActiveCfg = Release|Any CPU
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|x64.Build.0 = Release|Any CPU
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|x86.ActiveCfg = Release|Any CPU
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|x86.Build.0 = Release|Any CPU
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|Any CPU.Build.0 = Debug|Any CPU
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|x64.ActiveCfg = Debug|Any CPU
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|x64.Build.0 = Debug|Any CPU
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|x86.ActiveCfg = Debug|Any CPU
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|x86.Build.0 = Debug|Any CPU
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|Any CPU.ActiveCfg = Release|Any CPU
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|Any CPU.Build.0 = Release|Any CPU
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|x64.ActiveCfg = Release|Any CPU
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|x64.Build.0 = Release|Any CPU
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|x86.ActiveCfg = Release|Any CPU
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|x86.Build.0 = Release|Any CPU
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|Any CPU.Build.0 = Debug|Any CPU
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|x64.ActiveCfg = Debug|Any CPU
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|x64.Build.0 = Debug|Any CPU
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|x86.ActiveCfg = Debug|Any CPU
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|x86.Build.0 = Debug|Any CPU
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|Any CPU.ActiveCfg = Release|Any CPU
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|Any CPU.Build.0 = Release|Any CPU
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|x64.ActiveCfg = Release|Any CPU
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|x64.Build.0 = Release|Any CPU
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|x86.ActiveCfg = Release|Any CPU
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|x86.Build.0 = Release|Any CPU
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|Any CPU.Build.0 = Debug|Any CPU
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|x64.ActiveCfg = Debug|Any CPU
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|x64.Build.0 = Debug|Any CPU
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|x86.ActiveCfg = Debug|Any CPU
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|x86.Build.0 = Debug|Any CPU
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|Any CPU.ActiveCfg = Release|Any CPU
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|Any CPU.Build.0 = Release|Any CPU
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|x64.ActiveCfg = Release|Any CPU
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|x64.Build.0 = Release|Any CPU
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|x86.ActiveCfg = Release|Any CPU
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|x86.Build.0 = Release|Any CPU
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|Any CPU.Build.0 = Debug|Any CPU
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|x64.ActiveCfg = Debug|Any CPU
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|x64.Build.0 = Debug|Any CPU
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|x86.ActiveCfg = Debug|Any CPU
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|x86.Build.0 = Debug|Any CPU
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|Any CPU.ActiveCfg = Release|Any CPU
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|Any CPU.Build.0 = Release|Any CPU
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|x64.ActiveCfg = Release|Any CPU
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|x64.Build.0 = Release|Any CPU
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|x86.ActiveCfg = Release|Any CPU
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|x86.Build.0 = Release|Any CPU
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|Any CPU.Build.0 = Debug|Any CPU
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|x64.ActiveCfg = Debug|Any CPU
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|x64.Build.0 = Debug|Any CPU
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|x86.ActiveCfg = Debug|Any CPU
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|x86.Build.0 = Debug|Any CPU
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|Any CPU.ActiveCfg = Release|Any CPU
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|Any CPU.Build.0 = Release|Any CPU
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|x64.ActiveCfg = Release|Any CPU
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|x64.Build.0 = Release|Any CPU
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|x86.ActiveCfg = Release|Any CPU
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|x86.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE

View File

@@ -0,0 +1,143 @@
using System;
using System.Collections.Generic;
using System.Diagnostics.Metrics;
using System.Linq;
using System.Text.Json;
using StellaOps.Concelier.Core.Linksets;
namespace StellaOps.Concelier.Core.Diagnostics;
/// <summary>
/// Metrics exported for Vuln Explorer consumers (fact-only telemetry).
/// </summary>
public static class VulnExplorerTelemetry
{
    /// <summary>Meter name under which all Vuln Explorer metrics are published.</summary>
    public const string MeterName = "StellaOps.Concelier.VulnExplorer";

    private static readonly Meter Meter = new(MeterName);

    private static readonly Counter<long> IdentifierCollisionCounter = Meter.CreateCounter<long>(
        "vuln.identifier_collisions_total",
        unit: "collision",
        description: "Identifier/alias collisions detected while aggregating linksets for Vuln Explorer.");

    private static readonly Counter<long> WithdrawnStatementCounter = Meter.CreateCounter<long>(
        "vuln.withdrawn_statements_total",
        unit: "statement",
        description: "Withdrawn advisory observations detected by change emitters.");

    private static readonly Counter<long> ChunkRequestCounter = Meter.CreateCounter<long>(
        "vuln.chunk_requests_total",
        unit: "request",
        description: "Advisory chunk requests served for Vuln Explorer evidence panels.");

    private static readonly Histogram<double> ChunkLatencyHistogram = Meter.CreateHistogram<double>(
        "vuln.chunk_latency_ms",
        unit: "ms",
        description: "Latency to build advisory chunks (fact-only) for Vuln Explorer.");

    /// <summary>
    /// Records alias/identifier collisions for a tenant/source pair.
    /// No-op when <paramref name="collisions"/> is non-positive or the tenant is blank.
    /// </summary>
    /// <param name="tenant">Tenant the collisions were observed for; required.</param>
    /// <param name="source">Upstream source vendor; falls back to "unknown".</param>
    /// <param name="collisions">Number of collisions to add to the counter.</param>
    public static void RecordIdentifierCollisions(string tenant, string? source, int collisions)
    {
        if (collisions <= 0 || string.IsNullOrWhiteSpace(tenant))
        {
            return;
        }
        var tags = new[]
        {
            KeyValuePair.Create<string, object?>("tenant", tenant),
            KeyValuePair.Create<string, object?>("source", source ?? "unknown")
        };
        IdentifierCollisionCounter.Add(collisions, tags);
    }

    /// <summary>
    /// Counts linkset conflicts that look like alias collisions: either the reason
    /// is "alias-inconsistency" or the conflicting field is "aliases".
    /// </summary>
    /// <returns>The collision count; zero for a null or empty conflict list.</returns>
    public static int CountAliasCollisions(IReadOnlyList<AdvisoryLinksetConflict>? conflicts)
    {
        if (conflicts is null || conflicts.Count == 0)
        {
            return 0;
        }
        return conflicts.Count(conflict =>
            string.Equals(conflict.Reason, "alias-inconsistency", StringComparison.OrdinalIgnoreCase) ||
            string.Equals(conflict.Field, "aliases", StringComparison.OrdinalIgnoreCase));
    }

    /// <summary>
    /// Records one withdrawn advisory statement for a tenant/source pair.
    /// No-op when the tenant is blank.
    /// </summary>
    public static void RecordWithdrawnStatement(string tenant, string? source)
    {
        if (string.IsNullOrWhiteSpace(tenant))
        {
            return;
        }
        var tags = new[]
        {
            KeyValuePair.Create<string, object?>("tenant", tenant),
            KeyValuePair.Create<string, object?>("source", source ?? "unknown")
        };
        WithdrawnStatementCounter.Add(1, tags);
    }

    /// <summary>
    /// Records one chunk request (counter) and its latency (histogram) under the
    /// same tag set. Result is normalized to lowercase; negative latency and
    /// chunk counts are clamped to zero. No-op when the tenant is blank.
    /// </summary>
    /// <param name="tenant">Tenant the request was served for; required.</param>
    /// <param name="result">Outcome label, e.g. "ok"; defaults to "unknown".</param>
    /// <param name="cacheHit">Whether the response came from cache.</param>
    /// <param name="chunkCount">Number of chunk entries returned.</param>
    /// <param name="latencyMs">Build latency in milliseconds.</param>
    public static void RecordChunkRequest(string tenant, string result, bool cacheHit, int chunkCount, double latencyMs)
    {
        if (string.IsNullOrWhiteSpace(tenant))
        {
            return;
        }
        var sanitizedResult = string.IsNullOrWhiteSpace(result) ? "unknown" : result.Trim().ToLowerInvariant();
        var safeLatency = latencyMs < 0 ? 0d : latencyMs;
        var normalizedChunkCount = chunkCount < 0 ? 0 : chunkCount;
        var tags = new[]
        {
            KeyValuePair.Create<string, object?>("tenant", tenant),
            KeyValuePair.Create<string, object?>("result", sanitizedResult),
            KeyValuePair.Create<string, object?>("cache_hit", cacheHit),
            KeyValuePair.Create<string, object?>("chunk_count", normalizedChunkCount)
        };
        ChunkRequestCounter.Add(1, tags);
        ChunkLatencyHistogram.Record(safeLatency, tags);
    }

    /// <summary>
    /// Records chunk-build latency for a tenant/source pair without the request
    /// counter. Negative durations are clamped to zero. No-op when the tenant is blank.
    /// </summary>
    public static void RecordChunkLatency(string tenant, string? source, TimeSpan duration)
    {
        if (string.IsNullOrWhiteSpace(tenant))
        {
            return;
        }
        var tags = new[]
        {
            KeyValuePair.Create<string, object?>("tenant", tenant),
            KeyValuePair.Create<string, object?>("source", source ?? "unknown")
        };
        ChunkLatencyHistogram.Record(Math.Max(0, duration.TotalMilliseconds), tags);
    }

    /// <summary>
    /// Determines whether a raw advisory document is withdrawn. Recognizes
    /// "withdrawn": true, "withdrawn": "&lt;timestamp&gt;" (OSV encodes withdrawal
    /// as an RFC3339 string), and a non-blank "withdrawn_at" string.
    /// </summary>
    /// <param name="content">Raw advisory JSON; anything but an object yields false.</param>
    public static bool IsWithdrawn(JsonElement content)
    {
        if (content.ValueKind != JsonValueKind.Object)
        {
            return false;
        }
        if (content.TryGetProperty("withdrawn", out var withdrawnElement))
        {
            if (withdrawnElement.ValueKind == JsonValueKind.True)
            {
                return true;
            }
            // OSV schema: "withdrawn" is an RFC3339 timestamp string when set.
            if (withdrawnElement.ValueKind == JsonValueKind.String &&
                !string.IsNullOrWhiteSpace(withdrawnElement.GetString()))
            {
                return true;
            }
        }
        if (content.TryGetProperty("withdrawn_at", out var withdrawnAtElement) &&
            withdrawnAtElement.ValueKind is JsonValueKind.String)
        {
            return !string.IsNullOrWhiteSpace(withdrawnAtElement.GetString());
        }
        return false;
    }
}

View File

@@ -7,7 +7,7 @@ using StellaOps.Concelier.Normalization.SemVer;
namespace StellaOps.Concelier.Core.Linksets;
internal static class AdvisoryLinksetNormalization
public static class AdvisoryLinksetNormalization
{
public static AdvisoryLinksetNormalized? FromRawLinkset(RawLinkset linkset)
{

View File

@@ -5,192 +5,194 @@ using StellaOps.Concelier.Models;
using StellaOps.Concelier.Models.Observations;
using StellaOps.Concelier.RawModels;
using StellaOps.Concelier.Core.Linksets;
namespace StellaOps.Concelier.Core.Observations;
/// <summary>
/// Default implementation of <see cref="IAdvisoryObservationQueryService"/> that projects raw observations for overlay consumers.
/// </summary>
public sealed class AdvisoryObservationQueryService : IAdvisoryObservationQueryService
{
private const int DefaultPageSize = 200;
private const int MaxPageSize = 500;
private readonly IAdvisoryObservationLookup _lookup;
public AdvisoryObservationQueryService(IAdvisoryObservationLookup lookup)
{
_lookup = lookup ?? throw new ArgumentNullException(nameof(lookup));
}
public async ValueTask<AdvisoryObservationQueryResult> QueryAsync(
AdvisoryObservationQueryOptions options,
CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(options);
cancellationToken.ThrowIfCancellationRequested();
var normalizedTenant = NormalizeTenant(options.Tenant);
var normalizedObservationIds = NormalizeSet(options.ObservationIds, static value => value, StringComparer.Ordinal);
using StellaOps.Concelier.Core.Diagnostics;
namespace StellaOps.Concelier.Core.Observations;
/// <summary>
/// Default implementation of <see cref="IAdvisoryObservationQueryService"/> that projects raw observations for overlay consumers.
/// </summary>
public sealed class AdvisoryObservationQueryService : IAdvisoryObservationQueryService
{
private const int DefaultPageSize = 200;
private const int MaxPageSize = 500;
private readonly IAdvisoryObservationLookup _lookup;
public AdvisoryObservationQueryService(IAdvisoryObservationLookup lookup)
{
_lookup = lookup ?? throw new ArgumentNullException(nameof(lookup));
}
public async ValueTask<AdvisoryObservationQueryResult> QueryAsync(
AdvisoryObservationQueryOptions options,
CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(options);
cancellationToken.ThrowIfCancellationRequested();
var normalizedTenant = NormalizeTenant(options.Tenant);
var normalizedObservationIds = NormalizeSet(options.ObservationIds, static value => value, StringComparer.Ordinal);
var normalizedAliases = NormalizeSet(options.Aliases, static value => value, StringComparer.OrdinalIgnoreCase);
var normalizedPurls = NormalizeSet(options.Purls, static value => value, StringComparer.Ordinal);
var normalizedCpes = NormalizeSet(options.Cpes, static value => value, StringComparer.Ordinal);
var limit = NormalizeLimit(options.Limit);
var fetchSize = checked(limit + 1);
var cursor = DecodeCursor(options.Cursor);
var observations = await _lookup
.FindByFiltersAsync(
normalizedTenant,
normalizedObservationIds,
normalizedAliases,
normalizedPurls,
normalizedCpes,
cursor,
fetchSize,
cancellationToken)
.ConfigureAwait(false);
var ordered = observations
.Where(observation => Matches(observation, normalizedObservationIds, normalizedAliases, normalizedPurls, normalizedCpes))
.OrderByDescending(static observation => observation.CreatedAt)
.ThenBy(static observation => observation.ObservationId, StringComparer.Ordinal)
.ToImmutableArray();
var hasMore = ordered.Length > limit;
var page = hasMore ? ordered.Take(limit).ToImmutableArray() : ordered;
var nextCursor = hasMore ? EncodeCursor(page[^1]) : null;
var linkset = BuildAggregateLinkset(page);
return new AdvisoryObservationQueryResult(page, linkset, nextCursor, hasMore);
}
private static bool Matches(
AdvisoryObservation observation,
ImmutableHashSet<string> observationIds,
ImmutableHashSet<string> aliases,
ImmutableHashSet<string> purls,
ImmutableHashSet<string> cpes)
{
ArgumentNullException.ThrowIfNull(observation);
if (observationIds.Count > 0 && !observationIds.Contains(observation.ObservationId))
{
return false;
}
if (aliases.Count > 0 && !observation.Linkset.Aliases.Any(aliases.Contains))
{
return false;
}
if (purls.Count > 0 && !observation.Linkset.Purls.Any(purls.Contains))
{
return false;
}
if (cpes.Count > 0 && !observation.Linkset.Cpes.Any(cpes.Contains))
{
return false;
}
return true;
}
private static string NormalizeTenant(string tenant)
=> Validation.EnsureNotNullOrWhiteSpace(tenant, nameof(tenant)).ToLowerInvariant();
private static ImmutableHashSet<string> NormalizeSet(
IEnumerable<string>? values,
Func<string, string> projector,
StringComparer comparer)
{
if (values is null)
{
return ImmutableHashSet<string>.Empty;
}
var builder = ImmutableHashSet.CreateBuilder<string>(comparer);
foreach (var value in values)
{
var normalized = Validation.TrimToNull(value);
if (normalized is null)
{
continue;
}
builder.Add(projector(normalized));
}
return builder.ToImmutable();
}
private static int NormalizeLimit(int? requestedLimit)
{
if (!requestedLimit.HasValue || requestedLimit.Value <= 0)
{
return DefaultPageSize;
}
var limit = requestedLimit.Value;
if (limit > MaxPageSize)
{
return MaxPageSize;
}
return limit;
}
private static AdvisoryObservationCursor? DecodeCursor(string? cursor)
{
if (string.IsNullOrWhiteSpace(cursor))
{
return null;
}
try
{
var decoded = Convert.FromBase64String(cursor.Trim());
var payload = Encoding.UTF8.GetString(decoded);
var separator = payload.IndexOf(':');
if (separator <= 0 || separator >= payload.Length - 1)
{
throw new FormatException("Cursor is malformed.");
}
var ticksText = payload.AsSpan(0, separator);
if (!long.TryParse(ticksText, NumberStyles.Integer, CultureInfo.InvariantCulture, out var ticks))
{
throw new FormatException("Cursor timestamp is invalid.");
}
var createdAt = new DateTimeOffset(DateTime.SpecifyKind(new DateTime(ticks), DateTimeKind.Utc));
var observationId = payload[(separator + 1)..];
if (string.IsNullOrWhiteSpace(observationId))
{
throw new FormatException("Cursor observation id is missing.");
}
return new AdvisoryObservationCursor(createdAt, observationId);
}
catch (FormatException)
{
throw;
}
catch (Exception ex)
{
throw new FormatException("Cursor is malformed.", ex);
}
}
private static string? EncodeCursor(AdvisoryObservation observation)
{
if (observation is null)
{
return null;
}
var normalizedPurls = NormalizeSet(options.Purls, static value => value, StringComparer.Ordinal);
var normalizedCpes = NormalizeSet(options.Cpes, static value => value, StringComparer.Ordinal);
var limit = NormalizeLimit(options.Limit);
var fetchSize = checked(limit + 1);
var cursor = DecodeCursor(options.Cursor);
var observations = await _lookup
.FindByFiltersAsync(
normalizedTenant,
normalizedObservationIds,
normalizedAliases,
normalizedPurls,
normalizedCpes,
cursor,
fetchSize,
cancellationToken)
.ConfigureAwait(false);
var ordered = observations
.Where(observation => Matches(observation, normalizedObservationIds, normalizedAliases, normalizedPurls, normalizedCpes))
.OrderByDescending(static observation => observation.CreatedAt)
.ThenBy(static observation => observation.ObservationId, StringComparer.Ordinal)
.ToImmutableArray();
var hasMore = ordered.Length > limit;
var page = hasMore ? ordered.Take(limit).ToImmutableArray() : ordered;
var nextCursor = hasMore ? EncodeCursor(page[^1]) : null;
var linkset = BuildAggregateLinkset(page);
RecordIdentifierCollisions(normalizedTenant, linkset);
return new AdvisoryObservationQueryResult(page, linkset, nextCursor, hasMore);
}
private static bool Matches(
AdvisoryObservation observation,
ImmutableHashSet<string> observationIds,
ImmutableHashSet<string> aliases,
ImmutableHashSet<string> purls,
ImmutableHashSet<string> cpes)
{
ArgumentNullException.ThrowIfNull(observation);
if (observationIds.Count > 0 && !observationIds.Contains(observation.ObservationId))
{
return false;
}
if (aliases.Count > 0 && !observation.Linkset.Aliases.Any(aliases.Contains))
{
return false;
}
if (purls.Count > 0 && !observation.Linkset.Purls.Any(purls.Contains))
{
return false;
}
if (cpes.Count > 0 && !observation.Linkset.Cpes.Any(cpes.Contains))
{
return false;
}
return true;
}
private static string NormalizeTenant(string tenant)
=> Validation.EnsureNotNullOrWhiteSpace(tenant, nameof(tenant)).ToLowerInvariant();
private static ImmutableHashSet<string> NormalizeSet(
IEnumerable<string>? values,
Func<string, string> projector,
StringComparer comparer)
{
if (values is null)
{
return ImmutableHashSet<string>.Empty;
}
var builder = ImmutableHashSet.CreateBuilder<string>(comparer);
foreach (var value in values)
{
var normalized = Validation.TrimToNull(value);
if (normalized is null)
{
continue;
}
builder.Add(projector(normalized));
}
return builder.ToImmutable();
}
private static int NormalizeLimit(int? requestedLimit)
{
if (!requestedLimit.HasValue || requestedLimit.Value <= 0)
{
return DefaultPageSize;
}
var limit = requestedLimit.Value;
if (limit > MaxPageSize)
{
return MaxPageSize;
}
return limit;
}
private static AdvisoryObservationCursor? DecodeCursor(string? cursor)
{
if (string.IsNullOrWhiteSpace(cursor))
{
return null;
}
try
{
var decoded = Convert.FromBase64String(cursor.Trim());
var payload = Encoding.UTF8.GetString(decoded);
var separator = payload.IndexOf(':');
if (separator <= 0 || separator >= payload.Length - 1)
{
throw new FormatException("Cursor is malformed.");
}
var ticksText = payload.AsSpan(0, separator);
if (!long.TryParse(ticksText, NumberStyles.Integer, CultureInfo.InvariantCulture, out var ticks))
{
throw new FormatException("Cursor timestamp is invalid.");
}
var createdAt = new DateTimeOffset(DateTime.SpecifyKind(new DateTime(ticks), DateTimeKind.Utc));
var observationId = payload[(separator + 1)..];
if (string.IsNullOrWhiteSpace(observationId))
{
throw new FormatException("Cursor observation id is missing.");
}
return new AdvisoryObservationCursor(createdAt, observationId);
}
catch (FormatException)
{
throw;
}
catch (Exception ex)
{
throw new FormatException("Cursor is malformed.", ex);
}
}
private static string? EncodeCursor(AdvisoryObservation observation)
{
if (observation is null)
{
return null;
}
var payload = $"{observation.CreatedAt.UtcTicks.ToString(CultureInfo.InvariantCulture)}:{observation.ObservationId}";
return Convert.ToBase64String(Encoding.UTF8.GetBytes(payload));
}
@@ -283,4 +285,18 @@ public sealed class AdvisoryObservationQueryService : IAdvisoryObservationQueryS
.ThenBy(static c => string.Join('|', c.Values ?? Array.Empty<string>()), StringComparer.Ordinal)
.ToImmutableArray());
}
private static void RecordIdentifierCollisions(string tenant, AdvisoryObservationLinksetAggregate linkset)
{
if (linkset.Conflicts.IsDefaultOrEmpty)
{
return;
}
var collisionCount = linkset.Conflicts.Count(conflict =>
string.Equals(conflict.Field, "aliases", StringComparison.OrdinalIgnoreCase) &&
conflict.Reason.Contains("alias", StringComparison.OrdinalIgnoreCase));
VulnExplorerTelemetry.RecordIdentifierCollisions(tenant, source: null, collisionCount);
}
}

View File

@@ -5,6 +5,7 @@ using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
using StellaOps.Concelier.Core.Diagnostics;
namespace StellaOps.Concelier.Core.Risk;
@@ -177,6 +178,7 @@ public sealed class AdvisoryFieldChangeEmitter : IAdvisoryFieldChangeEmitter
_logger.LogInformation(
"Emitted withdrawn observation notification for {ObservationId}",
previousSignal.ObservationId);
VulnExplorerTelemetry.RecordWithdrawnStatement(tenantId, previousSignal.Provenance.Vendor);
return notification;
}

View File

@@ -12,6 +12,7 @@
<PackageReference Include="Microsoft.Extensions.Options" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Hosting.Abstractions" Version="10.0.0" />
<PackageReference Include="Cronos" Version="0.10.0" />
<PackageReference Include="System.Diagnostics.DiagnosticSource" Version="9.0.0" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\StellaOps.Concelier.Models\StellaOps.Concelier.Models.csproj" />

View File

@@ -1,5 +1,6 @@
using System;
using System.Collections;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
@@ -66,6 +67,8 @@ namespace MongoDB.Driver
public class MongoDatabase : IMongoDatabase
{
private readonly ConcurrentDictionary<string, object> _collections = new(StringComparer.Ordinal);
public MongoDatabase(string name)
{
Name = name;
@@ -73,8 +76,17 @@ namespace MongoDB.Driver
}
public string Name { get; }
public DatabaseNamespace DatabaseNamespace { get; }
public IMongoCollection<TDocument> GetCollection<TDocument>(string name, MongoCollectionSettings? settings = null) => new MongoCollection<TDocument>(name);
public Task DropCollectionAsync(string name, CancellationToken cancellationToken = default) => Task.CompletedTask;
public IMongoCollection<TDocument> GetCollection<TDocument>(string name, MongoCollectionSettings? settings = null)
{
var collection = (MongoCollection<TDocument>)_collections.GetOrAdd(name, _ => new MongoCollection<TDocument>(name));
return collection;
}
public Task DropCollectionAsync(string name, CancellationToken cancellationToken = default)
{
_collections.TryRemove(name, out _);
return Task.CompletedTask;
}
public BsonDocument RunCommand(BsonDocument command, CancellationToken cancellationToken = default) => new();
public T RunCommand<T>(BsonDocument command, CancellationToken cancellationToken = default) => default!;
public Task<T> RunCommandAsync<T>(BsonDocument command, CancellationToken cancellationToken = default) => Task.FromResult(default(T)!);

View File

@@ -5,6 +5,7 @@
<Nullable>enable</Nullable>
<ImplicitUsings>enable</ImplicitUsings>
<TreatWarningsAsErrors>false</TreatWarningsAsErrors>
<EnableDefaultCompileItems>false</EnableDefaultCompileItems>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" />
@@ -12,4 +13,7 @@
<ItemGroup>
<ProjectReference Include="..\StellaOps.Concelier.RawModels\StellaOps.Concelier.RawModels.csproj" />
</ItemGroup>
<ItemGroup>
<Compile Include="**\*.cs" Exclude="bin\**;obj\**;out\**;bin2\**" />
</ItemGroup>
</Project>

View File

@@ -0,0 +1,69 @@
-- Concelier Migration 005: Postgres equivalents for DTO, export, PSIRT/JP flags, and change history.
-- All statements are idempotent (IF NOT EXISTS) so the migration can be re-run safely.

-- Dedicated schema for Concelier relational state.
CREATE SCHEMA IF NOT EXISTS concelier;

-- Normalized DTO payloads, one row per source document: document_id is the
-- primary key, so re-ingesting a document replaces its DTO in place.
CREATE TABLE IF NOT EXISTS concelier.dtos (
id UUID NOT NULL,
document_id UUID NOT NULL,
source_name TEXT NOT NULL,
format TEXT NOT NULL,
payload_json JSONB NOT NULL,
schema_version TEXT NOT NULL DEFAULT '',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
validated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT pk_concelier_dtos PRIMARY KEY (document_id)
);
-- Supports "latest DTOs per source" listings (newest first).
CREATE INDEX IF NOT EXISTS idx_concelier_dtos_source ON concelier.dtos(source_name, created_at DESC);

-- Exporter cursor/manifest state, one row per exporter id; files holds the
-- serialized export file manifest as JSON.
CREATE TABLE IF NOT EXISTS concelier.export_states (
id TEXT NOT NULL,
export_cursor TEXT NOT NULL,
last_full_digest TEXT,
last_delta_digest TEXT,
base_export_id TEXT,
base_digest TEXT,
target_repository TEXT,
files JSONB NOT NULL,
exporter_version TEXT NOT NULL,
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT pk_concelier_export_states PRIMARY KEY (id)
);

-- PSIRT vendor flags, keyed by (advisory, vendor) so multiple vendors can flag
-- the same advisory independently.
CREATE TABLE IF NOT EXISTS concelier.psirt_flags (
advisory_id TEXT NOT NULL,
vendor TEXT NOT NULL,
source_name TEXT NOT NULL,
external_id TEXT,
recorded_at TIMESTAMPTZ NOT NULL,
CONSTRAINT pk_concelier_psirt_flags PRIMARY KEY (advisory_id, vendor)
);
-- Supports "recent flags per source" queries (newest first).
CREATE INDEX IF NOT EXISTS idx_concelier_psirt_source ON concelier.psirt_flags(source_name, recorded_at DESC);

-- JP advisory flags, one row per advisory key (upserted on re-ingest).
CREATE TABLE IF NOT EXISTS concelier.jp_flags (
advisory_key TEXT NOT NULL,
source_name TEXT NOT NULL,
category TEXT NOT NULL,
vendor_status TEXT,
created_at TIMESTAMPTZ NOT NULL,
CONSTRAINT pk_concelier_jp_flags PRIMARY KEY (advisory_key)
);

-- Append-only change-history snapshots per advisory; snapshot/previous_snapshot
-- carry full document JSON, changes carries the computed field-level diff.
CREATE TABLE IF NOT EXISTS concelier.change_history (
id UUID NOT NULL,
source_name TEXT NOT NULL,
advisory_key TEXT NOT NULL,
document_id UUID NOT NULL,
document_hash TEXT NOT NULL,
snapshot_hash TEXT NOT NULL,
previous_snapshot_hash TEXT,
snapshot JSONB NOT NULL,
previous_snapshot JSONB,
changes JSONB NOT NULL,
created_at TIMESTAMPTZ NOT NULL,
CONSTRAINT pk_concelier_change_history PRIMARY KEY (id)
);
-- Supports "recent changes for an advisory" queries (newest first).
CREATE INDEX IF NOT EXISTS idx_concelier_change_history_advisory
ON concelier.change_history(advisory_key, created_at DESC);

View File

@@ -0,0 +1,96 @@
using System.Text.Json;
using Dapper;
using StellaOps.Concelier.Storage.Mongo.ChangeHistory;
namespace StellaOps.Concelier.Storage.Postgres.Repositories;
/// <summary>
/// Postgres-backed implementation of <see cref="IChangeHistoryStore"/> storing
/// advisory change-history snapshots in the concelier.change_history table.
/// </summary>
internal sealed class PostgresChangeHistoryStore : IChangeHistoryStore
{
    // Cached once per process (CA1869): JsonSerializerOptions carries internal
    // metadata caches that are wasteful to rebuild for every scoped store instance.
    private static readonly JsonSerializerOptions JsonOptions = new(JsonSerializerDefaults.General)
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase
    };

    private readonly ConcelierDataSource _dataSource;

    public PostgresChangeHistoryStore(ConcelierDataSource dataSource)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
    }

    /// <summary>
    /// Inserts a change-history entry. Re-inserting an existing id is a no-op
    /// (ON CONFLICT DO NOTHING), which makes replayed writes idempotent.
    /// </summary>
    public async Task AddAsync(ChangeHistoryRecord record, CancellationToken cancellationToken)
    {
        const string sql = """
            INSERT INTO concelier.change_history
                (id, source_name, advisory_key, document_id, document_hash, snapshot_hash, previous_snapshot_hash, snapshot, previous_snapshot, changes, created_at)
            VALUES (@Id, @SourceName, @AdvisoryKey, @DocumentId, @DocumentHash, @SnapshotHash, @PreviousSnapshotHash, @Snapshot, @PreviousSnapshot, @Changes, @CreatedAt)
            ON CONFLICT (id) DO NOTHING;
            """;

        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        await connection.ExecuteAsync(new CommandDefinition(sql, new
        {
            record.Id,
            record.SourceName,
            record.AdvisoryKey,
            record.DocumentId,
            record.DocumentHash,
            record.SnapshotHash,
            record.PreviousSnapshotHash,
            // NOTE(review): Snapshot/PreviousSnapshot are passed through as-is into
            // JSONB columns — assumes they already hold JSON text and that Dapper/Npgsql
            // maps string -> jsonb for this connection; confirm a type handler covers it.
            Snapshot = record.Snapshot,
            PreviousSnapshot = record.PreviousSnapshot,
            Changes = JsonSerializer.Serialize(record.Changes, JsonOptions),
            record.CreatedAt
        }, cancellationToken: cancellationToken));
    }

    /// <summary>
    /// Returns the most recent entries for a (source, advisory) pair,
    /// newest first, capped at <paramref name="limit"/>.
    /// </summary>
    public async Task<IReadOnlyList<ChangeHistoryRecord>> GetRecentAsync(string sourceName, string advisoryKey, int limit, CancellationToken cancellationToken)
    {
        const string sql = """
            SELECT id, source_name, advisory_key, document_id, document_hash, snapshot_hash, previous_snapshot_hash, snapshot, previous_snapshot, changes, created_at
            FROM concelier.change_history
            WHERE source_name = @SourceName AND advisory_key = @AdvisoryKey
            ORDER BY created_at DESC
            LIMIT @Limit;
            """;

        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        var rows = await connection.QueryAsync<ChangeHistoryRow>(new CommandDefinition(sql, new
        {
            SourceName = sourceName,
            AdvisoryKey = advisoryKey,
            Limit = limit
        }, cancellationToken: cancellationToken));

        return rows.Select(ToRecord).ToArray();
    }

    // Maps a database row back to the domain record. Nullable hash/snapshot
    // columns are coalesced to empty strings before construction — presumably
    // the record's contract is non-null strings; verify against its definition.
    private static ChangeHistoryRecord ToRecord(ChangeHistoryRow row)
    {
        var changes = JsonSerializer.Deserialize<IReadOnlyList<ChangeHistoryFieldChange>>(row.Changes, JsonOptions) ?? Array.Empty<ChangeHistoryFieldChange>();
        return new ChangeHistoryRecord(
            row.Id,
            row.SourceName,
            row.AdvisoryKey,
            row.DocumentId,
            row.DocumentHash,
            row.SnapshotHash,
            row.PreviousSnapshotHash ?? string.Empty,
            row.Snapshot,
            row.PreviousSnapshot ?? string.Empty,
            changes,
            row.CreatedAt);
    }

    // Dapper row shape mirroring the concelier.change_history columns.
    private sealed record ChangeHistoryRow(
        Guid Id,
        string SourceName,
        string AdvisoryKey,
        Guid DocumentId,
        string DocumentHash,
        string SnapshotHash,
        string? PreviousSnapshotHash,
        string Snapshot,
        string? PreviousSnapshot,
        string Changes,
        DateTimeOffset CreatedAt);
}

View File

@@ -0,0 +1,104 @@
using System.Text.Json;
using Dapper;
using StellaOps.Concelier.Storage.Mongo;
namespace StellaOps.Concelier.Storage.Postgres.Repositories;
/// <summary>
/// Postgres-backed implementation of <see cref="IDtoStore"/> persisting normalized
/// DTO payloads in the concelier.dtos table, keyed by source document id.
/// </summary>
internal sealed class PostgresDtoStore : IDtoStore
{
    // The original declared a JsonSerializerOptions field here, but the payload
    // round-trips via BsonDocument.ToJson/Parse, so the field was dead and removed.
    private readonly ConcelierDataSource _dataSource;

    public PostgresDtoStore(ConcelierDataSource dataSource)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
    }

    /// <summary>
    /// Inserts the DTO row or, when one already exists for the document id,
    /// refreshes its payload and metadata. Returns the stored record as read
    /// back from the database (RETURNING clause).
    /// </summary>
    public async Task<DtoRecord> UpsertAsync(DtoRecord record, CancellationToken cancellationToken)
    {
        const string sql = """
            INSERT INTO concelier.dtos (id, document_id, source_name, format, payload_json, schema_version, created_at, validated_at)
            VALUES (@Id, @DocumentId, @SourceName, @Format, @PayloadJson, @SchemaVersion, @CreatedAt, @ValidatedAt)
            ON CONFLICT (document_id) DO UPDATE
            SET payload_json = EXCLUDED.payload_json,
                schema_version = EXCLUDED.schema_version,
                source_name = EXCLUDED.source_name,
                format = EXCLUDED.format,
                validated_at = EXCLUDED.validated_at
            RETURNING id, document_id, source_name, format, payload_json, schema_version, created_at, validated_at;
            """;

        // NOTE(review): BsonDocument.ToJson can emit Mongo extended JSON; confirm
        // the output is always valid for the JSONB column for all payload shapes.
        var payloadJson = record.Payload.ToJson();

        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        var row = await connection.QuerySingleAsync<DtoRow>(new CommandDefinition(sql, new
        {
            record.Id,
            record.DocumentId,
            record.SourceName,
            record.Format,
            PayloadJson = payloadJson,
            record.SchemaVersion,
            record.CreatedAt,
            record.ValidatedAt
        }, cancellationToken: cancellationToken));

        return ToRecord(row);
    }

    /// <summary>Loads the DTO for a document id; null when absent.</summary>
    public async Task<DtoRecord?> FindByDocumentIdAsync(Guid documentId, CancellationToken cancellationToken)
    {
        const string sql = """
            SELECT id, document_id, source_name, format, payload_json, schema_version, created_at, validated_at
            FROM concelier.dtos
            WHERE document_id = @DocumentId
            LIMIT 1;
            """;

        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        var row = await connection.QuerySingleOrDefaultAsync<DtoRow>(new CommandDefinition(sql, new { DocumentId = documentId }, cancellationToken: cancellationToken));
        return row is null ? null : ToRecord(row);
    }

    /// <summary>
    /// Returns up to <paramref name="limit"/> DTOs for a source, newest first.
    /// </summary>
    public async Task<IReadOnlyList<DtoRecord>> GetBySourceAsync(string sourceName, int limit, CancellationToken cancellationToken)
    {
        const string sql = """
            SELECT id, document_id, source_name, format, payload_json, schema_version, created_at, validated_at
            FROM concelier.dtos
            WHERE source_name = @SourceName
            ORDER BY created_at DESC
            LIMIT @Limit;
            """;

        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        var rows = await connection.QueryAsync<DtoRow>(new CommandDefinition(sql, new { SourceName = sourceName, Limit = limit }, cancellationToken: cancellationToken));
        return rows.Select(ToRecord).ToArray();
    }

    // Maps a database row back to the domain record, re-hydrating the JSON
    // payload into a BsonDocument.
    private static DtoRecord ToRecord(DtoRow row)
    {
        var payload = MongoDB.Bson.BsonDocument.Parse(row.PayloadJson);
        return new DtoRecord(
            row.Id,
            row.DocumentId,
            row.SourceName,
            row.Format,
            payload,
            row.CreatedAt,
            row.SchemaVersion,
            row.ValidatedAt);
    }

    // Dapper row shape mirroring the concelier.dtos columns.
    private sealed record DtoRow(
        Guid Id,
        Guid DocumentId,
        string SourceName,
        string Format,
        string PayloadJson,
        string SchemaVersion,
        DateTimeOffset CreatedAt,
        DateTimeOffset ValidatedAt);
}

View File

@@ -0,0 +1,119 @@
using System.Text.Json;
using Dapper;
using StellaOps.Concelier.Storage.Mongo.Exporting;
namespace StellaOps.Concelier.Storage.Postgres.Repositories;
/// <summary>
/// Postgres-backed implementation of <see cref="IExportStateStore"/> persisting
/// exporter cursors and file manifests in the concelier.export_states table.
/// </summary>
internal sealed class PostgresExportStateStore : IExportStateStore
{
    // Cached once per process (CA1869): avoids rebuilding JsonSerializerOptions
    // (and its internal reflection caches) for every scoped store instance.
    private static readonly JsonSerializerOptions JsonOptions = new(JsonSerializerDefaults.General)
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase
    };

    private readonly ConcelierDataSource _dataSource;

    public PostgresExportStateStore(ConcelierDataSource dataSource)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
    }

    /// <summary>Loads an export state by id; null when no row exists.</summary>
    public async Task<ExportStateRecord?> FindAsync(string id, CancellationToken cancellationToken)
    {
        const string sql = """
            SELECT id,
                   export_cursor,
                   last_full_digest,
                   last_delta_digest,
                   base_export_id,
                   base_digest,
                   target_repository,
                   files,
                   exporter_version,
                   updated_at
            FROM concelier.export_states
            WHERE id = @Id
            LIMIT 1;
            """;

        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        var row = await connection.QuerySingleOrDefaultAsync<ExportStateRow>(new CommandDefinition(sql, new { Id = id }, cancellationToken: cancellationToken));
        return row is null ? null : ToRecord(row);
    }

    /// <summary>
    /// Inserts or fully replaces the export state for the record's id and
    /// returns the stored row as read back from the database.
    /// </summary>
    public async Task<ExportStateRecord> UpsertAsync(ExportStateRecord record, CancellationToken cancellationToken)
    {
        const string sql = """
            INSERT INTO concelier.export_states
                (id, export_cursor, last_full_digest, last_delta_digest, base_export_id, base_digest, target_repository, files, exporter_version, updated_at)
            VALUES (@Id, @ExportCursor, @LastFullDigest, @LastDeltaDigest, @BaseExportId, @BaseDigest, @TargetRepository, @Files, @ExporterVersion, @UpdatedAt)
            ON CONFLICT (id) DO UPDATE
            SET export_cursor = EXCLUDED.export_cursor,
                last_full_digest = EXCLUDED.last_full_digest,
                last_delta_digest = EXCLUDED.last_delta_digest,
                base_export_id = EXCLUDED.base_export_id,
                base_digest = EXCLUDED.base_digest,
                target_repository = EXCLUDED.target_repository,
                files = EXCLUDED.files,
                exporter_version = EXCLUDED.exporter_version,
                updated_at = EXCLUDED.updated_at
            RETURNING id,
                      export_cursor,
                      last_full_digest,
                      last_delta_digest,
                      base_export_id,
                      base_digest,
                      target_repository,
                      files,
                      exporter_version,
                      updated_at;
            """;

        // The file manifest is persisted as a single JSON document in the
        // JSONB "files" column.
        var filesJson = JsonSerializer.Serialize(record.Files, JsonOptions);

        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        var row = await connection.QuerySingleAsync<ExportStateRow>(new CommandDefinition(sql, new
        {
            record.Id,
            record.ExportCursor,
            record.LastFullDigest,
            record.LastDeltaDigest,
            record.BaseExportId,
            record.BaseDigest,
            record.TargetRepository,
            Files = filesJson,
            record.ExporterVersion,
            record.UpdatedAt
        }, cancellationToken: cancellationToken));

        return ToRecord(row);
    }

    // Maps a database row back to the domain record, deserializing the file
    // manifest (empty list when the column holds no entries).
    private static ExportStateRecord ToRecord(ExportStateRow row)
    {
        var files = JsonSerializer.Deserialize<IReadOnlyList<ExportFileRecord>>(row.Files, JsonOptions) ?? Array.Empty<ExportFileRecord>();
        return new ExportStateRecord(
            row.Id,
            row.ExportCursor,
            row.LastFullDigest,
            row.LastDeltaDigest,
            row.BaseExportId,
            row.BaseDigest,
            row.TargetRepository,
            files,
            row.ExporterVersion,
            row.UpdatedAt);
    }

    // Dapper row shape mirroring the concelier.export_states columns.
    private sealed record ExportStateRow(
        string Id,
        string ExportCursor,
        string? LastFullDigest,
        string? LastDeltaDigest,
        string? BaseExportId,
        string? BaseDigest,
        string? TargetRepository,
        string Files,
        string ExporterVersion,
        DateTimeOffset UpdatedAt);
}

View File

@@ -0,0 +1,58 @@
using Dapper;
using StellaOps.Concelier.Storage.Mongo.JpFlags;
namespace StellaOps.Concelier.Storage.Postgres.Repositories;
/// <summary>
/// Persists JP advisory flags in the concelier.jp_flags table
/// (one row per advisory key, replaced on upsert).
/// </summary>
internal sealed class PostgresJpFlagStore : IJpFlagStore
{
    private const string UpsertSql = """
        INSERT INTO concelier.jp_flags (advisory_key, source_name, category, vendor_status, created_at)
        VALUES (@AdvisoryKey, @SourceName, @Category, @VendorStatus, @CreatedAt)
        ON CONFLICT (advisory_key) DO UPDATE
        SET source_name = EXCLUDED.source_name,
            category = EXCLUDED.category,
            vendor_status = EXCLUDED.vendor_status,
            created_at = EXCLUDED.created_at;
        """;

    private const string FindSql = """
        SELECT advisory_key, source_name, category, vendor_status, created_at
        FROM concelier.jp_flags
        WHERE advisory_key = @AdvisoryKey
        LIMIT 1;
        """;

    private readonly ConcelierDataSource _dataSource;

    public PostgresJpFlagStore(ConcelierDataSource dataSource)
        => _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));

    /// <summary>Inserts or replaces the flag row keyed by advisory key.</summary>
    public async Task UpsertAsync(JpFlagRecord record, CancellationToken cancellationToken)
    {
        var args = new
        {
            record.AdvisoryKey,
            record.SourceName,
            record.Category,
            record.VendorStatus,
            record.CreatedAt
        };

        await using var conn = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        await conn.ExecuteAsync(new CommandDefinition(UpsertSql, args, cancellationToken: cancellationToken));
    }

    /// <summary>Looks up the flag row for an advisory key; null when absent.</summary>
    public async Task<JpFlagRecord?> FindAsync(string advisoryKey, CancellationToken cancellationToken)
    {
        await using var conn = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        var match = await conn.QuerySingleOrDefaultAsync<JpFlagRow>(
            new CommandDefinition(FindSql, new { AdvisoryKey = advisoryKey }, cancellationToken: cancellationToken));

        return match is null
            ? null
            : new JpFlagRecord(match.AdvisoryKey, match.SourceName, match.Category, match.VendorStatus, match.CreatedAt);
    }

    // Dapper row shape mirroring the concelier.jp_flags columns.
    private sealed record JpFlagRow(
        string AdvisoryKey,
        string SourceName,
        string Category,
        string? VendorStatus,
        DateTimeOffset CreatedAt);
}

View File

@@ -0,0 +1,76 @@
using Dapper;
using StellaOps.Concelier.Storage.Mongo.PsirtFlags;
namespace StellaOps.Concelier.Storage.Postgres.Repositories;
/// <summary>
/// Persists PSIRT vendor flags in the concelier.psirt_flags table
/// (one row per advisory/vendor pair, replaced on upsert).
/// </summary>
internal sealed class PostgresPsirtFlagStore : IPsirtFlagStore
{
    private const string UpsertSql = """
        INSERT INTO concelier.psirt_flags (advisory_id, vendor, source_name, external_id, recorded_at)
        VALUES (@AdvisoryId, @Vendor, @SourceName, @ExternalId, @RecordedAt)
        ON CONFLICT (advisory_id, vendor) DO UPDATE
        SET source_name = EXCLUDED.source_name,
            external_id = EXCLUDED.external_id,
            recorded_at = EXCLUDED.recorded_at;
        """;

    private const string RecentSql = """
        SELECT advisory_id, vendor, source_name, external_id, recorded_at
        FROM concelier.psirt_flags
        WHERE advisory_id = @AdvisoryId
        ORDER BY recorded_at DESC
        LIMIT @Limit;
        """;

    private const string LatestSql = """
        SELECT advisory_id, vendor, source_name, external_id, recorded_at
        FROM concelier.psirt_flags
        WHERE advisory_id = @AdvisoryId
        ORDER BY recorded_at DESC
        LIMIT 1;
        """;

    private readonly ConcelierDataSource _dataSource;

    public PostgresPsirtFlagStore(ConcelierDataSource dataSource)
        => _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));

    /// <summary>Inserts or refreshes the flag row for the (advisory, vendor) pair.</summary>
    public async Task UpsertAsync(PsirtFlagRecord flag, CancellationToken cancellationToken)
    {
        var args = new
        {
            flag.AdvisoryId,
            flag.Vendor,
            flag.SourceName,
            flag.ExternalId,
            flag.RecordedAt
        };

        await using var conn = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        await conn.ExecuteAsync(new CommandDefinition(UpsertSql, args, cancellationToken: cancellationToken));
    }

    /// <summary>Returns up to <paramref name="limit"/> flags for the advisory, newest first.</summary>
    public async Task<IReadOnlyList<PsirtFlagRecord>> GetRecentAsync(string advisoryKey, int limit, CancellationToken cancellationToken)
    {
        await using var conn = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        var rows = await conn.QueryAsync<PsirtFlagRow>(
            new CommandDefinition(RecentSql, new { AdvisoryId = advisoryKey, Limit = limit }, cancellationToken: cancellationToken));

        return rows.Select(Map).ToArray();
    }

    /// <summary>Returns the most recently recorded flag for the advisory, or null.</summary>
    public async Task<PsirtFlagRecord?> FindAsync(string advisoryKey, CancellationToken cancellationToken)
    {
        await using var conn = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        var row = await conn.QuerySingleOrDefaultAsync<PsirtFlagRow>(
            new CommandDefinition(LatestSql, new { AdvisoryId = advisoryKey }, cancellationToken: cancellationToken));

        return row is null ? null : Map(row);
    }

    // Converts a database row into the domain record.
    private static PsirtFlagRecord Map(PsirtFlagRow row)
        => new(row.AdvisoryId, row.Vendor, row.SourceName, row.ExternalId, row.RecordedAt);

    // Dapper row shape mirroring the concelier.psirt_flags columns.
    private sealed record PsirtFlagRow(
        string AdvisoryId,
        string Vendor,
        string SourceName,
        string? ExternalId,
        DateTimeOffset RecordedAt);
}

View File

@@ -7,6 +7,10 @@ using StellaOps.Infrastructure.Postgres.Options;
using StellaOps.Concelier.Core.Linksets;
using MongoContracts = StellaOps.Concelier.Storage.Mongo;
using MongoAdvisories = StellaOps.Concelier.Storage.Mongo.Advisories;
using MongoExporting = StellaOps.Concelier.Storage.Mongo.Exporting;
using MongoJpFlags = StellaOps.Concelier.Storage.Mongo.JpFlags;
using MongoPsirt = StellaOps.Concelier.Storage.Mongo.PsirtFlags;
using MongoHistory = StellaOps.Concelier.Storage.Mongo.ChangeHistory;
namespace StellaOps.Concelier.Storage.Postgres;
@@ -51,6 +55,11 @@ public static class ServiceCollectionExtensions
services.AddScoped<IAdvisoryLinksetStore, AdvisoryLinksetCacheRepository>();
services.AddScoped<IAdvisoryLinksetLookup>(sp => sp.GetRequiredService<IAdvisoryLinksetStore>());
services.AddScoped<MongoContracts.IDocumentStore, PostgresDocumentStore>();
services.AddScoped<MongoContracts.IDtoStore, PostgresDtoStore>();
services.AddScoped<MongoExporting.IExportStateStore, PostgresExportStateStore>();
services.AddScoped<MongoPsirt.IPsirtFlagStore, PostgresPsirtFlagStore>();
services.AddScoped<MongoJpFlags.IJpFlagStore, PostgresJpFlagStore>();
services.AddScoped<MongoHistory.IChangeHistoryStore, PostgresChangeHistoryStore>();
return services;
}
@@ -89,6 +98,11 @@ public static class ServiceCollectionExtensions
services.AddScoped<IAdvisoryLinksetStore, AdvisoryLinksetCacheRepository>();
services.AddScoped<IAdvisoryLinksetLookup>(sp => sp.GetRequiredService<IAdvisoryLinksetStore>());
services.AddScoped<MongoContracts.IDocumentStore, PostgresDocumentStore>();
services.AddScoped<MongoContracts.IDtoStore, PostgresDtoStore>();
services.AddScoped<MongoExporting.IExportStateStore, PostgresExportStateStore>();
services.AddScoped<MongoPsirt.IPsirtFlagStore, PostgresPsirtFlagStore>();
services.AddScoped<MongoJpFlags.IJpFlagStore, PostgresJpFlagStore>();
services.AddScoped<MongoHistory.IChangeHistoryStore, PostgresChangeHistoryStore>();
return services;
}

View File

@@ -30,6 +30,7 @@
<ItemGroup>
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
<ProjectReference Include="..\StellaOps.Concelier.Core\StellaOps.Concelier.Core.csproj" />
<ProjectReference Include="..\StellaOps.Concelier.Models\StellaOps.Concelier.Models.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.DependencyInjection\StellaOps.DependencyInjection.csproj" />
</ItemGroup>

View File

@@ -1,81 +1,60 @@
using System;
using System.IO;
using System.Linq;
using MongoDB.Bson;
using System.Threading;
using System.Threading.Tasks;
using Mongo2Go;
using Xunit;
using MongoDB.Driver;
using Xunit;
namespace StellaOps.Concelier.Testing;
public sealed class MongoIntegrationFixture : IAsyncLifetime
{
public MongoDbRunner Runner { get; private set; } = null!;
public IMongoDatabase Database { get; private set; } = null!;
public IMongoClient Client { get; private set; } = null!;
public Task InitializeAsync()
/// <summary>
/// In-memory stand-in for the legacy Mongo2Go fixture. No external processes are launched;
/// DropDatabaseAsync simply resets the backing in-memory collections.
/// </summary>
public sealed class MongoIntegrationFixture : IAsyncLifetime
{
private readonly FixtureMongoClient _client;
private MongoDatabase _database;
public MongoIntegrationFixture()
{
EnsureMongo2GoEnvironment();
Runner = MongoDbRunner.Start(singleNodeReplSet: true);
Client = new MongoClient(Runner.ConnectionString);
Database = Client.GetDatabase($"concelier-tests-{Guid.NewGuid():N}");
return Task.CompletedTask;
}
public Task DisposeAsync()
{
Runner.Dispose();
return Task.CompletedTask;
_client = new FixtureMongoClient(this);
Runner = MongoDbRunner.Start(singleNodeReplSet: false);
_database = CreateDatabase();
}
private static void EnsureMongo2GoEnvironment()
public MongoDbRunner Runner { get; }
public IMongoDatabase Database => _database;
public IMongoClient Client => _client;
public Task InitializeAsync() => Task.CompletedTask;
public Task DisposeAsync() => Task.CompletedTask;
internal void Reset()
{
if (!OperatingSystem.IsLinux())
{
return;
}
var libraryPath = ResolveOpenSslLibraryPath();
if (libraryPath is null)
{
return;
}
var existing = Environment.GetEnvironmentVariable("LD_LIBRARY_PATH");
if (string.IsNullOrEmpty(existing))
{
Environment.SetEnvironmentVariable("LD_LIBRARY_PATH", libraryPath);
return;
}
var segments = existing.Split(':', StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries);
if (!segments.Contains(libraryPath, StringComparer.Ordinal))
{
Environment.SetEnvironmentVariable("LD_LIBRARY_PATH", string.Join(':', new[] { libraryPath }.Concat(segments)));
}
_database = CreateDatabase();
}
private static string? ResolveOpenSslLibraryPath()
private MongoDatabase CreateDatabase() => new($"concelier-tests-{Guid.NewGuid():N}");
private sealed class FixtureMongoClient : IMongoClient
{
var current = AppContext.BaseDirectory;
while (!string.IsNullOrEmpty(current))
private readonly MongoIntegrationFixture _fixture;
public FixtureMongoClient(MongoIntegrationFixture fixture)
{
var candidate = Path.Combine(current, "tools", "openssl", "linux-x64");
if (Directory.Exists(candidate))
{
return candidate;
}
var parent = Directory.GetParent(current);
if (parent is null)
{
break;
}
current = parent.FullName;
_fixture = fixture;
}
return null;
public IMongoDatabase GetDatabase(string name, MongoDatabaseSettings? settings = null) => _fixture.Database;
public Task DropDatabaseAsync(string name, CancellationToken cancellationToken = default)
{
_fixture.Reset();
return Task.CompletedTask;
}
}
}

View File

@@ -0,0 +1,211 @@
using System;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Diagnostics.Metrics;
using System.Linq;
using System.Text.Json.Nodes;
using System.Threading;
using System.Threading.Tasks;
using StellaOps.Concelier.Core.Diagnostics;
using StellaOps.Concelier.Core.Observations;
using StellaOps.Concelier.Models.Observations;
using StellaOps.Concelier.RawModels;
using Xunit;
namespace StellaOps.Concelier.Core.Tests.Diagnostics;
/// <summary>
/// Verifies the Vuln Explorer instruments emitted by <see cref="VulnExplorerTelemetry"/>
/// and by the observation query path, using an in-process <see cref="MeterListener"/>.
/// Fix over the original: the listener is now disposed via <c>using</c>, so it no
/// longer leaks when the act phase throws or an assertion fires early.
/// </summary>
public sealed class VulnExplorerTelemetryTests
{
    private static readonly AdvisoryObservationSource DefaultSource = new("ghsa", "stream", "https://example.test/api");
    private static readonly AdvisoryObservationSignature DefaultSignature = new(false, null, null, null);

    [Fact]
    public async Task QueryAsync_RecordsIdentifierCollisionMetric()
    {
        var (listener, measurements) = CreateListener(
            VulnExplorerTelemetry.MeterName,
            "vuln.identifier_collisions_total");

        // Dispose deterministically before asserting (and even if the act throws).
        using (listener)
        {
            var observations = new[]
            {
                CreateObservation(
                    "tenant-a:ghsa:1",
                    "tenant-a",
                    aliases: new[] { "CVE-2025-0001" }),
                CreateObservation(
                    "tenant-a:osv:2",
                    "tenant-a",
                    aliases: new[] { "GHSA-aaaa-bbbb-cccc" })
            };

            var service = new AdvisoryObservationQueryService(new TestObservationLookup(observations));
            await service.QueryAsync(new AdvisoryObservationQueryOptions("tenant-a"), CancellationToken.None);
        }

        var collision = measurements.Single(m => m.Instrument == "vuln.identifier_collisions_total");
        Assert.Equal(1, collision.Value);
        Assert.Equal("tenant-a", collision.Tags.Single(t => t.Key == "tenant").Value);
    }

    [Fact]
    public void RecordChunkRequest_EmitsCounterAndLatency()
    {
        var (listener, measurements) = CreateListener(
            VulnExplorerTelemetry.MeterName,
            "vuln.chunk_requests_total",
            "vuln.chunk_latency_ms");

        using (listener)
        {
            VulnExplorerTelemetry.RecordChunkRequest("tenant-a", "ok", cacheHit: true, chunkCount: 3, latencyMs: 42.5);
        }

        Assert.Equal(1, measurements.Single(m => m.Instrument == "vuln.chunk_requests_total").Value);
        Assert.Equal(42.5, measurements.Single(m => m.Instrument == "vuln.chunk_latency_ms").Value);
    }

    [Fact]
    public void RecordWithdrawnStatement_EmitsCounter()
    {
        var (listener, measurements) = CreateListener(
            VulnExplorerTelemetry.MeterName,
            "vuln.withdrawn_statements_total");

        using (listener)
        {
            VulnExplorerTelemetry.RecordWithdrawnStatement("tenant-a", "nvd");
        }

        var withdrawn = measurements.Single(m => m.Instrument == "vuln.withdrawn_statements_total");
        Assert.Equal(1, withdrawn.Value);
        Assert.Equal("tenant-a", withdrawn.Tags.Single(t => t.Key == "tenant").Value);
        Assert.Equal("nvd", withdrawn.Tags.Single(t => t.Key == "source").Value);
    }

    // Builds a minimal observation carrying only the given aliases; every other
    // field is a deterministic placeholder.
    private static AdvisoryObservation CreateObservation(
        string observationId,
        string tenant,
        IEnumerable<string>? aliases = null)
    {
        var upstream = new AdvisoryObservationUpstream(
            upstreamId: $"upstream-{observationId}",
            documentVersion: null,
            fetchedAt: DateTimeOffset.UtcNow,
            receivedAt: DateTimeOffset.UtcNow,
            contentHash: "sha256:d41d8cd98f00b204e9800998ecf8427e",
            signature: DefaultSignature);

        var content = new AdvisoryObservationContent(
            "json",
            "1.0",
            new JsonObject());

        var aliasArray = aliases?.ToImmutableArray() ?? ImmutableArray<string>.Empty;

        var linkset = new AdvisoryObservationLinkset(
            aliasArray,
            Enumerable.Empty<string>(),
            Enumerable.Empty<string>(),
            Enumerable.Empty<AdvisoryObservationReference>());

        var rawLinkset = new RawLinkset
        {
            Aliases = aliasArray
        };

        return new AdvisoryObservation(
            observationId,
            tenant,
            DefaultSource,
            upstream,
            content,
            linkset,
            rawLinkset,
            DateTimeOffset.UtcNow);
    }

    // Starts a MeterListener that records only the named instruments of the
    // named meter into the returned list. Caller owns disposal of the listener.
    private static (MeterListener Listener, List<MeasurementRecord> Measurements) CreateListener(
        string meterName,
        params string[] instruments)
    {
        var measurements = new List<MeasurementRecord>();
        var instrumentSet = instruments.ToHashSet(StringComparer.Ordinal);

        var listener = new MeterListener
        {
            InstrumentPublished = (instrument, meterListener) =>
            {
                if (string.Equals(instrument.Meter.Name, meterName, StringComparison.Ordinal) &&
                    instrumentSet.Contains(instrument.Name))
                {
                    meterListener.EnableMeasurementEvents(instrument);
                }
            }
        };

        // Counters surface as long, histograms as double; both funnel into the
        // same measurement list.
        listener.SetMeasurementEventCallback<long>((instrument, measurement, tags, state) =>
        {
            if (instrumentSet.Contains(instrument.Name))
            {
                measurements.Add(new MeasurementRecord(instrument.Name, measurement, CopyTags(tags)));
            }
        });

        listener.SetMeasurementEventCallback<double>((instrument, measurement, tags, state) =>
        {
            if (instrumentSet.Contains(instrument.Name))
            {
                measurements.Add(new MeasurementRecord(instrument.Name, measurement, CopyTags(tags)));
            }
        });

        listener.Start();
        return (listener, measurements);
    }

    // Materializes the span of tags into a list (spans cannot escape the callback).
    private static IReadOnlyList<KeyValuePair<string, object?>> CopyTags(ReadOnlySpan<KeyValuePair<string, object?>> tags)
    {
        var list = new List<KeyValuePair<string, object?>>(tags.Length);
        foreach (var tag in tags)
        {
            list.Add(tag);
        }

        return list;
    }

    private sealed record MeasurementRecord(string Instrument, double Value, IReadOnlyList<KeyValuePair<string, object?>> Tags);

    // Minimal in-memory lookup used to drive AdvisoryObservationQueryService.
    private sealed class TestObservationLookup : IAdvisoryObservationLookup
    {
        private readonly IReadOnlyList<AdvisoryObservation> _observations;

        public TestObservationLookup(IReadOnlyList<AdvisoryObservation> observations)
        {
            _observations = observations;
        }

        public ValueTask<IReadOnlyList<AdvisoryObservation>> ListByTenantAsync(string tenant, CancellationToken cancellationToken)
        {
            var matches = _observations
                .Where(o => string.Equals(o.Tenant, tenant, StringComparison.OrdinalIgnoreCase))
                .ToList();
            return ValueTask.FromResult<IReadOnlyList<AdvisoryObservation>>(matches);
        }

        public ValueTask<IReadOnlyList<AdvisoryObservation>> FindByFiltersAsync(
            string tenant,
            IReadOnlyCollection<string> observationIds,
            IReadOnlyCollection<string> aliases,
            IReadOnlyCollection<string> purls,
            IReadOnlyCollection<string> cpes,
            AdvisoryObservationCursor? cursor,
            int limit,
            CancellationToken cancellationToken)
        {
            // Filters other than tenant/limit are deliberately ignored by this stub.
            var matches = _observations
                .Where(o => string.Equals(o.Tenant, tenant, StringComparison.OrdinalIgnoreCase))
                .Take(limit)
                .ToList();
            return ValueTask.FromResult<IReadOnlyList<AdvisoryObservation>>(matches);
        }
    }
}

View File

@@ -0,0 +1,95 @@
using System;
using System.Collections.Generic;
using System.Diagnostics.Metrics;
using System.Text.Json;
using StellaOps.Concelier.Core.Diagnostics;
using StellaOps.Concelier.Core.Linksets;
using Xunit;
namespace StellaOps.Concelier.WebService.Tests;
/// <summary>
/// Exercises the helper logic and instrument emission of
/// <see cref="VulnExplorerTelemetry"/> via an in-process MeterListener.
/// xUnit creates a fresh instance per test, so each test observes only
/// measurements produced during its own run.
/// </summary>
public sealed class VulnExplorerTelemetryTests : IDisposable
{
    // Listener lives for the duration of one test instance; disposed in Dispose().
    private readonly MeterListener _listener;
    // Double measurements (histograms such as vuln.chunk_latency_ms).
    private readonly List<(string Name, double Value, KeyValuePair<string, object?>[] Tags)> _histogramMeasurements = new();
    // Long measurements (counters such as vuln.withdrawn_statements_total).
    private readonly List<(string Name, long Value, KeyValuePair<string, object?>[] Tags)> _counterMeasurements = new();

    public VulnExplorerTelemetryTests()
    {
        _listener = new MeterListener
        {
            InstrumentPublished = (instrument, listener) =>
            {
                // Subscribe only to instruments published by the Vuln Explorer meter.
                if (instrument.Meter.Name == VulnExplorerTelemetry.MeterName)
                {
                    listener.EnableMeasurementEvents(instrument);
                }
            }
        };

        _listener.SetMeasurementEventCallback<long>((instrument, measurement, tags, state) =>
        {
            if (instrument.Meter.Name == VulnExplorerTelemetry.MeterName)
            {
                // Tags arrive as a span; materialize before the callback returns.
                _counterMeasurements.Add((instrument.Name, measurement, tags.ToArray()));
            }
        });

        _listener.SetMeasurementEventCallback<double>((instrument, measurement, tags, state) =>
        {
            if (instrument.Meter.Name == VulnExplorerTelemetry.MeterName)
            {
                _histogramMeasurements.Add((instrument.Name, measurement, tags.ToArray()));
            }
        });

        _listener.Start();
    }

    [Fact]
    public void CountAliasCollisions_FiltersAliasConflicts()
    {
        // Two of the three conflicts carry an "alias-inconsistency" reason
        // (matched case-insensitively); the range conflict must be excluded.
        var conflicts = new List<AdvisoryLinksetConflict>
        {
            new("aliases", "alias-inconsistency", Array.Empty<string>()),
            new("ranges", "range-divergence", Array.Empty<string>()),
            new("alias-field", "ALIAS-INCONSISTENCY", Array.Empty<string>())
        };

        var count = VulnExplorerTelemetry.CountAliasCollisions(conflicts);

        Assert.Equal(2, count);
    }

    [Fact]
    public void IsWithdrawn_DetectsWithdrawnFlagsAndTimestamps()
    {
        // A payload with both the boolean flag and a timestamp must be withdrawn.
        using var json = JsonDocument.Parse("{\"withdrawn\":true,\"withdrawn_at\":\"2024-10-10T00:00:00Z\"}");

        Assert.True(VulnExplorerTelemetry.IsWithdrawn(json.RootElement));
    }

    [Fact]
    public void RecordChunkLatency_EmitsHistogramMeasurement()
    {
        VulnExplorerTelemetry.RecordChunkLatency("tenant-a", "vendor-a", TimeSpan.FromMilliseconds(42));

        var measurement = Assert.Single(_histogramMeasurements);
        Assert.Equal("vuln.chunk_latency_ms", measurement.Name);
        Assert.Equal(42, measurement.Value);
    }

    [Fact]
    public void RecordWithdrawnStatement_EmitsCounter()
    {
        VulnExplorerTelemetry.RecordWithdrawnStatement("tenant-b", "vendor-b");

        var measurement = Assert.Single(_counterMeasurements);
        Assert.Equal("vuln.withdrawn_statements_total", measurement.Name);
        Assert.Equal(1, measurement.Value);
    }

    // Stops the listener so callbacks do not outlive the test instance.
    public void Dispose()
    {
        _listener.Dispose();
    }
}

View File

@@ -75,16 +75,7 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime
public Task InitializeAsync()
{
PrepareMongoEnvironment();
if (TryStartExternalMongo(out var externalConnectionString) && !string.IsNullOrWhiteSpace(externalConnectionString))
{
_factory = new ConcelierApplicationFactory(externalConnectionString);
}
else
{
_runner = MongoDbRunner.Start(singleNodeReplSet: true);
_factory = new ConcelierApplicationFactory(_runner.ConnectionString);
}
_factory = new ConcelierApplicationFactory(string.Empty);
WarmupFactory(_factory);
return Task.CompletedTask;
}
@@ -92,30 +83,6 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime
public Task DisposeAsync()
{
_factory.Dispose();
if (_externalMongo is not null)
{
try
{
if (!_externalMongo.HasExited)
{
_externalMongo.Kill(true);
_externalMongo.WaitForExit(2000);
}
}
catch
{
// ignore cleanup errors in tests
}
if (!string.IsNullOrEmpty(_externalMongoDataPath) && Directory.Exists(_externalMongoDataPath))
{
try { Directory.Delete(_externalMongoDataPath, recursive: true); } catch { /* ignore */ }
}
}
else
{
_runner.Dispose();
}
return Task.CompletedTask;
}
@@ -141,12 +108,12 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime
var healthPayload = await healthResponse.Content.ReadFromJsonAsync<HealthPayload>();
Assert.NotNull(healthPayload);
Assert.Equal("healthy", healthPayload!.Status);
Assert.Equal("mongo", healthPayload.Storage.Driver);
Assert.Equal("postgres", healthPayload.Storage.Backend);
var readyPayload = await readyResponse.Content.ReadFromJsonAsync<ReadyPayload>();
Assert.NotNull(readyPayload);
Assert.Equal("ready", readyPayload!.Status);
Assert.Equal("ready", readyPayload.Mongo.Status);
Assert.True(readyPayload!.Status is "ready" or "degraded");
Assert.Equal("postgres", readyPayload.Storage.Backend);
}
[Fact]
@@ -2019,9 +1986,10 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime
private sealed class ConcelierApplicationFactory : WebApplicationFactory<Program>
{
private readonly string _connectionString;
private readonly string? _previousDsn;
private readonly string? _previousDriver;
private readonly string? _previousTimeout;
private readonly string? _previousPgDsn;
private readonly string? _previousPgEnabled;
private readonly string? _previousPgTimeout;
private readonly string? _previousPgSchema;
private readonly string? _previousTelemetryEnabled;
private readonly string? _previousTelemetryLogging;
private readonly string? _previousTelemetryTracing;
@@ -2035,11 +2003,15 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime
Action<ConcelierOptions.AuthorityOptions>? authorityConfigure = null,
IDictionary<string, string?>? environmentOverrides = null)
{
_connectionString = connectionString;
var defaultPostgresDsn = "Host=localhost;Port=5432;Database=concelier_test;Username=postgres;Password=postgres";
_connectionString = string.IsNullOrWhiteSpace(connectionString) || connectionString.StartsWith("mongodb://", StringComparison.OrdinalIgnoreCase)
? defaultPostgresDsn
: connectionString;
_authorityConfigure = authorityConfigure;
_previousDsn = Environment.GetEnvironmentVariable("CONCELIER_STORAGE__DSN");
_previousDriver = Environment.GetEnvironmentVariable("CONCELIER_STORAGE__DRIVER");
_previousTimeout = Environment.GetEnvironmentVariable("CONCELIER_STORAGE__COMMANDTIMEOUTSECONDS");
_previousPgDsn = Environment.GetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__CONNECTIONSTRING");
_previousPgEnabled = Environment.GetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__ENABLED");
_previousPgTimeout = Environment.GetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__COMMANDTIMEOUTSECONDS");
_previousPgSchema = Environment.GetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__SCHEMANAME");
_previousTelemetryEnabled = Environment.GetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLED");
_previousTelemetryLogging = Environment.GetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLELOGGING");
_previousTelemetryTracing = Environment.GetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLETRACING");
@@ -2055,13 +2027,15 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime
Environment.SetEnvironmentVariable("LD_LIBRARY_PATH", merged);
}
Environment.SetEnvironmentVariable("CONCELIER_STORAGE__DSN", connectionString);
Environment.SetEnvironmentVariable("CONCELIER_STORAGE__DRIVER", "mongo");
Environment.SetEnvironmentVariable("CONCELIER_STORAGE__COMMANDTIMEOUTSECONDS", "30");
Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__CONNECTIONSTRING", _connectionString);
Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__ENABLED", "true");
Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__COMMANDTIMEOUTSECONDS", "30");
Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__SCHEMANAME", "vuln");
Environment.SetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLED", "false");
Environment.SetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLELOGGING", "false");
Environment.SetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLETRACING", "false");
Environment.SetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLEMETRICS", "false");
Environment.SetEnvironmentVariable("CONCELIER_SKIP_OPTIONS_VALIDATION", "1");
const string EvidenceRootKey = "CONCELIER_EVIDENCE__ROOT";
var repoRoot = Path.GetFullPath(Path.Combine(AppContext.BaseDirectory, "..", "..", "..", "..", "..", "..", ".."));
_additionalPreviousEnvironment[EvidenceRootKey] = Environment.GetEnvironmentVariable(EvidenceRootKey);
@@ -2176,9 +2150,11 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime
protected override void Dispose(bool disposing)
{
base.Dispose(disposing);
Environment.SetEnvironmentVariable("CONCELIER_STORAGE__DSN", _previousDsn);
Environment.SetEnvironmentVariable("CONCELIER_STORAGE__DRIVER", _previousDriver);
Environment.SetEnvironmentVariable("CONCELIER_STORAGE__COMMANDTIMEOUTSECONDS", _previousTimeout);
Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__CONNECTIONSTRING", _previousPgDsn);
Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__ENABLED", _previousPgEnabled);
Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__COMMANDTIMEOUTSECONDS", _previousPgTimeout);
Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__SCHEMANAME", _previousPgSchema);
Environment.SetEnvironmentVariable("CONCELIER_SKIP_OPTIONS_VALIDATION", null);
Environment.SetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLED", _previousTelemetryEnabled);
Environment.SetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLELOGGING", _previousTelemetryLogging);
Environment.SetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLETRACING", _previousTelemetryTracing);
@@ -2470,13 +2446,11 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime
private sealed record HealthPayload(string Status, DateTimeOffset StartedAt, double UptimeSeconds, StoragePayload Storage, TelemetryPayload Telemetry);
private sealed record StoragePayload(string Driver, bool Completed, DateTimeOffset? CompletedAt, double? DurationMs);
private sealed record StoragePayload(string Backend, bool Ready, DateTimeOffset? CheckedAt, double? LatencyMs, string? Error);
private sealed record TelemetryPayload(bool Enabled, bool Tracing, bool Metrics, bool Logging);
private sealed record ReadyPayload(string Status, DateTimeOffset StartedAt, double UptimeSeconds, ReadyMongoPayload Mongo);
private sealed record ReadyMongoPayload(string Status, double? LatencyMs, DateTimeOffset? CheckedAt, string? Error);
private sealed record ReadyPayload(string Status, DateTimeOffset StartedAt, double UptimeSeconds, StoragePayload Storage);
private sealed record JobDefinitionPayload(string Kind, bool Enabled, string? CronExpression, TimeSpan Timeout, TimeSpan LeaseDuration, JobRunPayload? LastRun);

View File

@@ -9,7 +9,7 @@ using Microsoft.Extensions.Options;
using MongoDB.Bson;
using MongoDB.Driver;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
using StellaOps.Excititor.WebService.Services;
@@ -27,7 +27,7 @@ public static class AttestationEndpoints
// GET /attestations/vex/list - List attestations
app.MapGet("/attestations/vex/list", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IMongoDatabase database,
TimeProvider timeProvider,
[FromQuery] int? limit,
@@ -102,7 +102,7 @@ public static class AttestationEndpoints
app.MapGet("/attestations/vex/{attestationId}", async (
HttpContext context,
string attestationId,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexAttestationLinkStore attestationStore,
TimeProvider timeProvider,
CancellationToken cancellationToken) =>
@@ -209,7 +209,7 @@ public static class AttestationEndpoints
// GET /attestations/vex/lookup - Lookup attestations by linkset or observation
app.MapGet("/attestations/vex/lookup", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IMongoDatabase database,
TimeProvider timeProvider,
[FromQuery] string? linksetId,
@@ -283,7 +283,7 @@ public static class AttestationEndpoints
BuilderId: doc.GetValue("SupplierId", BsonNull.Value).AsString);
}
private static bool TryResolveTenant(HttpContext context, VexMongoStorageOptions options, out string tenant, out IResult? problem)
private static bool TryResolveTenant(HttpContext context, VexStorageOptions options, out string tenant, out IResult? problem)
{
tenant = options.DefaultTenant;
problem = null;

View File

@@ -16,7 +16,7 @@ using MongoDB.Driver;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Core.Canonicalization;
using StellaOps.Excititor.Core.Observations;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
using StellaOps.Excititor.WebService.Services;
using StellaOps.Excititor.WebService.Telemetry;
@@ -36,7 +36,7 @@ public static class EvidenceEndpoints
// GET /evidence/vex/list - List evidence exports
app.MapGet("/evidence/vex/list", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IMongoDatabase database,
TimeProvider timeProvider,
[FromQuery] int? limit,
@@ -114,7 +114,7 @@ public static class EvidenceEndpoints
app.MapGet("/evidence/vex/bundle/{bundleId}", async (
HttpContext context,
string bundleId,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IMongoDatabase database,
TimeProvider timeProvider,
CancellationToken cancellationToken) =>
@@ -191,7 +191,7 @@ public static class EvidenceEndpoints
// GET /evidence/vex/lookup - Lookup evidence for vuln/product pair
app.MapGet("/evidence/vex/lookup", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexObservationProjectionService projectionService,
TimeProvider timeProvider,
[FromQuery] string vulnerabilityId,
@@ -256,7 +256,7 @@ public static class EvidenceEndpoints
app.MapGet("/vuln/evidence/vex/{advisory_key}", async (
HttpContext context,
string advisory_key,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IMongoDatabase database,
TimeProvider timeProvider,
[FromQuery] int? limit,
@@ -446,7 +446,7 @@ public static class EvidenceEndpoints
HttpContext context,
string bundleId,
[FromQuery] string? generation,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
IOptions<AirgapOptions> airgapOptions,
[FromServices] IAirgapImportStore airgapImportStore,
[FromServices] IVexHashingService hashingService,
@@ -528,7 +528,7 @@ public static class EvidenceEndpoints
HttpContext context,
string bundleId,
[FromQuery] string? generation,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
IOptions<AirgapOptions> airgapOptions,
[FromServices] IAirgapImportStore airgapImportStore,
CancellationToken cancellationToken) =>
@@ -575,7 +575,7 @@ public static class EvidenceEndpoints
HttpContext context,
string bundleId,
[FromQuery] string? generation,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
IOptions<AirgapOptions> airgapOptions,
[FromServices] IAirgapImportStore airgapImportStore,
CancellationToken cancellationToken) =>
@@ -679,7 +679,7 @@ public static class EvidenceEndpoints
return (digest, size);
}
private static bool TryResolveTenant(HttpContext context, VexMongoStorageOptions options, out string tenant, out IResult? problem)
private static bool TryResolveTenant(HttpContext context, VexStorageOptions options, out string tenant, out IResult? problem)
{
tenant = options.DefaultTenant;
problem = null;

View File

@@ -20,49 +20,49 @@ internal static class IngestEndpoints
group.MapPost("/reconcile", HandleReconcileAsync);
}
internal static async Task<IResult> HandleInitAsync(
HttpContext httpContext,
ExcititorInitRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
internal static async Task<IResult> HandleInitAsync(
HttpContext httpContext,
ExcititorInitRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
{
var scopeResult = ScopeAuthorization.RequireScope(httpContext, AdminScope);
if (scopeResult is not null)
{
return scopeResult;
}
var providerIds = NormalizeProviders(request.Providers);
_ = timeProvider;
var options = new IngestInitOptions(providerIds, request.Resume ?? false);
{
return scopeResult;
}
var providerIds = NormalizeProviders(request.Providers);
_ = timeProvider;
var options = new IngestInitOptions(providerIds, request.Resume ?? false);
var summary = await orchestrator.InitializeAsync(options, cancellationToken).ConfigureAwait(false);
var message = $"Initialized {summary.ProviderCount} provider(s); {summary.SuccessCount} succeeded, {summary.FailureCount} failed.";
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
displayName = provider.DisplayName,
status = provider.Status,
durationMs = provider.Duration.TotalMilliseconds,
error = provider.Error
})
});
}
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
displayName = provider.DisplayName,
status = provider.Status,
durationMs = provider.Duration.TotalMilliseconds,
error = provider.Error
})
});
}
internal static async Task<IResult> HandleRunAsync(
HttpContext httpContext,
ExcititorIngestRunRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
internal static async Task<IResult> HandleRunAsync(
HttpContext httpContext,
ExcititorIngestRunRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
{
var scopeResult = ScopeAuthorization.RequireScope(httpContext, AdminScope);
if (scopeResult is not null)
@@ -72,98 +72,55 @@ internal static class IngestEndpoints
if (!TryParseDateTimeOffset(request.Since, out var since, out var sinceError))
{
return TypedResults.BadRequest<object>(new { message = sinceError });
}
if (!TryParseTimeSpan(request.Window, out var window, out var windowError))
{
return TypedResults.BadRequest<object>(new { message = windowError });
}
_ = timeProvider;
var providerIds = NormalizeProviders(request.Providers);
var options = new IngestRunOptions(
providerIds,
since,
window,
request.Force ?? false);
var summary = await orchestrator.RunAsync(options, cancellationToken).ConfigureAwait(false);
var message = $"Ingest run completed for {summary.ProviderCount} provider(s); {summary.SuccessCount} succeeded, {summary.FailureCount} failed.";
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
durationMs = summary.Duration.TotalMilliseconds,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
status = provider.Status,
documents = provider.Documents,
claims = provider.Claims,
startedAt = provider.StartedAt,
completedAt = provider.CompletedAt,
durationMs = provider.Duration.TotalMilliseconds,
lastDigest = provider.LastDigest,
lastUpdated = provider.LastUpdated,
checkpoint = provider.Checkpoint,
error = provider.Error
})
});
}
return TypedResults.BadRequest<object>(new { message = sinceError });
}
internal static async Task<IResult> HandleResumeAsync(
HttpContext httpContext,
ExcititorIngestResumeRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
{
var scopeResult = ScopeAuthorization.RequireScope(httpContext, AdminScope);
if (scopeResult is not null)
{
return scopeResult;
}
_ = timeProvider;
var providerIds = NormalizeProviders(request.Providers);
var options = new IngestResumeOptions(providerIds, request.Checkpoint);
var summary = await orchestrator.ResumeAsync(options, cancellationToken).ConfigureAwait(false);
var message = $"Resume run completed for {summary.ProviderCount} provider(s); {summary.SuccessCount} succeeded, {summary.FailureCount} failed.";
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
durationMs = summary.Duration.TotalMilliseconds,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
status = provider.Status,
documents = provider.Documents,
claims = provider.Claims,
startedAt = provider.StartedAt,
completedAt = provider.CompletedAt,
durationMs = provider.Duration.TotalMilliseconds,
since = provider.Since,
checkpoint = provider.Checkpoint,
error = provider.Error
})
});
}
if (!TryParseTimeSpan(request.Window, out var window, out var windowError))
{
return TypedResults.BadRequest<object>(new { message = windowError });
}
internal static async Task<IResult> HandleReconcileAsync(
HttpContext httpContext,
ExcititorReconcileRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
_ = timeProvider;
var providerIds = NormalizeProviders(request.Providers);
var options = new IngestRunOptions(
providerIds,
since,
window,
request.Force ?? false);
var summary = await orchestrator.RunAsync(options, cancellationToken).ConfigureAwait(false);
var message = $"Ingest run completed for {summary.ProviderCount} provider(s); {summary.SuccessCount} succeeded, {summary.FailureCount} failed.";
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
durationMs = summary.Duration.TotalMilliseconds,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
status = provider.Status,
documents = provider.Documents,
claims = provider.Claims,
startedAt = provider.StartedAt,
completedAt = provider.CompletedAt,
durationMs = provider.Duration.TotalMilliseconds,
lastDigest = provider.LastDigest,
lastUpdated = provider.LastUpdated,
checkpoint = provider.Checkpoint,
error = provider.Error
})
});
}
internal static async Task<IResult> HandleResumeAsync(
HttpContext httpContext,
ExcititorIngestResumeRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
{
var scopeResult = ScopeAuthorization.RequireScope(httpContext, AdminScope);
if (scopeResult is not null)
@@ -171,40 +128,83 @@ internal static class IngestEndpoints
return scopeResult;
}
if (!TryParseTimeSpan(request.MaxAge, out var maxAge, out var error))
{
return TypedResults.BadRequest<object>(new { message = error });
}
_ = timeProvider;
var providerIds = NormalizeProviders(request.Providers);
var options = new ReconcileOptions(providerIds, maxAge);
var summary = await orchestrator.ReconcileAsync(options, cancellationToken).ConfigureAwait(false);
var message = $"Reconcile completed for {summary.ProviderCount} provider(s); {summary.ReconciledCount} reconciled, {summary.SkippedCount} skipped, {summary.FailureCount} failed.";
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
durationMs = summary.Duration.TotalMilliseconds,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
status = provider.Status,
action = provider.Action,
lastUpdated = provider.LastUpdated,
threshold = provider.Threshold,
documents = provider.Documents,
claims = provider.Claims,
error = provider.Error
})
});
}
_ = timeProvider;
var providerIds = NormalizeProviders(request.Providers);
var options = new IngestResumeOptions(providerIds, request.Checkpoint);
internal static ImmutableArray<string> NormalizeProviders(IReadOnlyCollection<string>? providers)
var summary = await orchestrator.ResumeAsync(options, cancellationToken).ConfigureAwait(false);
var message = $"Resume run completed for {summary.ProviderCount} provider(s); {summary.SuccessCount} succeeded, {summary.FailureCount} failed.";
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
durationMs = summary.Duration.TotalMilliseconds,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
status = provider.Status,
documents = provider.Documents,
claims = provider.Claims,
startedAt = provider.StartedAt,
completedAt = provider.CompletedAt,
durationMs = provider.Duration.TotalMilliseconds,
since = provider.Since,
checkpoint = provider.Checkpoint,
error = provider.Error
})
});
}
internal static async Task<IResult> HandleReconcileAsync(
HttpContext httpContext,
ExcititorReconcileRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
{
var scopeResult = ScopeAuthorization.RequireScope(httpContext, AdminScope);
if (scopeResult is not null)
{
return scopeResult;
}
if (!TryParseTimeSpan(request.MaxAge, out var maxAge, out var error))
{
return TypedResults.BadRequest<object>(new { message = error });
}
_ = timeProvider;
var providerIds = NormalizeProviders(request.Providers);
var options = new ReconcileOptions(providerIds, maxAge);
var summary = await orchestrator.ReconcileAsync(options, cancellationToken).ConfigureAwait(false);
var message = $"Reconcile completed for {summary.ProviderCount} provider(s); {summary.ReconciledCount} reconciled, {summary.SkippedCount} skipped, {summary.FailureCount} failed.";
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
durationMs = summary.Duration.TotalMilliseconds,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
status = provider.Status,
action = provider.Action,
lastUpdated = provider.LastUpdated,
threshold = provider.Threshold,
documents = provider.Documents,
claims = provider.Claims,
error = provider.Error
})
});
}
internal static ImmutableArray<string> NormalizeProviders(IReadOnlyCollection<string>? providers)
{
if (providers is null || providers.Count == 0)
{
@@ -225,7 +225,7 @@ internal static class IngestEndpoints
return set.ToImmutableArray();
}
internal static bool TryParseDateTimeOffset(string? value, out DateTimeOffset? result, out string? error)
internal static bool TryParseDateTimeOffset(string? value, out DateTimeOffset? result, out string? error)
{
result = null;
error = null;
@@ -249,7 +249,7 @@ internal static class IngestEndpoints
return false;
}
internal static bool TryParseTimeSpan(string? value, out TimeSpan? result, out string? error)
internal static bool TryParseTimeSpan(string? value, out TimeSpan? result, out string? error)
{
result = null;
error = null;
@@ -269,19 +269,19 @@ internal static class IngestEndpoints
return false;
}
internal sealed record ExcititorInitRequest(IReadOnlyList<string>? Providers, bool? Resume);
internal sealed record ExcititorIngestRunRequest(
IReadOnlyList<string>? Providers,
string? Since,
string? Window,
bool? Force);
internal sealed record ExcititorIngestResumeRequest(
IReadOnlyList<string>? Providers,
string? Checkpoint);
internal sealed record ExcititorReconcileRequest(
IReadOnlyList<string>? Providers,
string? MaxAge);
}
internal sealed record ExcititorInitRequest(IReadOnlyList<string>? Providers, bool? Resume);
internal sealed record ExcititorIngestRunRequest(
IReadOnlyList<string>? Providers,
string? Since,
string? Window,
bool? Force);
internal sealed record ExcititorIngestResumeRequest(
IReadOnlyList<string>? Providers,
string? Checkpoint);
internal sealed record ExcititorReconcileRequest(
IReadOnlyList<string>? Providers,
string? MaxAge);
}

View File

@@ -10,7 +10,7 @@ using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Options;
using StellaOps.Excititor.Core.Canonicalization;
using StellaOps.Excititor.Core.Observations;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
using StellaOps.Excititor.WebService.Services;
using StellaOps.Excititor.WebService.Telemetry;
@@ -32,7 +32,7 @@ public static class LinksetEndpoints
// GET /vex/linksets - List linksets with filters
group.MapGet("", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexLinksetStore linksetStore,
[FromQuery] int? limit,
[FromQuery] string? cursor,
@@ -124,7 +124,7 @@ public static class LinksetEndpoints
group.MapGet("/{linksetId}", async (
HttpContext context,
string linksetId,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexLinksetStore linksetStore,
CancellationToken cancellationToken) =>
{
@@ -166,7 +166,7 @@ public static class LinksetEndpoints
// GET /vex/linksets/lookup - Lookup linkset by vulnerability and product
group.MapGet("/lookup", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexLinksetStore linksetStore,
[FromQuery] string? vulnerabilityId,
[FromQuery] string? productKey,
@@ -211,7 +211,7 @@ public static class LinksetEndpoints
// GET /vex/linksets/count - Get linkset counts for tenant
group.MapGet("/count", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexLinksetStore linksetStore,
CancellationToken cancellationToken) =>
{
@@ -240,7 +240,7 @@ public static class LinksetEndpoints
// GET /vex/linksets/conflicts - List linksets with conflicts (shorthand)
group.MapGet("/conflicts", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexLinksetStore linksetStore,
[FromQuery] int? limit,
CancellationToken cancellationToken) =>
@@ -317,7 +317,7 @@ public static class LinksetEndpoints
private static bool TryResolveTenant(
HttpContext context,
VexMongoStorageOptions options,
VexStorageOptions options,
out string tenant,
out IResult? problem)
{

View File

@@ -8,8 +8,8 @@ using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Export;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.WebService.Services;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Services;
namespace StellaOps.Excititor.WebService.Endpoints;
@@ -98,13 +98,13 @@ internal static class MirrorEndpoints
}
var resolvedExports = new List<MirrorExportIndexEntry>();
foreach (var exportOption in domain.Exports)
{
if (!MirrorExportPlanner.TryBuild(exportOption, out var plan, out var error))
{
resolvedExports.Add(new MirrorExportIndexEntry(
exportOption.Key,
null,
foreach (var exportOption in domain.Exports)
{
if (!MirrorExportPlanner.TryBuild(exportOption, out var plan, out var error))
{
resolvedExports.Add(new MirrorExportIndexEntry(
exportOption.Key,
null,
null,
exportOption.Format,
null,
@@ -116,7 +116,7 @@ internal static class MirrorEndpoints
continue;
}
var manifest = await exportStore.FindAsync(plan.Signature, plan.Format, cancellationToken).ConfigureAwait(false);
var manifest = await exportStore.FindAsync(plan.Signature, plan.Format, cancellationToken).ConfigureAwait(false);
if (manifest is null)
{
@@ -177,16 +177,16 @@ internal static class MirrorEndpoints
return Results.Unauthorized();
}
if (!TryFindExport(domain, exportKey, out var exportOptions))
{
return Results.NotFound();
}
if (!MirrorExportPlanner.TryBuild(exportOptions, out var plan, out var error))
{
await WritePlainTextAsync(httpContext, error ?? "invalid_export_configuration", StatusCodes.Status503ServiceUnavailable, cancellationToken).ConfigureAwait(false);
return Results.Empty;
}
if (!TryFindExport(domain, exportKey, out var exportOptions))
{
return Results.NotFound();
}
if (!MirrorExportPlanner.TryBuild(exportOptions, out var plan, out var error))
{
await WritePlainTextAsync(httpContext, error ?? "invalid_export_configuration", StatusCodes.Status503ServiceUnavailable, cancellationToken).ConfigureAwait(false);
return Results.Empty;
}
var manifest = await exportStore.FindAsync(plan.Signature, plan.Format, cancellationToken).ConfigureAwait(false);
if (manifest is null)
@@ -241,10 +241,10 @@ internal static class MirrorEndpoints
return Results.Empty;
}
if (!TryFindExport(domain, exportKey, out var exportOptions) || !MirrorExportPlanner.TryBuild(exportOptions, out var plan, out _))
{
return Results.NotFound();
}
if (!TryFindExport(domain, exportKey, out var exportOptions) || !MirrorExportPlanner.TryBuild(exportOptions, out var plan, out _))
{
return Results.NotFound();
}
var manifest = await exportStore.FindAsync(plan.Signature, plan.Format, cancellationToken).ConfigureAwait(false);
if (manifest is null)
@@ -286,36 +286,36 @@ internal static class MirrorEndpoints
return domain is not null;
}
private static bool TryFindExport(MirrorDomainOptions domain, string exportKey, out MirrorExportOptions export)
{
export = domain.Exports.FirstOrDefault(e => string.Equals(e.Key, exportKey, StringComparison.OrdinalIgnoreCase))!;
return export is not null;
}
private static bool TryFindExport(MirrorDomainOptions domain, string exportKey, out MirrorExportOptions export)
{
export = domain.Exports.FirstOrDefault(e => string.Equals(e.Key, exportKey, StringComparison.OrdinalIgnoreCase))!;
return export is not null;
}
private static string ResolveContentType(VexExportFormat format)
=> format switch
{
VexExportFormat.Json => "application/json",
VexExportFormat.JsonLines => "application/jsonl",
VexExportFormat.OpenVex => "application/json",
VexExportFormat.Csaf => "application/json",
VexExportFormat.CycloneDx => "application/json",
_ => "application/octet-stream",
};
private static string ResolveContentType(VexExportFormat format)
=> format switch
{
VexExportFormat.Json => "application/json",
VexExportFormat.JsonLines => "application/jsonl",
VexExportFormat.OpenVex => "application/json",
VexExportFormat.Csaf => "application/json",
VexExportFormat.CycloneDx => "application/json",
_ => "application/octet-stream",
};
private static string BuildDownloadFileName(string domainId, string exportKey, VexExportFormat format)
{
var builder = new StringBuilder(domainId.Length + exportKey.Length + 8);
builder.Append(domainId).Append('-').Append(exportKey);
builder.Append(format switch
{
VexExportFormat.Json => ".json",
VexExportFormat.JsonLines => ".jsonl",
VexExportFormat.OpenVex => ".openvex.json",
VexExportFormat.Csaf => ".csaf.json",
VexExportFormat.CycloneDx => ".cyclonedx.json",
_ => ".bin",
});
builder.Append(format switch
{
VexExportFormat.Json => ".json",
VexExportFormat.JsonLines => ".jsonl",
VexExportFormat.OpenVex => ".openvex.json",
VexExportFormat.Csaf => ".csaf.json",
VexExportFormat.CycloneDx => ".cyclonedx.json",
_ => ".bin",
});
return builder.ToString();
}
@@ -326,15 +326,15 @@ internal static class MirrorEndpoints
await context.Response.WriteAsync(message, cancellationToken);
}
private static async Task WriteJsonAsync<T>(HttpContext context, T payload, int statusCode, CancellationToken cancellationToken)
{
context.Response.StatusCode = statusCode;
context.Response.ContentType = "application/json";
var json = VexCanonicalJsonSerializer.Serialize(payload);
await context.Response.WriteAsync(json, cancellationToken);
private static async Task WriteJsonAsync<T>(HttpContext context, T payload, int statusCode, CancellationToken cancellationToken)
{
context.Response.StatusCode = statusCode;
context.Response.ContentType = "application/json";
var json = VexCanonicalJsonSerializer.Serialize(payload);
await context.Response.WriteAsync(json, cancellationToken);
}
}
}
internal sealed record MirrorDomainListResponse(IReadOnlyList<MirrorDomainSummary> Domains);

View File

@@ -8,7 +8,7 @@ using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Logging;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
namespace StellaOps.Excititor.WebService.Endpoints;

View File

@@ -6,7 +6,7 @@ using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Options;
using StellaOps.Excititor.Core.Observations;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
using StellaOps.Excititor.WebService.Services;
@@ -26,7 +26,7 @@ public static class ObservationEndpoints
// GET /vex/observations - List observations with filters
group.MapGet("", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexObservationStore observationStore,
TimeProvider timeProvider,
[FromQuery] int? limit,
@@ -98,7 +98,7 @@ public static class ObservationEndpoints
group.MapGet("/{observationId}", async (
HttpContext context,
string observationId,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexObservationStore observationStore,
CancellationToken cancellationToken) =>
{
@@ -140,7 +140,7 @@ public static class ObservationEndpoints
// GET /vex/observations/count - Get observation count for tenant
group.MapGet("/count", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexObservationStore observationStore,
CancellationToken cancellationToken) =>
{
@@ -230,7 +230,7 @@ public static class ObservationEndpoints
private static bool TryResolveTenant(
HttpContext context,
VexMongoStorageOptions options,
VexStorageOptions options,
out string tenant,
out IResult? problem)
{

View File

@@ -11,7 +11,7 @@ using Microsoft.Extensions.Options;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Core.Canonicalization;
using StellaOps.Excititor.Core.Orchestration;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
using StellaOps.Excititor.WebService.Services;
@@ -33,7 +33,7 @@ public static class PolicyEndpoints
private static async Task<IResult> LookupVexAsync(
HttpContext context,
[FromBody] PolicyVexLookupRequest request,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexClaimStore claimStore,
TimeProvider timeProvider,
CancellationToken cancellationToken)
@@ -174,7 +174,7 @@ public static class PolicyEndpoints
private static bool TryResolveTenant(
HttpContext context,
VexMongoStorageOptions options,
VexStorageOptions options,
out string tenant,
out IResult? problem)
{

View File

@@ -5,55 +5,55 @@ using System.Collections.Generic;
using System.Collections.Immutable;
using System.Linq;
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Excititor.Attestation;
using StellaOps.Excititor.Attestation.Dsse;
using StellaOps.Excititor.Attestation.Signing;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Policy;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.WebService.Services;
internal static class ResolveEndpoint
{
private const int MaxSubjectPairs = 256;
private const string ReadScope = "vex.read";
public static void MapResolveEndpoint(WebApplication app)
{
app.MapPost("/excititor/resolve", HandleResolveAsync);
}
using System.Text;
using System.Text.Json;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Excititor.Attestation;
using StellaOps.Excititor.Attestation.Dsse;
using StellaOps.Excititor.Attestation.Signing;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Policy;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Services;
internal static class ResolveEndpoint
{
private const int MaxSubjectPairs = 256;
private const string ReadScope = "vex.read";
public static void MapResolveEndpoint(WebApplication app)
{
app.MapPost("/excititor/resolve", HandleResolveAsync);
}
private static async Task<IResult> HandleResolveAsync(
VexResolveRequest request,
HttpContext httpContext,
IVexClaimStore claimStore,
IVexConsensusStore consensusStore,
IVexProviderStore providerStore,
IVexPolicyProvider policyProvider,
TimeProvider timeProvider,
ILoggerFactory loggerFactory,
IVexAttestationClient? attestationClient,
CancellationToken cancellationToken)
{
var scopeResult = ScopeAuthorization.RequireScope(httpContext, ReadScope);
if (scopeResult is not null)
{
return scopeResult;
}
if (request is null)
{
return Results.BadRequest("Request payload is required.");
}
var logger = loggerFactory.CreateLogger("ResolveEndpoint");
var signer = httpContext.RequestServices.GetService<IVexSigner>();
IVexProviderStore providerStore,
IVexPolicyProvider policyProvider,
TimeProvider timeProvider,
ILoggerFactory loggerFactory,
IVexAttestationClient? attestationClient,
CancellationToken cancellationToken)
{
var scopeResult = ScopeAuthorization.RequireScope(httpContext, ReadScope);
if (scopeResult is not null)
{
return scopeResult;
}
if (request is null)
{
return Results.BadRequest("Request payload is required.");
}
var logger = loggerFactory.CreateLogger("ResolveEndpoint");
var signer = httpContext.RequestServices.GetService<IVexSigner>();
var productKeys = NormalizeValues(request.ProductKeys, request.Purls);
var vulnerabilityIds = NormalizeValues(request.VulnerabilityIds);

View File

@@ -6,7 +6,7 @@ using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Options;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Core.RiskFeed;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Services;
namespace StellaOps.Excititor.WebService.Endpoints;
@@ -25,7 +25,7 @@ public static class RiskFeedEndpoints
// POST /risk/v1/feed - Generate risk feed
group.MapPost("/feed", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IRiskFeedService riskFeedService,
[FromBody] RiskFeedRequestDto request,
CancellationToken cancellationToken) =>
@@ -67,7 +67,7 @@ public static class RiskFeedEndpoints
// GET /risk/v1/feed/item - Get single risk feed item
group.MapGet("/feed/item", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IRiskFeedService riskFeedService,
[FromQuery] string? advisoryKey,
[FromQuery] string? artifact,
@@ -112,7 +112,7 @@ public static class RiskFeedEndpoints
group.MapGet("/feed/by-advisory/{advisoryKey}", async (
HttpContext context,
string advisoryKey,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IRiskFeedService riskFeedService,
[FromQuery] int? limit,
CancellationToken cancellationToken) =>
@@ -153,7 +153,7 @@ public static class RiskFeedEndpoints
group.MapGet("/feed/by-artifact/{**artifact}", async (
HttpContext context,
string artifact,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IRiskFeedService riskFeedService,
[FromQuery] int? limit,
CancellationToken cancellationToken) =>
@@ -235,7 +235,7 @@ public static class RiskFeedEndpoints
private static bool TryResolveTenant(
HttpContext context,
VexMongoStorageOptions options,
VexStorageOptions options,
out string tenant,
out IResult? problem)
{

View File

@@ -0,0 +1,71 @@
using System.Collections.Immutable;
using System.Text.Json;
using StellaOps.Concelier.RawModels;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Core.Storage;
namespace StellaOps.Excititor.WebService.Extensions;
internal static class VexRawDocumentMapper
{
public static VexRawDocument ToRawModel(VexRawRecord record, string defaultTenant)
{
ArgumentNullException.ThrowIfNull(record);
var metadata = record.Metadata ?? ImmutableDictionary<string, string>.Empty;
var tenant = Get(metadata, "tenant", record.Tenant) ?? defaultTenant;
var source = new RawSourceMetadata(
Vendor: Get(metadata, "source.vendor", record.ProviderId) ?? record.ProviderId,
Connector: Get(metadata, "source.connector", record.ProviderId) ?? record.ProviderId,
ConnectorVersion: Get(metadata, "source.connector_version", "unknown") ?? "unknown",
Stream: Get(metadata, "source.stream", record.Format.ToString().ToLowerInvariant()));
var signature = new RawSignatureMetadata(
Present: string.Equals(Get(metadata, "signature.present"), "true", StringComparison.OrdinalIgnoreCase),
Format: Get(metadata, "signature.format"),
KeyId: Get(metadata, "signature.key_id"),
Signature: Get(metadata, "signature.sig"),
Certificate: Get(metadata, "signature.certificate"),
Digest: Get(metadata, "signature.digest"));
var upstream = new RawUpstreamMetadata(
UpstreamId: Get(metadata, "upstream.id", record.Digest) ?? record.Digest,
DocumentVersion: Get(metadata, "upstream.version"),
RetrievedAt: record.RetrievedAt,
ContentHash: Get(metadata, "upstream.content_hash", record.Digest) ?? record.Digest,
Signature: signature,
Provenance: metadata);
var content = new RawContent(
Format: record.Format.ToString().ToLowerInvariant(),
SpecVersion: Get(metadata, "content.spec_version"),
Raw: ParseJson(record.Content),
Encoding: Get(metadata, "content.encoding"));
return new VexRawDocument(
tenant,
source,
upstream,
content,
new RawLinkset(),
statements: null,
supersedes: record.SupersedesDigest);
}
private static string? Get(IReadOnlyDictionary<string, string> metadata, string key, string? fallback = null)
{
if (metadata.TryGetValue(key, out var value) && !string.IsNullOrWhiteSpace(value))
{
return value;
}
return fallback;
}
private static JsonElement ParseJson(ReadOnlyMemory<byte> content)
{
using var document = JsonDocument.Parse(content);
return document.RootElement.Clone();
}
}

View File

@@ -6,17 +6,16 @@ using System.Linq;
using System.Text;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.Primitives;
using MongoDB.Bson;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Core.Aoc;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
using StellaOps.Excititor.WebService.Services;
public partial class Program
{
private const string TenantHeaderName = "X-Stella-Tenant";
private static bool TryResolveTenant(HttpContext context, VexMongoStorageOptions options, bool requireHeader, out string tenant, out IResult? problem)
private static bool TryResolveTenant(HttpContext context, VexStorageOptions options, bool requireHeader, out string tenant, out IResult? problem)
{
tenant = options.DefaultTenant;
problem = null;
@@ -51,27 +50,6 @@ public partial class Program
return true;
}
private static IReadOnlyDictionary<string, string> ReadMetadata(BsonValue value)
{
if (value is not BsonDocument doc || doc.ElementCount == 0)
{
return new Dictionary<string, string>(StringComparer.Ordinal);
}
var result = new Dictionary<string, string>(StringComparer.Ordinal);
foreach (var element in doc.Elements)
{
if (string.IsNullOrWhiteSpace(element.Name))
{
continue;
}
result[element.Name] = element.Value?.ToString() ?? string.Empty;
}
return result;
}
private static bool TryDecodeCursor(string? cursor, out DateTimeOffset timestamp, out string digest)
{
timestamp = default;

View File

@@ -27,28 +27,27 @@ using StellaOps.Excititor.Formats.CSAF;
using StellaOps.Excititor.Formats.CycloneDX;
using StellaOps.Excititor.Formats.OpenVEX;
using StellaOps.Excititor.Policy;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Storage.Postgres;
using StellaOps.Excititor.WebService.Endpoints;
using StellaOps.Excititor.WebService.Extensions;
using StellaOps.Excititor.WebService.Options;
using StellaOps.Excititor.WebService.Services;
using StellaOps.Excititor.Core.Aoc;
using StellaOps.Excititor.WebService.Telemetry;
using MongoDB.Driver;
using MongoDB.Bson;
using Microsoft.Extensions.Caching.Memory;
using StellaOps.Excititor.WebService.Contracts;
using System.Globalization;
using StellaOps.Excititor.WebService.Graph;
using StellaOps.Excititor.Core.Storage;
var builder = WebApplication.CreateBuilder(args);
var configuration = builder.Configuration;
var services = builder.Services;
services.AddOptions<VexMongoStorageOptions>()
.Bind(configuration.GetSection("Excititor:Storage:Mongo"))
services.AddOptions<VexStorageOptions>()
.Bind(configuration.GetSection("Excititor:Storage"))
.ValidateOnStart();
services.AddExcititorMongoStorage();
services.AddExcititorPostgresStorage(configuration);
services.AddCsafNormalizer();
services.AddCycloneDxNormalizer();
services.AddOpenVexNormalizer();
@@ -147,7 +146,7 @@ app.UseObservabilityHeaders();
app.MapGet("/excititor/status", async (HttpContext context,
IEnumerable<IVexArtifactStore> artifactStores,
IOptions<VexMongoStorageOptions> mongoOptions,
IOptions<VexStorageOptions> mongoOptions,
TimeProvider timeProvider) =>
{
var payload = new StatusResponse(
@@ -1260,7 +1259,7 @@ app.MapPost("/excititor/admin/backfill-statements", async (
app.MapGet("/console/vex", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
IVexObservationQueryService queryService,
ConsoleTelemetry telemetry,
IMemoryCache cache,
@@ -1459,7 +1458,7 @@ var response = new GraphLinkoutsResponse(items, notFound);
app.MapGet("/v1/graph/status", async (
HttpContext context,
[FromQuery(Name = "purl")] string[]? purls,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
IOptions<GraphOptions> graphOptions,
IVexObservationQueryService queryService,
IMemoryCache cache,
@@ -1519,7 +1518,7 @@ app.MapGet("/v1/graph/overlays", async (
HttpContext context,
[FromQuery(Name = "purl")] string[]? purls,
[FromQuery] bool includeJustifications,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
IOptions<GraphOptions> graphOptions,
IVexObservationQueryService queryService,
IMemoryCache cache,
@@ -1580,7 +1579,7 @@ app.MapGet("/v1/graph/observations", async (
[FromQuery] bool includeJustifications,
[FromQuery] int? limitPerPurl,
[FromQuery] string? cursor,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
IOptions<GraphOptions> graphOptions,
IVexObservationQueryService queryService,
CancellationToken cancellationToken) =>
@@ -1638,7 +1637,7 @@ app.MapPost("/ingest/vex", async (
HttpContext context,
VexIngestRequest request,
IVexRawStore rawStore,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
TimeProvider timeProvider,
ILogger<Program> logger,
CancellationToken cancellationToken) =>
@@ -1692,8 +1691,8 @@ app.MapPost("/ingest/vex", async (
app.MapGet("/vex/raw", async (
HttpContext context,
IMongoDatabase database,
IOptions<VexMongoStorageOptions> storageOptions,
IVexRawStore rawStore,
IOptions<VexStorageOptions> storageOptions,
CancellationToken cancellationToken) =>
{
var scopeResult = ScopeAuthorization.RequireScope(context, "vex.read");
@@ -1702,132 +1701,69 @@ app.MapGet("/vex/raw", async (
return scopeResult;
}
if (!TryResolveTenant(context, storageOptions.Value, requireHeader: false, out _, out var tenantError))
if (!TryResolveTenant(context, storageOptions.Value, requireHeader: false, out var tenant, out var tenantError))
{
return tenantError;
}
var collection = database.GetCollection<BsonDocument>(VexMongoCollectionNames.Raw);
var query = context.Request.Query;
var filters = new List<FilterDefinition<BsonDocument>>();
var builder = Builders<BsonDocument>.Filter;
var providerFilter = BuildStringFilterSet(query["providerId"]);
var digestFilter = BuildStringFilterSet(query["digest"]);
var formatFilter = query.TryGetValue("format", out var formats)
? formats
.Where(static f => !string.IsNullOrWhiteSpace(f))
.Select(static f => Enum.TryParse<VexDocumentFormat>(f, true, out var parsed) ? parsed : VexDocumentFormat.Unknown)
.Where(static f => f != VexDocumentFormat.Unknown)
.ToArray()
: Array.Empty<VexDocumentFormat>();
if (query.TryGetValue("providerId", out var providerValues))
{
var providers = providerValues
.Where(static value => !string.IsNullOrWhiteSpace(value))
.Select(static value => value!.Trim())
.ToArray();
if (providers.Length > 0)
{
filters.Add(builder.In("ProviderId", providers));
}
}
if (query.TryGetValue("digest", out var digestValues))
{
var digests = digestValues
.Where(static value => !string.IsNullOrWhiteSpace(value))
.Select(static value => value!.Trim())
.ToArray();
if (digests.Length > 0)
{
filters.Add(builder.In("Digest", digests));
}
}
if (query.TryGetValue("format", out var formatValues))
{
var formats = formatValues
.Where(static value => !string.IsNullOrWhiteSpace(value))
.Select(static value => value!.Trim().ToLowerInvariant())
.ToArray();
if (formats.Length > 0)
{
filters.Add(builder.In("Format", formats));
}
}
if (query.TryGetValue("since", out var sinceValues) && DateTimeOffset.TryParse(sinceValues.FirstOrDefault(), CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal, out var sinceValue))
{
filters.Add(builder.Gte("RetrievedAt", sinceValue.UtcDateTime));
}
var since = ParseSinceTimestamp(query["since"]);
var cursorToken = query.TryGetValue("cursor", out var cursorValues) ? cursorValues.FirstOrDefault() : null;
DateTime? cursorTimestamp = null;
string? cursorDigest = null;
if (!string.IsNullOrWhiteSpace(cursorToken) && TryDecodeCursor(cursorToken, out var cursorTime, out var cursorId))
VexRawCursor? cursor = null;
if (!string.IsNullOrWhiteSpace(cursorToken) &&
TryDecodeCursor(cursorToken, out var cursorTime, out var cursorId))
{
cursorTimestamp = cursorTime.UtcDateTime;
cursorDigest = cursorId;
cursor = new VexRawCursor(cursorTime, cursorId);
}
if (cursorTimestamp is not null && cursorDigest is not null)
{
var ltTime = builder.Lt("RetrievedAt", cursorTimestamp.Value);
var eqTimeLtDigest = builder.And(
builder.Eq("RetrievedAt", cursorTimestamp.Value),
builder.Lt("Digest", cursorDigest));
filters.Add(builder.Or(ltTime, eqTimeLtDigest));
}
var limit = ResolveLimit(query["limit"], defaultValue: 50, min: 1, max: 200);
var limit = 50;
if (query.TryGetValue("limit", out var limitValues) && int.TryParse(limitValues.FirstOrDefault(), NumberStyles.Integer, CultureInfo.InvariantCulture, out var requestedLimit))
{
limit = Math.Clamp(requestedLimit, 1, 200);
}
var page = await rawStore.QueryAsync(
new VexRawQuery(
tenant,
providerFilter,
digestFilter,
formatFilter,
since,
Until: null,
cursor,
limit),
cancellationToken).ConfigureAwait(false);
var filter = filters.Count == 0 ? builder.Empty : builder.And(filters);
var sort = Builders<BsonDocument>.Sort.Descending("RetrievedAt").Descending("Digest");
var documents = await collection
.Find(filter)
.Sort(sort)
.Limit(limit)
.Project(Builders<BsonDocument>.Projection.Include("Digest").Include("ProviderId").Include("Format").Include("SourceUri").Include("RetrievedAt").Include("Metadata").Include("GridFsObjectId"))
.ToListAsync(cancellationToken)
.ConfigureAwait(false);
var summaries = page.Items
.Select(summary => new VexRawSummaryResponse(
summary.Digest,
summary.ProviderId,
summary.Format.ToString().ToLowerInvariant(),
summary.SourceUri.ToString(),
summary.RetrievedAt,
summary.InlineContent,
summary.Metadata))
.ToList();
var summaries = new List<VexRawSummaryResponse>(documents.Count);
foreach (var document in documents)
{
var digest = document.TryGetValue("Digest", out var digestValue) && digestValue.IsString ? digestValue.AsString : string.Empty;
var providerId = document.TryGetValue("ProviderId", out var providerValue) && providerValue.IsString ? providerValue.AsString : string.Empty;
var format = document.TryGetValue("Format", out var formatValue) && formatValue.IsString ? formatValue.AsString : string.Empty;
var sourceUri = document.TryGetValue("SourceUri", out var sourceValue) && sourceValue.IsString ? sourceValue.AsString : string.Empty;
var retrievedAt = document.TryGetValue("RetrievedAt", out var retrievedValue) && retrievedValue is BsonDateTime bsonDate
? bsonDate.ToUniversalTime()
: DateTime.UtcNow;
var metadata = ReadMetadata(document.TryGetValue("Metadata", out var metadataValue) ? metadataValue : BsonNull.Value);
var inlineContent = !document.TryGetValue("GridFsObjectId", out var gridId) || gridId.IsBsonNull || (gridId.IsString && string.IsNullOrWhiteSpace(gridId.AsString));
var nextCursor = page.NextCursor is null
? null
: EncodeCursor(page.NextCursor.RetrievedAt.UtcDateTime, page.NextCursor.Digest);
summaries.Add(new VexRawSummaryResponse(
digest,
providerId,
format,
sourceUri,
new DateTimeOffset(retrievedAt),
inlineContent,
metadata));
}
var hasMore = documents.Count == limit;
string? nextCursor = null;
if (hasMore && documents.Count > 0)
{
var last = documents[^1];
var lastTime = last.GetValue("RetrievedAt", BsonNull.Value).ToUniversalTime();
var lastDigest = last.GetValue("Digest", BsonNull.Value).AsString;
nextCursor = EncodeCursor(lastTime, lastDigest);
}
return Results.Json(new VexRawListResponse(summaries, nextCursor, hasMore));
return Results.Json(new VexRawListResponse(summaries, nextCursor, page.HasMore));
});
app.MapGet("/vex/raw/{digest}", async (
string digest,
HttpContext context,
IVexRawStore rawStore,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
CancellationToken cancellationToken) =>
{
var scopeResult = ScopeAuthorization.RequireScope(context, "vex.read");
@@ -1861,7 +1797,7 @@ app.MapGet("/vex/raw/{digest}/provenance", async (
string digest,
HttpContext context,
IVexRawStore rawStore,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
CancellationToken cancellationToken) =>
{
var scopeResult = ScopeAuthorization.RequireScope(context, "vex.read");
@@ -1901,7 +1837,7 @@ app.MapGet("/v1/vex/observations/{vulnerabilityId}/{productKey}", async (
string vulnerabilityId,
string productKey,
[FromServices] IVexObservationProjectionService projectionService,
[FromServices] IOptions<VexMongoStorageOptions> storageOptions,
[FromServices] IOptions<VexStorageOptions> storageOptions,
CancellationToken cancellationToken) =>
{
var scopeResult = ScopeAuthorization.RequireScope(context, "vex.read");
@@ -1977,7 +1913,7 @@ app.MapGet("/v1/vex/observations/{vulnerabilityId}/{productKey}", async (
app.MapGet("/v1/vex/evidence/chunks", async (
HttpContext context,
[FromServices] IVexEvidenceChunkService chunkService,
[FromServices] IOptions<VexMongoStorageOptions> storageOptions,
[FromServices] IOptions<VexStorageOptions> storageOptions,
[FromServices] ChunkTelemetry chunkTelemetry,
[FromServices] ILogger<VexEvidenceChunkRequest> logger,
[FromServices] TimeProvider timeProvider,
@@ -2083,10 +2019,9 @@ app.MapGet("/v1/vex/evidence/chunks", async (
app.MapPost("/aoc/verify", async (
HttpContext context,
VexAocVerifyRequest? request,
IMongoDatabase database,
IVexRawStore rawStore,
IVexRawWriteGuard guard,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
TimeProvider timeProvider,
CancellationToken cancellationToken) =>
{
@@ -2119,33 +2054,26 @@ app.MapPost("/aoc/verify", async (
.Select(static value => value!.Trim())
.ToArray();
var builder = Builders<BsonDocument>.Filter;
var filter = builder.And(
builder.Gte("RetrievedAt", since),
builder.Lte("RetrievedAt", until));
if (sources is { Length: > 0 })
{
filter &= builder.In("ProviderId", sources);
}
var collection = database.GetCollection<BsonDocument>(VexMongoCollectionNames.Raw);
var digests = await collection
.Find(filter)
.Sort(Builders<BsonDocument>.Sort.Descending("RetrievedAt"))
.Limit(limit)
.Project(Builders<BsonDocument>.Projection.Include("Digest").Include("RetrievedAt").Include("ProviderId"))
.ToListAsync(cancellationToken)
.ConfigureAwait(false);
var page = await rawStore.QueryAsync(
new VexRawQuery(
tenant,
sources ?? Array.Empty<string>(),
Array.Empty<string>(),
Array.Empty<VexDocumentFormat>(),
since: new DateTimeOffset(since, TimeSpan.Zero),
until: new DateTimeOffset(until, TimeSpan.Zero),
cursor: null,
limit),
cancellationToken).ConfigureAwait(false);
var checkedCount = 0;
var violationMap = new Dictionary<string, (int Count, List<VexAocVerifyViolationExample> Examples)>(StringComparer.OrdinalIgnoreCase);
const int MaxExamplesPerCode = 5;
foreach (var digestDocument in digests)
foreach (var item in page.Items)
{
var digestValue = digestDocument.GetValue("Digest", BsonNull.Value).AsString;
var provider = digestDocument.GetValue("ProviderId", BsonNull.Value).AsString;
var digestValue = item.Digest;
var provider = item.ProviderId;
var domainDocument = await rawStore.FindByDigestAsync(digestValue, cancellationToken).ConfigureAwait(false);
if (domainDocument is null)
@@ -2202,7 +2130,7 @@ app.MapPost("/aoc/verify", async (
new VexAocVerifyChecked(0, checkedCount),
violations,
new VexAocVerifyMetrics(checkedCount, violations.Sum(v => v.Count)),
digests.Count == limit);
page.HasMore);
return Results.Json(response);
});
@@ -2225,7 +2153,7 @@ app.MapGet("/obs/excititor/health", async (
// VEX timeline SSE (WEB-OBS-52-001)
app.MapGet("/obs/excititor/timeline", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexTimelineEventStore timelineStore,
TimeProvider timeProvider,
ILoggerFactory loggerFactory,

View File

@@ -1,4 +1,4 @@
using System.Runtime.CompilerServices;
[assembly: InternalsVisibleTo("StellaOps.Excititor.WebService.Tests")]
[assembly: InternalsVisibleTo("StellaOps.Excititor.Core.UnitTests")]
[assembly: InternalsVisibleTo("StellaOps.Excititor.WebService.Tests")]
[assembly: InternalsVisibleTo("StellaOps.Excititor.Core.UnitTests")]

View File

@@ -7,7 +7,7 @@ using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Excititor.Connectors.Abstractions;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Options;
namespace StellaOps.Excititor.WebService.Services;

View File

@@ -6,7 +6,7 @@ using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
namespace StellaOps.Excititor.WebService.Services;

View File

@@ -1,14 +1,14 @@
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using MongoDB.Driver;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using MongoDB.Driver;
using StellaOps.Excititor.Connectors.Abstractions;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
namespace StellaOps.Excititor.WebService.Services;
@@ -23,50 +23,47 @@ internal interface IVexIngestOrchestrator
Task<ReconcileSummary> ReconcileAsync(ReconcileOptions options, CancellationToken cancellationToken);
}
internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
{
private readonly IServiceProvider _serviceProvider;
private readonly IReadOnlyDictionary<string, IVexConnector> _connectors;
private readonly IVexRawStore _rawStore;
private readonly IVexClaimStore _claimStore;
private readonly IVexProviderStore _providerStore;
private readonly IVexConnectorStateRepository _stateRepository;
private readonly IVexNormalizerRouter _normalizerRouter;
private readonly IVexSignatureVerifier _signatureVerifier;
private readonly IVexMongoSessionProvider _sessionProvider;
private readonly TimeProvider _timeProvider;
private readonly ILogger<VexIngestOrchestrator> _logger;
private readonly string _defaultTenant;
internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
{
private readonly IServiceProvider _serviceProvider;
private readonly IReadOnlyDictionary<string, IVexConnector> _connectors;
private readonly IVexRawStore _rawStore;
private readonly IVexClaimStore _claimStore;
private readonly IVexProviderStore _providerStore;
private readonly IVexConnectorStateRepository _stateRepository;
private readonly IVexNormalizerRouter _normalizerRouter;
private readonly IVexSignatureVerifier _signatureVerifier;
private readonly TimeProvider _timeProvider;
private readonly ILogger<VexIngestOrchestrator> _logger;
private readonly string _defaultTenant;
public VexIngestOrchestrator(
IServiceProvider serviceProvider,
IEnumerable<IVexConnector> connectors,
IVexRawStore rawStore,
IVexClaimStore claimStore,
IVexProviderStore providerStore,
IVexConnectorStateRepository stateRepository,
IVexNormalizerRouter normalizerRouter,
IVexSignatureVerifier signatureVerifier,
IVexMongoSessionProvider sessionProvider,
TimeProvider timeProvider,
IOptions<VexMongoStorageOptions> storageOptions,
ILogger<VexIngestOrchestrator> logger)
{
_serviceProvider = serviceProvider ?? throw new ArgumentNullException(nameof(serviceProvider));
_rawStore = rawStore ?? throw new ArgumentNullException(nameof(rawStore));
_claimStore = claimStore ?? throw new ArgumentNullException(nameof(claimStore));
public VexIngestOrchestrator(
IServiceProvider serviceProvider,
IEnumerable<IVexConnector> connectors,
IVexRawStore rawStore,
IVexClaimStore claimStore,
IVexProviderStore providerStore,
IVexConnectorStateRepository stateRepository,
IVexNormalizerRouter normalizerRouter,
IVexSignatureVerifier signatureVerifier,
TimeProvider timeProvider,
IOptions<VexStorageOptions> storageOptions,
ILogger<VexIngestOrchestrator> logger)
{
_serviceProvider = serviceProvider ?? throw new ArgumentNullException(nameof(serviceProvider));
_rawStore = rawStore ?? throw new ArgumentNullException(nameof(rawStore));
_claimStore = claimStore ?? throw new ArgumentNullException(nameof(claimStore));
_providerStore = providerStore ?? throw new ArgumentNullException(nameof(providerStore));
_stateRepository = stateRepository ?? throw new ArgumentNullException(nameof(stateRepository));
_normalizerRouter = normalizerRouter ?? throw new ArgumentNullException(nameof(normalizerRouter));
_signatureVerifier = signatureVerifier ?? throw new ArgumentNullException(nameof(signatureVerifier));
_sessionProvider = sessionProvider ?? throw new ArgumentNullException(nameof(sessionProvider));
_timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
var optionsValue = (storageOptions ?? throw new ArgumentNullException(nameof(storageOptions))).Value
?? throw new ArgumentNullException(nameof(storageOptions));
_defaultTenant = string.IsNullOrWhiteSpace(optionsValue.DefaultTenant)
? "default"
: optionsValue.DefaultTenant.Trim();
_timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
var optionsValue = (storageOptions ?? throw new ArgumentNullException(nameof(storageOptions))).Value
?? throw new ArgumentNullException(nameof(storageOptions));
_defaultTenant = string.IsNullOrWhiteSpace(optionsValue.DefaultTenant)
? "default"
: optionsValue.DefaultTenant.Trim();
if (connectors is null)
{
@@ -86,8 +83,6 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
var startedAt = _timeProvider.GetUtcNow();
var results = ImmutableArray.CreateBuilder<InitProviderResult>();
var session = await _sessionProvider.StartSessionAsync(cancellationToken).ConfigureAwait(false);
var (handles, missing) = ResolveConnectors(options.Providers);
foreach (var providerId in missing)
{
@@ -100,15 +95,15 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
try
{
await ValidateConnectorAsync(handle, cancellationToken).ConfigureAwait(false);
await EnsureProviderRegistrationAsync(handle.Descriptor, session, cancellationToken).ConfigureAwait(false);
await EnsureProviderRegistrationAsync(handle.Descriptor, cancellationToken).ConfigureAwait(false);
stopwatch.Stop();
results.Add(new InitProviderResult(
handle.Descriptor.Id,
handle.Descriptor.DisplayName,
"succeeded",
stopwatch.Elapsed,
Error: null));
results.Add(new InitProviderResult(
handle.Descriptor.Id,
handle.Descriptor.DisplayName,
"succeeded",
stopwatch.Elapsed,
Error: null));
_logger.LogInformation("Excititor init validated provider {ProviderId} in {Duration}ms.", handle.Descriptor.Id, stopwatch.Elapsed.TotalMilliseconds);
}
@@ -148,8 +143,6 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
var startedAt = _timeProvider.GetUtcNow();
var since = ResolveSince(options.Since, options.Window, startedAt);
var results = ImmutableArray.CreateBuilder<ProviderRunResult>();
var session = await _sessionProvider.StartSessionAsync(cancellationToken).ConfigureAwait(false);
var (handles, missing) = ResolveConnectors(options.Providers);
foreach (var providerId in missing)
{
@@ -158,7 +151,7 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
foreach (var handle in handles)
{
var result = await ExecuteRunAsync(runId, handle, since, options.Force, session, cancellationToken).ConfigureAwait(false);
var result = await ExecuteRunAsync(runId, handle, since, options.Force, session, cancellationToken).ConfigureAwait(false);
results.Add(result);
}
@@ -173,20 +166,18 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
var runId = Guid.NewGuid();
var startedAt = _timeProvider.GetUtcNow();
var results = ImmutableArray.CreateBuilder<ProviderRunResult>();
var session = await _sessionProvider.StartSessionAsync(cancellationToken).ConfigureAwait(false);
var (handles, missing) = ResolveConnectors(options.Providers);
foreach (var providerId in missing)
{
results.Add(ProviderRunResult.Missing(providerId, since: null));
}
foreach (var handle in handles)
{
var since = await ResolveResumeSinceAsync(handle.Descriptor.Id, options.Checkpoint, session, cancellationToken).ConfigureAwait(false);
var result = await ExecuteRunAsync(runId, handle, since, force: false, session, cancellationToken).ConfigureAwait(false);
results.Add(result);
}
foreach (var handle in handles)
{
var since = await ResolveResumeSinceAsync(handle.Descriptor.Id, options.Checkpoint, session, cancellationToken).ConfigureAwait(false);
var result = await ExecuteRunAsync(runId, handle, since, force: false, session, cancellationToken).ConfigureAwait(false);
results.Add(result);
}
var completedAt = _timeProvider.GetUtcNow();
return new IngestRunSummary(runId, startedAt, completedAt, results.ToImmutable());
@@ -200,8 +191,6 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
var startedAt = _timeProvider.GetUtcNow();
var threshold = options.MaxAge is null ? (DateTimeOffset?)null : startedAt - options.MaxAge.Value;
var results = ImmutableArray.CreateBuilder<ReconcileProviderResult>();
var session = await _sessionProvider.StartSessionAsync(cancellationToken).ConfigureAwait(false);
var (handles, missing) = ResolveConnectors(options.Providers);
foreach (var providerId in missing)
{
@@ -219,8 +208,8 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
if (stale || state is null)
{
var since = stale ? threshold : lastUpdated;
var result = await ExecuteRunAsync(runId, handle, since, force: false, session, cancellationToken).ConfigureAwait(false);
results.Add(new ReconcileProviderResult(
var result = await ExecuteRunAsync(runId, handle, since, force: false, session, cancellationToken).ConfigureAwait(false);
results.Add(new ReconcileProviderResult(
handle.Descriptor.Id,
result.Status,
"reconciled",
@@ -232,15 +221,15 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
}
else
{
results.Add(new ReconcileProviderResult(
handle.Descriptor.Id,
"succeeded",
"skipped",
lastUpdated,
threshold,
Documents: 0,
Claims: 0,
Error: null));
results.Add(new ReconcileProviderResult(
handle.Descriptor.Id,
"succeeded",
"skipped",
lastUpdated,
threshold,
Documents: 0,
Claims: 0,
Error: null));
}
}
catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested)
@@ -280,7 +269,7 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
await handle.Connector.ValidateAsync(VexConnectorSettings.Empty, cancellationToken).ConfigureAwait(false);
}
private async Task EnsureProviderRegistrationAsync(VexConnectorDescriptor descriptor, IClientSessionHandle session, CancellationToken cancellationToken)
private async Task EnsureProviderRegistrationAsync(VexConnectorDescriptor descriptor, CancellationToken cancellationToken)
{
var existing = await _providerStore.FindAsync(descriptor.Id, cancellationToken, session).ConfigureAwait(false);
if (existing is not null)
@@ -292,48 +281,48 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
await _providerStore.SaveAsync(provider, cancellationToken, session).ConfigureAwait(false);
}
private async Task<ProviderRunResult> ExecuteRunAsync(
Guid runId,
ConnectorHandle handle,
DateTimeOffset? since,
bool force,
IClientSessionHandle session,
CancellationToken cancellationToken)
{
var providerId = handle.Descriptor.Id;
var startedAt = _timeProvider.GetUtcNow();
var stopwatch = Stopwatch.StartNew();
using var scope = _logger.BeginScope(new Dictionary<string, object?>(StringComparer.Ordinal)
{
["tenant"] = _defaultTenant,
["runId"] = runId,
["providerId"] = providerId,
["window.since"] = since?.ToString("O", CultureInfo.InvariantCulture),
["force"] = force,
});
private async Task<ProviderRunResult> ExecuteRunAsync(
Guid runId,
ConnectorHandle handle,
DateTimeOffset? since,
bool force,
IClientSessionHandle session,
CancellationToken cancellationToken)
{
var providerId = handle.Descriptor.Id;
var startedAt = _timeProvider.GetUtcNow();
var stopwatch = Stopwatch.StartNew();
using var scope = _logger.BeginScope(new Dictionary<string, object?>(StringComparer.Ordinal)
{
["tenant"] = _defaultTenant,
["runId"] = runId,
["providerId"] = providerId,
["window.since"] = since?.ToString("O", CultureInfo.InvariantCulture),
["force"] = force,
});
try
{
await ValidateConnectorAsync(handle, cancellationToken).ConfigureAwait(false);
await EnsureProviderRegistrationAsync(handle.Descriptor, session, cancellationToken).ConfigureAwait(false);
if (force)
{
var resetState = new VexConnectorState(providerId, null, ImmutableArray<string>.Empty);
await _stateRepository.SaveAsync(resetState, cancellationToken, session).ConfigureAwait(false);
}
var stateBeforeRun = await _stateRepository.GetAsync(providerId, cancellationToken, session).ConfigureAwait(false);
var resumeTokens = stateBeforeRun?.ResumeTokens ?? ImmutableDictionary<string, string>.Empty;
var context = new VexConnectorContext(
since,
VexConnectorSettings.Empty,
_rawStore,
_signatureVerifier,
_normalizerRouter,
_serviceProvider,
resumeTokens);
if (force)
{
var resetState = new VexConnectorState(providerId, null, ImmutableArray<string>.Empty);
await _stateRepository.SaveAsync(resetState, cancellationToken, session).ConfigureAwait(false);
}
var stateBeforeRun = await _stateRepository.GetAsync(providerId, cancellationToken, session).ConfigureAwait(false);
var resumeTokens = stateBeforeRun?.ResumeTokens ?? ImmutableDictionary<string, string>.Empty;
var context = new VexConnectorContext(
since,
VexConnectorSettings.Empty,
_rawStore,
_signatureVerifier,
_normalizerRouter,
_serviceProvider,
resumeTokens);
var documents = 0;
var claims = 0;
@@ -354,25 +343,25 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
stopwatch.Stop();
var completedAt = _timeProvider.GetUtcNow();
var stateAfterRun = await _stateRepository.GetAsync(providerId, cancellationToken, session).ConfigureAwait(false);
var checkpoint = stateAfterRun?.DocumentDigests.IsDefaultOrEmpty == false
? stateAfterRun.DocumentDigests[^1]
: lastDigest;
var result = new ProviderRunResult(
providerId,
"succeeded",
var stateAfterRun = await _stateRepository.GetAsync(providerId, cancellationToken, session).ConfigureAwait(false);
var checkpoint = stateAfterRun?.DocumentDigests.IsDefaultOrEmpty == false
? stateAfterRun.DocumentDigests[^1]
: lastDigest;
var result = new ProviderRunResult(
providerId,
"succeeded",
documents,
claims,
startedAt,
completedAt,
stopwatch.Elapsed,
lastDigest,
stateAfterRun?.LastUpdated,
checkpoint,
null,
since);
lastDigest,
stateAfterRun?.LastUpdated,
checkpoint,
null,
since);
_logger.LogInformation(
"Excititor ingest provider {ProviderId} completed: documents={Documents} claims={Claims} since={Since} duration={Duration}ms",

View File

@@ -6,7 +6,7 @@ using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
namespace StellaOps.Excititor.WebService.Services;

View File

@@ -75,13 +75,7 @@ public sealed class AppendOnlyLinksetExtractionService
results.Add(result);
if (result.HadChanges && _eventPublisher is not null)
{
await _eventPublisher.PublishLinksetUpdatedAsync(
normalizedTenant,
result.Linkset,
cancellationToken);
}
await PublishIfNeededAsync(normalizedTenant, result, cancellationToken);
}
catch (Exception ex)
{
@@ -142,13 +136,7 @@ public sealed class AppendOnlyLinksetExtractionService
disagreement,
cancellationToken);
if (storeResult.HadChanges && _eventPublisher is not null)
{
await _eventPublisher.PublishLinksetUpdatedAsync(
normalizedTenant,
storeResult.Linkset,
cancellationToken);
}
await PublishIfNeededAsync(normalizedTenant, storeResult, cancellationToken);
return LinksetAppendResult.Succeeded(
normalizedTenant,
@@ -193,7 +181,7 @@ public sealed class AppendOnlyLinksetExtractionService
ProviderId: obs.ProviderId,
Status: stmt.Status.ToString().ToLowerInvariant(),
Confidence: null)))
.Distinct(VexLinksetObservationRefComparer.Instance)
.DistinctBy(refModel => $"{refModel.ProviderId}:{refModel.Status}:{refModel.ObservationId}", StringComparer.OrdinalIgnoreCase)
.ToList();
if (observationRefs.Count == 0)
@@ -263,6 +251,60 @@ public sealed class AppendOnlyLinksetExtractionService
return at >= 0 && at < key.Length - 1 ? key[(at + 1)..] : null;
}
private async Task PublishIfNeededAsync(string tenant, AppendLinksetResult result, CancellationToken cancellationToken)
{
if (_eventPublisher is null || !result.HadChanges)
{
return;
}
var evt = ToEvent(tenant, result.Linkset);
await _eventPublisher.PublishAsync(evt, cancellationToken).ConfigureAwait(false);
}
private static VexLinksetUpdatedEvent ToEvent(string tenant, VexLinkset linkset)
{
var observationRefs = linkset.Observations
.Select(o => new VexLinksetObservationRefCore(
o.ObservationId,
o.ProviderId,
o.Status,
o.Confidence,
ImmutableDictionary<string, string>.Empty))
.OrderBy(o => o.ProviderId, StringComparer.OrdinalIgnoreCase)
.ThenBy(o => o.Status, StringComparer.OrdinalIgnoreCase)
.ThenBy(o => o.ObservationId, StringComparer.Ordinal)
.ToImmutableArray();
var disagreements = linkset.Disagreements
.OrderBy(d => d.ProviderId, StringComparer.OrdinalIgnoreCase)
.ThenBy(d => d.Status, StringComparer.OrdinalIgnoreCase)
.ThenBy(d => d.Justification ?? string.Empty, StringComparer.OrdinalIgnoreCase)
.ToImmutableArray();
return new VexLinksetUpdatedEvent(
VexLinksetUpdatedEventFactory.EventType,
tenant,
linkset.LinksetId,
linkset.VulnerabilityId,
linkset.ProductKey,
linkset.Scope,
observationRefs,
disagreements,
linkset.UpdatedAt);
}
private async Task PublishIfNeededAsync(string tenant, LinksetAppendResult result, CancellationToken cancellationToken)
{
if (_eventPublisher is null || !result.HadChanges || result.Linkset is null)
{
return;
}
var evt = ToEvent(tenant, result.Linkset);
await _eventPublisher.PublishAsync(evt, cancellationToken).ConfigureAwait(false);
}
private static string Normalize(string value) =>
VexObservation.EnsureNotNullOrWhiteSpace(value, nameof(value));

View File

@@ -9,6 +9,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Options" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="../../../Aoc/__Libraries/StellaOps.Aoc/StellaOps.Aoc.csproj" />

View File

@@ -0,0 +1,7 @@
// Temporary stubs to allow legacy interfaces to compile while MongoDB is removed.
// These types are intentionally minimal; they do not perform any database operations.
namespace MongoDB.Driver;
public interface IClientSessionHandle : IAsyncDisposable, IDisposable
{
}

View File

@@ -0,0 +1,80 @@
using System;
using System.Collections.Generic;
using System.Collections.Immutable;
namespace StellaOps.Excititor.Core.Storage;
/// <summary>
/// Query envelope for listing raw VEX documents.
/// </summary>
public sealed record VexRawQuery(
string Tenant,
IReadOnlyCollection<string> ProviderIds,
IReadOnlyCollection<string> Digests,
IReadOnlyCollection<VexDocumentFormat> Formats,
DateTimeOffset? Since,
DateTimeOffset? Until,
VexRawCursor? Cursor,
int Limit);
/// <summary>
/// Stable pagination cursor based on retrieved-at and digest ordering.
/// </summary>
public sealed record VexRawCursor(DateTimeOffset RetrievedAt, string Digest);
/// <summary>
/// Lightweight summary used for list endpoints.
/// </summary>
public sealed record VexRawDocumentSummary(
string Digest,
string ProviderId,
VexDocumentFormat Format,
Uri SourceUri,
DateTimeOffset RetrievedAt,
bool InlineContent,
ImmutableDictionary<string, string> Metadata);
/// <summary>
/// Paged result for raw document listings.
/// </summary>
public sealed record VexRawDocumentPage(
IReadOnlyList<VexRawDocumentSummary> Items,
VexRawCursor? NextCursor,
bool HasMore);
/// <summary>
/// Stored raw VEX document with canonical content and metadata.
/// </summary>
public sealed record VexRawRecord(
string Digest,
string Tenant,
string ProviderId,
VexDocumentFormat Format,
Uri SourceUri,
DateTimeOffset RetrievedAt,
ImmutableDictionary<string, string> Metadata,
ReadOnlyMemory<byte> Content,
bool InlineContent,
string? SupersedesDigest = null,
string? ETag = null,
DateTimeOffset? RecordedAt = null);
/// <summary>
/// Append-only raw document store abstraction (backed by Postgres for Excititor).
/// </summary>
public interface IVexRawStore : IVexRawDocumentSink
{
/// <summary>
/// Finds a raw document by digest.
/// </summary>
/// <param name="digest">Content-addressed digest (sha256:...)</param>
/// <param name="cancellationToken">Cancellation token.</param>
ValueTask<VexRawRecord?> FindByDigestAsync(string digest, CancellationToken cancellationToken);
/// <summary>
/// Lists raw documents using deterministic ordering.
/// </summary>
/// <param name="query">Query filters and pagination cursor.</param>
/// <param name="cancellationToken">Cancellation token.</param>
ValueTask<VexRawDocumentPage> QueryAsync(VexRawQuery query, CancellationToken cancellationToken);
}

View File

@@ -0,0 +1,38 @@
namespace StellaOps.Excititor.Core.Storage;
/// <summary>
/// Storage options for Excititor persistence (Postgres-backed, legacy name retained for compatibility).
/// </summary>
public class VexStorageOptions
{
/// <summary>
/// Default tenant to apply when no tenant header is supplied.
/// </summary>
public string DefaultTenant { get; set; } = "default";
/// <summary>
/// Inline content threshold in bytes; larger payloads are stored in the blob table.
/// </summary>
public int InlineThresholdBytes { get; set; } = 256 * 1024;
}
/// <summary>
/// Legacy alias preserved while migrating off MongoDB-specific naming.
/// </summary>
[System.Obsolete("Use VexStorageOptions; retained for backwards compatibility during Mongo removal.")]
public sealed class VexMongoStorageOptions : VexStorageOptions
{
/// <summary>
/// Historical bucket name (unused in Postgres mode).
/// </summary>
public string RawBucketName { get; set; } = "vex-raw";
/// <summary>
/// Backwards-compatible inline threshold property.
/// </summary>
public int GridFsInlineThresholdBytes
{
get => InlineThresholdBytes;
set => InlineThresholdBytes = value;
}
}

View File

@@ -1,308 +1,92 @@
-- VEX Schema Migration 001: Initial Schema
-- Creates the vex schema for VEX statements and dependency graphs
-- VEX Schema Migration 001: Append-only linksets (no Mongo, no consensus)
-- This migration defines an append-only Postgres backend for Excititor linksets,
-- observations, disagreements, and mutation logs. All operations are additive and
-- preserve deterministic ordering for audit/replay.
-- Create schema
CREATE SCHEMA IF NOT EXISTS vex;
-- Projects table
CREATE TABLE IF NOT EXISTS vex.projects (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
name TEXT NOT NULL,
display_name TEXT,
description TEXT,
repository_url TEXT,
default_branch TEXT,
settings JSONB NOT NULL DEFAULT '{}',
metadata JSONB NOT NULL DEFAULT '{}',
-- Drop legacy tables that carried mutable/consensus state
DROP TABLE IF EXISTS vex.linkset_mutations CASCADE;
DROP TABLE IF EXISTS vex.linkset_disagreements CASCADE;
DROP TABLE IF EXISTS vex.linkset_observations CASCADE;
DROP TABLE IF EXISTS vex.linksets CASCADE;
DROP TABLE IF EXISTS vex.observations CASCADE;
DROP TABLE IF EXISTS vex.consensus_holds CASCADE;
DROP TABLE IF EXISTS vex.consensus CASCADE;
DROP TABLE IF EXISTS vex.statements CASCADE;
DROP TABLE IF EXISTS vex.graph_edges CASCADE;
DROP TABLE IF EXISTS vex.graph_nodes CASCADE;
DROP TABLE IF EXISTS vex.graph_revisions CASCADE;
DROP TABLE IF EXISTS vex.projects CASCADE;
DROP TABLE IF EXISTS vex.linkset_events CASCADE;
DROP TABLE IF EXISTS vex.evidence_manifests CASCADE;
DROP TABLE IF EXISTS vex.cvss_receipts CASCADE;
DROP TABLE IF EXISTS vex.attestations CASCADE;
DROP TABLE IF EXISTS vex.timeline_events CASCADE;
DROP TABLE IF EXISTS vex.unknown_items CASCADE;
DROP TABLE IF EXISTS vex.unknowns_snapshots CASCADE;
-- Core linkset table (append-only semantics; updated_at is refreshed on append)
CREATE TABLE vex.linksets (
linkset_id TEXT PRIMARY KEY,
tenant TEXT NOT NULL,
vulnerability_id TEXT NOT NULL,
product_key TEXT NOT NULL,
scope JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
created_by TEXT,
UNIQUE(tenant_id, name)
UNIQUE (tenant, vulnerability_id, product_key)
);
CREATE INDEX idx_projects_tenant ON vex.projects(tenant_id);
CREATE INDEX idx_linksets_updated ON vex.linksets (tenant, updated_at DESC);
-- Graph revisions table
CREATE TABLE IF NOT EXISTS vex.graph_revisions (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
project_id UUID NOT NULL REFERENCES vex.projects(id) ON DELETE CASCADE,
revision_id TEXT NOT NULL UNIQUE,
parent_revision_id TEXT,
sbom_digest TEXT NOT NULL,
feed_snapshot_id TEXT,
policy_version TEXT,
node_count INT NOT NULL DEFAULT 0,
edge_count INT NOT NULL DEFAULT 0,
metadata JSONB NOT NULL DEFAULT '{}',
-- Observation references recorded per linkset (immutable; deduplicated)
CREATE TABLE vex.linkset_observations (
id BIGSERIAL PRIMARY KEY,
linkset_id TEXT NOT NULL REFERENCES vex.linksets(linkset_id) ON DELETE CASCADE,
observation_id TEXT NOT NULL,
provider_id TEXT NOT NULL,
status TEXT NOT NULL CHECK (status IN ('affected', 'not_affected', 'fixed', 'under_investigation')),
confidence NUMERIC(4,3),
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
created_by TEXT
UNIQUE (linkset_id, observation_id, provider_id, status)
);
CREATE INDEX idx_graph_revisions_project ON vex.graph_revisions(project_id);
CREATE INDEX idx_graph_revisions_revision ON vex.graph_revisions(revision_id);
CREATE INDEX idx_graph_revisions_created ON vex.graph_revisions(project_id, created_at DESC);
CREATE INDEX idx_linkset_observations_linkset ON vex.linkset_observations (linkset_id);
CREATE INDEX idx_linkset_observations_provider ON vex.linkset_observations (linkset_id, provider_id);
CREATE INDEX idx_linkset_observations_status ON vex.linkset_observations (linkset_id, status);
-- Graph nodes table (BIGSERIAL for high volume)
CREATE TABLE IF NOT EXISTS vex.graph_nodes (
-- Disagreements/conflicts recorded per linkset (immutable; deduplicated)
CREATE TABLE vex.linkset_disagreements (
id BIGSERIAL PRIMARY KEY,
graph_revision_id UUID NOT NULL REFERENCES vex.graph_revisions(id) ON DELETE CASCADE,
node_key TEXT NOT NULL,
node_type TEXT NOT NULL,
purl TEXT,
name TEXT,
version TEXT,
attributes JSONB NOT NULL DEFAULT '{}',
UNIQUE(graph_revision_id, node_key)
);
CREATE INDEX idx_graph_nodes_revision ON vex.graph_nodes(graph_revision_id);
CREATE INDEX idx_graph_nodes_key ON vex.graph_nodes(graph_revision_id, node_key);
CREATE INDEX idx_graph_nodes_purl ON vex.graph_nodes(purl);
CREATE INDEX idx_graph_nodes_type ON vex.graph_nodes(graph_revision_id, node_type);
-- Graph edges table (BIGSERIAL for high volume)
CREATE TABLE IF NOT EXISTS vex.graph_edges (
id BIGSERIAL PRIMARY KEY,
graph_revision_id UUID NOT NULL REFERENCES vex.graph_revisions(id) ON DELETE CASCADE,
from_node_id BIGINT NOT NULL REFERENCES vex.graph_nodes(id) ON DELETE CASCADE,
to_node_id BIGINT NOT NULL REFERENCES vex.graph_nodes(id) ON DELETE CASCADE,
edge_type TEXT NOT NULL,
attributes JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_graph_edges_revision ON vex.graph_edges(graph_revision_id);
CREATE INDEX idx_graph_edges_from ON vex.graph_edges(from_node_id);
CREATE INDEX idx_graph_edges_to ON vex.graph_edges(to_node_id);
-- VEX statements table
CREATE TABLE IF NOT EXISTS vex.statements (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
project_id UUID REFERENCES vex.projects(id),
graph_revision_id UUID REFERENCES vex.graph_revisions(id),
vulnerability_id TEXT NOT NULL,
product_id TEXT,
status TEXT NOT NULL CHECK (status IN (
'not_affected', 'affected', 'fixed', 'under_investigation'
)),
justification TEXT CHECK (justification IN (
'component_not_present', 'vulnerable_code_not_present',
'vulnerable_code_not_in_execute_path', 'vulnerable_code_cannot_be_controlled_by_adversary',
'inline_mitigations_already_exist'
)),
impact_statement TEXT,
action_statement TEXT,
action_statement_timestamp TIMESTAMPTZ,
first_issued TIMESTAMPTZ NOT NULL DEFAULT NOW(),
last_updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
source TEXT,
source_url TEXT,
evidence JSONB NOT NULL DEFAULT '{}',
provenance JSONB NOT NULL DEFAULT '{}',
metadata JSONB NOT NULL DEFAULT '{}',
created_by TEXT
);
CREATE INDEX idx_statements_tenant ON vex.statements(tenant_id);
CREATE INDEX idx_statements_project ON vex.statements(project_id);
CREATE INDEX idx_statements_revision ON vex.statements(graph_revision_id);
CREATE INDEX idx_statements_vuln ON vex.statements(vulnerability_id);
CREATE INDEX idx_statements_status ON vex.statements(tenant_id, status);
-- VEX observations table
CREATE TABLE IF NOT EXISTS vex.observations (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
statement_id UUID REFERENCES vex.statements(id) ON DELETE CASCADE,
vulnerability_id TEXT NOT NULL,
product_id TEXT NOT NULL,
observed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
observer TEXT NOT NULL,
observation_type TEXT NOT NULL,
confidence NUMERIC(3,2),
details JSONB NOT NULL DEFAULT '{}',
linkset_id TEXT NOT NULL REFERENCES vex.linksets(linkset_id) ON DELETE CASCADE,
provider_id TEXT NOT NULL,
status TEXT NOT NULL,
justification TEXT,
confidence NUMERIC(4,3),
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
UNIQUE(tenant_id, vulnerability_id, product_id, observer, observation_type)
UNIQUE (linkset_id, provider_id, status, justification)
);
CREATE INDEX idx_observations_tenant ON vex.observations(tenant_id);
CREATE INDEX idx_observations_statement ON vex.observations(statement_id);
CREATE INDEX idx_observations_vuln ON vex.observations(vulnerability_id, product_id);
CREATE INDEX idx_linkset_disagreements_linkset ON vex.linkset_disagreements (linkset_id);
-- Linksets table
CREATE TABLE IF NOT EXISTS vex.linksets (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
name TEXT NOT NULL,
description TEXT,
source_type TEXT NOT NULL,
source_url TEXT,
enabled BOOLEAN NOT NULL DEFAULT TRUE,
priority INT NOT NULL DEFAULT 0,
filter JSONB NOT NULL DEFAULT '{}',
metadata JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
UNIQUE(tenant_id, name)
-- Append-only mutation log for deterministic replay/audit
CREATE TABLE vex.linkset_mutations (
sequence_number BIGSERIAL PRIMARY KEY,
linkset_id TEXT NOT NULL REFERENCES vex.linksets(linkset_id) ON DELETE CASCADE,
mutation_type TEXT NOT NULL CHECK (mutation_type IN ('linkset_created', 'observation_added', 'disagreement_added')),
observation_id TEXT,
provider_id TEXT,
status TEXT,
confidence NUMERIC(4,3),
justification TEXT,
occurred_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_linksets_tenant ON vex.linksets(tenant_id);
CREATE INDEX idx_linksets_enabled ON vex.linksets(tenant_id, enabled, priority DESC);
CREATE INDEX idx_linkset_mutations_linkset ON vex.linkset_mutations (linkset_id, sequence_number);
-- Linkset events table
CREATE TABLE IF NOT EXISTS vex.linkset_events (
id BIGSERIAL PRIMARY KEY,
linkset_id UUID NOT NULL REFERENCES vex.linksets(id) ON DELETE CASCADE,
event_type TEXT NOT NULL,
statement_count INT NOT NULL DEFAULT 0,
error_message TEXT,
metadata JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_linkset_events_linkset ON vex.linkset_events(linkset_id);
CREATE INDEX idx_linkset_events_created ON vex.linkset_events(created_at);
-- Consensus table (VEX consensus state)
CREATE TABLE IF NOT EXISTS vex.consensus (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
vulnerability_id TEXT NOT NULL,
product_id TEXT NOT NULL,
consensus_status TEXT NOT NULL,
contributing_statements UUID[] NOT NULL DEFAULT '{}',
confidence NUMERIC(3,2),
computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
metadata JSONB NOT NULL DEFAULT '{}',
UNIQUE(tenant_id, vulnerability_id, product_id)
);
CREATE INDEX idx_consensus_tenant ON vex.consensus(tenant_id);
CREATE INDEX idx_consensus_vuln ON vex.consensus(vulnerability_id, product_id);
-- Consensus holds table
CREATE TABLE IF NOT EXISTS vex.consensus_holds (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
consensus_id UUID NOT NULL REFERENCES vex.consensus(id) ON DELETE CASCADE,
hold_type TEXT NOT NULL,
reason TEXT NOT NULL,
held_by TEXT NOT NULL,
held_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
released_at TIMESTAMPTZ,
released_by TEXT,
metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_consensus_holds_consensus ON vex.consensus_holds(consensus_id);
CREATE INDEX idx_consensus_holds_active ON vex.consensus_holds(consensus_id, released_at)
WHERE released_at IS NULL;
-- Unknown snapshots table
CREATE TABLE IF NOT EXISTS vex.unknowns_snapshots (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
project_id UUID REFERENCES vex.projects(id),
graph_revision_id UUID REFERENCES vex.graph_revisions(id),
snapshot_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
unknown_count INT NOT NULL DEFAULT 0,
metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_unknowns_snapshots_tenant ON vex.unknowns_snapshots(tenant_id);
CREATE INDEX idx_unknowns_snapshots_project ON vex.unknowns_snapshots(project_id);
-- Unknown items table
CREATE TABLE IF NOT EXISTS vex.unknown_items (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
snapshot_id UUID NOT NULL REFERENCES vex.unknowns_snapshots(id) ON DELETE CASCADE,
vulnerability_id TEXT NOT NULL,
product_id TEXT,
reason TEXT NOT NULL,
metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_unknown_items_snapshot ON vex.unknown_items(snapshot_id);
CREATE INDEX idx_unknown_items_vuln ON vex.unknown_items(vulnerability_id);
-- Evidence manifests table
CREATE TABLE IF NOT EXISTS vex.evidence_manifests (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
statement_id UUID REFERENCES vex.statements(id) ON DELETE CASCADE,
manifest_type TEXT NOT NULL,
content_hash TEXT NOT NULL,
content JSONB NOT NULL,
source TEXT,
collected_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_evidence_manifests_tenant ON vex.evidence_manifests(tenant_id);
CREATE INDEX idx_evidence_manifests_statement ON vex.evidence_manifests(statement_id);
-- CVSS receipts table
CREATE TABLE IF NOT EXISTS vex.cvss_receipts (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
statement_id UUID REFERENCES vex.statements(id) ON DELETE CASCADE,
vulnerability_id TEXT NOT NULL,
cvss_version TEXT NOT NULL,
vector_string TEXT NOT NULL,
base_score NUMERIC(3,1) NOT NULL,
environmental_score NUMERIC(3,1),
temporal_score NUMERIC(3,1),
computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_cvss_receipts_tenant ON vex.cvss_receipts(tenant_id);
CREATE INDEX idx_cvss_receipts_statement ON vex.cvss_receipts(statement_id);
CREATE INDEX idx_cvss_receipts_vuln ON vex.cvss_receipts(vulnerability_id);
-- Attestations table
CREATE TABLE IF NOT EXISTS vex.attestations (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
statement_id UUID REFERENCES vex.statements(id),
subject_digest TEXT NOT NULL,
predicate_type TEXT NOT NULL,
predicate JSONB NOT NULL,
signature TEXT,
signature_algorithm TEXT,
signed_by TEXT,
signed_at TIMESTAMPTZ,
verified BOOLEAN NOT NULL DEFAULT FALSE,
verified_at TIMESTAMPTZ,
metadata JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_attestations_tenant ON vex.attestations(tenant_id);
CREATE INDEX idx_attestations_statement ON vex.attestations(statement_id);
CREATE INDEX idx_attestations_subject ON vex.attestations(subject_digest);
-- Timeline events table
CREATE TABLE IF NOT EXISTS vex.timeline_events (
id BIGSERIAL PRIMARY KEY,
tenant_id TEXT NOT NULL,
project_id UUID REFERENCES vex.projects(id),
statement_id UUID REFERENCES vex.statements(id),
event_type TEXT NOT NULL,
event_data JSONB NOT NULL DEFAULT '{}',
actor TEXT,
correlation_id TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_timeline_events_tenant ON vex.timeline_events(tenant_id);
CREATE INDEX idx_timeline_events_project ON vex.timeline_events(project_id);
CREATE INDEX idx_timeline_events_statement ON vex.timeline_events(statement_id);
CREATE INDEX idx_timeline_events_created ON vex.timeline_events(tenant_id, created_at);
CREATE INDEX idx_timeline_events_correlation ON vex.timeline_events(correlation_id);
-- Update timestamp function
CREATE OR REPLACE FUNCTION vex.update_updated_at()
-- Refresh updated_at whenever linkset rows change
CREATE OR REPLACE FUNCTION vex.touch_updated_at()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
@@ -310,15 +94,6 @@ BEGIN
END;
$$ LANGUAGE plpgsql;
-- Triggers
CREATE TRIGGER trg_projects_updated_at
BEFORE UPDATE ON vex.projects
FOR EACH ROW EXECUTE FUNCTION vex.update_updated_at();
CREATE TRIGGER trg_linksets_updated_at
CREATE TRIGGER trg_linksets_touch_updated_at
BEFORE UPDATE ON vex.linksets
FOR EACH ROW EXECUTE FUNCTION vex.update_updated_at();
CREATE TRIGGER trg_statements_updated_at
BEFORE UPDATE ON vex.statements
FOR EACH ROW EXECUTE FUNCTION vex.update_updated_at();
FOR EACH ROW EXECUTE FUNCTION vex.touch_updated_at();

View File

@@ -0,0 +1,43 @@
-- VEX Raw Store Migration 002: Postgres-backed raw document and blob storage (Mongo/BSON removed)
-- Raw documents (append-only)
CREATE TABLE IF NOT EXISTS vex.vex_raw_documents (
    digest TEXT PRIMARY KEY,
    tenant TEXT NOT NULL,
    provider_id TEXT NOT NULL,
    format TEXT NOT NULL CHECK (format IN ('openvex','csaf','cyclonedx','custom','unknown')),
    source_uri TEXT NOT NULL,
    etag TEXT NULL,
    retrieved_at TIMESTAMPTZ NOT NULL,
    recorded_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    supersedes_digest TEXT NULL REFERENCES vex.vex_raw_documents(digest),
    content_json JSONB NOT NULL,
    content_size_bytes INT NOT NULL,
    metadata_json JSONB NOT NULL,
    provenance_json JSONB NOT NULL,
    inline_payload BOOLEAN NOT NULL DEFAULT TRUE
);
-- PostgreSQL does not allow expressions such as COALESCE(...) inside a table-level
-- UNIQUE constraint, so source identity is enforced with a unique expression index
-- instead. NULL etags collapse to '' so (tenant, provider, uri) without an etag is
-- still unique rather than being exempt from the constraint (NULLs never collide).
CREATE UNIQUE INDEX IF NOT EXISTS uq_vex_raw_documents_source
    ON vex.vex_raw_documents (tenant, provider_id, source_uri, COALESCE(etag, ''));
CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_tenant_retrieved ON vex.vex_raw_documents (tenant, retrieved_at DESC, digest);
CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_provider ON vex.vex_raw_documents (tenant, provider_id, retrieved_at DESC);
CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_supersedes ON vex.vex_raw_documents (tenant, supersedes_digest);
CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_metadata ON vex.vex_raw_documents USING GIN (metadata_json);
CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_provenance ON vex.vex_raw_documents USING GIN (provenance_json);
-- Large payloads stored separately when inline threshold exceeded
-- (the writer decides via inline_payload on vex_raw_documents; blobs cascade-delete
-- with their owning document row).
CREATE TABLE IF NOT EXISTS vex.vex_raw_blobs (
    digest TEXT PRIMARY KEY REFERENCES vex.vex_raw_documents(digest) ON DELETE CASCADE,
    payload BYTEA NOT NULL,
    payload_hash TEXT NOT NULL   -- integrity check value for the stored bytes
);
-- Optional attachment support (kept for parity with prior GridFS usage)
CREATE TABLE IF NOT EXISTS vex.vex_raw_attachments (
    digest TEXT REFERENCES vex.vex_raw_documents(digest) ON DELETE CASCADE,
    name TEXT NOT NULL,
    media_type TEXT NOT NULL,
    payload BYTEA NOT NULL,
    payload_hash TEXT NOT NULL,
    PRIMARY KEY (digest, name)   -- one attachment per (document, name)
);

View File

@@ -0,0 +1,858 @@
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Excititor.Core.Observations;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Excititor.Storage.Postgres.Repositories;
/// <summary>
/// PostgreSQL implementation of <see cref="IAppendOnlyLinksetStore"/> backed by append-only tables.
/// Uses deterministic ordering and mutation logs for audit/replay.
/// </summary>
public sealed class PostgresAppendOnlyLinksetStore : RepositoryBase<ExcititorDataSource>, IAppendOnlyLinksetStore, IVexLinksetStore
{
// Event-type markers written to the vex.linkset_mutations audit log.
private const string MutationCreated = "linkset_created";
private const string MutationObservationAdded = "observation_added";
private const string MutationDisagreementAdded = "disagreement_added";

// Serializer settings for the JSONB `scope` column: camelCase keys, compact
// output so stored documents are deterministic across writes.
private static readonly JsonSerializerOptions JsonOptions = new()
{
    PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
    WriteIndented = false
};

/// <summary>
/// Creates the store over the tenant-aware Excititor Postgres data source.
/// </summary>
public PostgresAppendOnlyLinksetStore(
    ExcititorDataSource dataSource,
    ILogger<PostgresAppendOnlyLinksetStore> logger)
    : base(dataSource, logger)
{
}
/// <summary>
/// Inserts a complete linkset (row plus its observations and disagreements) in a
/// single transaction. Strict insert semantics: if the linkset row already exists,
/// nothing is written (the transaction is rolled back) and <c>false</c> is returned.
/// </summary>
public async ValueTask<bool> InsertAsync(
    VexLinkset linkset,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(linkset);
    var tenant = linkset.Tenant;
    var linksetId = linkset.LinksetId;
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken)
        .ConfigureAwait(false);
    await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
    // Accumulates mutation-log sequence numbers produced by the child inserts.
    var sequenceNumbers = new List<long>();
    var created = await EnsureLinksetAsync(
        connection,
        linksetId,
        tenant,
        linkset.VulnerabilityId,
        linkset.ProductKey,
        linkset.Scope,
        sequenceNumbers,
        cancellationToken).ConfigureAwait(false);
    if (!created)
    {
        // Row already present: abandon everything rather than merging (see UpsertAsync).
        await transaction.RollbackAsync(cancellationToken).ConfigureAwait(false);
        return false;
    }
    foreach (var observation in linkset.Observations)
    {
        await InsertObservationAsync(connection, linksetId, observation, sequenceNumbers, cancellationToken)
            .ConfigureAwait(false);
    }
    foreach (var disagreement in linkset.Disagreements)
    {
        await InsertDisagreementAsync(connection, linksetId, disagreement, sequenceNumbers, cancellationToken)
            .ConfigureAwait(false);
    }
    // Bump updated_at so recency-ordered queries surface this linkset.
    await TouchLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    return true;
}
/// <summary>
/// Inserts the linkset if missing and appends any observations/disagreements not
/// yet recorded (dedupe happens via ON CONFLICT in the child inserts). Returns
/// <c>true</c> when the linkset row itself was created, <c>false</c> when it
/// already existed (even if new children were appended).
/// </summary>
public async ValueTask<bool> UpsertAsync(
    VexLinkset linkset,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(linkset);
    var tenant = linkset.Tenant;
    var linksetId = linkset.LinksetId;
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken)
        .ConfigureAwait(false);
    await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
    var sequenceNumbers = new List<long>();
    var created = await EnsureLinksetAsync(
        connection,
        linksetId,
        tenant,
        linkset.VulnerabilityId,
        linkset.ProductKey,
        linkset.Scope,
        sequenceNumbers,
        cancellationToken).ConfigureAwait(false);
    foreach (var observation in linkset.Observations)
    {
        await InsertObservationAsync(connection, linksetId, observation, sequenceNumbers, cancellationToken)
            .ConfigureAwait(false);
    }
    foreach (var disagreement in linkset.Disagreements)
    {
        await InsertDisagreementAsync(connection, linksetId, disagreement, sequenceNumbers, cancellationToken)
            .ConfigureAwait(false);
    }
    // Only touch updated_at when something actually changed (new row or new children).
    if (created || sequenceNumbers.Count > 0)
    {
        await TouchLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    }
    await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    return created;
}
/// <summary>
/// Returns the linkset for (tenant, vulnerability, product), creating an empty one
/// with an Unknown scope if it does not exist yet.
/// No explicit transaction: a concurrent creator is tolerated because
/// EnsureLinksetAsync uses ON CONFLICT DO NOTHING, and the final re-read picks up
/// whichever writer won.
/// </summary>
public async ValueTask<VexLinkset> GetOrCreateAsync(
    string tenant,
    string vulnerabilityId,
    string productKey,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(vulnerabilityId);
    ArgumentNullException.ThrowIfNull(productKey);
    var linksetId = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey);
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken)
        .ConfigureAwait(false);
    var existing = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    if (existing is not null)
    {
        return existing;
    }
    var sequenceNumbers = new List<long>();
    await EnsureLinksetAsync(
        connection,
        linksetId,
        tenant,
        vulnerabilityId,
        productKey,
        VexProductScope.Unknown(productKey),
        sequenceNumbers,
        cancellationToken).ConfigureAwait(false);
    // Re-read so the caller gets the persisted row (created here or by a racer).
    return await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false)
        ?? throw new InvalidOperationException($"Failed to create linkset {linksetId}.");
}
/// <summary>
/// Appends a single observation by delegating to the batch path with a
/// one-element batch; result semantics are identical to the batch call.
/// </summary>
public async ValueTask<AppendLinksetResult> AppendObservationAsync(
    string tenant,
    string vulnerabilityId,
    string productKey,
    VexLinksetObservationRefModel observation,
    VexProductScope scope,
    CancellationToken cancellationToken)
{
    var singleObservation = new[] { observation };
    var result = await AppendObservationsBatchAsync(
        tenant,
        vulnerabilityId,
        productKey,
        singleObservation,
        scope,
        cancellationToken).ConfigureAwait(false);
    return result;
}
/// <summary>
/// Appends a batch of observations to the (tenant, vulnerability, product) linkset,
/// creating the linkset first if needed. Duplicate observations are silently
/// skipped (ON CONFLICT DO NOTHING in the child insert) and do not count as added.
/// The returned result distinguishes Created / Updated / NoChange and carries the
/// latest mutation-log sequence number.
/// </summary>
public async ValueTask<AppendLinksetResult> AppendObservationsBatchAsync(
    string tenant,
    string vulnerabilityId,
    string productKey,
    IEnumerable<VexLinksetObservationRefModel> observations,
    VexProductScope scope,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(vulnerabilityId);
    ArgumentNullException.ThrowIfNull(productKey);
    ArgumentNullException.ThrowIfNull(observations);
    ArgumentNullException.ThrowIfNull(scope);
    var linksetId = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey);
    // Materialize once to avoid multiple enumeration of the caller's sequence.
    var observationList = observations.ToList();
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken)
        .ConfigureAwait(false);
    await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
    var sequenceNumbers = new List<long>();
    var wasCreated = await EnsureLinksetAsync(connection, linksetId, tenant, vulnerabilityId, productKey, scope, sequenceNumbers, cancellationToken)
        .ConfigureAwait(false);
    var observationsAdded = 0;
    foreach (var obs in observationList)
    {
        var added = await InsertObservationAsync(connection, linksetId, obs, sequenceNumbers, cancellationToken)
            .ConfigureAwait(false);
        if (added)
        {
            observationsAdded++;
        }
    }
    if (wasCreated || observationsAdded > 0)
    {
        await TouchLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    }
    await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    // Re-read on the same (still open) connection after commit to build the result.
    var linkset = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false)
        ?? throw new InvalidOperationException($"Linkset {linksetId} not found after append.");
    var sequenceNumber = await GetLatestSequenceAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    if (observationsAdded == 0 && !wasCreated)
    {
        return AppendLinksetResult.NoChange(linkset, sequenceNumber);
    }
    if (wasCreated)
    {
        return AppendLinksetResult.Created(linkset, observationsAdded, sequenceNumber);
    }
    return AppendLinksetResult.Updated(linkset, observationsAdded, disagreementsAdded: 0, sequenceNumber);
}
/// <summary>
/// Appends a single provider disagreement to the linkset, creating the linkset
/// (with Unknown scope) if needed. A duplicate disagreement is a no-op.
/// Mirrors the observation batch path: transactional write, then post-commit
/// re-read to build the result.
/// </summary>
public async ValueTask<AppendLinksetResult> AppendDisagreementAsync(
    string tenant,
    string vulnerabilityId,
    string productKey,
    VexObservationDisagreement disagreement,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(vulnerabilityId);
    ArgumentNullException.ThrowIfNull(productKey);
    ArgumentNullException.ThrowIfNull(disagreement);
    var linksetId = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey);
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken)
        .ConfigureAwait(false);
    await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
    var sequenceNumbers = new List<long>();
    var wasCreated = await EnsureLinksetAsync(
        connection,
        linksetId,
        tenant,
        vulnerabilityId,
        productKey,
        VexProductScope.Unknown(productKey),
        sequenceNumbers,
        cancellationToken).ConfigureAwait(false);
    var disagreementsAdded = await InsertDisagreementAsync(connection, linksetId, disagreement, sequenceNumbers, cancellationToken)
        .ConfigureAwait(false)
        ? 1
        : 0;
    if (wasCreated || disagreementsAdded > 0)
    {
        await TouchLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    }
    await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    var linkset = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false)
        ?? throw new InvalidOperationException($"Linkset {linksetId} not found after append.");
    var sequenceNumber = await GetLatestSequenceAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    if (disagreementsAdded == 0 && !wasCreated)
    {
        return AppendLinksetResult.NoChange(linkset, sequenceNumber);
    }
    if (wasCreated)
    {
        return AppendLinksetResult.Created(linkset, observationsAdded: 0, sequenceNumber);
    }
    return AppendLinksetResult.Updated(linkset, observationsAdded: 0, disagreementsAdded, sequenceNumber);
}
/// <summary>
/// Loads a linkset by its id on a read-only connection; <c>null</c> when absent.
/// </summary>
public async ValueTask<VexLinkset?> GetByIdAsync(
    string tenant,
    string linksetId,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(linksetId);
    await using var readerConnection = await DataSource
        .OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    var linkset = await ReadLinksetAsync(readerConnection, linksetId, cancellationToken)
        .ConfigureAwait(false);
    return linkset;
}

/// <summary>
/// Loads a linkset by its natural key, deriving the deterministic linkset id
/// from (tenant, vulnerability, product) and delegating to <see cref="GetByIdAsync"/>.
/// </summary>
public async ValueTask<VexLinkset?> GetByKeyAsync(
    string tenant,
    string vulnerabilityId,
    string productKey,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(vulnerabilityId);
    ArgumentNullException.ThrowIfNull(productKey);
    var derivedId = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey);
    return await GetByIdAsync(tenant, derivedId, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Finds up to <paramref name="limit"/> linksets for a vulnerability, newest first.
/// Two-phase read: ids are paged first, then each linkset is hydrated individually.
/// </summary>
public async ValueTask<IReadOnlyList<VexLinkset>> FindByVulnerabilityAsync(
    string tenant,
    string vulnerabilityId,
    int limit,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(vulnerabilityId);
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    // Predicate is a trusted constant; only the parameter values come from the caller.
    var linksetIds = await GetLinksetIdsAsync(connection, "vulnerability_id = @vulnerability_id", cmd =>
    {
        AddParameter(cmd, "vulnerability_id", vulnerabilityId);
        AddParameter(cmd, "tenant", tenant);
        AddParameter(cmd, "limit", limit);
    }, cancellationToken).ConfigureAwait(false);
    return await ReadLinksetsAsync(connection, linksetIds, cancellationToken).ConfigureAwait(false);
}

/// <summary>
/// Finds up to <paramref name="limit"/> linksets for a product key, newest first.
/// Same two-phase shape as <see cref="FindByVulnerabilityAsync"/>.
/// </summary>
public async ValueTask<IReadOnlyList<VexLinkset>> FindByProductKeyAsync(
    string tenant,
    string productKey,
    int limit,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(productKey);
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    var linksetIds = await GetLinksetIdsAsync(connection, "product_key = @product_key", cmd =>
    {
        AddParameter(cmd, "product_key", productKey);
        AddParameter(cmd, "tenant", tenant);
        AddParameter(cmd, "limit", limit);
    }, cancellationToken).ConfigureAwait(false);
    return await ReadLinksetsAsync(connection, linksetIds, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Returns up to <paramref name="limit"/> linksets that have at least one recorded
/// disagreement, newest first (join against linkset_disagreements acts as the filter).
/// </summary>
public async ValueTask<IReadOnlyList<VexLinkset>> FindWithConflictsAsync(
    string tenant,
    int limit,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    const string sql = """
        SELECT DISTINCT ls.linkset_id, ls.updated_at
        FROM vex.linksets ls
        JOIN vex.linkset_disagreements d ON d.linkset_id = ls.linkset_id
        WHERE ls.tenant = @tenant
        ORDER BY ls.updated_at DESC, ls.linkset_id
        LIMIT @limit;
        """;
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "tenant", tenant);
    AddParameter(command, "limit", limit);
    var linksetIds = new List<string>();
    // Reader is scoped so it is closed before ReadLinksetsAsync reuses the connection.
    await using (var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false))
    {
        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            linksetIds.Add(reader.GetString(0));
        }
    }
    return await ReadLinksetsAsync(connection, linksetIds, cancellationToken).ConfigureAwait(false);
}

/// <summary>
/// Returns up to <paramref name="limit"/> linksets containing at least one
/// observation from the given provider, newest first.
/// </summary>
public async ValueTask<IReadOnlyList<VexLinkset>> FindByProviderAsync(
    string tenant,
    string providerId,
    int limit,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(providerId);
    const string sql = """
        SELECT DISTINCT ls.linkset_id, ls.updated_at
        FROM vex.linksets ls
        JOIN vex.linkset_observations o ON o.linkset_id = ls.linkset_id
        WHERE ls.tenant = @tenant AND o.provider_id = @provider_id
        ORDER BY ls.updated_at DESC, ls.linkset_id
        LIMIT @limit;
        """;
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "tenant", tenant);
    AddParameter(command, "provider_id", providerId);
    AddParameter(command, "limit", limit);
    var ids = new List<string>();
    await using (var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false))
    {
        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            ids.Add(reader.GetString(0));
        }
    }
    return await ReadLinksetsAsync(connection, ids, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// The store is append-only, so deletion is never performed; always reports
/// <c>false</c> (synchronously completed) to signal a no-op to callers.
/// </summary>
public ValueTask<bool> DeleteAsync(
    string tenant,
    string linksetId,
    CancellationToken cancellationToken)
    => ValueTask.FromResult(false);
/// <summary>
/// Counts all linksets for a tenant.
/// </summary>
public async ValueTask<long> CountAsync(string tenant, CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    const string countSql = "SELECT COUNT(*) FROM vex.linksets WHERE tenant = @tenant;";
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var countCommand = CreateCommand(countSql, connection);
    AddParameter(countCommand, "tenant", tenant);
    var scalar = await countCommand.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    // COUNT(*) comes back as bigint; fall back to Convert for any other scalar shape.
    return scalar is long typed ? typed : Convert.ToInt64(scalar);
}

/// <summary>
/// Counts tenant linksets that carry at least one disagreement.
/// </summary>
public async ValueTask<long> CountWithConflictsAsync(string tenant, CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    const string conflictSql = """
        SELECT COUNT(DISTINCT ls.linkset_id)
        FROM vex.linksets ls
        JOIN vex.linkset_disagreements d ON d.linkset_id = ls.linkset_id
        WHERE ls.tenant = @tenant;
        """;
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var conflictCommand = CreateCommand(conflictSql, connection);
    AddParameter(conflictCommand, "tenant", tenant);
    var scalar = await conflictCommand.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    return scalar is long typed ? typed : Convert.ToInt64(scalar);
}
/// <summary>
/// Returns the full mutation log for a linkset in sequence order (audit/replay).
/// Note: <paramref name="tenant"/> selects the connection only; the query filters
/// solely by linkset_id — assumes linkset ids are tenant-scoped upstream (TODO confirm).
/// </summary>
public async ValueTask<IReadOnlyList<LinksetMutationEvent>> GetMutationLogAsync(
    string tenant,
    string linksetId,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(linksetId);
    const string sql = """
        SELECT sequence_number, mutation_type, occurred_at, observation_id, provider_id, status, confidence, justification
        FROM vex.linkset_mutations
        WHERE linkset_id = @linkset_id
        ORDER BY sequence_number;
        """;
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    var mutations = new List<LinksetMutationEvent>();
    await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
    while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        mutations.Add(new LinksetMutationEvent(
            sequenceNumber: reader.GetInt64(0),
            mutationType: reader.GetString(1),
            timestamp: reader.GetFieldValue<DateTimeOffset>(2),
            observationId: GetNullableString(reader, 3),
            providerId: GetNullableString(reader, 4),
            status: GetNullableString(reader, 5),
            confidence: reader.IsDBNull(6) ? null : reader.GetDouble(6),
            justification: GetNullableString(reader, 7)));
    }
    return mutations;
}
/// <summary>
/// Inserts the linkset row if absent (first-writer-wins: ON CONFLICT DO NOTHING
/// means a later caller's scope is ignored). When a row is actually inserted, a
/// "linkset_created" mutation is logged and its sequence number appended to
/// <paramref name="sequenceNumbers"/>. Returns whether this call created the row.
/// </summary>
private async Task<bool> EnsureLinksetAsync(
    NpgsqlConnection connection,
    string linksetId,
    string tenant,
    string vulnerabilityId,
    string productKey,
    VexProductScope scope,
    List<long> sequenceNumbers,
    CancellationToken cancellationToken)
{
    const string sql = """
        INSERT INTO vex.linksets (linkset_id, tenant, vulnerability_id, product_key, scope)
        VALUES (@linkset_id, @tenant, @vulnerability_id, @product_key, @scope::jsonb)
        ON CONFLICT (linkset_id) DO NOTHING
        RETURNING linkset_id;
        """;
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    AddParameter(command, "tenant", tenant);
    AddParameter(command, "vulnerability_id", vulnerabilityId);
    AddParameter(command, "product_key", productKey);
    AddJsonbParameter(command, "scope", SerializeScope(scope));
    // RETURNING yields a value only when the INSERT actually happened.
    var inserted = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    if (inserted is not null)
    {
        var seq = await InsertMutationAsync(connection, linksetId, MutationCreated, null, null, null, null, null, cancellationToken)
            .ConfigureAwait(false);
        sequenceNumbers.Add(seq);
        return true;
    }
    return false;
}
/// <summary>
/// Inserts one observation reference; duplicates are skipped via ON CONFLICT
/// DO NOTHING (dedupe relies on the table's unique constraint — defined in the
/// migration, not visible here). Logs an "observation_added" mutation only when a
/// row was actually inserted. Returns whether anything was added.
/// </summary>
private async Task<bool> InsertObservationAsync(
    NpgsqlConnection connection,
    string linksetId,
    VexLinksetObservationRefModel observation,
    List<long> sequenceNumbers,
    CancellationToken cancellationToken)
{
    const string sql = """
        INSERT INTO vex.linkset_observations (
            linkset_id, observation_id, provider_id, status, confidence)
        VALUES (@linkset_id, @observation_id, @provider_id, @status, @confidence)
        ON CONFLICT DO NOTHING
        RETURNING id;
        """;
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    AddParameter(command, "observation_id", observation.ObservationId);
    AddParameter(command, "provider_id", observation.ProviderId);
    AddParameter(command, "status", observation.Status);
    AddParameter(command, "confidence", observation.Confidence);
    var inserted = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    if (inserted is not null)
    {
        var seq = await InsertMutationAsync(
            connection,
            linksetId,
            MutationObservationAdded,
            observation.ObservationId,
            observation.ProviderId,
            observation.Status,
            observation.Confidence,
            null,
            cancellationToken).ConfigureAwait(false);
        sequenceNumbers.Add(seq);
        return true;
    }
    return false;
}

/// <summary>
/// Inserts one disagreement record with the same dedupe/audit pattern as
/// <see cref="InsertObservationAsync"/>, logging "disagreement_added" on success.
/// </summary>
private async Task<bool> InsertDisagreementAsync(
    NpgsqlConnection connection,
    string linksetId,
    VexObservationDisagreement disagreement,
    List<long> sequenceNumbers,
    CancellationToken cancellationToken)
{
    const string sql = """
        INSERT INTO vex.linkset_disagreements (
            linkset_id, provider_id, status, justification, confidence)
        VALUES (@linkset_id, @provider_id, @status, @justification, @confidence)
        ON CONFLICT DO NOTHING
        RETURNING id;
        """;
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    AddParameter(command, "provider_id", disagreement.ProviderId);
    AddParameter(command, "status", disagreement.Status);
    AddParameter(command, "justification", disagreement.Justification);
    AddParameter(command, "confidence", disagreement.Confidence);
    var inserted = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    if (inserted is not null)
    {
        var seq = await InsertMutationAsync(
            connection,
            linksetId,
            MutationDisagreementAdded,
            null,
            disagreement.ProviderId,
            disagreement.Status,
            disagreement.Confidence,
            disagreement.Justification,
            cancellationToken).ConfigureAwait(false);
        sequenceNumbers.Add(seq);
        return true;
    }
    return false;
}
/// <summary>
/// Appends one entry to the mutation log and returns its database-assigned
/// sequence_number (the log's ordering key). All nullable columns are passed
/// through as-is; a null scalar result is treated as a hard failure.
/// </summary>
private async Task<long> InsertMutationAsync(
    NpgsqlConnection connection,
    string linksetId,
    string mutationType,
    string? observationId,
    string? providerId,
    string? status,
    double? confidence,
    string? justification,
    CancellationToken cancellationToken)
{
    const string sql = """
        INSERT INTO vex.linkset_mutations (
            linkset_id, mutation_type, observation_id, provider_id, status, confidence, justification)
        VALUES (@linkset_id, @mutation_type, @observation_id, @provider_id, @status, @confidence, @justification)
        RETURNING sequence_number;
        """;
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    AddParameter(command, "mutation_type", mutationType);
    AddParameter(command, "observation_id", observationId);
    AddParameter(command, "provider_id", providerId);
    AddParameter(command, "status", status);
    AddParameter(command, "confidence", confidence);
    AddParameter(command, "justification", justification);
    var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false)
        ?? throw new InvalidOperationException("Failed to insert mutation log entry.");
    return Convert.ToInt64(result);
}
/// <summary>
/// Sets the linkset's updated_at to NOW() so recency-ordered queries pick it up.
/// Static, so it cannot use the instance CreateCommand/AddParameter helpers the
/// rest of the class uses; it builds a raw NpgsqlCommand instead.
/// </summary>
private static async Task TouchLinksetAsync(
    NpgsqlConnection connection,
    string linksetId,
    CancellationToken cancellationToken)
{
    const string sql = "UPDATE vex.linksets SET updated_at = NOW() WHERE linkset_id = @linkset_id;";
    await using var command = new NpgsqlCommand(sql, connection);
    command.Parameters.AddWithValue("linkset_id", linksetId);
    await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
}

/// <summary>
/// Returns the highest mutation sequence number for the linkset, or 0 when the
/// log is empty (COALESCE in SQL).
/// </summary>
private async Task<long> GetLatestSequenceAsync(
    NpgsqlConnection connection,
    string linksetId,
    CancellationToken cancellationToken)
{
    const string sql = "SELECT COALESCE(MAX(sequence_number), 0) FROM vex.linkset_mutations WHERE linkset_id = @linkset_id;";
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    return result is long value ? value : Convert.ToInt64(result);
}
/// <summary>
/// Pages linkset ids matching a WHERE predicate, newest first.
/// SECURITY NOTE: <paramref name="predicate"/> is interpolated directly into the
/// SQL, so it must only ever be a trusted constant from this class (as the current
/// callers do); user-supplied values must go through <paramref name="configure"/>
/// as parameters.
/// </summary>
private async Task<IReadOnlyList<string>> GetLinksetIdsAsync(
    NpgsqlConnection connection,
    string predicate,
    Action<NpgsqlCommand> configure,
    CancellationToken cancellationToken)
{
    var sql = $"""
        SELECT linkset_id
        FROM vex.linksets
        WHERE {predicate} AND tenant = @tenant
        ORDER BY updated_at DESC, linkset_id
        LIMIT @limit;
        """;
    await using var command = CreateCommand(sql, connection);
    // Caller binds @tenant, @limit and any predicate parameters here.
    configure(command);
    var ids = new List<string>();
    await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
    while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        ids.Add(reader.GetString(0));
    }
    return ids;
}
/// <summary>
/// Hydrates each id into a full linkset, silently dropping ids that vanished
/// between paging and hydration. NOTE(review): this is an N+1 pattern (three
/// queries per id); acceptable for the small limits used by callers.
/// </summary>
private async Task<IReadOnlyList<VexLinkset>> ReadLinksetsAsync(
    NpgsqlConnection connection,
    IReadOnlyList<string> linksetIds,
    CancellationToken cancellationToken)
{
    var results = new List<VexLinkset>();
    foreach (var id in linksetIds)
    {
        var linkset = await ReadLinksetAsync(connection, id, cancellationToken).ConfigureAwait(false);
        if (linkset is not null)
        {
            results.Add(linkset);
        }
    }
    return results;
}

/// <summary>
/// Loads one linkset row plus its observations and disagreements; <c>null</c>
/// when the row does not exist. The reader is closed explicitly before the
/// follow-up queries so the shared connection is free to execute them.
/// </summary>
private async Task<VexLinkset?> ReadLinksetAsync(
    NpgsqlConnection connection,
    string linksetId,
    CancellationToken cancellationToken)
{
    const string sql = """
        SELECT linkset_id, tenant, vulnerability_id, product_key, scope::text, created_at, updated_at
        FROM vex.linksets
        WHERE linkset_id = @linkset_id;
        """;
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
    if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        return null;
    }
    var id = reader.GetString(0);
    var tenant = reader.GetString(1);
    var vulnerabilityId = reader.GetString(2);
    var productKey = reader.GetString(3);
    var scopeJson = reader.GetString(4);
    var createdAt = reader.GetFieldValue<DateTimeOffset>(5);
    var updatedAt = reader.GetFieldValue<DateTimeOffset>(6);
    // Fall back to Unknown scope if the stored JSON is empty/unparseable as null.
    var scope = DeserializeScope(scopeJson) ?? VexProductScope.Unknown(productKey);
    await reader.CloseAsync();
    var observations = await ReadObservationsAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    var disagreements = await ReadDisagreementsAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    return new VexLinkset(
        id,
        tenant,
        vulnerabilityId,
        productKey,
        scope,
        observations,
        disagreements,
        createdAt,
        updatedAt);
}
/// <summary>
/// Reads a linkset's observations in a deterministic order
/// (provider, status, observation id) so hydrated linksets compare stably.
/// </summary>
private async Task<IReadOnlyList<VexLinksetObservationRefModel>> ReadObservationsAsync(
    NpgsqlConnection connection,
    string linksetId,
    CancellationToken cancellationToken)
{
    const string sql = """
        SELECT observation_id, provider_id, status, confidence
        FROM vex.linkset_observations
        WHERE linkset_id = @linkset_id
        ORDER BY provider_id, status, observation_id;
        """;
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    var observations = new List<VexLinksetObservationRefModel>();
    await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
    while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        observations.Add(new VexLinksetObservationRefModel(
            reader.GetString(0),
            reader.GetString(1),
            reader.GetString(2),
            reader.IsDBNull(3) ? null : reader.GetDouble(3)));
    }
    return observations;
}

/// <summary>
/// Reads a linkset's disagreements in deterministic order; NULL justifications
/// sort as '' (COALESCE) with the row id as the final tiebreaker.
/// </summary>
private async Task<IReadOnlyList<VexObservationDisagreement>> ReadDisagreementsAsync(
    NpgsqlConnection connection,
    string linksetId,
    CancellationToken cancellationToken)
{
    const string sql = """
        SELECT provider_id, status, justification, confidence
        FROM vex.linkset_disagreements
        WHERE linkset_id = @linkset_id
        ORDER BY provider_id, status, COALESCE(justification, ''), id;
        """;
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    var disagreements = new List<VexObservationDisagreement>();
    await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
    while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        disagreements.Add(new VexObservationDisagreement(
            reader.GetString(0),
            reader.GetString(1),
            GetNullableString(reader, 2),
            reader.IsDBNull(3) ? null : reader.GetDouble(3)));
    }
    return disagreements;
}
// Serializes the product scope to camelCase compact JSON for the JSONB column.
private static string? SerializeScope(VexProductScope scope)
    => JsonSerializer.Serialize(scope, JsonOptions);

// Parses the JSONB scope column; blank/null input yields null so callers can
// substitute a default scope.
private static VexProductScope? DeserializeScope(string? json)
    => string.IsNullOrWhiteSpace(json)
        ? null
        : JsonSerializer.Deserialize<VexProductScope>(json, JsonOptions);
}

View File

@@ -0,0 +1,441 @@
using System;
using System.Buffers;
using System.Collections.Immutable;
using System.Linq;
using System.Text;
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using Npgsql;
using NpgsqlTypes;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Excititor.Storage.Postgres.Repositories;
/// <summary>
/// PostgreSQL-backed implementation of <see cref="IVexRawStore"/> replacing Mongo/GridFS.
/// </summary>
public sealed class PostgresVexRawStore : RepositoryBase<ExcititorDataSource>, IVexRawStore
{
// Max payload size (bytes) stored inline as JSONB; larger payloads go to vex.vex_raw_blobs.
private readonly int _inlineThreshold;

/// <summary>
/// Creates the raw store. <paramref name="options"/> supplies the inline-payload
/// threshold (defaults to 256 KiB, clamped to at least 1 byte).
/// </summary>
/// <exception cref="ArgumentNullException">When <paramref name="options"/> is null.</exception>
public PostgresVexRawStore(
    ExcititorDataSource dataSource,
    IOptions<VexStorageOptions> options,
    ILogger<PostgresVexRawStore> logger)
    : base(dataSource, logger)
{
    // Throw-helper replaces the manual null-check/throw block (same exception, same paramName).
    ArgumentNullException.ThrowIfNull(options);
    _inlineThreshold = Math.Max(1, options.Value?.InlineThresholdBytes ?? 256 * 1024);
}
/// <summary>
/// Persists a raw VEX document append-only: canonicalizes the JSON content,
/// derives/validates the digest, and writes the document row (and, when the
/// payload exceeds the inline threshold, a companion blob row) in one transaction.
/// Re-storing an existing digest is a no-op (ON CONFLICT DO NOTHING).
/// Assumes CanonicalizeJson/EnsureDigest/ResolveTenant/GetJsonString and the
/// JsonSerializerOptions member are provided elsewhere in this class/base — not
/// visible in this view.
/// </summary>
public async ValueTask StoreAsync(VexRawDocument document, CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(document);
    var canonicalContent = CanonicalizeJson(document.Content);
    var digest = EnsureDigest(document.Digest, canonicalContent);
    var metadata = document.Metadata ?? ImmutableDictionary<string, string>.Empty;
    var tenant = ResolveTenant(metadata);
    var format = document.Format.ToString().ToLowerInvariant();
    var providerId = document.ProviderId;
    var sourceUri = document.SourceUri.ToString();
    var retrievedAt = document.RetrievedAt.UtcDateTime;
    // Small payloads are stored inline as JSONB only; large ones also get a blob row.
    var inline = canonicalContent.Length <= _inlineThreshold;
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken)
        .ConfigureAwait(false);
    await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
    var metadataJson = JsonSerializer.Serialize(metadata, JsonSerializerOptions);
    // Provenance is currently stored as a clone of metadata; callers may slice it as needed.
    var provenanceJson = metadataJson;
    var contentJson = GetJsonString(canonicalContent);
    const string insertDocumentSql = """
        INSERT INTO vex.vex_raw_documents (
            digest,
            tenant,
            provider_id,
            format,
            source_uri,
            etag,
            retrieved_at,
            supersedes_digest,
            content_json,
            content_size_bytes,
            metadata_json,
            provenance_json,
            inline_payload)
        VALUES (
            @digest,
            @tenant,
            @provider_id,
            @format,
            @source_uri,
            @etag,
            @retrieved_at,
            @supersedes_digest,
            @content_json::jsonb,
            @content_size_bytes,
            @metadata_json::jsonb,
            @provenance_json::jsonb,
            @inline_payload)
        ON CONFLICT (digest) DO NOTHING;
        """;
    await using (var command = CreateCommand(insertDocumentSql, connection, transaction))
    {
        AddParameter(command, "digest", digest);
        AddParameter(command, "tenant", tenant);
        AddParameter(command, "provider_id", providerId);
        AddParameter(command, "format", format);
        AddParameter(command, "source_uri", sourceUri);
        // etag and supersedes are carried as well-known metadata keys, not model fields.
        AddParameter(command, "etag", metadata.TryGetValue("etag", out var etag) ? etag : null);
        AddParameter(command, "retrieved_at", retrievedAt);
        AddParameter(command, "supersedes_digest", metadata.TryGetValue("supersedes", out var supersedes) ? supersedes : null);
        AddJsonbParameter(command, "content_json", contentJson);
        AddParameter(command, "content_size_bytes", canonicalContent.Length);
        AddJsonbParameter(command, "metadata_json", metadataJson);
        AddJsonbParameter(command, "provenance_json", provenanceJson);
        AddParameter(command, "inline_payload", inline);
        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }
    if (!inline)
    {
        const string insertBlobSql = """
            INSERT INTO vex.vex_raw_blobs (digest, payload, payload_hash)
            VALUES (@digest, @payload, @payload_hash)
            ON CONFLICT (digest) DO NOTHING;
            """;
        await using var blobCommand = CreateCommand(insertBlobSql, connection, transaction);
        AddParameter(blobCommand, "digest", digest);
        blobCommand.Parameters.Add(new NpgsqlParameter("payload", NpgsqlDbType.Bytea)
        {
            Value = canonicalContent.ToArray()
        });
        // The content digest doubles as the blob's integrity hash.
        AddParameter(blobCommand, "payload_hash", digest);
        await blobCommand.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }
    await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
}
public async ValueTask<VexRawRecord?> FindByDigestAsync(string digest, CancellationToken cancellationToken)
{
ArgumentException.ThrowIfNullOrWhiteSpace(digest);
const string sql = """
SELECT d.digest,
d.tenant,
d.provider_id,
d.format,
d.source_uri,
d.retrieved_at,
d.metadata_json,
d.inline_payload,
d.content_json,
d.supersedes_digest,
d.etag,
d.recorded_at,
b.payload
FROM vex.vex_raw_documents d
LEFT JOIN vex.vex_raw_blobs b ON b.digest = d.digest
WHERE d.digest = @digest;
""";
await using var connection = await DataSource.OpenConnectionAsync("public", "reader", cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "digest", digest);
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
{
return null;
}
var tenant = reader.GetString(1);
var providerId = reader.GetString(2);
var format = ParseFormat(reader.GetString(3));
var sourceUri = new Uri(reader.GetString(4));
var retrievedAt = reader.GetFieldValue<DateTime>(5);
var metadata = ParseMetadata(reader.GetString(6));
var inline = reader.GetFieldValue<bool>(7);
var contentJson = reader.GetString(8);
var supersedes = reader.IsDBNull(9) ? null : reader.GetString(9);
var etag = reader.IsDBNull(10) ? null : reader.GetString(10);
var recordedAt = reader.IsDBNull(11) ? (DateTimeOffset?)null : reader.GetFieldValue<DateTimeOffset>(11);
ReadOnlyMemory<byte> contentBytes;
if (!inline && !reader.IsDBNull(12))
{
contentBytes = (byte[])reader.GetValue(12);
}
else
{
contentBytes = Encoding.UTF8.GetBytes(contentJson);
}
return new VexRawRecord(
digest,
tenant,
providerId,
format,
sourceUri,
new DateTimeOffset(retrievedAt, TimeSpan.Zero),
metadata,
contentBytes,
inline,
supersedes,
etag,
recordedAt);
}
public async ValueTask<VexRawDocumentPage> QueryAsync(VexRawQuery query, CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(query);
var conditions = new List<string> { "tenant = @tenant" };
if (query.ProviderIds.Count > 0)
{
conditions.Add("provider_id = ANY(@providers)");
}
if (query.Digests.Count > 0)
{
conditions.Add("digest = ANY(@digests)");
}
if (query.Formats.Count > 0)
{
conditions.Add("format = ANY(@formats)");
}
if (query.Since is not null)
{
conditions.Add("retrieved_at >= @since");
}
if (query.Until is not null)
{
conditions.Add("retrieved_at <= @until");
}
if (query.Cursor is not null)
{
conditions.Add("(retrieved_at < @cursor_retrieved_at OR (retrieved_at = @cursor_retrieved_at AND digest < @cursor_digest))");
}
var sql = $"""
SELECT digest, provider_id, format, source_uri, retrieved_at, metadata_json, inline_payload
FROM vex.vex_raw_documents
WHERE {string.Join(" AND ", conditions)}
ORDER BY retrieved_at DESC, digest DESC
LIMIT @limit;
""";
await using var connection = await DataSource.OpenConnectionAsync(query.Tenant, "reader", cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "tenant", query.Tenant);
AddArray(command, "providers", query.ProviderIds);
AddArray(command, "digests", query.Digests);
AddArray(command, "formats", query.Formats.Select(static f => f.ToString().ToLowerInvariant()).ToArray());
if (query.Since is not null)
{
AddParameter(command, "since", query.Since.Value.UtcDateTime);
}
if (query.Until is not null)
{
AddParameter(command, "until", query.Until.Value.UtcDateTime);
}
if (query.Cursor is not null)
{
AddParameter(command, "cursor_retrieved_at", query.Cursor.RetrievedAt.UtcDateTime);
AddParameter(command, "cursor_digest", query.Cursor.Digest);
}
AddParameter(command, "limit", query.Limit);
var summaries = new List<VexRawDocumentSummary>();
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
{
var digest = reader.GetString(0);
var providerId = reader.GetString(1);
var format = ParseFormat(reader.GetString(2));
var sourceUri = new Uri(reader.GetString(3));
var retrievedAt = reader.GetFieldValue<DateTime>(4);
var metadata = ParseMetadata(reader.GetString(5));
var inline = reader.GetFieldValue<bool>(6);
summaries.Add(new VexRawDocumentSummary(
digest,
providerId,
format,
sourceUri,
new DateTimeOffset(retrievedAt, TimeSpan.Zero),
inline,
metadata));
}
var hasMore = summaries.Count == query.Limit;
var nextCursor = hasMore && summaries.Count > 0
? new VexRawCursor(summaries[^1].RetrievedAt, summaries[^1].Digest)
: null;
return new VexRawDocumentPage(summaries, nextCursor, hasMore);
}
private static void AddArray(NpgsqlCommand command, string name, IReadOnlyCollection<string> values)
{
command.Parameters.Add(new NpgsqlParameter
{
ParameterName = name,
NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Text,
Value = values.Count == 0 ? Array.Empty<string>() : values.ToArray()
});
}
private static string ResolveTenant(IReadOnlyDictionary<string, string> metadata)
{
if (metadata.TryGetValue("tenant", out var tenant) && !string.IsNullOrWhiteSpace(tenant))
{
return tenant.Trim();
}
return "default";
}
private static VexDocumentFormat ParseFormat(string value)
=> Enum.TryParse<VexDocumentFormat>(value, ignoreCase: true, out var parsed)
? parsed
: VexDocumentFormat.Unknown;
private static ImmutableDictionary<string, string> ParseMetadata(string json)
{
if (string.IsNullOrWhiteSpace(json))
{
return ImmutableDictionary<string, string>.Empty;
}
try
{
var doc = JsonDocument.Parse(json);
var builder = ImmutableDictionary.CreateBuilder<string, string>(StringComparer.Ordinal);
foreach (var property in doc.RootElement.EnumerateObject())
{
builder[property.Name] = property.Value.ToString();
}
return builder.ToImmutable();
}
catch
{
return ImmutableDictionary<string, string>.Empty;
}
}
private static byte[] CanonicalizeJson(ReadOnlyMemory<byte> content)
{
using var jsonDocument = JsonDocument.Parse(content);
var buffer = new ArrayBufferWriter<byte>();
using (var writer = new Utf8JsonWriter(buffer, new JsonWriterOptions { Indented = false }))
{
WriteCanonical(writer, jsonDocument.RootElement);
}
return buffer.WrittenMemory.ToArray();
}
private static void WriteCanonical(Utf8JsonWriter writer, JsonElement element)
{
switch (element.ValueKind)
{
case JsonValueKind.Object:
writer.WriteStartObject();
foreach (var property in element.EnumerateObject().OrderBy(static p => p.Name, StringComparer.Ordinal))
{
writer.WritePropertyName(property.Name);
WriteCanonical(writer, property.Value);
}
writer.WriteEndObject();
break;
case JsonValueKind.Array:
writer.WriteStartArray();
foreach (var item in element.EnumerateArray())
{
WriteCanonical(writer, item);
}
writer.WriteEndArray();
break;
case JsonValueKind.String:
writer.WriteStringValue(element.GetString());
break;
case JsonValueKind.Number:
if (element.TryGetInt64(out var l))
{
writer.WriteNumberValue(l);
}
else if (element.TryGetDouble(out var d))
{
writer.WriteNumberValue(d);
}
else
{
writer.WriteRawValue(element.GetRawText());
}
break;
case JsonValueKind.True:
writer.WriteBooleanValue(true);
break;
case JsonValueKind.False:
writer.WriteBooleanValue(false);
break;
case JsonValueKind.Null:
case JsonValueKind.Undefined:
writer.WriteNullValue();
break;
default:
writer.WriteRawValue(element.GetRawText());
break;
}
}
private static string EnsureDigest(string digest, ReadOnlyMemory<byte> canonicalContent)
{
if (!string.IsNullOrWhiteSpace(digest) && digest.StartsWith("sha256:", StringComparison.OrdinalIgnoreCase))
{
return digest;
}
Span<byte> hash = stackalloc byte[32];
if (!System.Security.Cryptography.SHA256.TryHashData(canonicalContent.Span, hash, out _))
{
hash = System.Security.Cryptography.SHA256.HashData(canonicalContent.ToArray());
}
return "sha256:" + Convert.ToHexString(hash).ToLowerInvariant();
}
private static string GetJsonString(ReadOnlyMemory<byte> canonicalContent)
=> Encoding.UTF8.GetString(canonicalContent.Span);
private static readonly JsonSerializerOptions JsonSerializerOptions = new()
{
PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
DictionaryKeyPolicy = JsonNamingPolicy.CamelCase
};
}

View File

@@ -1,5 +1,7 @@
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Excititor.Core.Observations;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.Storage.Postgres.Repositories;
using StellaOps.Infrastructure.Postgres;
using StellaOps.Infrastructure.Postgres.Options;
@@ -24,10 +26,14 @@ public static class ServiceCollectionExtensions
string sectionName = "Postgres:Excititor")
{
services.Configure<PostgresOptions>(sectionName, configuration.GetSection(sectionName));
services.Configure<VexStorageOptions>(configuration.GetSection("Excititor:Storage"));
services.AddSingleton<ExcititorDataSource>();
// Register repositories
services.AddScoped<IVexStatementRepository, VexStatementRepository>();
services.AddScoped<IAppendOnlyLinksetStore, PostgresAppendOnlyLinksetStore>();
services.AddScoped<IVexLinksetStore, PostgresAppendOnlyLinksetStore>();
services.AddScoped<IVexRawStore, PostgresVexRawStore>();
return services;
}
@@ -47,6 +53,9 @@ public static class ServiceCollectionExtensions
// Register repositories
services.AddScoped<IVexStatementRepository, VexStatementRepository>();
services.AddScoped<IAppendOnlyLinksetStore, PostgresAppendOnlyLinksetStore>();
services.AddScoped<IVexLinksetStore, PostgresAppendOnlyLinksetStore>();
services.AddScoped<IVexRawStore, PostgresVexRawStore>();
return services;
}

View File

@@ -15,6 +15,7 @@
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\StellaOps.Excititor.Core\StellaOps.Excititor.Core.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
</ItemGroup>

View File

@@ -11,6 +11,6 @@
<ProjectReference Include="../../__Libraries/StellaOps.Excititor.Storage.Mongo/StellaOps.Excititor.Storage.Mongo.csproj" />
<ProjectReference Include="../../__Libraries/StellaOps.Excititor.Core/StellaOps.Excititor.Core.csproj" />
<ProjectReference Include="../../__Libraries/StellaOps.Excititor.Policy/StellaOps.Excititor.Policy.csproj" />
<ProjectReference Include="../../../Concelier/__Libraries/StellaOps.Concelier.Storage.Mongo/StellaOps.Concelier.Storage.Mongo.csproj" />
<ProjectReference Include="../../../Concelier/__Libraries/StellaOps.Concelier.Models/StellaOps.Concelier.Models.csproj" />
</ItemGroup>
</Project>
</Project>

View File

@@ -15,6 +15,8 @@ public sealed class ExcititorPostgresFixture : PostgresIntegrationFixture, IColl
=> typeof(ExcititorDataSource).Assembly;
protected override string GetModuleName() => "Excititor";
protected override string? GetResourcePrefix() => "Migrations";
}
/// <summary>

View File

@@ -0,0 +1,136 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Excititor.Core.Observations;
using StellaOps.Excititor.Storage.Postgres;
using StellaOps.Excititor.Storage.Postgres.Repositories;
using StellaOps.Infrastructure.Postgres.Options;
using Xunit;
namespace StellaOps.Excititor.Storage.Postgres.Tests;
[Collection(ExcititorPostgresCollection.Name)]
public sealed class PostgresAppendOnlyLinksetStoreTests : IAsyncLifetime
{
    // Shared Postgres container/fixture for the collection.
    private readonly ExcititorPostgresFixture _fixture;
    // Store under test, wired against a per-test-class data source.
    private readonly PostgresAppendOnlyLinksetStore _store;
    // Owned data source; disposed in DisposeAsync.
    private readonly ExcititorDataSource _dataSource;

    public PostgresAppendOnlyLinksetStoreTests(ExcititorPostgresFixture fixture)
    {
        _fixture = fixture;
        // AutoMigrate is off: migrations are applied explicitly in InitializeAsync so the
        // test controls ordering and can fall back to raw SQL.
        var options = Options.Create(new PostgresOptions
        {
            ConnectionString = fixture.ConnectionString,
            SchemaName = fixture.SchemaName,
            AutoMigrate = false
        });
        _dataSource = new ExcititorDataSource(options, NullLogger<ExcititorDataSource>.Instance);
        _store = new PostgresAppendOnlyLinksetStore(_dataSource, NullLogger<PostgresAppendOnlyLinksetStore>.Instance);
    }

    // Per-test setup: run embedded migrations, then (fallback) execute the initial schema
    // SQL directly in case the runner skipped it, then truncate for isolation.
    public async Task InitializeAsync()
    {
        await _fixture.Fixture.RunMigrationsFromAssemblyAsync(
            typeof(ExcititorDataSource).Assembly,
            moduleName: "Excititor",
            resourcePrefix: "Migrations",
            cancellationToken: CancellationToken.None);

        // Ensure migration applied even if runner skipped; execute embedded SQL directly as fallback.
        var resourceName = typeof(ExcititorDataSource).Assembly
            .GetManifestResourceNames()
            .FirstOrDefault(n => n.EndsWith("001_initial_schema.sql", StringComparison.OrdinalIgnoreCase));
        await using var stream = resourceName is null
            ? null
            : typeof(ExcititorDataSource).Assembly.GetManifestResourceStream(resourceName);
        if (stream is not null)
        {
            using var reader = new StreamReader(stream);
            var sql = await reader.ReadToEndAsync();
            await _fixture.Fixture.ExecuteSqlAsync(sql);
        }

        // Fresh tables for each test; schema survives the truncate.
        await _fixture.TruncateAllTablesAsync();
    }

    public async Task DisposeAsync()
    {
        await _dataSource.DisposeAsync();
    }

    // Appending the same observation twice must create the linkset once, record no changes
    // on the duplicate, and keep the sequence number stable; the mutation log still records
    // both the creation and the first observation append.
    [Fact]
    public async Task AppendObservation_CreatesLinksetAndDedupes()
    {
        var tenant = "tenant-a";
        var vuln = "CVE-2025-1234";
        var product = "pkg:nuget/demo@1.0.0";
        var scope = VexProductScope.Unknown(product);
        var observation = new VexLinksetObservationRefModel("obs-1", "provider-a", "not_affected", 0.9);

        var first = await _store.AppendObservationAsync(tenant, vuln, product, observation, scope, CancellationToken.None);
        first.WasCreated.Should().BeTrue();
        first.ObservationsAdded.Should().Be(1);
        first.SequenceNumber.Should().BeGreaterThan(0);
        first.Linkset.Observations.Should().HaveCount(1);

        // Duplicate append: dedupe keeps the linkset unchanged.
        var second = await _store.AppendObservationAsync(tenant, vuln, product, observation, scope, CancellationToken.None);
        second.HadChanges.Should().BeFalse();
        second.Linkset.Observations.Should().HaveCount(1);
        second.SequenceNumber.Should().Be(first.SequenceNumber);

        var mutations = await _store.GetMutationLogAsync(tenant, first.Linkset.LinksetId, CancellationToken.None);
        mutations.Select(m => m.SequenceNumber).Should().BeInAscendingOrder();
        mutations.Should().HaveCount(2); // created + observation
    }

    // Batch append must store all observations and expose them in the store's canonical
    // ordering (provider, then status/observation id) regardless of input order.
    [Fact]
    public async Task AppendBatch_AppendsMultipleAndMaintainsOrder()
    {
        var tenant = "tenant-b";
        var vuln = "CVE-2025-2000";
        var product = "pkg:maven/demo/demo@2.0.0";
        var scope = VexProductScope.Unknown(product);
        // Deliberately out of canonical order to exercise re-ordering.
        var observations = new[]
        {
            new VexLinksetObservationRefModel("obs-2", "provider-b", "affected", 0.7),
            new VexLinksetObservationRefModel("obs-1", "provider-a", "affected", 0.8),
            new VexLinksetObservationRefModel("obs-3", "provider-a", "fixed", 0.9)
        };

        var result = await _store.AppendObservationsBatchAsync(tenant, vuln, product, observations, scope, CancellationToken.None);
        result.Linkset.Observations.Should().HaveCount(3);
        result.Linkset.Observations
            .Select(o => $"{o.ProviderId}:{o.Status}:{o.ObservationId}")
            .Should()
            .ContainInOrder(
                "provider-a:affected:obs-1",
                "provider-a:fixed:obs-3",
                "provider-b:affected:obs-2");
        result.SequenceNumber.Should().BeGreaterThan(0);
    }

    // A disagreement append must flag the linkset as conflicted and make it discoverable
    // through both the conflict query and the conflict count.
    [Fact]
    public async Task AppendDisagreement_RegistersConflictAndCounts()
    {
        var tenant = "tenant-c";
        var vuln = "CVE-2025-3000";
        var product = "pkg:deb/debian/demo@1.2.3";
        var disagreement = new VexObservationDisagreement("provider-c", "not_affected", "component_not_present", 0.6);

        var result = await _store.AppendDisagreementAsync(tenant, vuln, product, disagreement, CancellationToken.None);
        result.Linkset.HasConflicts.Should().BeTrue();
        result.SequenceNumber.Should().BeGreaterThan(0);

        var conflicts = await _store.FindWithConflictsAsync(tenant, limit: 10, CancellationToken.None);
        conflicts.Should().ContainSingle(ls => ls.LinksetId == result.Linkset.LinksetId);

        var conflictCount = await _store.CountWithConflictsAsync(tenant, CancellationToken.None);
        conflictCount.Should().Be(1);
    }
}

View File

@@ -10,9 +10,16 @@
<IsTestProject>true</IsTestProject>
</PropertyGroup>
<ItemGroup>
<PackageReference Remove="Microsoft.NET.Test.Sdk" />
<PackageReference Remove="xunit" />
<PackageReference Remove="xunit.runner.visualstudio" />
<PackageReference Remove="coverlet.collector" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="FluentAssertions" Version="6.12.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.11.1" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.0" />
<PackageReference Include="Moq" Version="4.20.70" />
<PackageReference Include="xunit" Version="2.9.2" />
<PackageReference Include="xunit.runner.visualstudio" Version="2.8.2">
@@ -27,6 +34,7 @@
<ItemGroup>
<ProjectReference Include="..\..\__Libraries\StellaOps.Excititor.Storage.Postgres\StellaOps.Excititor.Storage.Postgres.csproj" />
<ProjectReference Include="..\..\__Libraries\StellaOps.Excititor.Core\StellaOps.Excititor.Core.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres.Testing\StellaOps.Infrastructure.Postgres.Testing.csproj" />
</ItemGroup>

View File

@@ -0,0 +1,123 @@
using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using FluentAssertions;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Findings.Ledger.Options;
using StellaOps.Findings.Ledger.Services.Incident;
using StellaOps.Findings.Ledger.Tests.Observability;
using StellaOps.Telemetry.Core;
using Xunit;
namespace StellaOps.Findings.Ledger.Tests.Incident;
public class LedgerIncidentCoordinatorTests
{
    // Activating incident mode through the service should flip the coordinator active,
    // apply the configured retention extension, publish exactly one notification, and emit
    // the structured "incident mode changed" log (event id 6901).
    [Fact]
    public async Task Activation_updates_state_and_notifies()
    {
        var options = Options.Create(new LedgerIncidentOptions { RetentionExtensionDays = 45, LagTraceThresholdSeconds = 0.0 });
        var logger = new TestLogger<LedgerIncidentCoordinator>();
        var notifier = new TestNotifier();
        var incidentService = new StubIncidentModeService();
        using var coordinator = new LedgerIncidentCoordinator(options, logger, notifier, TimeProvider.System, incidentService);

        await incidentService.ActivateAsync("actor-a", reason: "test");

        coordinator.IsActive.Should().BeTrue();
        coordinator.Current.RetentionExtensionDays.Should().Be(45);
        notifier.Published.Should().ContainSingle();
        logger.Entries.Should().ContainSingle(e => e.EventId.Id == 6901);
    }

    // With incident mode active and a lag sample above the configured threshold, the
    // coordinator should emit the lag-trace log (event id 6902) and retain the sample
    // in its diagnostics snapshot.
    [Fact]
    public async Task RecordProjectionLag_emits_when_active_and_above_threshold()
    {
        var options = Options.Create(new LedgerIncidentOptions { LagTraceThresholdSeconds = 0.1, RetentionExtensionDays = 5 });
        var logger = new TestLogger<LedgerIncidentCoordinator>();
        var notifier = new TestNotifier();
        var incidentService = new StubIncidentModeService();
        using var coordinator = new LedgerIncidentCoordinator(options, logger, notifier, TimeProvider.System, incidentService);

        await incidentService.ActivateAsync("actor-a");

        // 5-second lag well above the 0.1s threshold.
        coordinator.RecordProjectionLag(new ProjectionLagSample(
            "tenant-a",
            Guid.NewGuid(),
            10,
            "finding.created",
            "v1",
            5.0,
            DateTimeOffset.UtcNow.AddSeconds(-5),
            DateTimeOffset.UtcNow));

        logger.Entries.Should().Contain(e => e.EventId.Id == 6902);
        coordinator.GetDiagnosticsSnapshot().LagSamples.Should().NotBeEmpty();
    }

    // Capturing notifier: records every published snapshot for assertion.
    private sealed class TestNotifier : ILedgerIncidentNotifier
    {
        private readonly List<LedgerIncidentSnapshot> _published = new();
        public IReadOnlyList<LedgerIncidentSnapshot> Published => _published;

        public Task PublishIncidentModeChangedAsync(LedgerIncidentSnapshot snapshot, CancellationToken cancellationToken)
        {
            _published.Add(snapshot);
            return Task.CompletedTask;
        }
    }

    // Minimal in-memory IIncidentModeService: keeps one state and raises the
    // Activated/Deactivated events synchronously so the coordinator reacts in-test.
    private sealed class StubIncidentModeService : IIncidentModeService
    {
        private IncidentModeState? _state;
        public bool IsActive => _state is { Enabled: true } && !_state.IsExpired;
        public IncidentModeState? CurrentState => _state;
        public event EventHandler<IncidentModeActivatedEventArgs>? Activated;
        public event EventHandler<IncidentModeDeactivatedEventArgs>? Deactivated;

        public Task<IncidentModeActivationResult> ActivateAsync(string actor, string? tenantId = null, TimeSpan? ttlOverride = null, string? reason = null, CancellationToken ct = default)
        {
            var now = DateTimeOffset.UtcNow;
            // Fixed 30-minute TTL; ttlOverride is ignored by this stub.
            _state = new IncidentModeState
            {
                Enabled = true,
                ActivatedAt = now,
                ExpiresAt = now.AddMinutes(30),
                Actor = actor,
                TenantId = tenantId,
                Source = IncidentModeSource.Api,
                Reason = reason,
                ActivationId = Guid.NewGuid().ToString("N")[..12]
            };
            Activated?.Invoke(this, new IncidentModeActivatedEventArgs { State = _state, WasReactivation = false });
            return Task.FromResult(IncidentModeActivationResult.Succeeded(_state));
        }

        public Task<IncidentModeDeactivationResult> DeactivateAsync(string actor, string? reason = null, CancellationToken ct = default)
        {
            var previous = _state;
            _state = null;
            if (previous is not null)
            {
                Deactivated?.Invoke(this, new IncidentModeDeactivatedEventArgs
                {
                    State = previous,
                    Reason = IncidentModeDeactivationReason.Manual,
                    DeactivatedBy = actor
                });
            }
            return Task.FromResult(IncidentModeDeactivationResult.Succeeded(previous is not null, IncidentModeDeactivationReason.Manual));
        }

        // Returns the extended expiry without mutating state (stub behavior).
        public Task<DateTimeOffset?> ExtendTtlAsync(TimeSpan extension, string actor, CancellationToken ct = default) =>
            Task.FromResult<DateTimeOffset?>(_state?.ExpiresAt?.Add(extension));

        public IReadOnlyDictionary<string, string> GetIncidentTags() => new Dictionary<string, string>();
    }
}

View File

@@ -3,6 +3,7 @@ using System.Text.Json.Nodes;
using FluentAssertions;
using StellaOps.Findings.Ledger.Domain;
using StellaOps.Findings.Ledger.Observability;
using StellaOps.Findings.Ledger.Services.Incident;
using Xunit;
namespace StellaOps.Findings.Ledger.Tests.Observability;
@@ -45,6 +46,49 @@ public class LedgerTimelineTests
state["Status"].Should().Be("affected");
}
[Fact]
public void EmitIncidentModeChanged_writes_structured_log()
{
var logger = new TestLogger<LedgerTimelineTests>();
var snapshot = new LedgerIncidentSnapshot(
IsActive: true,
ActivationId: "act-123",
Actor: "actor-1",
Reason: "reason",
TenantId: "tenant-a",
ChangedAt: DateTimeOffset.UtcNow,
ExpiresAt: DateTimeOffset.UtcNow.AddMinutes(10),
RetentionExtensionDays: 30);
LedgerTimeline.EmitIncidentModeChanged(logger, snapshot, wasReactivation: false);
var entry = logger.Entries.Single(e => e.EventId.Id == 6901);
var state = AsDictionary(entry.State);
state["RetentionExtensionDays"].Should().Be(30);
state["ActivationId"].Should().Be("act-123");
}
[Fact]
public void EmitIncidentLagTrace_writes_structured_log()
{
var logger = new TestLogger<LedgerTimelineTests>();
var sample = new ProjectionLagSample(
"tenant-a",
Guid.NewGuid(),
10,
"finding.created",
"v1",
12.5,
DateTimeOffset.UtcNow.AddSeconds(-12),
DateTimeOffset.UtcNow);
LedgerTimeline.EmitIncidentLagTrace(logger, sample);
var entry = logger.Entries.Single(e => e.EventId.Id == 6902);
var state = AsDictionary(entry.State);
state["LagSeconds"].Should().Be(12.5);
}
private static LedgerEventRecord CreateRecord()
{
var payload = new JsonObject { ["status"] = "affected" };

View File

@@ -0,0 +1,81 @@
using System;
using System.Text.Json.Nodes;
using System.Threading;
using System.Threading.Tasks;
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Moq;
using StellaOps.Findings.Ledger.Domain;
using StellaOps.Findings.Ledger.Infrastructure;
using StellaOps.Findings.Ledger.Services;
using StellaOps.Findings.Ledger.Services.Incident;
using Xunit;
namespace StellaOps.Findings.Ledger.Tests.Services;
public class LedgerEventWriteServiceIncidentTests
{
    // Appending a draft whose sequence number skips ahead of the chain head (head at 1,
    // draft at 3) must return a Conflict status and record exactly one diagnostics
    // conflict snapshot with reason "sequence_mismatch".
    [Fact]
    public async Task AppendAsync_sequence_mismatch_records_conflict_snapshot()
    {
        // No prior event with this id → the write is treated as new, not idempotent replay.
        var repo = new Mock<ILedgerEventRepository>();
        repo.Setup(r => r.GetByEventIdAsync(It.IsAny<string>(), It.IsAny<Guid>(), It.IsAny<CancellationToken>()))
            .ReturnsAsync((LedgerEventRecord?)null);

        // Chain head sits at sequence 1.
        var chainId = Guid.NewGuid();
        var chainHead = new LedgerEventRecord(
            "tenant-a",
            chainId,
            1,
            Guid.NewGuid(),
            LedgerEventConstants.EventFindingCreated,
            "v1",
            "finding-1",
            "artifact-1",
            null,
            "actor-1",
            "operator",
            DateTimeOffset.UtcNow,
            DateTimeOffset.UtcNow,
            new JsonObject(),
            "hash-prev",
            LedgerEventConstants.EmptyHash,
            "leaf-hash",
            "{}");
        repo.Setup(r => r.GetChainHeadAsync("tenant-a", chainId, It.IsAny<CancellationToken>()))
            .ReturnsAsync(chainHead);

        var scheduler = new Mock<IMerkleAnchorScheduler>();
        var diagnostics = new Mock<ILedgerIncidentDiagnostics>();
        var service = new LedgerEventWriteService(
            repo.Object,
            scheduler.Object,
            NullLogger<LedgerEventWriteService>.Instance,
            diagnostics.Object);

        // Draft claims sequence 3 while head is 1 → mismatch.
        var draft = new LedgerEventDraft(
            "tenant-a",
            chainId,
            3,
            Guid.NewGuid(),
            LedgerEventConstants.EventFindingCreated,
            "v1",
            "finding-1",
            "artifact-1",
            null,
            "actor-1",
            "operator",
            DateTimeOffset.UtcNow,
            DateTimeOffset.UtcNow,
            new JsonObject(),
            new JsonObject(),
            null);

        var result = await service.AppendAsync(draft, CancellationToken.None);

        result.Status.Should().Be(LedgerWriteStatus.Conflict);
        diagnostics.Verify(d => d.RecordConflict(It.Is<ConflictSnapshot>(s => s.Reason == "sequence_mismatch")), Times.Once);
    }
}

View File

@@ -9,6 +9,7 @@ using Microsoft.Extensions.Logging.Abstractions;
using StellaOps.Findings.Ledger.Domain;
using StellaOps.Findings.Ledger.Infrastructure.Snapshot;
using StellaOps.Findings.Ledger.Services;
using StellaOps.Findings.Ledger.Services.Incident;
using Xunit;
public class SnapshotServiceTests
@@ -58,6 +59,33 @@ public class SnapshotServiceTests
Assert.True(result.Snapshot.ExpiresAt > DateTimeOffset.UtcNow);
}
[Fact]
public async Task CreateSnapshotAsync_WhenIncidentActive_ExtendsRetention()
{
var incident = new StaticIncidentDiagnostics(new LedgerIncidentSnapshot(
IsActive: true,
ActivationId: "act-1",
Actor: "actor",
Reason: "reason",
TenantId: "tenant-incident",
ChangedAt: DateTimeOffset.UtcNow,
ExpiresAt: DateTimeOffset.UtcNow.AddHours(1),
RetentionExtensionDays: 7));
var service = new SnapshotService(
_snapshotRepository,
_timeTravelRepository,
NullLogger<SnapshotService>.Instance,
incident);
var result = await service.CreateSnapshotAsync(
new CreateSnapshotInput("tenant-incident", Label: "incident-snapshot", ExpiresIn: TimeSpan.FromDays(1)));
Assert.NotNull(result.Snapshot);
Assert.True(result.Snapshot!.ExpiresAt >= DateTimeOffset.UtcNow.AddDays(1));
Assert.Equal("enabled", result.Snapshot.Metadata?["incident.mode"]);
}
[Fact]
public async Task GetSnapshotAsync_ReturnsExistingSnapshot()
{
@@ -371,3 +399,34 @@ internal class InMemoryTimeTravelRepository : ITimeTravelRepository
TimeSpan.FromMinutes(5)));
}
}
// Test double for ILedgerIncidentDiagnostics: serves a fixed snapshot and ignores all
// recorded samples (the tests only care about how consumers read incident state).
internal sealed class StaticIncidentDiagnostics : ILedgerIncidentDiagnostics
{
    public StaticIncidentDiagnostics(LedgerIncidentSnapshot current)
    {
        Current = current;
    }

    // Mirrors the snapshot's own activity flag.
    public bool IsActive => Current.IsActive;

    public LedgerIncidentSnapshot Current { get; }

    // Always-empty sample collections, timestamped at call time.
    public IncidentDiagnosticsSnapshot GetDiagnosticsSnapshot() => new(
        Current,
        Array.Empty<ProjectionLagSample>(),
        Array.Empty<ConflictSnapshot>(),
        Array.Empty<ReplayTraceSample>(),
        DateTimeOffset.UtcNow);

    // Intentionally no-op: this double does not accumulate diagnostics.
    public void RecordConflict(ConflictSnapshot snapshot)
    {
    }

    public void RecordProjectionLag(ProjectionLagSample sample)
    {
    }

    public void RecordReplayTrace(ReplayTraceSample sample)
    {
    }
}

View File

@@ -26,6 +26,10 @@ using StellaOps.Findings.Ledger.WebService.Services;
using StellaOps.Telemetry.Core;
using StellaOps.Findings.Ledger.Services.Security;
using StellaOps.Findings.Ledger.Observability;
using StellaOps.Findings.Ledger.OpenApi;
using System.Security.Cryptography;
using System.Text;
using StellaOps.Findings.Ledger.Services.Incident;
const string LedgerWritePolicy = "ledger.events.write";
const string LedgerExportPolicy = "ledger.export.read";
@@ -62,6 +66,11 @@ builder.Services.AddOptions<LedgerServiceOptions>()
.PostConfigure(options => options.Validate())
.ValidateOnStart();
builder.Services.AddOptions<LedgerIncidentOptions>()
.Bind(builder.Configuration.GetSection(LedgerIncidentOptions.SectionName))
.PostConfigure(options => options.Validate())
.ValidateOnStart();
builder.Services.AddSingleton(TimeProvider.System);
builder.Services.AddProblemDetails();
builder.Services.AddEndpointsApiExplorer();
@@ -80,6 +89,8 @@ builder.Services.AddStellaOpsTelemetry(
tracerBuilder.AddHttpClientInstrumentation();
});
builder.Services.AddIncidentMode(builder.Configuration);
builder.Services.AddStellaOpsResourceServerAuthentication(
builder.Configuration,
configurationSection: null,
@@ -130,6 +141,10 @@ builder.Services.AddAuthorization(options =>
});
});
builder.Services.AddSingleton<ILedgerIncidentNotifier, LoggingLedgerIncidentNotifier>();
builder.Services.AddSingleton<LedgerIncidentCoordinator>();
builder.Services.AddSingleton<ILedgerIncidentDiagnostics>(sp => sp.GetRequiredService<LedgerIncidentCoordinator>());
builder.Services.AddSingleton<ILedgerIncidentState>(sp => sp.GetRequiredService<LedgerIncidentCoordinator>());
builder.Services.AddSingleton<LedgerAnchorQueue>();
builder.Services.AddSingleton<LedgerDataSource>();
builder.Services.AddSingleton<IMerkleAnchorRepository, PostgresMerkleAnchorRepository>();
@@ -232,6 +247,8 @@ app.MapGet("/ledger/export/findings", async Task<Results<FileStreamHttpResult, J
ExportQueryService exportQueryService,
CancellationToken cancellationToken) =>
{
DeprecationHeaders.Apply(httpContext.Response, "ledger.export.findings");
if (!httpContext.Request.Headers.TryGetValue("X-Stella-Tenant", out var tenantValues) || string.IsNullOrWhiteSpace(tenantValues))
{
return TypedResults.Problem(statusCode: StatusCodes.Status400BadRequest, title: "missing_tenant", detail: "X-Stella-Tenant header is required.");
@@ -841,20 +858,40 @@ app.MapPut("/v1/ledger/attestation-pointers/{pointerId}/verification", async Tas
.Produces(StatusCodes.Status404NotFound)
.ProducesProblem(StatusCodes.Status400BadRequest);
app.MapGet("/.well-known/openapi", () =>
app.MapGet("/.well-known/openapi", async (HttpContext context) =>
{
var contentRoot = AppContext.BaseDirectory;
var candidate = Path.GetFullPath(Path.Combine(contentRoot, "../../docs/modules/findings-ledger/openapi/findings-ledger.v1.yaml"));
if (!File.Exists(candidate))
var specPath = OpenApiMetadataFactory.GetSpecPath(contentRoot);
if (!File.Exists(specPath))
{
return Results.Problem(statusCode: StatusCodes.Status500InternalServerError, title: "openapi_missing", detail: "OpenAPI document not found on server.");
}
var yaml = File.ReadAllText(candidate);
return Results.Text(yaml, "application/yaml");
var specBytes = await File.ReadAllBytesAsync(specPath, context.RequestAborted).ConfigureAwait(false);
var etag = OpenApiMetadataFactory.ComputeEtag(specBytes);
if (context.Request.Headers.IfNoneMatch.Any(match => string.Equals(match, etag, StringComparison.Ordinal)))
{
return Results.StatusCode(StatusCodes.Status304NotModified);
}
context.Response.Headers.ETag = etag;
context.Response.Headers.CacheControl = "public, max-age=300, must-revalidate";
context.Response.Headers.Append("X-Api-Version", OpenApiMetadataFactory.ApiVersion);
context.Response.Headers.Append("X-Build-Version", OpenApiMetadataFactory.GetBuildVersion());
var lastModified = OpenApiMetadataFactory.GetLastModified(specPath);
if (lastModified.HasValue)
{
context.Response.Headers.LastModified = lastModified.Value.ToString("R");
}
return Results.Text(Encoding.UTF8.GetString(specBytes), "application/yaml");
})
.WithName("LedgerOpenApiDocument")
.Produces(StatusCodes.Status200OK)
.Produces(StatusCodes.Status304NotModified)
.ProducesProblem(StatusCodes.Status500InternalServerError);
// Snapshot Endpoints (LEDGER-PACKS-42-001-DEV)

View File

@@ -0,0 +1,25 @@
using Microsoft.AspNetCore.Http;

namespace StellaOps.Findings.Ledger;

/// <summary>
/// Applies standardized deprecation/notification headers to retiring endpoints.
/// </summary>
public static class DeprecationHeaders
{
    /// <summary>Sunset timestamp advertised on deprecated endpoints.</summary>
    public const string SunsetDate = "2026-03-31T00:00:00Z";

    // Link header pointing clients at the OpenAPI document for migration guidance.
    private const string DeprecationLink =
        "</.well-known/openapi>; rel=\"deprecation\"; type=\"application/yaml\"";

    /// <summary>
    /// Stamps the standard deprecation headers onto <paramref name="response"/>.
    /// </summary>
    /// <param name="response">Response to decorate; must not be null.</param>
    /// <param name="endpointId">Stable identifier of the retiring endpoint; must be non-blank.</param>
    public static void Apply(HttpResponse response, string endpointId)
    {
        ArgumentNullException.ThrowIfNull(response);
        ArgumentException.ThrowIfNullOrWhiteSpace(endpointId);

        var headers = response.Headers;
        headers["Deprecation"] = "true";
        headers["Sunset"] = SunsetDate;
        headers["X-Deprecated-Endpoint"] = endpointId;
        headers.Append("Link", DeprecationLink);
    }
}

View File

@@ -1,4 +1,5 @@
using System.Text.Json.Nodes;
using StellaOps.Findings.Ledger.Infrastructure.Attestation;
namespace StellaOps.Findings.Ledger.Domain;
@@ -18,7 +19,12 @@ public sealed record FindingProjection(
string? ExplainRef,
JsonArray PolicyRationale,
DateTimeOffset UpdatedAt,
string CycleHash);
string CycleHash,
int AttestationCount = 0,
int VerifiedAttestationCount = 0,
int FailedAttestationCount = 0,
int UnverifiedAttestationCount = 0,
OverallVerificationStatus AttestationStatus = OverallVerificationStatus.NoAttestations);
public sealed record FindingHistoryEntry(
string TenantId,

View File

@@ -0,0 +1,27 @@
namespace StellaOps.Findings.Ledger.Infrastructure.Attestation;

/// <summary>
/// Computes overall attestation status from summary counts.
/// </summary>
public static class AttestationStatusCalculator
{
    /// <summary>
    /// Derives the aggregate verification status for a finding's attestations.
    /// </summary>
    /// <param name="attestationCount">Total number of attestation pointers for the finding.</param>
    /// <param name="verifiedCount">Number of pointers whose verification succeeded.</param>
    public static OverallVerificationStatus Compute(int attestationCount, int verifiedCount) =>
        (attestationCount, verifiedCount) switch
        {
            // No pointers at all: nothing to verify.
            ( <= 0, _) => OverallVerificationStatus.NoAttestations,
            // Every pointer verified (attestationCount > 0 here by fall-through).
            var (total, verified) when verified == total => OverallVerificationStatus.AllVerified,
            // Some, but not all, verified.
            (_, > 0) => OverallVerificationStatus.PartiallyVerified,
            // Pointers exist but none verified.
            _ => OverallVerificationStatus.NoneVerified,
        };
}

View File

@@ -1,8 +1,10 @@
using System.Text;
using System.Text.Json.Nodes;
using Microsoft.Extensions.Logging;
using Npgsql;
using NpgsqlTypes;
using StellaOps.Findings.Ledger.Domain;
using StellaOps.Findings.Ledger.Infrastructure.Attestation;
using StellaOps.Findings.Ledger.Hashing;
using StellaOps.Findings.Ledger.Services;
@@ -11,23 +13,43 @@ namespace StellaOps.Findings.Ledger.Infrastructure.Postgres;
public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepository
{
private const string GetProjectionSql = """
SELECT status,
severity,
risk_score,
risk_severity,
risk_profile_version,
risk_explanation_id,
risk_event_sequence,
labels,
current_event_id,
explain_ref,
policy_rationale,
updated_at,
cycle_hash
FROM findings_projection
WHERE tenant_id = @tenant_id
AND finding_id = @finding_id
AND policy_version = @policy_version
WITH attestation_summary AS (
SELECT
COUNT(*) AS attestation_count,
COUNT(*) FILTER (WHERE verification_result IS NOT NULL
AND (verification_result->>'verified')::boolean = true) AS verified_count,
COUNT(*) FILTER (WHERE verification_result IS NOT NULL
AND (verification_result->>'verified')::boolean = false) AS failed_count,
COUNT(*) FILTER (WHERE verification_result IS NULL) AS unverified_count
FROM ledger_attestation_pointers ap
WHERE ap.tenant_id = @tenant_id
AND ap.finding_id = @finding_id
)
SELECT fp.tenant_id,
fp.finding_id,
fp.policy_version,
fp.status,
fp.severity,
fp.risk_score,
fp.risk_severity,
fp.risk_profile_version,
fp.risk_explanation_id,
fp.risk_event_sequence,
fp.labels,
fp.current_event_id,
fp.explain_ref,
fp.policy_rationale,
fp.updated_at,
fp.cycle_hash,
COALESCE(a.attestation_count, 0) AS attestation_count,
COALESCE(a.verified_count, 0) AS verified_count,
COALESCE(a.failed_count, 0) AS failed_count,
COALESCE(a.unverified_count, 0) AS unverified_count
FROM findings_projection fp
LEFT JOIN attestation_summary a ON TRUE
WHERE fp.tenant_id = @tenant_id
AND fp.finding_id = @finding_id
AND fp.policy_version = @policy_version
""";
private const string UpsertProjectionSql = """
@@ -203,47 +225,7 @@ public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepo
return null;
}
var status = reader.GetString(0);
var severity = reader.IsDBNull(1) ? (decimal?)null : reader.GetDecimal(1);
var riskScore = reader.IsDBNull(2) ? (decimal?)null : reader.GetDecimal(2);
var riskSeverity = reader.IsDBNull(3) ? null : reader.GetString(3);
var riskProfileVersion = reader.IsDBNull(4) ? null : reader.GetString(4);
var riskExplanationId = reader.IsDBNull(5) ? (Guid?)null : reader.GetGuid(5);
var riskEventSequence = reader.IsDBNull(6) ? (long?)null : reader.GetInt64(6);
var labelsJson = reader.GetFieldValue<string>(7);
var labels = JsonNode.Parse(labelsJson)?.AsObject() ?? new JsonObject();
var currentEventId = reader.GetGuid(8);
var explainRef = reader.IsDBNull(9) ? null : reader.GetString(9);
var rationaleJson = reader.IsDBNull(10) ? string.Empty : reader.GetFieldValue<string>(10);
JsonArray rationale;
if (string.IsNullOrWhiteSpace(rationaleJson))
{
rationale = new JsonArray();
}
else
{
rationale = JsonNode.Parse(rationaleJson) as JsonArray ?? new JsonArray();
}
var updatedAt = reader.GetFieldValue<DateTimeOffset>(11);
var cycleHash = reader.GetString(12);
return new FindingProjection(
tenantId,
findingId,
policyVersion,
status,
severity,
riskScore,
riskSeverity,
riskProfileVersion,
riskExplanationId,
riskEventSequence,
labels,
currentEventId,
explainRef,
rationale,
updatedAt,
cycleHash);
return MapProjection(reader);
}
public async Task UpsertAsync(FindingProjection projection, CancellationToken cancellationToken)
@@ -407,7 +389,7 @@ public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepo
await using var connection = await _dataSource.OpenConnectionAsync(query.TenantId, "projector", cancellationToken).ConfigureAwait(false);
// Build dynamic query
var whereConditions = new List<string> { "tenant_id = @tenant_id" };
var whereConditions = new List<string> { "fp.tenant_id = @tenant_id" };
var parameters = new List<NpgsqlParameter>
{
new NpgsqlParameter<string>("tenant_id", query.TenantId) { NpgsqlDbType = NpgsqlDbType.Text }
@@ -415,34 +397,86 @@ public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepo
if (!string.IsNullOrWhiteSpace(query.PolicyVersion))
{
whereConditions.Add("policy_version = @policy_version");
whereConditions.Add("fp.policy_version = @policy_version");
parameters.Add(new NpgsqlParameter<string>("policy_version", query.PolicyVersion) { NpgsqlDbType = NpgsqlDbType.Text });
}
if (query.MinScore.HasValue)
{
whereConditions.Add("risk_score >= @min_score");
whereConditions.Add("fp.risk_score >= @min_score");
parameters.Add(new NpgsqlParameter<decimal>("min_score", query.MinScore.Value) { NpgsqlDbType = NpgsqlDbType.Numeric });
}
if (query.MaxScore.HasValue)
{
whereConditions.Add("risk_score <= @max_score");
whereConditions.Add("fp.risk_score <= @max_score");
parameters.Add(new NpgsqlParameter<decimal>("max_score", query.MaxScore.Value) { NpgsqlDbType = NpgsqlDbType.Numeric });
}
if (query.Severities is { Count: > 0 })
{
whereConditions.Add("risk_severity = ANY(@severities)");
whereConditions.Add("fp.risk_severity = ANY(@severities)");
parameters.Add(new NpgsqlParameter("severities", query.Severities.ToArray()) { NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Text });
}
if (query.Statuses is { Count: > 0 })
{
whereConditions.Add("status = ANY(@statuses)");
whereConditions.Add("fp.status = ANY(@statuses)");
parameters.Add(new NpgsqlParameter("statuses", query.Statuses.ToArray()) { NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Text });
}
if (query.AttestationTypes is { Count: > 0 })
{
parameters.Add(new NpgsqlParameter("attestation_types", query.AttestationTypes.Select(t => t.ToString()).ToArray())
{
NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Text
});
}
var attestationWhere = new List<string>();
if (query.AttestationVerification.HasValue &&
query.AttestationVerification.Value != AttestationVerificationFilter.Any)
{
var filter = query.AttestationVerification.Value switch
{
AttestationVerificationFilter.Verified => "verified_count > 0",
AttestationVerificationFilter.Unverified => "unverified_count > 0",
AttestationVerificationFilter.Failed => "failed_count > 0",
_ => string.Empty
};
if (!string.IsNullOrWhiteSpace(filter))
{
attestationWhere.Add(filter);
}
}
if (query.AttestationStatus.HasValue)
{
var statusFilter = query.AttestationStatus.Value switch
{
OverallVerificationStatus.AllVerified =>
"attestation_count > 0 AND verified_count = attestation_count",
OverallVerificationStatus.PartiallyVerified =>
"attestation_count > 0 AND verified_count > 0 AND verified_count < attestation_count",
OverallVerificationStatus.NoneVerified =>
"attestation_count > 0 AND verified_count = 0",
OverallVerificationStatus.NoAttestations =>
"attestation_count = 0",
_ => string.Empty
};
if (!string.IsNullOrWhiteSpace(statusFilter))
{
attestationWhere.Add(statusFilter);
}
}
var attestationWhereClause = attestationWhere.Count > 0
? "WHERE " + string.Join(" AND ", attestationWhere)
: string.Empty;
var whereClause = string.Join(" AND ", whereConditions);
var orderColumn = query.SortBy switch
{
@@ -454,8 +488,46 @@ public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepo
};
var orderDirection = query.Descending ? "DESC NULLS LAST" : "ASC NULLS FIRST";
var attestationSummarySql = new StringBuilder(@"
SELECT tenant_id,
finding_id,
COUNT(*) AS attestation_count,
COUNT(*) FILTER (WHERE verification_result IS NOT NULL
AND (verification_result->>'verified')::boolean = true) AS verified_count,
COUNT(*) FILTER (WHERE verification_result IS NOT NULL
AND (verification_result->>'verified')::boolean = false) AS failed_count,
COUNT(*) FILTER (WHERE verification_result IS NULL) AS unverified_count
FROM ledger_attestation_pointers
WHERE tenant_id = @tenant_id");
if (query.AttestationTypes is { Count: > 0 })
{
attestationSummarySql.Append(" AND attestation_type = ANY(@attestation_types)");
}
attestationSummarySql.Append(" GROUP BY tenant_id, finding_id");
var cte = $@"
WITH attestation_summary AS (
{attestationSummarySql}
),
filtered_projection AS (
SELECT
fp.tenant_id, fp.finding_id, fp.policy_version, fp.status, fp.severity, fp.risk_score, fp.risk_severity,
fp.risk_profile_version, fp.risk_explanation_id, fp.risk_event_sequence, fp.labels, fp.current_event_id,
fp.explain_ref, fp.policy_rationale, fp.updated_at, fp.cycle_hash,
COALESCE(a.attestation_count, 0) AS attestation_count,
COALESCE(a.verified_count, 0) AS verified_count,
COALESCE(a.failed_count, 0) AS failed_count,
COALESCE(a.unverified_count, 0) AS unverified_count
FROM findings_projection fp
LEFT JOIN attestation_summary a
ON a.tenant_id = fp.tenant_id AND a.finding_id = fp.finding_id
WHERE {whereClause}
)";
// Count query
var countSql = $"SELECT COUNT(*) FROM findings_projection WHERE {whereClause}";
var countSql = $"{cte} SELECT COUNT(*) FROM filtered_projection {attestationWhereClause};";
await using var countCommand = new NpgsqlCommand(countSql, connection);
countCommand.CommandTimeout = _dataSource.CommandTimeoutSeconds;
foreach (var p in parameters) countCommand.Parameters.Add(p.Clone());
@@ -463,12 +535,14 @@ public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepo
// Data query
var dataSql = $@"
{cte}
SELECT
tenant_id, finding_id, policy_version, status, severity, risk_score, risk_severity,
risk_profile_version, risk_explanation_id, risk_event_sequence, labels, current_event_id,
explain_ref, policy_rationale, updated_at, cycle_hash
FROM findings_projection
WHERE {whereClause}
explain_ref, policy_rationale, updated_at, cycle_hash,
attestation_count, verified_count, failed_count, unverified_count
FROM filtered_projection
{attestationWhereClause}
ORDER BY {orderColumn} {orderDirection}
LIMIT @limit";
@@ -638,6 +712,12 @@ public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepo
var rationaleJson = reader.GetString(13);
var rationale = System.Text.Json.Nodes.JsonNode.Parse(rationaleJson) as System.Text.Json.Nodes.JsonArray ?? new System.Text.Json.Nodes.JsonArray();
var attestationCount = reader.GetInt32(16);
var verifiedCount = reader.GetInt32(17);
var failedCount = reader.GetInt32(18);
var unverifiedCount = reader.GetInt32(19);
var attestationStatus = AttestationStatusCalculator.Compute(attestationCount, verifiedCount);
return new FindingProjection(
TenantId: reader.GetString(0),
FindingId: reader.GetString(1),
@@ -654,6 +734,11 @@ public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepo
ExplainRef: reader.IsDBNull(12) ? null : reader.GetString(12),
PolicyRationale: rationale,
UpdatedAt: reader.GetDateTime(14),
CycleHash: reader.GetString(15));
CycleHash: reader.GetString(15),
AttestationCount: attestationCount,
VerifiedAttestationCount: verifiedCount,
FailedAttestationCount: failedCount,
UnverifiedAttestationCount: unverifiedCount,
AttestationStatus: attestationStatus);
}
}

View File

@@ -8,6 +8,7 @@ using StellaOps.Findings.Ledger.Infrastructure.Policy;
using StellaOps.Findings.Ledger.Options;
using StellaOps.Findings.Ledger.Observability;
using StellaOps.Findings.Ledger.Services;
using StellaOps.Findings.Ledger.Services.Incident;
namespace StellaOps.Findings.Ledger.Infrastructure.Projection;
@@ -19,6 +20,7 @@ public sealed class LedgerProjectionWorker : BackgroundService
private readonly TimeProvider _timeProvider;
private readonly LedgerServiceOptions.ProjectionOptions _options;
private readonly ILogger<LedgerProjectionWorker> _logger;
private readonly ILedgerIncidentDiagnostics? _incidentDiagnostics;
public LedgerProjectionWorker(
ILedgerEventStream eventStream,
@@ -26,7 +28,8 @@ public sealed class LedgerProjectionWorker : BackgroundService
IPolicyEvaluationService policyEvaluationService,
IOptions<LedgerServiceOptions> options,
TimeProvider timeProvider,
ILogger<LedgerProjectionWorker> logger)
ILogger<LedgerProjectionWorker> logger,
ILedgerIncidentDiagnostics? incidentDiagnostics = null)
{
_eventStream = eventStream ?? throw new ArgumentNullException(nameof(eventStream));
_repository = repository ?? throw new ArgumentNullException(nameof(repository));
@@ -34,6 +37,7 @@ public sealed class LedgerProjectionWorker : BackgroundService
_timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
_options = (options ?? throw new ArgumentNullException(nameof(options))).Value.Projection;
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
_incidentDiagnostics = incidentDiagnostics;
}
protected override async Task ExecuteAsync(CancellationToken stoppingToken)
@@ -138,6 +142,15 @@ public sealed class LedgerProjectionWorker : BackgroundService
record.PolicyVersion,
evaluationStatus ?? string.Empty);
LedgerTimeline.EmitProjectionUpdated(_logger, record, evaluationStatus, evidenceBundleRef: null);
_incidentDiagnostics?.RecordProjectionLag(new ProjectionLagSample(
TenantId: record.TenantId,
ChainId: record.ChainId,
SequenceNumber: record.SequenceNumber,
EventType: record.EventType,
PolicyVersion: record.PolicyVersion,
LagSeconds: lagSeconds,
RecordedAt: record.RecordedAt,
ObservedAt: now));
}
catch (OperationCanceledException) when (stoppingToken.IsCancellationRequested)
{

View File

@@ -2,6 +2,7 @@ using System.Diagnostics;
using Microsoft.Extensions.Logging;
using StellaOps.Findings.Ledger.Domain;
using StellaOps.Findings.Ledger.Infrastructure.Exports;
using StellaOps.Findings.Ledger.Services.Incident;
namespace StellaOps.Findings.Ledger.Observability;
@@ -23,6 +24,10 @@ internal static class LedgerTimeline
private static readonly EventId TimeTravelQueryEvent = new(6803, "ledger.timetravel.query");
private static readonly EventId ReplayCompletedEvent = new(6804, "ledger.replay.completed");
private static readonly EventId DiffComputedEvent = new(6805, "ledger.diff.computed");
private static readonly EventId IncidentModeChangedEvent = new(6901, "ledger.incident.mode");
private static readonly EventId IncidentLagTraceEvent = new(6902, "ledger.incident.lag_trace");
private static readonly EventId IncidentConflictSnapshotEvent = new(6903, "ledger.incident.conflict_snapshot");
private static readonly EventId IncidentReplayTraceEvent = new(6904, "ledger.incident.replay_trace");
public static void EmitLedgerAppended(ILogger logger, LedgerEventRecord record, string? evidenceBundleRef = null)
{
@@ -280,4 +285,87 @@ internal static class LedgerTimeline
modified,
removed);
}
/// <summary>
/// Emits timeline event 6901 ("ledger.incident.mode") when incident mode flips on or off.
/// </summary>
public static void EmitIncidentModeChanged(ILogger logger, LedgerIncidentSnapshot snapshot, bool wasReactivation)
{
    // Timeline emission is best-effort; a missing logger is silently ignored.
    if (logger is null)
    {
        return;
    }

    logger.LogInformation(
        IncidentModeChangedEvent,
        "timeline ledger.incident.mode state={State} activation_id={ActivationId} actor={Actor} reason={Reason} expires_at={ExpiresAt} retention_extension_days={RetentionExtensionDays} reactivation={Reactivation}",
        snapshot.IsActive ? "enabled" : "disabled",
        snapshot.ActivationId ?? string.Empty,
        snapshot.Actor ?? string.Empty,
        snapshot.Reason ?? string.Empty,
        snapshot.ExpiresAt?.ToString("O") ?? string.Empty,
        snapshot.RetentionExtensionDays,
        wasReactivation);
}
/// <summary>
/// Emits warning timeline event 6902 ("ledger.incident.lag_trace") for a projection lag sample
/// captured while incident mode is active.
/// </summary>
public static void EmitIncidentLagTrace(ILogger logger, ProjectionLagSample sample)
{
    // Timeline emission is best-effort; a missing logger is silently ignored.
    if (logger is null)
    {
        return;
    }

    logger.LogWarning(
        IncidentLagTraceEvent,
        "timeline ledger.incident.lag_trace tenant={Tenant} chain={ChainId} seq={Sequence} event_type={EventType} policy={PolicyVersion} lag_seconds={LagSeconds:0.000} recorded_at={RecordedAt} observed_at={ObservedAt}",
        sample.TenantId,
        sample.ChainId,
        sample.SequenceNumber,
        sample.EventType,
        sample.PolicyVersion,
        sample.LagSeconds,
        sample.RecordedAt.ToString("O"),
        sample.ObservedAt.ToString("O"));
}
/// <summary>
/// Emits warning timeline event 6903 ("ledger.incident.conflict_snapshot") for a ledger append
/// conflict captured while incident mode is active.
/// </summary>
public static void EmitIncidentConflictSnapshot(ILogger logger, ConflictSnapshot snapshot)
{
    // Timeline emission is best-effort; a missing logger is silently ignored.
    if (logger is null)
    {
        return;
    }

    logger.LogWarning(
        IncidentConflictSnapshotEvent,
        "timeline ledger.incident.conflict_snapshot tenant={Tenant} chain={ChainId} seq={Sequence} event_id={EventId} event_type={EventType} policy={PolicyVersion} reason={Reason} expected_seq={ExpectedSequence} actor={Actor} actor_type={ActorType} observed_at={ObservedAt}",
        snapshot.TenantId,
        snapshot.ChainId,
        snapshot.SequenceNumber,
        snapshot.EventId,
        snapshot.EventType,
        snapshot.PolicyVersion,
        snapshot.Reason,
        snapshot.ExpectedSequence,
        snapshot.ActorId ?? string.Empty,
        snapshot.ActorType ?? string.Empty,
        snapshot.ObservedAt.ToString("O"));
}
/// <summary>
/// Emits timeline event 6904 ("ledger.incident.replay_trace") summarizing a replay request
/// captured while incident mode is active.
/// </summary>
public static void EmitIncidentReplayTrace(ILogger logger, ReplayTraceSample sample)
{
    // Timeline emission is best-effort; a missing logger is silently ignored.
    if (logger is null)
    {
        return;
    }

    logger.LogInformation(
        IncidentReplayTraceEvent,
        "timeline ledger.incident.replay_trace tenant={Tenant} from_seq={FromSequence} to_seq={ToSequence} events={Events} duration_ms={DurationMs} has_more={HasMore} chain_filters={ChainFilters} event_type_filters={EventTypeFilters} observed_at={ObservedAt}",
        sample.TenantId,
        sample.FromSequence,
        sample.ToSequence,
        sample.EventsCount,
        sample.DurationMs,
        sample.HasMore,
        sample.ChainFilterCount,
        sample.EventTypeFilterCount,
        sample.ObservedAt.ToString("O"));
}
}

View File

@@ -0,0 +1,55 @@
using System.IO;
using System.Reflection;
using System.Security.Cryptography;
using System.Text;

namespace StellaOps.Findings.Ledger.OpenApi;

/// <summary>
/// Provides versioned metadata for the Findings Ledger OpenAPI discovery endpoint.
/// </summary>
public static class OpenApiMetadataFactory
{
    /// <summary>Public API version advertised alongside the spec.</summary>
    public const string ApiVersion = "1.0.0-beta1";

    /// <summary>
    /// Returns the informational assembly version, falling back to the assembly version
    /// and finally "0.0.0" when neither is available.
    /// </summary>
    public static string GetBuildVersion()
    {
        var assembly = Assembly.GetExecutingAssembly();
        var info = assembly.GetCustomAttribute<AssemblyInformationalVersionAttribute>()?.InformationalVersion;
        if (!string.IsNullOrWhiteSpace(info))
        {
            return info;
        }

        return assembly.GetName().Version?.ToString() ?? "0.0.0";
    }

    /// <summary>
    /// Walks upward from <paramref name="contentRoot"/> (at most 10 levels) looking for the
    /// checked-in OpenAPI YAML; falls back to the historical relative path when not found.
    /// </summary>
    public static string GetSpecPath(string contentRoot)
    {
        var probe = Path.GetFullPath(contentRoot);
        for (var depth = 0; depth < 10; depth++)
        {
            var candidate = Path.Combine(probe, "docs", "modules", "findings-ledger", "openapi", "findings-ledger.v1.yaml");
            if (File.Exists(candidate))
            {
                return candidate;
            }

            probe = Path.GetFullPath(Path.Combine(probe, ".."));
        }

        // Fallback to previous behavior if traversal fails.
        return Path.GetFullPath(Path.Combine(contentRoot, "../../docs/modules/findings-ledger/openapi/findings-ledger.v1.yaml"));
    }

    /// <summary>
    /// Returns the spec file's last UTC write time, or null when the file does not exist.
    /// </summary>
    public static DateTimeOffset? GetLastModified(string specPath) =>
        File.Exists(specPath)
            ? (DateTimeOffset?)File.GetLastWriteTimeUtc(specPath)
            : null;

    /// <summary>
    /// Computes a weak ETag from the first 8 bytes of the SHA-256 digest of the content.
    /// </summary>
    public static string ComputeEtag(byte[] content)
    {
        var digest = SHA256.HashData(content);
        var prefix = Convert.ToHexString(digest, 0, 8).ToLowerInvariant();
        return $"W/\"{prefix}\"";
    }
}

View File

@@ -0,0 +1,92 @@
using System;

namespace StellaOps.Findings.Ledger.Options;

/// <summary>
/// Configures incident-mode behaviour for the Findings Ledger.
/// </summary>
public sealed class LedgerIncidentOptions
{
    /// <summary>Configuration section these options bind from.</summary>
    public const string SectionName = "findings:ledger:incident";

    /// <summary>Enables ledger-side incident instrumentation.</summary>
    public bool Enabled { get; set; } = true;

    /// <summary>Days added to retention windows while incident mode is active (0-3650).</summary>
    public int RetentionExtensionDays { get; set; } = 60;

    /// <summary>Minimum projection lag, in seconds, that will be recorded during incident mode.</summary>
    public double LagTraceThresholdSeconds { get; set; } = 15;

    /// <summary>Upper bound on retained projection lag samples.</summary>
    public int LagTraceBufferSize { get; set; } = 100;

    /// <summary>Upper bound on retained conflict snapshots.</summary>
    public int ConflictSnapshotBufferSize { get; set; } = 50;

    /// <summary>Upper bound on retained replay traces.</summary>
    public int ReplayTraceBufferSize { get; set; } = 50;

    /// <summary>Captures projection lag traces while incident mode is active.</summary>
    public bool CaptureLagTraces { get; set; } = true;

    /// <summary>Captures conflict snapshots while incident mode is active.</summary>
    public bool CaptureConflictSnapshots { get; set; } = true;

    /// <summary>Captures replay request traces while incident mode is active.</summary>
    public bool CaptureReplayTraces { get; set; } = true;

    /// <summary>Emits structured timeline/log entries for incident actions.</summary>
    public bool EmitTimelineEvents { get; set; } = true;

    /// <summary>Emits notifier events (logging by default) for incident actions.</summary>
    public bool EmitNotifications { get; set; } = true;

    /// <summary>Clears buffered diagnostics on each activation to avoid mixing epochs.</summary>
    public bool ResetDiagnosticsOnActivation { get; set; } = true;

    /// <summary>
    /// Throws <see cref="InvalidOperationException"/> when any option value is out of range.
    /// </summary>
    public void Validate()
    {
        if (RetentionExtensionDays is < 0 or > 3650)
        {
            throw new InvalidOperationException("RetentionExtensionDays must be between 0 and 3650.");
        }

        if (LagTraceThresholdSeconds < 0)
        {
            throw new InvalidOperationException("LagTraceThresholdSeconds must be non-negative.");
        }

        if (LagTraceBufferSize <= 0 || ConflictSnapshotBufferSize <= 0 || ReplayTraceBufferSize <= 0)
        {
            throw new InvalidOperationException("Incident diagnostic buffer sizes must be positive.");
        }
    }
}

View File

@@ -0,0 +1,355 @@
using System.Collections.Concurrent;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Findings.Ledger.Observability;
using StellaOps.Findings.Ledger.Options;
using StellaOps.Telemetry.Core;
namespace StellaOps.Findings.Ledger.Services.Incident;
/// <summary>
/// Incident-mode diagnostics sink: buffers lag/conflict/replay telemetry while an incident is active.
/// </summary>
public interface ILedgerIncidentDiagnostics : ILedgerIncidentState
{
    /// <summary>Records one projection lag observation.</summary>
    void RecordProjectionLag(ProjectionLagSample sample);

    /// <summary>Records one ledger append conflict.</summary>
    void RecordConflict(ConflictSnapshot snapshot);

    /// <summary>Records one replay request trace.</summary>
    void RecordReplayTrace(ReplayTraceSample sample);

    /// <summary>Returns a point-in-time copy of the buffered diagnostics and incident state.</summary>
    IncidentDiagnosticsSnapshot GetDiagnosticsSnapshot();
}
/// <summary>
/// Read-only view of the ledger's current incident-mode state.
/// </summary>
public interface ILedgerIncidentState
{
    /// <summary>True while incident mode is active.</summary>
    bool IsActive { get; }

    /// <summary>Latest incident snapshot (activation metadata, expiry, retention hint).</summary>
    LedgerIncidentSnapshot Current { get; }
}
/// <summary>
/// Publishes incident mode transitions to a notification channel.
/// </summary>
public interface ILedgerIncidentNotifier
{
    /// <summary>Publishes a notification that incident mode changed state.</summary>
    Task PublishIncidentModeChangedAsync(LedgerIncidentSnapshot snapshot, CancellationToken cancellationToken);
}
/// <summary>
/// Default <see cref="ILedgerIncidentNotifier"/> that records incident mode transitions as
/// warning-level log entries.
/// </summary>
public sealed class LoggingLedgerIncidentNotifier : ILedgerIncidentNotifier
{
    private readonly ILogger<LoggingLedgerIncidentNotifier> _logger;

    public LoggingLedgerIncidentNotifier(ILogger<LoggingLedgerIncidentNotifier> logger)
        => _logger = logger ?? throw new ArgumentNullException(nameof(logger));

    /// <summary>
    /// Logs the transition and completes synchronously; the cancellation token is unused.
    /// </summary>
    public Task PublishIncidentModeChangedAsync(LedgerIncidentSnapshot snapshot, CancellationToken cancellationToken)
    {
        _logger.LogWarning(
            "NOTIFICATION: Ledger incident mode {State} (activation_id={ActivationId}, retention_extension_days={ExtensionDays})",
            snapshot.IsActive ? "enabled" : "disabled",
            snapshot.ActivationId ?? string.Empty,
            snapshot.RetentionExtensionDays);
        return Task.CompletedTask;
    }
}
/// <summary>
/// Immutable snapshot of the ledger incident-mode state at a point in time.
/// </summary>
public sealed record LedgerIncidentSnapshot(
    bool IsActive,
    string? ActivationId,
    string? Actor,
    string? Reason,
    string? TenantId,
    DateTimeOffset ChangedAt,
    DateTimeOffset? ExpiresAt,
    int RetentionExtensionDays);

/// <summary>
/// One observation of projection lag for a ledger chain event.
/// </summary>
public sealed record ProjectionLagSample(
    string TenantId,
    Guid ChainId,
    long SequenceNumber,
    string EventType,
    string PolicyVersion,
    double LagSeconds,
    DateTimeOffset RecordedAt,
    DateTimeOffset ObservedAt);

/// <summary>
/// Details of a rejected ledger append (expected sequence / previous-hash mismatch)
/// captured for incident review.
/// </summary>
public sealed record ConflictSnapshot(
    string TenantId,
    Guid ChainId,
    long SequenceNumber,
    Guid EventId,
    string EventType,
    string PolicyVersion,
    string Reason,
    DateTimeOffset RecordedAt,
    DateTimeOffset ObservedAt,
    string? ActorId,
    string? ActorType,
    long ExpectedSequence,
    string? ProvidedPreviousHash,
    string? ExpectedPreviousHash);

/// <summary>
/// Timing and shape summary of a single ledger replay request.
/// </summary>
public sealed record ReplayTraceSample(
    string TenantId,
    long FromSequence,
    long ToSequence,
    long EventsCount,
    bool HasMore,
    double DurationMs,
    DateTimeOffset ObservedAt,
    int ChainFilterCount,
    int EventTypeFilterCount);

/// <summary>
/// Point-in-time copy of buffered incident diagnostics plus the incident state they belong to.
/// </summary>
public sealed record IncidentDiagnosticsSnapshot(
    LedgerIncidentSnapshot Incident,
    IReadOnlyList<ProjectionLagSample> LagSamples,
    IReadOnlyList<ConflictSnapshot> ConflictSnapshots,
    IReadOnlyList<ReplayTraceSample> ReplayTraces,
    DateTimeOffset CapturedAt);
/// <summary>
/// Coordinates ledger-specific incident mode behaviour (diagnostics, retention hints, timeline/notification events).
/// </summary>
public sealed class LedgerIncidentCoordinator : ILedgerIncidentDiagnostics, IDisposable
{
private const int ReplayTraceLogThresholdMs = 250;
private readonly LedgerIncidentOptions _options;
private readonly ILogger<LedgerIncidentCoordinator> _logger;
private readonly ILedgerIncidentNotifier _notifier;
private readonly TimeProvider _timeProvider;
private readonly IIncidentModeService? _incidentModeService;
private readonly ConcurrentQueue<ProjectionLagSample> _lagSamples = new();
private readonly ConcurrentQueue<ConflictSnapshot> _conflictSnapshots = new();
private readonly ConcurrentQueue<ReplayTraceSample> _replayTraces = new();
private readonly ConcurrentDictionary<string, DateTimeOffset> _lastLagLogByChain = new(StringComparer.Ordinal);
private readonly object _stateLock = new();
private LedgerIncidentSnapshot _current;
private bool _disposed;
public LedgerIncidentCoordinator(
IOptions<LedgerIncidentOptions> options,
ILogger<LedgerIncidentCoordinator> logger,
ILedgerIncidentNotifier notifier,
TimeProvider? timeProvider = null,
IIncidentModeService? incidentModeService = null)
{
_options = (options ?? throw new ArgumentNullException(nameof(options))).Value;
_options.Validate();
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
_notifier = notifier ?? throw new ArgumentNullException(nameof(notifier));
_timeProvider = timeProvider ?? TimeProvider.System;
_incidentModeService = incidentModeService;
_current = new LedgerIncidentSnapshot(
IsActive: false,
ActivationId: null,
Actor: null,
Reason: null,
TenantId: null,
ChangedAt: _timeProvider.GetUtcNow(),
ExpiresAt: null,
RetentionExtensionDays: 0);
if (_incidentModeService is not null)
{
_incidentModeService.Activated += OnActivated;
_incidentModeService.Deactivated += OnDeactivated;
if (_incidentModeService.CurrentState is { } state && !_incidentModeService.CurrentState.IsExpired)
{
ApplyIncidentState(state, wasReactivation: false);
}
}
}
public bool IsActive => _current.IsActive;
public LedgerIncidentSnapshot Current => _current;
public void RecordProjectionLag(ProjectionLagSample sample)
{
if (!_options.Enabled || !IsActive || !_options.CaptureLagTraces)
{
return;
}
EnqueueWithLimit(_lagSamples, sample, _options.LagTraceBufferSize);
if (_options.EmitTimelineEvents && sample.LagSeconds >= _options.LagTraceThresholdSeconds)
{
var now = sample.ObservedAt;
var key = $"{sample.TenantId}:{sample.ChainId}";
if (!_lastLagLogByChain.TryGetValue(key, out var lastLogged) ||
now - lastLogged >= TimeSpan.FromMinutes(1))
{
_lastLagLogByChain[key] = now;
LedgerTimeline.EmitIncidentLagTrace(_logger, sample);
}
}
}
public void RecordConflict(ConflictSnapshot snapshot)
{
if (!_options.Enabled || !IsActive || !_options.CaptureConflictSnapshots)
{
return;
}
EnqueueWithLimit(_conflictSnapshots, snapshot, _options.ConflictSnapshotBufferSize);
if (_options.EmitTimelineEvents)
{
LedgerTimeline.EmitIncidentConflictSnapshot(_logger, snapshot);
}
}
public void RecordReplayTrace(ReplayTraceSample sample)
{
if (!_options.Enabled || !IsActive || !_options.CaptureReplayTraces)
{
return;
}
EnqueueWithLimit(_replayTraces, sample, _options.ReplayTraceBufferSize);
if (_options.EmitTimelineEvents &&
(sample.DurationMs >= ReplayTraceLogThresholdMs || sample.HasMore))
{
LedgerTimeline.EmitIncidentReplayTrace(_logger, sample);
}
}
public IncidentDiagnosticsSnapshot GetDiagnosticsSnapshot()
{
return new IncidentDiagnosticsSnapshot(
_current,
_lagSamples.ToArray(),
_conflictSnapshots.ToArray(),
_replayTraces.ToArray(),
_timeProvider.GetUtcNow());
}
private void OnActivated(object? sender, IncidentModeActivatedEventArgs e)
{
ApplyIncidentState(e.State, e.WasReactivation);
}
private void OnDeactivated(object? sender, IncidentModeDeactivatedEventArgs e)
{
if (!_options.Enabled)
{
return;
}
lock (_stateLock)
{
_current = new LedgerIncidentSnapshot(
IsActive: false,
ActivationId: e.State.ActivationId,
Actor: e.DeactivatedBy,
Reason: e.Reason.ToString(),
TenantId: e.State.TenantId,
ChangedAt: _timeProvider.GetUtcNow(),
ExpiresAt: e.State.ExpiresAt,
RetentionExtensionDays: 0);
}
if (_options.EmitTimelineEvents)
{
LedgerTimeline.EmitIncidentModeChanged(_logger, _current, wasReactivation: false);
}
if (_options.EmitNotifications)
{
_ = SafeNotifyAsync(_current);
}
}
private void ApplyIncidentState(IncidentModeState state, bool wasReactivation)
{
if (!_options.Enabled)
{
return;
}
lock (_stateLock)
{
_current = new LedgerIncidentSnapshot(
IsActive: true,
ActivationId: state.ActivationId,
Actor: state.Actor,
Reason: state.Reason,
TenantId: state.TenantId,
ChangedAt: _timeProvider.GetUtcNow(),
ExpiresAt: state.ExpiresAt,
RetentionExtensionDays: _options.RetentionExtensionDays);
if (_options.ResetDiagnosticsOnActivation)
{
ClearDiagnostics();
}
}
if (_options.EmitTimelineEvents)
{
LedgerTimeline.EmitIncidentModeChanged(_logger, _current, wasReactivation);
}
if (_options.EmitNotifications)
{
_ = SafeNotifyAsync(_current);
}
}
private Task SafeNotifyAsync(LedgerIncidentSnapshot snapshot)
{
try
{
return _notifier.PublishIncidentModeChangedAsync(snapshot, CancellationToken.None);
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Failed to publish incident mode notification.");
return Task.CompletedTask;
}
}
// Drops every buffered diagnostic sample (lag, conflict, replay); used when
// incident activation resets diagnostics.
private void ClearDiagnostics()
{
    static void Drain<T>(ConcurrentQueue<T> queue)
    {
        while (queue.TryDequeue(out _))
        {
        }
    }

    Drain(_lagSamples);
    Drain(_conflictSnapshots);
    Drain(_replayTraces);
}
// Appends an item, then evicts the oldest entries until the queue holds at
// most <paramref name="limit"/> items. Count may transiently exceed the limit
// under concurrent writers; eviction is best-effort.
private static void EnqueueWithLimit<T>(ConcurrentQueue<T> queue, T item, int limit)
{
    queue.Enqueue(item);

    while (queue.Count > limit)
    {
        if (!queue.TryDequeue(out _))
        {
            break;
        }
    }
}
/// <summary>
/// Unsubscribes from incident-mode service events. Idempotent: subsequent
/// calls are no-ops.
/// </summary>
public void Dispose()
{
    if (_disposed)
    {
        return;
    }

    _disposed = true;

    if (_incidentModeService is null)
    {
        return;
    }

    _incidentModeService.Activated -= OnActivated;
    _incidentModeService.Deactivated -= OnDeactivated;
}
}

View File

@@ -5,6 +5,7 @@ using StellaOps.Findings.Ledger.Domain;
using StellaOps.Findings.Ledger.Hashing;
using StellaOps.Findings.Ledger.Infrastructure;
using StellaOps.Findings.Ledger.Observability;
using StellaOps.Findings.Ledger.Services.Incident;
namespace StellaOps.Findings.Ledger.Services;
@@ -18,15 +19,18 @@ public sealed class LedgerEventWriteService : ILedgerEventWriteService
private readonly ILedgerEventRepository _repository;
private readonly IMerkleAnchorScheduler _merkleAnchorScheduler;
private readonly ILogger<LedgerEventWriteService> _logger;
private readonly ILedgerIncidentDiagnostics? _incidentDiagnostics;
public LedgerEventWriteService(
ILedgerEventRepository repository,
IMerkleAnchorScheduler merkleAnchorScheduler,
ILogger<LedgerEventWriteService> logger)
ILogger<LedgerEventWriteService> logger,
ILedgerIncidentDiagnostics? incidentDiagnostics = null)
{
_repository = repository ?? throw new ArgumentNullException(nameof(repository));
_merkleAnchorScheduler = merkleAnchorScheduler ?? throw new ArgumentNullException(nameof(merkleAnchorScheduler));
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
_incidentDiagnostics = incidentDiagnostics;
}
public async Task<LedgerWriteResult> AppendAsync(LedgerEventDraft draft, CancellationToken cancellationToken)
@@ -57,6 +61,7 @@ public sealed class LedgerEventWriteService : ILedgerEventWriteService
if (!string.Equals(existing.CanonicalJson, canonicalJson, StringComparison.Ordinal))
{
LedgerTelemetry.MarkError(activity, "event_id_conflict");
RecordConflictSnapshot(draft, expectedSequence: existing.SequenceNumber + 1, reason: "event_id_conflict", expectedPreviousHash: existing.EventHash);
return LedgerWriteResult.Conflict(
"event_id_conflict",
$"Event '{draft.EventId}' already exists with a different payload.");
@@ -71,6 +76,7 @@ public sealed class LedgerEventWriteService : ILedgerEventWriteService
if (draft.SequenceNumber != expectedSequence)
{
LedgerTelemetry.MarkError(activity, "sequence_mismatch");
RecordConflictSnapshot(draft, expectedSequence, reason: "sequence_mismatch", expectedPreviousHash: chainHead?.EventHash);
return LedgerWriteResult.Conflict(
"sequence_mismatch",
$"Sequence number '{draft.SequenceNumber}' does not match expected '{expectedSequence}'.");
@@ -80,6 +86,7 @@ public sealed class LedgerEventWriteService : ILedgerEventWriteService
if (draft.ProvidedPreviousHash is not null && !string.Equals(draft.ProvidedPreviousHash, previousHash, StringComparison.OrdinalIgnoreCase))
{
LedgerTelemetry.MarkError(activity, "previous_hash_mismatch");
RecordConflictSnapshot(draft, expectedSequence, reason: "previous_hash_mismatch", providedPreviousHash: draft.ProvidedPreviousHash, expectedPreviousHash: previousHash);
return LedgerWriteResult.Conflict(
"previous_hash_mismatch",
$"Provided previous hash '{draft.ProvidedPreviousHash}' does not match chain head hash '{previousHash}'.");
@@ -143,11 +150,13 @@ public sealed class LedgerEventWriteService : ILedgerEventWriteService
var persisted = await _repository.GetByEventIdAsync(draft.TenantId, draft.EventId, cancellationToken).ConfigureAwait(false);
if (persisted is null)
{
RecordConflictSnapshot(draft, expectedSequence, reason: "append_failed", expectedPreviousHash: previousHash);
return LedgerWriteResult.Conflict("append_failed", "Ledger append failed due to concurrent write.");
}
if (!string.Equals(persisted.CanonicalJson, record.CanonicalJson, StringComparison.Ordinal))
{
RecordConflictSnapshot(draft, expectedSequence, reason: "event_id_conflict", expectedPreviousHash: persisted.EventHash);
return LedgerWriteResult.Conflict("event_id_conflict", "Ledger append raced with conflicting payload.");
}
@@ -157,6 +166,37 @@ public sealed class LedgerEventWriteService : ILedgerEventWriteService
return LedgerWriteResult.Success(record);
}
private void RecordConflictSnapshot(
LedgerEventDraft draft,
long expectedSequence,
string reason,
string? providedPreviousHash = null,
string? expectedPreviousHash = null)
{
if (_incidentDiagnostics is null)
{
return;
}
var snapshot = new ConflictSnapshot(
TenantId: draft.TenantId,
ChainId: draft.ChainId,
SequenceNumber: draft.SequenceNumber,
EventId: draft.EventId,
EventType: draft.EventType,
PolicyVersion: draft.PolicyVersion ?? string.Empty,
Reason: reason,
RecordedAt: draft.RecordedAt,
ObservedAt: DateTimeOffset.UtcNow,
ActorId: draft.ActorId,
ActorType: draft.ActorType,
ExpectedSequence: expectedSequence,
ProvidedPreviousHash: providedPreviousHash,
ExpectedPreviousHash: expectedPreviousHash);
_incidentDiagnostics.RecordConflict(snapshot);
}
private static string DetermineSource(LedgerEventDraft draft)
{
if (draft.SourceRunId.HasValue)

View File

@@ -154,7 +154,12 @@ public sealed class ScoredFindingsExportService : IScoredFindingsExportService
finding.RiskProfileVersion,
finding.RiskExplanationId,
finding.ExplainRef,
finding.UpdatedAt
finding.UpdatedAt,
finding.AttestationStatus,
finding.AttestationCount,
finding.VerifiedAttestationCount,
finding.FailedAttestationCount,
finding.UnverifiedAttestationCount
};
}

View File

@@ -1,3 +1,5 @@
using StellaOps.Findings.Ledger.Infrastructure.Attestation;
namespace StellaOps.Findings.Ledger.Services;
/// <summary>
@@ -18,6 +20,9 @@ public sealed record ScoredFindingsQuery
public int Limit { get; init; } = 50;
public ScoredFindingsSortField SortBy { get; init; } = ScoredFindingsSortField.RiskScore;
public bool Descending { get; init; } = true;
public IReadOnlyList<AttestationType>? AttestationTypes { get; init; }
public AttestationVerificationFilter? AttestationVerification { get; init; }
public OverallVerificationStatus? AttestationStatus { get; init; }
}
/// <summary>
@@ -57,6 +62,11 @@ public sealed record ScoredFinding
public Guid? RiskExplanationId { get; init; }
public string? ExplainRef { get; init; }
public DateTimeOffset UpdatedAt { get; init; }
public int AttestationCount { get; init; }
public int VerifiedAttestationCount { get; init; }
public int FailedAttestationCount { get; init; }
public int UnverifiedAttestationCount { get; init; }
public OverallVerificationStatus AttestationStatus { get; init; } = OverallVerificationStatus.NoAttestations;
}
/// <summary>

View File

@@ -164,7 +164,12 @@ public sealed class ScoredFindingsQueryService : IScoredFindingsQueryService
RiskProfileVersion = projection.RiskProfileVersion,
RiskExplanationId = projection.RiskExplanationId,
ExplainRef = projection.ExplainRef,
UpdatedAt = projection.UpdatedAt
UpdatedAt = projection.UpdatedAt,
AttestationCount = projection.AttestationCount,
VerifiedAttestationCount = projection.VerifiedAttestationCount,
FailedAttestationCount = projection.FailedAttestationCount,
UnverifiedAttestationCount = projection.UnverifiedAttestationCount,
AttestationStatus = projection.AttestationStatus
};
}

View File

@@ -1,5 +1,6 @@
namespace StellaOps.Findings.Ledger.Services;
using System.Collections.Generic;
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
@@ -7,6 +8,7 @@ using Microsoft.Extensions.Logging;
using StellaOps.Findings.Ledger.Domain;
using StellaOps.Findings.Ledger.Infrastructure.Snapshot;
using StellaOps.Findings.Ledger.Observability;
using StellaOps.Findings.Ledger.Services.Incident;
/// <summary>
/// Service for managing ledger snapshots and time-travel queries.
@@ -17,15 +19,18 @@ public sealed class SnapshotService
private readonly ITimeTravelRepository _timeTravelRepository;
private readonly ILogger<SnapshotService> _logger;
private readonly JsonSerializerOptions _jsonOptions;
private readonly ILedgerIncidentDiagnostics? _incidentDiagnostics;
public SnapshotService(
ISnapshotRepository snapshotRepository,
ITimeTravelRepository timeTravelRepository,
ILogger<SnapshotService> logger)
ILogger<SnapshotService> logger,
ILedgerIncidentDiagnostics? incidentDiagnostics = null)
{
_snapshotRepository = snapshotRepository;
_timeTravelRepository = timeTravelRepository;
_logger = logger;
_incidentDiagnostics = incidentDiagnostics;
_jsonOptions = new JsonSerializerOptions
{
PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
@@ -42,32 +47,33 @@ public sealed class SnapshotService
{
try
{
var effectiveInput = ApplyIncidentRetention(input);
_logger.LogInformation(
"Creating snapshot for tenant {TenantId} at sequence {Sequence} / timestamp {Timestamp}",
input.TenantId,
input.AtSequence,
input.AtTimestamp);
effectiveInput.TenantId,
effectiveInput.AtSequence,
effectiveInput.AtTimestamp);
// Get current ledger state
var currentPoint = await _timeTravelRepository.GetCurrentPointAsync(input.TenantId, ct);
var currentPoint = await _timeTravelRepository.GetCurrentPointAsync(effectiveInput.TenantId, ct);
// Create the snapshot record
var snapshot = await _snapshotRepository.CreateAsync(
input.TenantId,
input,
effectiveInput.TenantId,
effectiveInput,
currentPoint.SequenceNumber,
currentPoint.Timestamp,
ct);
// Compute statistics asynchronously
var statistics = await ComputeStatisticsAsync(
input.TenantId,
effectiveInput.TenantId,
snapshot.SequenceNumber,
input.IncludeEntityTypes,
effectiveInput.IncludeEntityTypes,
ct);
await _snapshotRepository.UpdateStatisticsAsync(
input.TenantId,
effectiveInput.TenantId,
snapshot.SnapshotId,
statistics,
ct);
@@ -79,12 +85,12 @@ public sealed class SnapshotService
if (input.Sign)
{
merkleRoot = await ComputeMerkleRootAsync(
input.TenantId,
effectiveInput.TenantId,
snapshot.SequenceNumber,
ct);
await _snapshotRepository.SetMerkleRootAsync(
input.TenantId,
effectiveInput.TenantId,
snapshot.SnapshotId,
merkleRoot,
dsseDigest,
@@ -93,20 +99,20 @@ public sealed class SnapshotService
// Mark as available
await _snapshotRepository.UpdateStatusAsync(
input.TenantId,
effectiveInput.TenantId,
snapshot.SnapshotId,
SnapshotStatus.Available,
ct);
// Retrieve updated snapshot
var finalSnapshot = await _snapshotRepository.GetByIdAsync(
input.TenantId,
effectiveInput.TenantId,
snapshot.SnapshotId,
ct);
LedgerTimeline.EmitSnapshotCreated(
_logger,
input.TenantId,
effectiveInput.TenantId,
snapshot.SnapshotId,
snapshot.SequenceNumber,
statistics.FindingsCount);
@@ -196,7 +202,20 @@ public sealed class SnapshotService
ReplayRequest request,
CancellationToken ct = default)
{
return await _timeTravelRepository.ReplayEventsAsync(request, ct);
var result = await _timeTravelRepository.ReplayEventsAsync(request, ct);
_incidentDiagnostics?.RecordReplayTrace(new ReplayTraceSample(
TenantId: request.TenantId,
FromSequence: result.Metadata.FromSequence,
ToSequence: result.Metadata.ToSequence,
EventsCount: result.Metadata.EventsCount,
HasMore: result.Metadata.HasMore,
DurationMs: result.Metadata.ReplayDurationMs,
ObservedAt: DateTimeOffset.UtcNow,
ChainFilterCount: request.ChainIds?.Count ?? 0,
EventTypeFilterCount: request.EventTypes?.Count ?? 0));
return result;
}
/// <summary>
@@ -249,6 +268,15 @@ public sealed class SnapshotService
public async Task<int> ExpireOldSnapshotsAsync(CancellationToken ct = default)
{
var cutoff = DateTimeOffset.UtcNow;
if (_incidentDiagnostics?.IsActive == true && _incidentDiagnostics.Current.RetentionExtensionDays > 0)
{
cutoff = cutoff.AddDays(-_incidentDiagnostics.Current.RetentionExtensionDays);
_logger.LogInformation(
"Incident mode active; extending snapshot expiry cutoff by {ExtensionDays} days (activation {ActivationId}).",
_incidentDiagnostics.Current.RetentionExtensionDays,
_incidentDiagnostics.Current.ActivationId ?? string.Empty);
}
var count = await _snapshotRepository.ExpireSnapshotsAsync(cutoff, ct);
if (count > 0)
@@ -367,4 +395,44 @@ public sealed class SnapshotService
var bytes = SHA256.HashData(Encoding.UTF8.GetBytes(input));
return Convert.ToHexStringLower(bytes);
}
private CreateSnapshotInput ApplyIncidentRetention(CreateSnapshotInput input)
{
if (_incidentDiagnostics is null || !_incidentDiagnostics.IsActive)
{
return input;
}
var incident = _incidentDiagnostics.Current;
if (incident.RetentionExtensionDays <= 0)
{
return input;
}
TimeSpan? expiresIn = input.ExpiresIn;
if (expiresIn.HasValue)
{
expiresIn = expiresIn.Value.Add(TimeSpan.FromDays(incident.RetentionExtensionDays));
}
var metadata = input.Metadata is null
? new Dictionary<string, object>()
: new Dictionary<string, object>(input.Metadata);
metadata["incident.mode"] = "enabled";
metadata["incident.activationId"] = incident.ActivationId ?? string.Empty;
metadata["incident.retentionExtensionDays"] = incident.RetentionExtensionDays;
metadata["incident.changedAt"] = incident.ChangedAt.ToString("O");
if (incident.ExpiresAt is not null)
{
metadata["incident.expiresAt"] = incident.ExpiresAt.Value.ToString("O");
}
_logger.LogInformation(
"Incident mode active; extending snapshot retention by {ExtensionDays} days (activation {ActivationId}).",
incident.RetentionExtensionDays,
incident.ActivationId ?? string.Empty);
return input with { ExpiresIn = expiresIn, Metadata = metadata };
}
}

View File

@@ -32,6 +32,7 @@
<ItemGroup>
<ProjectReference Include="..\..\__Libraries\StellaOps.Cryptography\StellaOps.Cryptography.csproj" />
<ProjectReference Include="..\..\Telemetry\StellaOps.Telemetry.Core\StellaOps.Telemetry.Core\StellaOps.Telemetry.Core.csproj" />
</ItemGroup>
</Project>

View File

@@ -1,4 +1,4 @@
# Findings Ledger · Sprint 0120-0000-0001
# Findings Ledger · Sprint 0120-0000-0001
| Task ID | Status | Notes | Updated (UTC) |
| --- | --- | --- | --- |
@@ -8,9 +8,18 @@
Status changes must be mirrored in `docs/implplan/SPRINT_0120_0001_0001_policy_reasoning.md`.
# Findings Ledger · Sprint 0121-0001-0001
# Findings Ledger · Sprint 0121-0001-0001
| Task ID | Status | Notes | Updated (UTC) |
| --- | --- | --- | --- |
| LEDGER-OBS-54-001 | DONE | Implemented `/v1/ledger/attestations` with deterministic paging, filter hash guard, and schema/OpenAPI updates. | 2025-11-22 |
| LEDGER-GAPS-121-009 | DONE | FL1FL10 remediation: schema catalog + export canonicals, Merkle/external anchor policy, tenant isolation/redaction manifest, offline verifier + checksum guard, golden fixtures, backpressure metrics. | 2025-12-02 |
| LEDGER-GAPS-121-009 | DONE | FL1–FL10 remediation: schema catalog + export canonicals, Merkle/external anchor policy, tenant isolation/redaction manifest, offline verifier + checksum guard, golden fixtures, backpressure metrics. | 2025-12-02 |
# Findings Ledger · Sprint 0121-0001-0002
| Task ID | Status | Notes | Updated (UTC) |
| --- | --- | --- | --- |
| LEDGER-ATTEST-73-002 | DONE | Verification-result and attestation-status filters wired into findings projection queries and exports; tests added. | 2025-12-08 |
| LEDGER-OAS-61-002 | DONE | `/.well-known/openapi` serves spec with version/build headers, ETag, cache hints. | 2025-12-08 |
| LEDGER-OAS-62-001 | DONE | SDK-facing OpenAPI assertions for pagination, evidence links, provenance added. | 2025-12-08 |
| LEDGER-OAS-63-001 | DONE | Deprecation headers and notifications applied to legacy findings export endpoint. | 2025-12-08 |
| LEDGER-OBS-55-001 | DONE | Incident-mode diagnostics (lag/conflict/replay traces), retention extension for snapshots, timeline/notifier hooks. | 2025-12-08 |

View File

@@ -0,0 +1,18 @@
using FluentAssertions;
using StellaOps.Findings.Ledger.Infrastructure.Attestation;
namespace StellaOps.Findings.Ledger.Tests;
public class AttestationStatusCalculatorTests
{
    // Each row: (total attestations, verified count) -> expected rollup status.
    [Theory]
    [InlineData(0, 0, OverallVerificationStatus.NoAttestations)]
    [InlineData(3, 3, OverallVerificationStatus.AllVerified)]
    [InlineData(4, 1, OverallVerificationStatus.PartiallyVerified)]
    [InlineData(2, 0, OverallVerificationStatus.NoneVerified)]
    public void Compute_ReturnsExpectedStatus(int attestationCount, int verifiedCount, OverallVerificationStatus expected)
    {
        var actual = AttestationStatusCalculator.Compute(attestationCount, verifiedCount);

        actual.Should().Be(expected);
    }
}

View File

@@ -0,0 +1,20 @@
using FluentAssertions;
using Microsoft.AspNetCore.Http;
using StellaOps.Findings.Ledger;
namespace StellaOps.Findings.Ledger.Tests;
public class DeprecationHeadersTests
{
    [Fact]
    public void Apply_SetsStandardDeprecationHeaders()
    {
        // Arrange: a bare HTTP context whose response headers start out empty.
        var context = new DefaultHttpContext();
        var response = context.Response;

        // Act
        DeprecationHeaders.Apply(response, "ledger.export.findings");

        // Assert: deprecation markers plus the discovery link to the OpenAPI doc.
        response.Headers["Deprecation"].ToString().Should().Be("true");
        response.Headers["Sunset"].ToString().Should().Be(DeprecationHeaders.SunsetDate);
        response.Headers["X-Deprecated-Endpoint"].ToString().Should().Be("ledger.export.findings");
        response.Headers["Link"].ToString().Should().Contain("/.well-known/openapi");
    }
}

View File

@@ -0,0 +1,28 @@
using System.Text;
using FluentAssertions;
using StellaOps.Findings.Ledger.OpenApi;
namespace StellaOps.Findings.Ledger.Tests;
public class OpenApiMetadataFactoryTests
{
    [Fact]
    public void ComputeEtag_IsDeterministicAndWeak()
    {
        var payload = Encoding.UTF8.GetBytes("spec-content");

        var first = OpenApiMetadataFactory.ComputeEtag(payload);
        var second = OpenApiMetadataFactory.ComputeEtag(payload);

        // Same bytes must always produce the same weak ETag (W/"..." form).
        first.Should().StartWith("W/\"");
        first.Should().Be(second);
        first.Length.Should().BeGreaterThan(6);
    }

    [Fact]
    public void GetSpecPath_ResolvesExistingSpec()
    {
        var specPath = OpenApiMetadataFactory.GetSpecPath(AppContext.BaseDirectory);

        File.Exists(specPath).Should().BeTrue();
    }
}

View File

@@ -0,0 +1,39 @@
using System.Text;
using FluentAssertions;
using StellaOps.Findings.Ledger.OpenApi;
namespace StellaOps.Findings.Ledger.Tests;
public class OpenApiSdkSurfaceTests
{
    // Raw OpenAPI document text, loaded once per test-class instance.
    private readonly string _spec;

    public OpenApiSdkSurfaceTests()
    {
        var specPath = OpenApiMetadataFactory.GetSpecPath(AppContext.BaseDirectory);
        _spec = File.ReadAllText(specPath, Encoding.UTF8);
    }

    [Fact]
    public void FindingsEndpoints_ExposePaginationAndFilters()
    {
        // Pagination surface for SDK consumers.
        _spec.Should().Contain("/findings");
        _spec.Should().Contain("page_token");
        _spec.Should().MatchRegex("nextPageToken|next_page_token");
    }

    [Fact]
    public void EvidenceSchemas_ExposeEvidenceLinks()
    {
        _spec.Should().Contain("evidenceBundleRef");
        _spec.Should().Contain("ExportProvenance");
    }

    [Fact]
    public void AttestationPointers_ExposeProvenanceMetadata()
    {
        _spec.Should().Contain("/v1/ledger/attestations");
        _spec.Should().Contain("attestation");
        _spec.Should().Contain("provenance");
    }
}

View File

@@ -0,0 +1,116 @@
using System.Text.Json.Nodes;
using Microsoft.Extensions.Logging.Abstractions;
using StellaOps.Findings.Ledger.Domain;
using StellaOps.Findings.Ledger.Infrastructure;
using StellaOps.Findings.Ledger.Infrastructure.Attestation;
using StellaOps.Findings.Ledger.Services;
using FluentAssertions;
namespace StellaOps.Findings.Ledger.Tests;
public class ScoredFindingsQueryServiceTests
{
    // Verifies that attestation rollup fields on the stored projection are
    // mapped one-to-one onto the ScoredFinding returned by QueryAsync.
    [Fact]
    public async Task QueryAsync_MapsAttestationMetadata()
    {
        // A projection carrying a partially-verified attestation rollup
        // (3 total = 2 verified + 1 failed + 0 unverified).
        var projection = new FindingProjection(
            TenantId: "tenant-a",
            FindingId: "finding-123",
            PolicyVersion: "v1",
            Status: "affected",
            Severity: 7.5m,
            RiskScore: 0.9m,
            RiskSeverity: "critical",
            RiskProfileVersion: "p1",
            RiskExplanationId: Guid.NewGuid(),
            RiskEventSequence: 42,
            Labels: new(),
            CurrentEventId: Guid.NewGuid(),
            ExplainRef: "explain-1",
            PolicyRationale: new(),
            UpdatedAt: DateTimeOffset.UtcNow,
            CycleHash: "abc123",
            AttestationCount: 3,
            VerifiedAttestationCount: 2,
            FailedAttestationCount: 1,
            UnverifiedAttestationCount: 0,
            AttestationStatus: OverallVerificationStatus.PartiallyVerified);
        var repo = new FakeFindingProjectionRepository(projection);
        var service = new ScoredFindingsQueryService(
            repo,
            new NullRiskExplanationStore(),
            TimeProvider.System,
            NullLogger<ScoredFindingsQueryService>.Instance);
        var result = await service.QueryAsync(new ScoredFindingsQuery
        {
            TenantId = "tenant-a",
            Limit = 10
        });
        // All five attestation fields should survive the projection -> DTO mapping.
        result.TotalCount.Should().Be(1);
        result.Findings.Should().HaveCount(1);
        var finding = result.Findings.Single();
        finding.AttestationCount.Should().Be(3);
        finding.VerifiedAttestationCount.Should().Be(2);
        finding.FailedAttestationCount.Should().Be(1);
        finding.UnverifiedAttestationCount.Should().Be(0);
        finding.AttestationStatus.Should().Be(OverallVerificationStatus.PartiallyVerified);
    }
    // Minimal repository stub: always yields the single seeded projection and
    // no-ops every write/statistics member of IFindingProjectionRepository.
    private sealed class FakeFindingProjectionRepository : IFindingProjectionRepository
    {
        private readonly FindingProjection _projection;
        public FakeFindingProjectionRepository(FindingProjection projection)
        {
            _projection = projection;
        }
        public Task<ProjectionCheckpoint> GetCheckpointAsync(CancellationToken cancellationToken) =>
            Task.FromResult(ProjectionCheckpoint.Initial(TimeProvider.System));
        // The query under test: returns the seeded projection with a count of 1.
        public Task<(IReadOnlyList<FindingProjection> Projections, int TotalCount)> QueryScoredAsync(
            ScoredFindingsQuery query,
            CancellationToken cancellationToken) =>
            Task.FromResult((new List<FindingProjection> { _projection } as IReadOnlyList<FindingProjection>, 1));
        public Task<FindingStatsResult> GetFindingStatsSinceAsync(string tenantId, DateTimeOffset since, CancellationToken cancellationToken) =>
            Task.FromResult(new FindingStatsResult(0, 0, 0, 0, 0, 0));
        public Task<(int Total, int Scored, decimal AvgScore, decimal MaxScore)> GetRiskAggregatesAsync(string tenantId, string? policyVersion, CancellationToken cancellationToken) =>
            Task.FromResult((0, 0, 0m, 0m));
        public Task<ScoreDistribution> GetScoreDistributionAsync(string tenantId, string? policyVersion, CancellationToken cancellationToken) =>
            Task.FromResult(new ScoreDistribution());
        public Task<SeverityDistribution> GetSeverityDistributionAsync(string tenantId, string? policyVersion, CancellationToken cancellationToken) =>
            Task.FromResult(new SeverityDistribution());
        public Task SaveCheckpointAsync(ProjectionCheckpoint checkpoint, CancellationToken cancellationToken) =>
            Task.CompletedTask;
        public Task InsertHistoryAsync(FindingHistoryEntry entry, CancellationToken cancellationToken) =>
            Task.CompletedTask;
        public Task InsertActionAsync(TriageActionEntry entry, CancellationToken cancellationToken) =>
            Task.CompletedTask;
        public Task<FindingProjection?> GetAsync(string tenantId, string findingId, string policyVersion, CancellationToken cancellationToken) =>
            Task.FromResult<FindingProjection?>(_projection);
        public Task UpsertAsync(FindingProjection projection, CancellationToken cancellationToken) =>
            Task.CompletedTask;
    }
    // Explanation store stub that has nothing to explain and stores nothing.
    private sealed class NullRiskExplanationStore : IRiskExplanationStore
    {
        public Task<ScoredFindingExplanation?> GetAsync(string tenantId, string findingId, Guid? explanationId, CancellationToken cancellationToken) =>
            Task.FromResult<ScoredFindingExplanation?>(null);
        public Task StoreAsync(string tenantId, ScoredFindingExplanation explanation, CancellationToken cancellationToken) =>
            Task.CompletedTask;
    }
}

View File

@@ -8,3 +8,4 @@
| MIRROR-CRT-57-002 | DONE | Time-anchor DSSE emitted when SIGN_KEY is set; bundle meta + verifier check anchor integrity. |
| MIRROR-CRT-58-001 | DONE | CLI wrappers (`mirror-create.sh`, `mirror-verify.sh`) for deterministic build/verify flows; uses existing assembler + verifier. |
| MIRROR-CRT-58-002 | DOING (dev) | Export Center scheduling helper (`src/Mirror/StellaOps.Mirror.Creator/schedule-export-center-run.sh`) added; production signing still pending MIRROR-CRT-56-002 key. |
| EXPORT-OBS-51-001 / 54-001 | DONE | Export Center handoff scripted via `export-center-wire.sh`, scheduler payload now carries bundle metadata, and mirror-sign CI uploads handoff outputs. |

View File

@@ -3,6 +3,18 @@ set -euo pipefail
# Schedule an Export Center run for mirror bundles and emit an audit log entry.
# Requires curl. Uses bearer token auth for simplicity; swap to DPoP if/when gateway enforces it.
# Usage:
# EXPORT_CENTER_BASE_URL=https://export.example.com \
# EXPORT_CENTER_TENANT=tenant-a \
# EXPORT_CENTER_TOKEN=token123 \
# ./schedule-export-center-run.sh mirror:thin '["vex","advisory"]' '["tar.gz","json"]'
# Env:
# EXPORT_CENTER_BASE_URL (default: http://localhost:8080)
# EXPORT_CENTER_TENANT (default: tenant-default)
# EXPORT_CENTER_PROJECT (optional header)
# EXPORT_CENTER_TOKEN (optional Bearer token)
# EXPORT_CENTER_ARTIFACTS_JSON (optional JSON array of {name,path,sha256} to include in payload)
# AUDIT_LOG_PATH (default: ./logs/export-center-schedule.log)
BASE_URL="${EXPORT_CENTER_BASE_URL:-http://localhost:8080}"
TENANT="${EXPORT_CENTER_TENANT:-tenant-default}"
@@ -19,6 +31,7 @@ fi
TARGETS_JSON="${2:-[\"vex\",\"advisory\",\"policy\"]}"
FORMATS_JSON="${3:-[\"json\",\"ndjson\"]}"
ARTIFACTS_JSON="${EXPORT_CENTER_ARTIFACTS_JSON:-}"
mkdir -p "$(dirname "$AUDIT_LOG")"
@@ -27,15 +40,39 @@ if [[ -n "$TOKEN" ]]; then
AUTH_HEADER=(-H "Authorization: Bearer ${TOKEN}")
fi
payload="$(cat <<JSON
{
"profileId": "${PROFILE_ID}",
"targets": ${TARGETS_JSON},
"formats": ${FORMATS_JSON},
"retentionDays": 30,
"priority": "normal"
payload="$(PROFILE_ID="${PROFILE_ID}" TARGETS_JSON="${TARGETS_JSON}" FORMATS_JSON="${FORMATS_JSON}" ARTIFACTS_JSON="${ARTIFACTS_JSON}" python3 - <<'PY'
import json
import os
import sys
def parse_json(env_key: str) -> object:
try:
return json.loads(os.environ[env_key])
except KeyError:
print(f"missing env: {env_key}", file=sys.stderr)
sys.exit(1)
except json.JSONDecodeError as exc:
print(f"invalid JSON in {env_key}: {exc}", file=sys.stderr)
sys.exit(1)
payload = {
"profileId": os.environ["PROFILE_ID"],
"targets": parse_json("TARGETS_JSON"),
"formats": parse_json("FORMATS_JSON"),
"retentionDays": 30,
"priority": "normal",
}
JSON
artifacts_raw = os.environ.get("ARTIFACTS_JSON", "").strip()
if artifacts_raw:
try:
payload["artifacts"] = json.loads(artifacts_raw)
except json.JSONDecodeError as exc:
print(f"invalid JSON in EXPORT_CENTER_ARTIFACTS_JSON: {exc}", file=sys.stderr)
sys.exit(1)
print(json.dumps(payload))
PY
)"
response="$(curl -sS -X POST "${BASE_URL}/export-center/runs" \

View File

@@ -0,0 +1,179 @@
using System.Collections.Immutable;
using System.Security.Cryptography;
using System.Text;
using StellaOps.Scanner.Analyzers.Native.Observations;
namespace StellaOps.Scanner.Analyzers.Native.Reachability;
/// <summary>
/// Builds a deterministic reachability graph from native observations:
/// nodes and edges are de-duplicated and ordinally sorted so the same
/// observation document always produces identical output.
/// </summary>
internal static class NativeReachabilityGraphBuilder
{
    private const string PayloadType = "stellaops.native.graph@1";

    /// <summary>
    /// Builds the reachability graph for a single native binary observation.
    /// </summary>
    /// <param name="document">Observation document; its <c>Binary</c> must be non-null.</param>
    /// <param name="layerDigest">Optional image-layer digest carried through onto the graph.</param>
    public static NativeReachabilityGraph Build(NativeObservationDocument document, string? layerDigest = null)
    {
        ArgumentNullException.ThrowIfNull(document);
        ArgumentNullException.ThrowIfNull(document.Binary);

        var nodes = new List<NativeReachabilityNode>();
        var edges = new List<NativeReachabilityEdge>();

        var binaryId = ResolveBinaryId(document);
        var codeId = ResolveCodeId(document);
        var binaryNodeId = $"bin::{binaryId}";
        var rootNodeId = $"root::{binaryId}";

        // Root node to capture synthetic sources (init arrays, entrypoints).
        nodes.Add(new NativeReachabilityNode(
            Id: rootNodeId,
            Kind: "root",
            Name: document.Binary.Path ?? "native-binary",
            Path: document.Binary.Path,
            BuildId: document.Binary.BuildId,
            CodeId: codeId,
            Format: document.Binary.Format,
            Architecture: document.Binary.Architecture));

        nodes.Add(new NativeReachabilityNode(
            Id: binaryNodeId,
            Kind: "binary",
            Name: document.Binary.Path ?? "native-binary",
            Path: document.Binary.Path,
            BuildId: document.Binary.BuildId,
            CodeId: codeId,
            Format: document.Binary.Format,
            Architecture: document.Binary.Architecture));

        edges.Add(new NativeReachabilityEdge(rootNodeId, binaryNodeId, "binary"));

        // Entrypoints -> binary (synthetic roots).
        foreach (var entry in document.Entrypoints ?? Array.Empty<NativeObservationEntrypoint>())
        {
            var entryId = $"entry::{entry.Symbol ?? entry.Type ?? "entry"}";
            nodes.Add(new NativeReachabilityNode(
                Id: entryId,
                Kind: "entrypoint",
                Name: entry.Symbol ?? entry.Type ?? "entry",
                Path: document.Binary.Path,
                BuildId: document.Binary.BuildId,
                CodeId: codeId,
                Format: document.Binary.Format,
                Architecture: document.Binary.Architecture));
            edges.Add(new NativeReachabilityEdge(rootNodeId, entryId, "entrypoint"));
            edges.Add(new NativeReachabilityEdge(entryId, binaryNodeId, "entrypoint"));
        }

        // Declared dependencies -> binary.
        foreach (var dep in document.DeclaredEdges ?? Array.Empty<NativeObservationDeclaredEdge>())
        {
            var targetId = $"decl::{dep.Target}";
            nodes.Add(new NativeReachabilityNode(
                Id: targetId,
                Kind: "dependency",
                Name: dep.Target ?? "unknown",
                BuildId: null,
                CodeId: null,
                Format: null,
                Path: dep.Target));
            edges.Add(new NativeReachabilityEdge(binaryNodeId, targetId, dep.Reason ?? "declared"));
        }

        // Heuristic/runtime edges as unknown targets.
        foreach (var edge in document.HeuristicEdges ?? Array.Empty<NativeObservationHeuristicEdge>())
        {
            var targetId = $"heur::{edge.Target}";
            nodes.Add(new NativeReachabilityNode(
                Id: targetId,
                Kind: "dependency",
                Name: edge.Target ?? "unknown",
                Path: edge.Target));
            edges.Add(new NativeReachabilityEdge(binaryNodeId, targetId, edge.Reason ?? "heuristic"));
        }

        foreach (var edge in document.RuntimeEdges ?? Array.Empty<NativeObservationRuntimeEdge>())
        {
            var targetId = $"rt::{edge.Target}";
            nodes.Add(new NativeReachabilityNode(
                Id: targetId,
                Kind: "runtime",
                Name: edge.Target ?? "runtime",
                Path: edge.Target));
            edges.Add(new NativeReachabilityEdge(binaryNodeId, targetId, edge.Reason ?? "runtime"));
        }

        // First occurrence wins on duplicate ids; ordinal sort keeps output stable.
        var distinctNodes = nodes
            .GroupBy(n => n.Id, StringComparer.Ordinal)
            .Select(g => g.First())
            .OrderBy(n => n.Id, StringComparer.Ordinal)
            .ToImmutableArray();

        // Key directly on (From, To, Reason). The previous code passed
        // ValueTuple.Create as GroupBy's element selector, which wrapped each
        // edge in a 1-tuple and broke the property-based ordering below.
        var distinctEdges = edges
            .GroupBy(e => (e.From, e.To, e.Reason))
            .Select(g => g.First())
            .OrderBy(e => e.From, StringComparer.Ordinal)
            .ThenBy(e => e.To, StringComparer.Ordinal)
            .ThenBy(e => e.Reason, StringComparer.Ordinal)
            .ToImmutableArray();

        return new NativeReachabilityGraph(distinctNodes, distinctEdges, layerDigest, document.Binary.BuildId, codeId);
    }

    /// <summary>
    /// Builds the graph and wraps it in a typed payload bundle.
    /// </summary>
    public static NativeReachabilityBundle ToBundle(NativeObservationDocument document, string? layerDigest = null)
    {
        var graph = Build(document, layerDigest);
        return new NativeReachabilityBundle(
            PayloadType,
            graph,
            layerDigest,
            graph.BuildId,
            graph.CodeId);
    }

    // Stable binary identity: prefer build id, then content hash, then path.
    private static string ResolveBinaryId(NativeObservationDocument document)
    {
        if (!string.IsNullOrWhiteSpace(document.Binary.BuildId))
        {
            return $"buildid:{document.Binary.BuildId}";
        }

        if (!string.IsNullOrWhiteSpace(document.Binary.Sha256))
        {
            return $"sha256:{document.Binary.Sha256}";
        }

        return $"path:{document.Binary.Path}";
    }

    // Code id with the same preference order; a path-only binary gets a
    // derived SHA-256 of the path so the id is still deterministic.
    private static string? ResolveCodeId(NativeObservationDocument document)
    {
        if (!string.IsNullOrWhiteSpace(document.Binary.BuildId))
        {
            return document.Binary.BuildId;
        }

        if (!string.IsNullOrWhiteSpace(document.Binary.Sha256))
        {
            return document.Binary.Sha256;
        }

        if (!string.IsNullOrWhiteSpace(document.Binary.Path))
        {
            return ComputeSha256(document.Binary.Path);
        }

        return null;
    }

    // Lower-case hex SHA-256 of the UTF-8 bytes of <paramref name="value"/>.
    private static string ComputeSha256(string value)
    {
        var bytes = Encoding.UTF8.GetBytes(value);
        var hash = SHA256.HashData(bytes);
        return Convert.ToHexString(hash).ToLowerInvariant();
    }
}

View File

@@ -0,0 +1,32 @@
using System.Collections.Immutable;
namespace StellaOps.Scanner.Analyzers.Native.Reachability;
/// <summary>
/// A node in the native reachability graph: the binary itself, an entrypoint,
/// a declared/heuristic dependency, or a runtime target.
/// </summary>
internal sealed record NativeReachabilityNode(
    string Id,
    string Kind,
    string Name,
    string? BuildId = null,
    string? CodeId = null,
    string? Format = null,
    string? Path = null,
    string? Architecture = null);

/// <summary>
/// Directed edge between two node ids; <paramref name="Reason"/> labels why
/// the edge exists (e.g. "declared", "heuristic", "runtime").
/// </summary>
internal sealed record NativeReachabilityEdge(
    string From,
    string To,
    string Reason);

/// <summary>
/// Complete reachability graph for one binary, plus the identity metadata
/// (layer digest, build id, code id) it was derived from.
/// </summary>
internal sealed record NativeReachabilityGraph(
    ImmutableArray<NativeReachabilityNode> Nodes,
    ImmutableArray<NativeReachabilityEdge> Edges,
    string? LayerDigest,
    string? BuildId,
    string? CodeId);

/// <summary>
/// Graph wrapped with its payload-type discriminator for serialization.
/// </summary>
internal sealed record NativeReachabilityBundle(
    string PayloadType,
    NativeReachabilityGraph Graph,
    string? LayerDigest,
    string? BuildId,
    string? CodeId);

View File

@@ -0,0 +1,57 @@
using System.Text.Json;
using System.Text.Json.Serialization;
namespace StellaOps.Scanner.Analyzers.Lang.DotNet.Internal;
/// <summary>
/// Options for the .NET IL analyzer, optionally loaded from a
/// <c>dotnet-il.config.json</c> file located at the analysis root.
/// Missing or unreadable configuration always falls back to defaults.
/// </summary>
internal sealed class DotNetAnalyzerOptions
{
    private const string DefaultConfigFileName = "dotnet-il.config.json";

    // Deserialization settings are immutable; share one instance instead of
    // allocating a fresh JsonSerializerOptions on every Load call.
    private static readonly JsonSerializerOptions SerializerOptions = new()
    {
        PropertyNameCaseInsensitive = true,
        ReadCommentHandling = JsonCommentHandling.Skip,
        AllowTrailingCommas = true
    };

    /// <summary>Whether dependency edges should be emitted. Defaults to false.</summary>
    [JsonPropertyName("emitDependencyEdges")]
    public bool EmitDependencyEdges { get; init; }

    /// <summary>Whether entrypoint information should be included. Defaults to false.</summary>
    [JsonPropertyName("includeEntrypoints")]
    public bool IncludeEntrypoints { get; init; }

    /// <summary>Optional path (resolved against the analysis root) of a runtime-evidence file.</summary>
    [JsonPropertyName("runtimeEvidencePath")]
    public string? RuntimeEvidencePath { get; init; }

    /// <summary>Optional confidence label applied to runtime-evidence edges.</summary>
    [JsonPropertyName("runtimeEvidenceConfidence")]
    public string? RuntimeEvidenceConfidence { get; init; }

    /// <summary>
    /// Loads options from <c>dotnet-il.config.json</c> under <paramref name="context"/>'s root.
    /// Returns default options when the file is absent, unreadable, or malformed —
    /// configuration problems must never fail the analyzer.
    /// </summary>
    /// <param name="context">Analyzer context providing the root path. Must not be null.</param>
    public static DotNetAnalyzerOptions Load(LanguageAnalyzerContext context)
    {
        ArgumentNullException.ThrowIfNull(context);

        var path = Path.Combine(context.RootPath, DefaultConfigFileName);
        if (!File.Exists(path))
        {
            return new DotNetAnalyzerOptions();
        }

        try
        {
            var json = File.ReadAllText(path);
            return JsonSerializer.Deserialize<DotNetAnalyzerOptions>(json, SerializerOptions) ?? new DotNetAnalyzerOptions();
        }
        catch (Exception ex) when (ex is IOException or JsonException or UnauthorizedAccessException)
        {
            // Best-effort load: a broken config file degrades to defaults rather than aborting.
            return new DotNetAnalyzerOptions();
        }
    }
}

View File

@@ -0,0 +1,7 @@
namespace StellaOps.Scanner.Analyzers.Lang.DotNet.Internal;
/// <summary>
/// One dependency edge loaded from runtime evidence: the target component, the
/// reason the edge exists, a confidence label, and the evidence source that reported it.
/// </summary>
internal sealed record DotNetDependencyEdge(
    string Target,
    string Reason,
    string Confidence,
    string Source);

View File

@@ -0,0 +1,110 @@
using System.Text.Json;
namespace StellaOps.Scanner.Analyzers.Lang.DotNet.Internal;
/// <summary>
/// Loads runtime dependency evidence (JSON-lines format) referenced by
/// <see cref="DotNetAnalyzerOptions.RuntimeEvidencePath"/> and groups the edges per package.
/// </summary>
internal static class DotNetRuntimeEvidenceLoader
{
    /// <summary>
    /// Reads the configured evidence file line by line and returns dependency edges keyed
    /// by lower-cased, trimmed package id. Edges per package are sorted by target for
    /// deterministic output. Missing files and malformed/unreadable lines are skipped:
    /// runtime evidence is best-effort and never fails the analysis.
    /// </summary>
    /// <param name="context">Analyzer context used to resolve the evidence path. Must not be null.</param>
    /// <param name="options">Analyzer options carrying the evidence path and default confidence. Must not be null.</param>
    /// <param name="cancellationToken">Checked once per evidence line.</param>
    public static IReadOnlyDictionary<string, IReadOnlyList<DotNetDependencyEdge>> Load(
        LanguageAnalyzerContext context,
        DotNetAnalyzerOptions options,
        CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(context);
        ArgumentNullException.ThrowIfNull(options);

        if (string.IsNullOrWhiteSpace(options.RuntimeEvidencePath))
        {
            return new Dictionary<string, IReadOnlyList<DotNetDependencyEdge>>(StringComparer.OrdinalIgnoreCase);
        }

        var absolute = context.ResolvePath(options.RuntimeEvidencePath);
        if (!File.Exists(absolute))
        {
            return new Dictionary<string, IReadOnlyList<DotNetDependencyEdge>>(StringComparer.OrdinalIgnoreCase);
        }

        var edges = new Dictionary<string, List<DotNetDependencyEdge>>(StringComparer.OrdinalIgnoreCase);
        var defaultConfidence = string.IsNullOrWhiteSpace(options.RuntimeEvidenceConfidence)
            ? "medium"
            : options.RuntimeEvidenceConfidence!.Trim();

        foreach (var line in File.ReadLines(absolute))
        {
            cancellationToken.ThrowIfCancellationRequested();
            if (string.IsNullOrWhiteSpace(line))
            {
                continue;
            }

            try
            {
                if (!TryParseLine(line, defaultConfidence, out var packageKey, out var edge))
                {
                    continue;
                }

                if (!edges.TryGetValue(packageKey, out var list))
                {
                    list = new List<DotNetDependencyEdge>();
                    edges[packageKey] = list;
                }

                list.Add(edge);
            }
            catch (Exception ex) when (ex is JsonException or IOException or UnauthorizedAccessException)
            {
                // Malformed or unreadable lines are ignored; evidence loading is best-effort.
            }
        }

        return edges.ToDictionary(
            kvp => kvp.Key,
            kvp => (IReadOnlyList<DotNetDependencyEdge>)kvp.Value.OrderBy(edge => edge.Target, StringComparer.OrdinalIgnoreCase).ToArray(),
            StringComparer.OrdinalIgnoreCase);
    }

    // Parses one JSON-lines record. Returns false when required fields
    // ("package", "target") are absent or blank; may throw JsonException for invalid JSON.
    private static bool TryParseLine(
        string line,
        string defaultConfidence,
        out string packageKey,
        out DotNetDependencyEdge edge)
    {
        packageKey = string.Empty;
        edge = null!;

        using var document = JsonDocument.Parse(line);
        var root = document.RootElement;

        var packageId = ReadString(root, "package");
        var target = ReadString(root, "target");
        if (string.IsNullOrWhiteSpace(packageId) || string.IsNullOrWhiteSpace(target))
        {
            return false;
        }

        var reason = ReadString(root, "reason") ?? "runtime";
        var conf = ReadString(root, "confidence") ?? defaultConfidence;
        var source = ReadString(root, "source") ?? "runtime-evidence";

        // Normalize the grouping key once; the original computed Trim().ToLowerInvariant() twice.
        packageKey = packageId!.Trim().ToLowerInvariant();
        edge = new DotNetDependencyEdge(
            Target: target!.Trim(),
            Reason: string.IsNullOrWhiteSpace(reason) ? "runtime" : reason.Trim(),
            Confidence: string.IsNullOrWhiteSpace(conf) ? defaultConfidence : conf.Trim(),
            Source: string.IsNullOrWhiteSpace(source) ? "runtime-evidence" : source.Trim());
        return true;
    }

    // Reads a string-valued property, or null when it is absent or not a JSON string.
    private static string? ReadString(JsonElement root, string propertyName)
        => root.TryGetProperty(propertyName, out var element) && element.ValueKind == JsonValueKind.String
            ? element.GetString()
            : null;
}

View File

@@ -0,0 +1,15 @@
namespace StellaOps.Scanner.Analyzers.Lang.Node.Internal;
/// <summary>
/// Builds canonical declaration keys for Node packages in the form
/// <c>name@version</c>, with the name lower-cased and both parts trimmed.
/// </summary>
internal static class NodeDeclarationKeyBuilder
{
    /// <summary>
    /// Returns <c>name@version</c> (name lower-cased, both trimmed), or the empty
    /// string when either part is null, empty, or whitespace.
    /// </summary>
    public static string Build(string name, string? version)
    {
        var hasName = !string.IsNullOrWhiteSpace(name);
        var hasVersion = !string.IsNullOrWhiteSpace(version);

        return hasName && hasVersion
            ? $"{name.Trim().ToLowerInvariant()}@{version!.Trim()}"
            : string.Empty;
    }
}

Some files were not shown because too many files have changed in this diff Show More