This commit is contained in:
StellaOps Bot
2025-12-09 00:20:52 +02:00
parent 3d01bf9edc
commit bc0762e97d
261 changed files with 14033 additions and 4427 deletions

View File

@@ -9,7 +9,7 @@ using Microsoft.Extensions.Options;
using MongoDB.Bson;
using MongoDB.Driver;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
using StellaOps.Excititor.WebService.Services;
@@ -27,7 +27,7 @@ public static class AttestationEndpoints
// GET /attestations/vex/list - List attestations
app.MapGet("/attestations/vex/list", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IMongoDatabase database,
TimeProvider timeProvider,
[FromQuery] int? limit,
@@ -102,7 +102,7 @@ public static class AttestationEndpoints
app.MapGet("/attestations/vex/{attestationId}", async (
HttpContext context,
string attestationId,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexAttestationLinkStore attestationStore,
TimeProvider timeProvider,
CancellationToken cancellationToken) =>
@@ -209,7 +209,7 @@ public static class AttestationEndpoints
// GET /attestations/vex/lookup - Lookup attestations by linkset or observation
app.MapGet("/attestations/vex/lookup", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IMongoDatabase database,
TimeProvider timeProvider,
[FromQuery] string? linksetId,
@@ -283,7 +283,7 @@ public static class AttestationEndpoints
BuilderId: doc.GetValue("SupplierId", BsonNull.Value).AsString);
}
private static bool TryResolveTenant(HttpContext context, VexMongoStorageOptions options, out string tenant, out IResult? problem)
private static bool TryResolveTenant(HttpContext context, VexStorageOptions options, out string tenant, out IResult? problem)
{
tenant = options.DefaultTenant;
problem = null;

View File

@@ -16,7 +16,7 @@ using MongoDB.Driver;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Core.Canonicalization;
using StellaOps.Excititor.Core.Observations;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
using StellaOps.Excititor.WebService.Services;
using StellaOps.Excititor.WebService.Telemetry;
@@ -36,7 +36,7 @@ public static class EvidenceEndpoints
// GET /evidence/vex/list - List evidence exports
app.MapGet("/evidence/vex/list", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IMongoDatabase database,
TimeProvider timeProvider,
[FromQuery] int? limit,
@@ -114,7 +114,7 @@ public static class EvidenceEndpoints
app.MapGet("/evidence/vex/bundle/{bundleId}", async (
HttpContext context,
string bundleId,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IMongoDatabase database,
TimeProvider timeProvider,
CancellationToken cancellationToken) =>
@@ -191,7 +191,7 @@ public static class EvidenceEndpoints
// GET /evidence/vex/lookup - Lookup evidence for vuln/product pair
app.MapGet("/evidence/vex/lookup", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexObservationProjectionService projectionService,
TimeProvider timeProvider,
[FromQuery] string vulnerabilityId,
@@ -256,7 +256,7 @@ public static class EvidenceEndpoints
app.MapGet("/vuln/evidence/vex/{advisory_key}", async (
HttpContext context,
string advisory_key,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IMongoDatabase database,
TimeProvider timeProvider,
[FromQuery] int? limit,
@@ -446,7 +446,7 @@ public static class EvidenceEndpoints
HttpContext context,
string bundleId,
[FromQuery] string? generation,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
IOptions<AirgapOptions> airgapOptions,
[FromServices] IAirgapImportStore airgapImportStore,
[FromServices] IVexHashingService hashingService,
@@ -528,7 +528,7 @@ public static class EvidenceEndpoints
HttpContext context,
string bundleId,
[FromQuery] string? generation,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
IOptions<AirgapOptions> airgapOptions,
[FromServices] IAirgapImportStore airgapImportStore,
CancellationToken cancellationToken) =>
@@ -575,7 +575,7 @@ public static class EvidenceEndpoints
HttpContext context,
string bundleId,
[FromQuery] string? generation,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
IOptions<AirgapOptions> airgapOptions,
[FromServices] IAirgapImportStore airgapImportStore,
CancellationToken cancellationToken) =>
@@ -679,7 +679,7 @@ public static class EvidenceEndpoints
return (digest, size);
}
private static bool TryResolveTenant(HttpContext context, VexMongoStorageOptions options, out string tenant, out IResult? problem)
private static bool TryResolveTenant(HttpContext context, VexStorageOptions options, out string tenant, out IResult? problem)
{
tenant = options.DefaultTenant;
problem = null;

View File

@@ -20,49 +20,49 @@ internal static class IngestEndpoints
group.MapPost("/reconcile", HandleReconcileAsync);
}
internal static async Task<IResult> HandleInitAsync(
HttpContext httpContext,
ExcititorInitRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
internal static async Task<IResult> HandleInitAsync(
HttpContext httpContext,
ExcititorInitRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
{
var scopeResult = ScopeAuthorization.RequireScope(httpContext, AdminScope);
if (scopeResult is not null)
{
return scopeResult;
}
var providerIds = NormalizeProviders(request.Providers);
_ = timeProvider;
var options = new IngestInitOptions(providerIds, request.Resume ?? false);
{
return scopeResult;
}
var providerIds = NormalizeProviders(request.Providers);
_ = timeProvider;
var options = new IngestInitOptions(providerIds, request.Resume ?? false);
var summary = await orchestrator.InitializeAsync(options, cancellationToken).ConfigureAwait(false);
var message = $"Initialized {summary.ProviderCount} provider(s); {summary.SuccessCount} succeeded, {summary.FailureCount} failed.";
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
displayName = provider.DisplayName,
status = provider.Status,
durationMs = provider.Duration.TotalMilliseconds,
error = provider.Error
})
});
}
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
displayName = provider.DisplayName,
status = provider.Status,
durationMs = provider.Duration.TotalMilliseconds,
error = provider.Error
})
});
}
internal static async Task<IResult> HandleRunAsync(
HttpContext httpContext,
ExcititorIngestRunRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
internal static async Task<IResult> HandleRunAsync(
HttpContext httpContext,
ExcititorIngestRunRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
{
var scopeResult = ScopeAuthorization.RequireScope(httpContext, AdminScope);
if (scopeResult is not null)
@@ -72,98 +72,55 @@ internal static class IngestEndpoints
if (!TryParseDateTimeOffset(request.Since, out var since, out var sinceError))
{
return TypedResults.BadRequest<object>(new { message = sinceError });
}
if (!TryParseTimeSpan(request.Window, out var window, out var windowError))
{
return TypedResults.BadRequest<object>(new { message = windowError });
}
_ = timeProvider;
var providerIds = NormalizeProviders(request.Providers);
var options = new IngestRunOptions(
providerIds,
since,
window,
request.Force ?? false);
var summary = await orchestrator.RunAsync(options, cancellationToken).ConfigureAwait(false);
var message = $"Ingest run completed for {summary.ProviderCount} provider(s); {summary.SuccessCount} succeeded, {summary.FailureCount} failed.";
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
durationMs = summary.Duration.TotalMilliseconds,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
status = provider.Status,
documents = provider.Documents,
claims = provider.Claims,
startedAt = provider.StartedAt,
completedAt = provider.CompletedAt,
durationMs = provider.Duration.TotalMilliseconds,
lastDigest = provider.LastDigest,
lastUpdated = provider.LastUpdated,
checkpoint = provider.Checkpoint,
error = provider.Error
})
});
}
return TypedResults.BadRequest<object>(new { message = sinceError });
}
internal static async Task<IResult> HandleResumeAsync(
HttpContext httpContext,
ExcititorIngestResumeRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
{
var scopeResult = ScopeAuthorization.RequireScope(httpContext, AdminScope);
if (scopeResult is not null)
{
return scopeResult;
}
_ = timeProvider;
var providerIds = NormalizeProviders(request.Providers);
var options = new IngestResumeOptions(providerIds, request.Checkpoint);
var summary = await orchestrator.ResumeAsync(options, cancellationToken).ConfigureAwait(false);
var message = $"Resume run completed for {summary.ProviderCount} provider(s); {summary.SuccessCount} succeeded, {summary.FailureCount} failed.";
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
durationMs = summary.Duration.TotalMilliseconds,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
status = provider.Status,
documents = provider.Documents,
claims = provider.Claims,
startedAt = provider.StartedAt,
completedAt = provider.CompletedAt,
durationMs = provider.Duration.TotalMilliseconds,
since = provider.Since,
checkpoint = provider.Checkpoint,
error = provider.Error
})
});
}
if (!TryParseTimeSpan(request.Window, out var window, out var windowError))
{
return TypedResults.BadRequest<object>(new { message = windowError });
}
internal static async Task<IResult> HandleReconcileAsync(
HttpContext httpContext,
ExcititorReconcileRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
_ = timeProvider;
var providerIds = NormalizeProviders(request.Providers);
var options = new IngestRunOptions(
providerIds,
since,
window,
request.Force ?? false);
var summary = await orchestrator.RunAsync(options, cancellationToken).ConfigureAwait(false);
var message = $"Ingest run completed for {summary.ProviderCount} provider(s); {summary.SuccessCount} succeeded, {summary.FailureCount} failed.";
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
durationMs = summary.Duration.TotalMilliseconds,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
status = provider.Status,
documents = provider.Documents,
claims = provider.Claims,
startedAt = provider.StartedAt,
completedAt = provider.CompletedAt,
durationMs = provider.Duration.TotalMilliseconds,
lastDigest = provider.LastDigest,
lastUpdated = provider.LastUpdated,
checkpoint = provider.Checkpoint,
error = provider.Error
})
});
}
internal static async Task<IResult> HandleResumeAsync(
HttpContext httpContext,
ExcititorIngestResumeRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
{
var scopeResult = ScopeAuthorization.RequireScope(httpContext, AdminScope);
if (scopeResult is not null)
@@ -171,40 +128,83 @@ internal static class IngestEndpoints
return scopeResult;
}
if (!TryParseTimeSpan(request.MaxAge, out var maxAge, out var error))
{
return TypedResults.BadRequest<object>(new { message = error });
}
_ = timeProvider;
var providerIds = NormalizeProviders(request.Providers);
var options = new ReconcileOptions(providerIds, maxAge);
var summary = await orchestrator.ReconcileAsync(options, cancellationToken).ConfigureAwait(false);
var message = $"Reconcile completed for {summary.ProviderCount} provider(s); {summary.ReconciledCount} reconciled, {summary.SkippedCount} skipped, {summary.FailureCount} failed.";
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
durationMs = summary.Duration.TotalMilliseconds,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
status = provider.Status,
action = provider.Action,
lastUpdated = provider.LastUpdated,
threshold = provider.Threshold,
documents = provider.Documents,
claims = provider.Claims,
error = provider.Error
})
});
}
_ = timeProvider;
var providerIds = NormalizeProviders(request.Providers);
var options = new IngestResumeOptions(providerIds, request.Checkpoint);
internal static ImmutableArray<string> NormalizeProviders(IReadOnlyCollection<string>? providers)
var summary = await orchestrator.ResumeAsync(options, cancellationToken).ConfigureAwait(false);
var message = $"Resume run completed for {summary.ProviderCount} provider(s); {summary.SuccessCount} succeeded, {summary.FailureCount} failed.";
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
durationMs = summary.Duration.TotalMilliseconds,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
status = provider.Status,
documents = provider.Documents,
claims = provider.Claims,
startedAt = provider.StartedAt,
completedAt = provider.CompletedAt,
durationMs = provider.Duration.TotalMilliseconds,
since = provider.Since,
checkpoint = provider.Checkpoint,
error = provider.Error
})
});
}
internal static async Task<IResult> HandleReconcileAsync(
HttpContext httpContext,
ExcititorReconcileRequest request,
IVexIngestOrchestrator orchestrator,
TimeProvider timeProvider,
CancellationToken cancellationToken)
{
var scopeResult = ScopeAuthorization.RequireScope(httpContext, AdminScope);
if (scopeResult is not null)
{
return scopeResult;
}
if (!TryParseTimeSpan(request.MaxAge, out var maxAge, out var error))
{
return TypedResults.BadRequest<object>(new { message = error });
}
_ = timeProvider;
var providerIds = NormalizeProviders(request.Providers);
var options = new ReconcileOptions(providerIds, maxAge);
var summary = await orchestrator.ReconcileAsync(options, cancellationToken).ConfigureAwait(false);
var message = $"Reconcile completed for {summary.ProviderCount} provider(s); {summary.ReconciledCount} reconciled, {summary.SkippedCount} skipped, {summary.FailureCount} failed.";
return TypedResults.Ok<object>(new
{
message,
runId = summary.RunId,
startedAt = summary.StartedAt,
completedAt = summary.CompletedAt,
durationMs = summary.Duration.TotalMilliseconds,
providers = summary.Providers.Select(static provider => new
{
providerId = provider.ProviderId,
status = provider.Status,
action = provider.Action,
lastUpdated = provider.LastUpdated,
threshold = provider.Threshold,
documents = provider.Documents,
claims = provider.Claims,
error = provider.Error
})
});
}
internal static ImmutableArray<string> NormalizeProviders(IReadOnlyCollection<string>? providers)
{
if (providers is null || providers.Count == 0)
{
@@ -225,7 +225,7 @@ internal static class IngestEndpoints
return set.ToImmutableArray();
}
internal static bool TryParseDateTimeOffset(string? value, out DateTimeOffset? result, out string? error)
internal static bool TryParseDateTimeOffset(string? value, out DateTimeOffset? result, out string? error)
{
result = null;
error = null;
@@ -249,7 +249,7 @@ internal static class IngestEndpoints
return false;
}
internal static bool TryParseTimeSpan(string? value, out TimeSpan? result, out string? error)
internal static bool TryParseTimeSpan(string? value, out TimeSpan? result, out string? error)
{
result = null;
error = null;
@@ -269,19 +269,19 @@ internal static class IngestEndpoints
return false;
}
internal sealed record ExcititorInitRequest(IReadOnlyList<string>? Providers, bool? Resume);
internal sealed record ExcititorIngestRunRequest(
IReadOnlyList<string>? Providers,
string? Since,
string? Window,
bool? Force);
internal sealed record ExcititorIngestResumeRequest(
IReadOnlyList<string>? Providers,
string? Checkpoint);
internal sealed record ExcititorReconcileRequest(
IReadOnlyList<string>? Providers,
string? MaxAge);
}
internal sealed record ExcititorInitRequest(IReadOnlyList<string>? Providers, bool? Resume);
internal sealed record ExcititorIngestRunRequest(
IReadOnlyList<string>? Providers,
string? Since,
string? Window,
bool? Force);
internal sealed record ExcititorIngestResumeRequest(
IReadOnlyList<string>? Providers,
string? Checkpoint);
internal sealed record ExcititorReconcileRequest(
IReadOnlyList<string>? Providers,
string? MaxAge);
}

View File

@@ -10,7 +10,7 @@ using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Options;
using StellaOps.Excititor.Core.Canonicalization;
using StellaOps.Excititor.Core.Observations;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
using StellaOps.Excititor.WebService.Services;
using StellaOps.Excititor.WebService.Telemetry;
@@ -32,7 +32,7 @@ public static class LinksetEndpoints
// GET /vex/linksets - List linksets with filters
group.MapGet("", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexLinksetStore linksetStore,
[FromQuery] int? limit,
[FromQuery] string? cursor,
@@ -124,7 +124,7 @@ public static class LinksetEndpoints
group.MapGet("/{linksetId}", async (
HttpContext context,
string linksetId,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexLinksetStore linksetStore,
CancellationToken cancellationToken) =>
{
@@ -166,7 +166,7 @@ public static class LinksetEndpoints
// GET /vex/linksets/lookup - Lookup linkset by vulnerability and product
group.MapGet("/lookup", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexLinksetStore linksetStore,
[FromQuery] string? vulnerabilityId,
[FromQuery] string? productKey,
@@ -211,7 +211,7 @@ public static class LinksetEndpoints
// GET /vex/linksets/count - Get linkset counts for tenant
group.MapGet("/count", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexLinksetStore linksetStore,
CancellationToken cancellationToken) =>
{
@@ -240,7 +240,7 @@ public static class LinksetEndpoints
// GET /vex/linksets/conflicts - List linksets with conflicts (shorthand)
group.MapGet("/conflicts", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexLinksetStore linksetStore,
[FromQuery] int? limit,
CancellationToken cancellationToken) =>
@@ -317,7 +317,7 @@ public static class LinksetEndpoints
private static bool TryResolveTenant(
HttpContext context,
VexMongoStorageOptions options,
VexStorageOptions options,
out string tenant,
out IResult? problem)
{

View File

@@ -8,8 +8,8 @@ using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Export;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.WebService.Services;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Services;
namespace StellaOps.Excititor.WebService.Endpoints;
@@ -98,13 +98,13 @@ internal static class MirrorEndpoints
}
var resolvedExports = new List<MirrorExportIndexEntry>();
foreach (var exportOption in domain.Exports)
{
if (!MirrorExportPlanner.TryBuild(exportOption, out var plan, out var error))
{
resolvedExports.Add(new MirrorExportIndexEntry(
exportOption.Key,
null,
foreach (var exportOption in domain.Exports)
{
if (!MirrorExportPlanner.TryBuild(exportOption, out var plan, out var error))
{
resolvedExports.Add(new MirrorExportIndexEntry(
exportOption.Key,
null,
null,
exportOption.Format,
null,
@@ -116,7 +116,7 @@ internal static class MirrorEndpoints
continue;
}
var manifest = await exportStore.FindAsync(plan.Signature, plan.Format, cancellationToken).ConfigureAwait(false);
var manifest = await exportStore.FindAsync(plan.Signature, plan.Format, cancellationToken).ConfigureAwait(false);
if (manifest is null)
{
@@ -177,16 +177,16 @@ internal static class MirrorEndpoints
return Results.Unauthorized();
}
if (!TryFindExport(domain, exportKey, out var exportOptions))
{
return Results.NotFound();
}
if (!MirrorExportPlanner.TryBuild(exportOptions, out var plan, out var error))
{
await WritePlainTextAsync(httpContext, error ?? "invalid_export_configuration", StatusCodes.Status503ServiceUnavailable, cancellationToken).ConfigureAwait(false);
return Results.Empty;
}
if (!TryFindExport(domain, exportKey, out var exportOptions))
{
return Results.NotFound();
}
if (!MirrorExportPlanner.TryBuild(exportOptions, out var plan, out var error))
{
await WritePlainTextAsync(httpContext, error ?? "invalid_export_configuration", StatusCodes.Status503ServiceUnavailable, cancellationToken).ConfigureAwait(false);
return Results.Empty;
}
var manifest = await exportStore.FindAsync(plan.Signature, plan.Format, cancellationToken).ConfigureAwait(false);
if (manifest is null)
@@ -241,10 +241,10 @@ internal static class MirrorEndpoints
return Results.Empty;
}
if (!TryFindExport(domain, exportKey, out var exportOptions) || !MirrorExportPlanner.TryBuild(exportOptions, out var plan, out _))
{
return Results.NotFound();
}
if (!TryFindExport(domain, exportKey, out var exportOptions) || !MirrorExportPlanner.TryBuild(exportOptions, out var plan, out _))
{
return Results.NotFound();
}
var manifest = await exportStore.FindAsync(plan.Signature, plan.Format, cancellationToken).ConfigureAwait(false);
if (manifest is null)
@@ -286,36 +286,36 @@ internal static class MirrorEndpoints
return domain is not null;
}
private static bool TryFindExport(MirrorDomainOptions domain, string exportKey, out MirrorExportOptions export)
{
export = domain.Exports.FirstOrDefault(e => string.Equals(e.Key, exportKey, StringComparison.OrdinalIgnoreCase))!;
return export is not null;
}
private static bool TryFindExport(MirrorDomainOptions domain, string exportKey, out MirrorExportOptions export)
{
export = domain.Exports.FirstOrDefault(e => string.Equals(e.Key, exportKey, StringComparison.OrdinalIgnoreCase))!;
return export is not null;
}
private static string ResolveContentType(VexExportFormat format)
=> format switch
{
VexExportFormat.Json => "application/json",
VexExportFormat.JsonLines => "application/jsonl",
VexExportFormat.OpenVex => "application/json",
VexExportFormat.Csaf => "application/json",
VexExportFormat.CycloneDx => "application/json",
_ => "application/octet-stream",
};
private static string ResolveContentType(VexExportFormat format)
=> format switch
{
VexExportFormat.Json => "application/json",
VexExportFormat.JsonLines => "application/jsonl",
VexExportFormat.OpenVex => "application/json",
VexExportFormat.Csaf => "application/json",
VexExportFormat.CycloneDx => "application/json",
_ => "application/octet-stream",
};
private static string BuildDownloadFileName(string domainId, string exportKey, VexExportFormat format)
{
var builder = new StringBuilder(domainId.Length + exportKey.Length + 8);
builder.Append(domainId).Append('-').Append(exportKey);
builder.Append(format switch
{
VexExportFormat.Json => ".json",
VexExportFormat.JsonLines => ".jsonl",
VexExportFormat.OpenVex => ".openvex.json",
VexExportFormat.Csaf => ".csaf.json",
VexExportFormat.CycloneDx => ".cyclonedx.json",
_ => ".bin",
});
builder.Append(format switch
{
VexExportFormat.Json => ".json",
VexExportFormat.JsonLines => ".jsonl",
VexExportFormat.OpenVex => ".openvex.json",
VexExportFormat.Csaf => ".csaf.json",
VexExportFormat.CycloneDx => ".cyclonedx.json",
_ => ".bin",
});
return builder.ToString();
}
@@ -326,15 +326,15 @@ internal static class MirrorEndpoints
await context.Response.WriteAsync(message, cancellationToken);
}
private static async Task WriteJsonAsync<T>(HttpContext context, T payload, int statusCode, CancellationToken cancellationToken)
{
context.Response.StatusCode = statusCode;
context.Response.ContentType = "application/json";
var json = VexCanonicalJsonSerializer.Serialize(payload);
await context.Response.WriteAsync(json, cancellationToken);
private static async Task WriteJsonAsync<T>(HttpContext context, T payload, int statusCode, CancellationToken cancellationToken)
{
context.Response.StatusCode = statusCode;
context.Response.ContentType = "application/json";
var json = VexCanonicalJsonSerializer.Serialize(payload);
await context.Response.WriteAsync(json, cancellationToken);
}
}
}
internal sealed record MirrorDomainListResponse(IReadOnlyList<MirrorDomainSummary> Domains);

View File

@@ -8,7 +8,7 @@ using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Logging;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
namespace StellaOps.Excititor.WebService.Endpoints;

View File

@@ -6,7 +6,7 @@ using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Options;
using StellaOps.Excititor.Core.Observations;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
using StellaOps.Excititor.WebService.Services;
@@ -26,7 +26,7 @@ public static class ObservationEndpoints
// GET /vex/observations - List observations with filters
group.MapGet("", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexObservationStore observationStore,
TimeProvider timeProvider,
[FromQuery] int? limit,
@@ -98,7 +98,7 @@ public static class ObservationEndpoints
group.MapGet("/{observationId}", async (
HttpContext context,
string observationId,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexObservationStore observationStore,
CancellationToken cancellationToken) =>
{
@@ -140,7 +140,7 @@ public static class ObservationEndpoints
// GET /vex/observations/count - Get observation count for tenant
group.MapGet("/count", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexObservationStore observationStore,
CancellationToken cancellationToken) =>
{
@@ -230,7 +230,7 @@ public static class ObservationEndpoints
private static bool TryResolveTenant(
HttpContext context,
VexMongoStorageOptions options,
VexStorageOptions options,
out string tenant,
out IResult? problem)
{

View File

@@ -11,7 +11,7 @@ using Microsoft.Extensions.Options;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Core.Canonicalization;
using StellaOps.Excititor.Core.Orchestration;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
using StellaOps.Excititor.WebService.Services;
@@ -33,7 +33,7 @@ public static class PolicyEndpoints
private static async Task<IResult> LookupVexAsync(
HttpContext context,
[FromBody] PolicyVexLookupRequest request,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexClaimStore claimStore,
TimeProvider timeProvider,
CancellationToken cancellationToken)
@@ -174,7 +174,7 @@ public static class PolicyEndpoints
private static bool TryResolveTenant(
HttpContext context,
VexMongoStorageOptions options,
VexStorageOptions options,
out string tenant,
out IResult? problem)
{

View File

@@ -5,55 +5,55 @@ using System.Collections.Generic;
using System.Collections.Immutable;
using System.Linq;
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Excititor.Attestation;
using StellaOps.Excititor.Attestation.Dsse;
using StellaOps.Excititor.Attestation.Signing;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Policy;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.WebService.Services;
internal static class ResolveEndpoint
{
private const int MaxSubjectPairs = 256;
private const string ReadScope = "vex.read";
public static void MapResolveEndpoint(WebApplication app)
{
app.MapPost("/excititor/resolve", HandleResolveAsync);
}
using System.Text;
using System.Text.Json;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Excititor.Attestation;
using StellaOps.Excititor.Attestation.Dsse;
using StellaOps.Excititor.Attestation.Signing;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Policy;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Services;
internal static class ResolveEndpoint
{
private const int MaxSubjectPairs = 256;
private const string ReadScope = "vex.read";
public static void MapResolveEndpoint(WebApplication app)
{
app.MapPost("/excititor/resolve", HandleResolveAsync);
}
private static async Task<IResult> HandleResolveAsync(
VexResolveRequest request,
HttpContext httpContext,
IVexClaimStore claimStore,
IVexConsensusStore consensusStore,
IVexProviderStore providerStore,
IVexPolicyProvider policyProvider,
TimeProvider timeProvider,
ILoggerFactory loggerFactory,
IVexAttestationClient? attestationClient,
CancellationToken cancellationToken)
{
var scopeResult = ScopeAuthorization.RequireScope(httpContext, ReadScope);
if (scopeResult is not null)
{
return scopeResult;
}
if (request is null)
{
return Results.BadRequest("Request payload is required.");
}
var logger = loggerFactory.CreateLogger("ResolveEndpoint");
var signer = httpContext.RequestServices.GetService<IVexSigner>();
IVexProviderStore providerStore,
IVexPolicyProvider policyProvider,
TimeProvider timeProvider,
ILoggerFactory loggerFactory,
IVexAttestationClient? attestationClient,
CancellationToken cancellationToken)
{
var scopeResult = ScopeAuthorization.RequireScope(httpContext, ReadScope);
if (scopeResult is not null)
{
return scopeResult;
}
if (request is null)
{
return Results.BadRequest("Request payload is required.");
}
var logger = loggerFactory.CreateLogger("ResolveEndpoint");
var signer = httpContext.RequestServices.GetService<IVexSigner>();
var productKeys = NormalizeValues(request.ProductKeys, request.Purls);
var vulnerabilityIds = NormalizeValues(request.VulnerabilityIds);

View File

@@ -6,7 +6,7 @@ using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Options;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Core.RiskFeed;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Services;
namespace StellaOps.Excititor.WebService.Endpoints;
@@ -25,7 +25,7 @@ public static class RiskFeedEndpoints
// POST /risk/v1/feed - Generate risk feed
group.MapPost("/feed", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IRiskFeedService riskFeedService,
[FromBody] RiskFeedRequestDto request,
CancellationToken cancellationToken) =>
@@ -67,7 +67,7 @@ public static class RiskFeedEndpoints
// GET /risk/v1/feed/item - Get single risk feed item
group.MapGet("/feed/item", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IRiskFeedService riskFeedService,
[FromQuery] string? advisoryKey,
[FromQuery] string? artifact,
@@ -112,7 +112,7 @@ public static class RiskFeedEndpoints
group.MapGet("/feed/by-advisory/{advisoryKey}", async (
HttpContext context,
string advisoryKey,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IRiskFeedService riskFeedService,
[FromQuery] int? limit,
CancellationToken cancellationToken) =>
@@ -153,7 +153,7 @@ public static class RiskFeedEndpoints
group.MapGet("/feed/by-artifact/{**artifact}", async (
HttpContext context,
string artifact,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IRiskFeedService riskFeedService,
[FromQuery] int? limit,
CancellationToken cancellationToken) =>
@@ -235,7 +235,7 @@ public static class RiskFeedEndpoints
private static bool TryResolveTenant(
HttpContext context,
VexMongoStorageOptions options,
VexStorageOptions options,
out string tenant,
out IResult? problem)
{

View File

@@ -0,0 +1,71 @@
using System.Collections.Immutable;
using System.Text.Json;
using StellaOps.Concelier.RawModels;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Core.Storage;
namespace StellaOps.Excititor.WebService.Extensions;
/// <summary>
/// Maps persisted <see cref="VexRawRecord"/> rows onto the Concelier raw-document
/// model (<see cref="VexRawDocument"/>), sourcing optional fields from the record's
/// metadata dictionary with fallbacks to the record's own properties.
/// </summary>
internal static class VexRawDocumentMapper
{
/// <summary>
/// Builds a <see cref="VexRawDocument"/> from a stored record.
/// Metadata keys (e.g. "source.vendor", "signature.sig", "upstream.id") override
/// values derived from the record; <paramref name="defaultTenant"/> is used only
/// when neither the metadata nor the record supplies a tenant.
/// </summary>
/// <param name="record">Persisted raw VEX record; must not be null.</param>
/// <param name="defaultTenant">Tenant applied when the record carries none.</param>
/// <returns>A fully populated raw document with an empty linkset and no statements.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="record"/> is null.</exception>
public static VexRawDocument ToRawModel(VexRawRecord record, string defaultTenant)
{
ArgumentNullException.ThrowIfNull(record);
// Older records may have null metadata; normalize to an empty map so every
// Get(...) lookup below is safe.
var metadata = record.Metadata ?? ImmutableDictionary<string, string>.Empty;
// Tenant precedence: metadata override -> record value -> caller-supplied default.
var tenant = Get(metadata, "tenant", record.Tenant) ?? defaultTenant;
// Source identity falls back to the provider id when connector details were
// not recorded; the stream defaults to the lower-cased document format name.
var source = new RawSourceMetadata(
Vendor: Get(metadata, "source.vendor", record.ProviderId) ?? record.ProviderId,
Connector: Get(metadata, "source.connector", record.ProviderId) ?? record.ProviderId,
ConnectorVersion: Get(metadata, "source.connector_version", "unknown") ?? "unknown",
Stream: Get(metadata, "source.stream", record.Format.ToString().ToLowerInvariant()));
// Signature presence is stored as the literal string "true"; all other signature
// fields are optional and pass through as-is (null when absent).
var signature = new RawSignatureMetadata(
Present: string.Equals(Get(metadata, "signature.present"), "true", StringComparison.OrdinalIgnoreCase),
Format: Get(metadata, "signature.format"),
KeyId: Get(metadata, "signature.key_id"),
Signature: Get(metadata, "signature.sig"),
Certificate: Get(metadata, "signature.certificate"),
Digest: Get(metadata, "signature.digest"));
// Both the upstream id and the content hash default to the record digest when
// the upstream did not provide its own identifiers. The entire metadata map is
// preserved as provenance.
var upstream = new RawUpstreamMetadata(
UpstreamId: Get(metadata, "upstream.id", record.Digest) ?? record.Digest,
DocumentVersion: Get(metadata, "upstream.version"),
RetrievedAt: record.RetrievedAt,
ContentHash: Get(metadata, "upstream.content_hash", record.Digest) ?? record.Digest,
Signature: signature,
Provenance: metadata);
// The raw payload is parsed as JSON here; ParseJson throws on malformed content,
// so callers receive an exception rather than a partially built document.
var content = new RawContent(
Format: record.Format.ToString().ToLowerInvariant(),
SpecVersion: Get(metadata, "content.spec_version"),
Raw: ParseJson(record.Content),
Encoding: Get(metadata, "content.encoding"));
// Linkset starts empty and statements are not materialized at this layer;
// presumably downstream normalization fills them in — TODO confirm.
return new VexRawDocument(
tenant,
source,
upstream,
content,
new RawLinkset(),
statements: null,
supersedes: record.SupersedesDigest);
}
/// <summary>
/// Returns the metadata value for <paramref name="key"/> when present and
/// non-whitespace; otherwise returns <paramref name="fallback"/> (which may be null).
/// </summary>
private static string? Get(IReadOnlyDictionary<string, string> metadata, string key, string? fallback = null)
{
if (metadata.TryGetValue(key, out var value) && !string.IsNullOrWhiteSpace(value))
{
return value;
}
return fallback;
}
/// <summary>
/// Parses the raw bytes as JSON and returns a detached root element.
/// Clone() is required because the element must outlive the disposed JsonDocument.
/// </summary>
private static JsonElement ParseJson(ReadOnlyMemory<byte> content)
{
using var document = JsonDocument.Parse(content);
return document.RootElement.Clone();
}
}

View File

@@ -6,17 +6,16 @@ using System.Linq;
using System.Text;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.Primitives;
using MongoDB.Bson;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Core.Aoc;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
using StellaOps.Excititor.WebService.Services;
public partial class Program
{
private const string TenantHeaderName = "X-Stella-Tenant";
private static bool TryResolveTenant(HttpContext context, VexMongoStorageOptions options, bool requireHeader, out string tenant, out IResult? problem)
private static bool TryResolveTenant(HttpContext context, VexStorageOptions options, bool requireHeader, out string tenant, out IResult? problem)
{
tenant = options.DefaultTenant;
problem = null;
@@ -51,27 +50,6 @@ public partial class Program
return true;
}
private static IReadOnlyDictionary<string, string> ReadMetadata(BsonValue value)
{
if (value is not BsonDocument doc || doc.ElementCount == 0)
{
return new Dictionary<string, string>(StringComparer.Ordinal);
}
var result = new Dictionary<string, string>(StringComparer.Ordinal);
foreach (var element in doc.Elements)
{
if (string.IsNullOrWhiteSpace(element.Name))
{
continue;
}
result[element.Name] = element.Value?.ToString() ?? string.Empty;
}
return result;
}
private static bool TryDecodeCursor(string? cursor, out DateTimeOffset timestamp, out string digest)
{
timestamp = default;

View File

@@ -27,28 +27,27 @@ using StellaOps.Excititor.Formats.CSAF;
using StellaOps.Excititor.Formats.CycloneDX;
using StellaOps.Excititor.Formats.OpenVEX;
using StellaOps.Excititor.Policy;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Storage.Postgres;
using StellaOps.Excititor.WebService.Endpoints;
using StellaOps.Excititor.WebService.Extensions;
using StellaOps.Excititor.WebService.Options;
using StellaOps.Excititor.WebService.Services;
using StellaOps.Excititor.Core.Aoc;
using StellaOps.Excititor.WebService.Telemetry;
using MongoDB.Driver;
using MongoDB.Bson;
using Microsoft.Extensions.Caching.Memory;
using StellaOps.Excititor.WebService.Contracts;
using System.Globalization;
using StellaOps.Excititor.WebService.Graph;
using StellaOps.Excititor.Core.Storage;
var builder = WebApplication.CreateBuilder(args);
var configuration = builder.Configuration;
var services = builder.Services;
services.AddOptions<VexMongoStorageOptions>()
.Bind(configuration.GetSection("Excititor:Storage:Mongo"))
services.AddOptions<VexStorageOptions>()
.Bind(configuration.GetSection("Excititor:Storage"))
.ValidateOnStart();
services.AddExcititorMongoStorage();
services.AddExcititorPostgresStorage(configuration);
services.AddCsafNormalizer();
services.AddCycloneDxNormalizer();
services.AddOpenVexNormalizer();
@@ -147,7 +146,7 @@ app.UseObservabilityHeaders();
app.MapGet("/excititor/status", async (HttpContext context,
IEnumerable<IVexArtifactStore> artifactStores,
IOptions<VexMongoStorageOptions> mongoOptions,
IOptions<VexStorageOptions> mongoOptions,
TimeProvider timeProvider) =>
{
var payload = new StatusResponse(
@@ -1260,7 +1259,7 @@ app.MapPost("/excititor/admin/backfill-statements", async (
app.MapGet("/console/vex", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
IVexObservationQueryService queryService,
ConsoleTelemetry telemetry,
IMemoryCache cache,
@@ -1459,7 +1458,7 @@ var response = new GraphLinkoutsResponse(items, notFound);
app.MapGet("/v1/graph/status", async (
HttpContext context,
[FromQuery(Name = "purl")] string[]? purls,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
IOptions<GraphOptions> graphOptions,
IVexObservationQueryService queryService,
IMemoryCache cache,
@@ -1519,7 +1518,7 @@ app.MapGet("/v1/graph/overlays", async (
HttpContext context,
[FromQuery(Name = "purl")] string[]? purls,
[FromQuery] bool includeJustifications,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
IOptions<GraphOptions> graphOptions,
IVexObservationQueryService queryService,
IMemoryCache cache,
@@ -1580,7 +1579,7 @@ app.MapGet("/v1/graph/observations", async (
[FromQuery] bool includeJustifications,
[FromQuery] int? limitPerPurl,
[FromQuery] string? cursor,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
IOptions<GraphOptions> graphOptions,
IVexObservationQueryService queryService,
CancellationToken cancellationToken) =>
@@ -1638,7 +1637,7 @@ app.MapPost("/ingest/vex", async (
HttpContext context,
VexIngestRequest request,
IVexRawStore rawStore,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
TimeProvider timeProvider,
ILogger<Program> logger,
CancellationToken cancellationToken) =>
@@ -1692,8 +1691,8 @@ app.MapPost("/ingest/vex", async (
app.MapGet("/vex/raw", async (
HttpContext context,
IMongoDatabase database,
IOptions<VexMongoStorageOptions> storageOptions,
IVexRawStore rawStore,
IOptions<VexStorageOptions> storageOptions,
CancellationToken cancellationToken) =>
{
var scopeResult = ScopeAuthorization.RequireScope(context, "vex.read");
@@ -1702,132 +1701,69 @@ app.MapGet("/vex/raw", async (
return scopeResult;
}
if (!TryResolveTenant(context, storageOptions.Value, requireHeader: false, out _, out var tenantError))
if (!TryResolveTenant(context, storageOptions.Value, requireHeader: false, out var tenant, out var tenantError))
{
return tenantError;
}
var collection = database.GetCollection<BsonDocument>(VexMongoCollectionNames.Raw);
var query = context.Request.Query;
var filters = new List<FilterDefinition<BsonDocument>>();
var builder = Builders<BsonDocument>.Filter;
var providerFilter = BuildStringFilterSet(query["providerId"]);
var digestFilter = BuildStringFilterSet(query["digest"]);
var formatFilter = query.TryGetValue("format", out var formats)
? formats
.Where(static f => !string.IsNullOrWhiteSpace(f))
.Select(static f => Enum.TryParse<VexDocumentFormat>(f, true, out var parsed) ? parsed : VexDocumentFormat.Unknown)
.Where(static f => f != VexDocumentFormat.Unknown)
.ToArray()
: Array.Empty<VexDocumentFormat>();
if (query.TryGetValue("providerId", out var providerValues))
{
var providers = providerValues
.Where(static value => !string.IsNullOrWhiteSpace(value))
.Select(static value => value!.Trim())
.ToArray();
if (providers.Length > 0)
{
filters.Add(builder.In("ProviderId", providers));
}
}
if (query.TryGetValue("digest", out var digestValues))
{
var digests = digestValues
.Where(static value => !string.IsNullOrWhiteSpace(value))
.Select(static value => value!.Trim())
.ToArray();
if (digests.Length > 0)
{
filters.Add(builder.In("Digest", digests));
}
}
if (query.TryGetValue("format", out var formatValues))
{
var formats = formatValues
.Where(static value => !string.IsNullOrWhiteSpace(value))
.Select(static value => value!.Trim().ToLowerInvariant())
.ToArray();
if (formats.Length > 0)
{
filters.Add(builder.In("Format", formats));
}
}
if (query.TryGetValue("since", out var sinceValues) && DateTimeOffset.TryParse(sinceValues.FirstOrDefault(), CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal, out var sinceValue))
{
filters.Add(builder.Gte("RetrievedAt", sinceValue.UtcDateTime));
}
var since = ParseSinceTimestamp(query["since"]);
var cursorToken = query.TryGetValue("cursor", out var cursorValues) ? cursorValues.FirstOrDefault() : null;
DateTime? cursorTimestamp = null;
string? cursorDigest = null;
if (!string.IsNullOrWhiteSpace(cursorToken) && TryDecodeCursor(cursorToken, out var cursorTime, out var cursorId))
VexRawCursor? cursor = null;
if (!string.IsNullOrWhiteSpace(cursorToken) &&
TryDecodeCursor(cursorToken, out var cursorTime, out var cursorId))
{
cursorTimestamp = cursorTime.UtcDateTime;
cursorDigest = cursorId;
cursor = new VexRawCursor(cursorTime, cursorId);
}
if (cursorTimestamp is not null && cursorDigest is not null)
{
var ltTime = builder.Lt("RetrievedAt", cursorTimestamp.Value);
var eqTimeLtDigest = builder.And(
builder.Eq("RetrievedAt", cursorTimestamp.Value),
builder.Lt("Digest", cursorDigest));
filters.Add(builder.Or(ltTime, eqTimeLtDigest));
}
var limit = ResolveLimit(query["limit"], defaultValue: 50, min: 1, max: 200);
var limit = 50;
if (query.TryGetValue("limit", out var limitValues) && int.TryParse(limitValues.FirstOrDefault(), NumberStyles.Integer, CultureInfo.InvariantCulture, out var requestedLimit))
{
limit = Math.Clamp(requestedLimit, 1, 200);
}
var page = await rawStore.QueryAsync(
new VexRawQuery(
tenant,
providerFilter,
digestFilter,
formatFilter,
since,
Until: null,
cursor,
limit),
cancellationToken).ConfigureAwait(false);
var filter = filters.Count == 0 ? builder.Empty : builder.And(filters);
var sort = Builders<BsonDocument>.Sort.Descending("RetrievedAt").Descending("Digest");
var documents = await collection
.Find(filter)
.Sort(sort)
.Limit(limit)
.Project(Builders<BsonDocument>.Projection.Include("Digest").Include("ProviderId").Include("Format").Include("SourceUri").Include("RetrievedAt").Include("Metadata").Include("GridFsObjectId"))
.ToListAsync(cancellationToken)
.ConfigureAwait(false);
var summaries = page.Items
.Select(summary => new VexRawSummaryResponse(
summary.Digest,
summary.ProviderId,
summary.Format.ToString().ToLowerInvariant(),
summary.SourceUri.ToString(),
summary.RetrievedAt,
summary.InlineContent,
summary.Metadata))
.ToList();
var summaries = new List<VexRawSummaryResponse>(documents.Count);
foreach (var document in documents)
{
var digest = document.TryGetValue("Digest", out var digestValue) && digestValue.IsString ? digestValue.AsString : string.Empty;
var providerId = document.TryGetValue("ProviderId", out var providerValue) && providerValue.IsString ? providerValue.AsString : string.Empty;
var format = document.TryGetValue("Format", out var formatValue) && formatValue.IsString ? formatValue.AsString : string.Empty;
var sourceUri = document.TryGetValue("SourceUri", out var sourceValue) && sourceValue.IsString ? sourceValue.AsString : string.Empty;
var retrievedAt = document.TryGetValue("RetrievedAt", out var retrievedValue) && retrievedValue is BsonDateTime bsonDate
? bsonDate.ToUniversalTime()
: DateTime.UtcNow;
var metadata = ReadMetadata(document.TryGetValue("Metadata", out var metadataValue) ? metadataValue : BsonNull.Value);
var inlineContent = !document.TryGetValue("GridFsObjectId", out var gridId) || gridId.IsBsonNull || (gridId.IsString && string.IsNullOrWhiteSpace(gridId.AsString));
var nextCursor = page.NextCursor is null
? null
: EncodeCursor(page.NextCursor.RetrievedAt.UtcDateTime, page.NextCursor.Digest);
summaries.Add(new VexRawSummaryResponse(
digest,
providerId,
format,
sourceUri,
new DateTimeOffset(retrievedAt),
inlineContent,
metadata));
}
var hasMore = documents.Count == limit;
string? nextCursor = null;
if (hasMore && documents.Count > 0)
{
var last = documents[^1];
var lastTime = last.GetValue("RetrievedAt", BsonNull.Value).ToUniversalTime();
var lastDigest = last.GetValue("Digest", BsonNull.Value).AsString;
nextCursor = EncodeCursor(lastTime, lastDigest);
}
return Results.Json(new VexRawListResponse(summaries, nextCursor, hasMore));
return Results.Json(new VexRawListResponse(summaries, nextCursor, page.HasMore));
});
app.MapGet("/vex/raw/{digest}", async (
string digest,
HttpContext context,
IVexRawStore rawStore,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
CancellationToken cancellationToken) =>
{
var scopeResult = ScopeAuthorization.RequireScope(context, "vex.read");
@@ -1861,7 +1797,7 @@ app.MapGet("/vex/raw/{digest}/provenance", async (
string digest,
HttpContext context,
IVexRawStore rawStore,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
CancellationToken cancellationToken) =>
{
var scopeResult = ScopeAuthorization.RequireScope(context, "vex.read");
@@ -1901,7 +1837,7 @@ app.MapGet("/v1/vex/observations/{vulnerabilityId}/{productKey}", async (
string vulnerabilityId,
string productKey,
[FromServices] IVexObservationProjectionService projectionService,
[FromServices] IOptions<VexMongoStorageOptions> storageOptions,
[FromServices] IOptions<VexStorageOptions> storageOptions,
CancellationToken cancellationToken) =>
{
var scopeResult = ScopeAuthorization.RequireScope(context, "vex.read");
@@ -1977,7 +1913,7 @@ app.MapGet("/v1/vex/observations/{vulnerabilityId}/{productKey}", async (
app.MapGet("/v1/vex/evidence/chunks", async (
HttpContext context,
[FromServices] IVexEvidenceChunkService chunkService,
[FromServices] IOptions<VexMongoStorageOptions> storageOptions,
[FromServices] IOptions<VexStorageOptions> storageOptions,
[FromServices] ChunkTelemetry chunkTelemetry,
[FromServices] ILogger<VexEvidenceChunkRequest> logger,
[FromServices] TimeProvider timeProvider,
@@ -2083,10 +2019,9 @@ app.MapGet("/v1/vex/evidence/chunks", async (
app.MapPost("/aoc/verify", async (
HttpContext context,
VexAocVerifyRequest? request,
IMongoDatabase database,
IVexRawStore rawStore,
IVexRawWriteGuard guard,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
TimeProvider timeProvider,
CancellationToken cancellationToken) =>
{
@@ -2119,33 +2054,26 @@ app.MapPost("/aoc/verify", async (
.Select(static value => value!.Trim())
.ToArray();
var builder = Builders<BsonDocument>.Filter;
var filter = builder.And(
builder.Gte("RetrievedAt", since),
builder.Lte("RetrievedAt", until));
if (sources is { Length: > 0 })
{
filter &= builder.In("ProviderId", sources);
}
var collection = database.GetCollection<BsonDocument>(VexMongoCollectionNames.Raw);
var digests = await collection
.Find(filter)
.Sort(Builders<BsonDocument>.Sort.Descending("RetrievedAt"))
.Limit(limit)
.Project(Builders<BsonDocument>.Projection.Include("Digest").Include("RetrievedAt").Include("ProviderId"))
.ToListAsync(cancellationToken)
.ConfigureAwait(false);
var page = await rawStore.QueryAsync(
new VexRawQuery(
tenant,
sources ?? Array.Empty<string>(),
Array.Empty<string>(),
Array.Empty<VexDocumentFormat>(),
since: new DateTimeOffset(since, TimeSpan.Zero),
until: new DateTimeOffset(until, TimeSpan.Zero),
cursor: null,
limit),
cancellationToken).ConfigureAwait(false);
var checkedCount = 0;
var violationMap = new Dictionary<string, (int Count, List<VexAocVerifyViolationExample> Examples)>(StringComparer.OrdinalIgnoreCase);
const int MaxExamplesPerCode = 5;
foreach (var digestDocument in digests)
foreach (var item in page.Items)
{
var digestValue = digestDocument.GetValue("Digest", BsonNull.Value).AsString;
var provider = digestDocument.GetValue("ProviderId", BsonNull.Value).AsString;
var digestValue = item.Digest;
var provider = item.ProviderId;
var domainDocument = await rawStore.FindByDigestAsync(digestValue, cancellationToken).ConfigureAwait(false);
if (domainDocument is null)
@@ -2202,7 +2130,7 @@ app.MapPost("/aoc/verify", async (
new VexAocVerifyChecked(0, checkedCount),
violations,
new VexAocVerifyMetrics(checkedCount, violations.Sum(v => v.Count)),
digests.Count == limit);
page.HasMore);
return Results.Json(response);
});
@@ -2225,7 +2153,7 @@ app.MapGet("/obs/excititor/health", async (
// VEX timeline SSE (WEB-OBS-52-001)
app.MapGet("/obs/excititor/timeline", async (
HttpContext context,
IOptions<VexMongoStorageOptions> storageOptions,
IOptions<VexStorageOptions> storageOptions,
[FromServices] IVexTimelineEventStore timelineStore,
TimeProvider timeProvider,
ILoggerFactory loggerFactory,

View File

@@ -1,4 +1,4 @@
using System.Runtime.CompilerServices;
[assembly: InternalsVisibleTo("StellaOps.Excititor.WebService.Tests")]
[assembly: InternalsVisibleTo("StellaOps.Excititor.Core.UnitTests")]
[assembly: InternalsVisibleTo("StellaOps.Excititor.WebService.Tests")]
[assembly: InternalsVisibleTo("StellaOps.Excititor.Core.UnitTests")]

View File

@@ -7,7 +7,7 @@ using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Excititor.Connectors.Abstractions;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Options;
namespace StellaOps.Excititor.WebService.Services;

View File

@@ -6,7 +6,7 @@ using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.WebService.Contracts;
namespace StellaOps.Excititor.WebService.Services;

View File

@@ -1,14 +1,14 @@
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using MongoDB.Driver;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using MongoDB.Driver;
using StellaOps.Excititor.Connectors.Abstractions;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
namespace StellaOps.Excititor.WebService.Services;
@@ -23,50 +23,47 @@ internal interface IVexIngestOrchestrator
Task<ReconcileSummary> ReconcileAsync(ReconcileOptions options, CancellationToken cancellationToken);
}
internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
{
private readonly IServiceProvider _serviceProvider;
private readonly IReadOnlyDictionary<string, IVexConnector> _connectors;
private readonly IVexRawStore _rawStore;
private readonly IVexClaimStore _claimStore;
private readonly IVexProviderStore _providerStore;
private readonly IVexConnectorStateRepository _stateRepository;
private readonly IVexNormalizerRouter _normalizerRouter;
private readonly IVexSignatureVerifier _signatureVerifier;
private readonly IVexMongoSessionProvider _sessionProvider;
private readonly TimeProvider _timeProvider;
private readonly ILogger<VexIngestOrchestrator> _logger;
private readonly string _defaultTenant;
internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
{
private readonly IServiceProvider _serviceProvider;
private readonly IReadOnlyDictionary<string, IVexConnector> _connectors;
private readonly IVexRawStore _rawStore;
private readonly IVexClaimStore _claimStore;
private readonly IVexProviderStore _providerStore;
private readonly IVexConnectorStateRepository _stateRepository;
private readonly IVexNormalizerRouter _normalizerRouter;
private readonly IVexSignatureVerifier _signatureVerifier;
private readonly TimeProvider _timeProvider;
private readonly ILogger<VexIngestOrchestrator> _logger;
private readonly string _defaultTenant;
public VexIngestOrchestrator(
IServiceProvider serviceProvider,
IEnumerable<IVexConnector> connectors,
IVexRawStore rawStore,
IVexClaimStore claimStore,
IVexProviderStore providerStore,
IVexConnectorStateRepository stateRepository,
IVexNormalizerRouter normalizerRouter,
IVexSignatureVerifier signatureVerifier,
IVexMongoSessionProvider sessionProvider,
TimeProvider timeProvider,
IOptions<VexMongoStorageOptions> storageOptions,
ILogger<VexIngestOrchestrator> logger)
{
_serviceProvider = serviceProvider ?? throw new ArgumentNullException(nameof(serviceProvider));
_rawStore = rawStore ?? throw new ArgumentNullException(nameof(rawStore));
_claimStore = claimStore ?? throw new ArgumentNullException(nameof(claimStore));
public VexIngestOrchestrator(
IServiceProvider serviceProvider,
IEnumerable<IVexConnector> connectors,
IVexRawStore rawStore,
IVexClaimStore claimStore,
IVexProviderStore providerStore,
IVexConnectorStateRepository stateRepository,
IVexNormalizerRouter normalizerRouter,
IVexSignatureVerifier signatureVerifier,
TimeProvider timeProvider,
IOptions<VexStorageOptions> storageOptions,
ILogger<VexIngestOrchestrator> logger)
{
_serviceProvider = serviceProvider ?? throw new ArgumentNullException(nameof(serviceProvider));
_rawStore = rawStore ?? throw new ArgumentNullException(nameof(rawStore));
_claimStore = claimStore ?? throw new ArgumentNullException(nameof(claimStore));
_providerStore = providerStore ?? throw new ArgumentNullException(nameof(providerStore));
_stateRepository = stateRepository ?? throw new ArgumentNullException(nameof(stateRepository));
_normalizerRouter = normalizerRouter ?? throw new ArgumentNullException(nameof(normalizerRouter));
_signatureVerifier = signatureVerifier ?? throw new ArgumentNullException(nameof(signatureVerifier));
_sessionProvider = sessionProvider ?? throw new ArgumentNullException(nameof(sessionProvider));
_timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
var optionsValue = (storageOptions ?? throw new ArgumentNullException(nameof(storageOptions))).Value
?? throw new ArgumentNullException(nameof(storageOptions));
_defaultTenant = string.IsNullOrWhiteSpace(optionsValue.DefaultTenant)
? "default"
: optionsValue.DefaultTenant.Trim();
_timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
var optionsValue = (storageOptions ?? throw new ArgumentNullException(nameof(storageOptions))).Value
?? throw new ArgumentNullException(nameof(storageOptions));
_defaultTenant = string.IsNullOrWhiteSpace(optionsValue.DefaultTenant)
? "default"
: optionsValue.DefaultTenant.Trim();
if (connectors is null)
{
@@ -86,8 +83,6 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
var startedAt = _timeProvider.GetUtcNow();
var results = ImmutableArray.CreateBuilder<InitProviderResult>();
var session = await _sessionProvider.StartSessionAsync(cancellationToken).ConfigureAwait(false);
var (handles, missing) = ResolveConnectors(options.Providers);
foreach (var providerId in missing)
{
@@ -100,15 +95,15 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
try
{
await ValidateConnectorAsync(handle, cancellationToken).ConfigureAwait(false);
await EnsureProviderRegistrationAsync(handle.Descriptor, session, cancellationToken).ConfigureAwait(false);
await EnsureProviderRegistrationAsync(handle.Descriptor, cancellationToken).ConfigureAwait(false);
stopwatch.Stop();
results.Add(new InitProviderResult(
handle.Descriptor.Id,
handle.Descriptor.DisplayName,
"succeeded",
stopwatch.Elapsed,
Error: null));
results.Add(new InitProviderResult(
handle.Descriptor.Id,
handle.Descriptor.DisplayName,
"succeeded",
stopwatch.Elapsed,
Error: null));
_logger.LogInformation("Excititor init validated provider {ProviderId} in {Duration}ms.", handle.Descriptor.Id, stopwatch.Elapsed.TotalMilliseconds);
}
@@ -148,8 +143,6 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
var startedAt = _timeProvider.GetUtcNow();
var since = ResolveSince(options.Since, options.Window, startedAt);
var results = ImmutableArray.CreateBuilder<ProviderRunResult>();
var session = await _sessionProvider.StartSessionAsync(cancellationToken).ConfigureAwait(false);
var (handles, missing) = ResolveConnectors(options.Providers);
foreach (var providerId in missing)
{
@@ -158,7 +151,7 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
foreach (var handle in handles)
{
var result = await ExecuteRunAsync(runId, handle, since, options.Force, session, cancellationToken).ConfigureAwait(false);
var result = await ExecuteRunAsync(runId, handle, since, options.Force, session, cancellationToken).ConfigureAwait(false);
results.Add(result);
}
@@ -173,20 +166,18 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
var runId = Guid.NewGuid();
var startedAt = _timeProvider.GetUtcNow();
var results = ImmutableArray.CreateBuilder<ProviderRunResult>();
var session = await _sessionProvider.StartSessionAsync(cancellationToken).ConfigureAwait(false);
var (handles, missing) = ResolveConnectors(options.Providers);
foreach (var providerId in missing)
{
results.Add(ProviderRunResult.Missing(providerId, since: null));
}
foreach (var handle in handles)
{
var since = await ResolveResumeSinceAsync(handle.Descriptor.Id, options.Checkpoint, session, cancellationToken).ConfigureAwait(false);
var result = await ExecuteRunAsync(runId, handle, since, force: false, session, cancellationToken).ConfigureAwait(false);
results.Add(result);
}
foreach (var handle in handles)
{
var since = await ResolveResumeSinceAsync(handle.Descriptor.Id, options.Checkpoint, session, cancellationToken).ConfigureAwait(false);
var result = await ExecuteRunAsync(runId, handle, since, force: false, session, cancellationToken).ConfigureAwait(false);
results.Add(result);
}
var completedAt = _timeProvider.GetUtcNow();
return new IngestRunSummary(runId, startedAt, completedAt, results.ToImmutable());
@@ -200,8 +191,6 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
var startedAt = _timeProvider.GetUtcNow();
var threshold = options.MaxAge is null ? (DateTimeOffset?)null : startedAt - options.MaxAge.Value;
var results = ImmutableArray.CreateBuilder<ReconcileProviderResult>();
var session = await _sessionProvider.StartSessionAsync(cancellationToken).ConfigureAwait(false);
var (handles, missing) = ResolveConnectors(options.Providers);
foreach (var providerId in missing)
{
@@ -219,8 +208,8 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
if (stale || state is null)
{
var since = stale ? threshold : lastUpdated;
var result = await ExecuteRunAsync(runId, handle, since, force: false, session, cancellationToken).ConfigureAwait(false);
results.Add(new ReconcileProviderResult(
var result = await ExecuteRunAsync(runId, handle, since, force: false, session, cancellationToken).ConfigureAwait(false);
results.Add(new ReconcileProviderResult(
handle.Descriptor.Id,
result.Status,
"reconciled",
@@ -232,15 +221,15 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
}
else
{
results.Add(new ReconcileProviderResult(
handle.Descriptor.Id,
"succeeded",
"skipped",
lastUpdated,
threshold,
Documents: 0,
Claims: 0,
Error: null));
results.Add(new ReconcileProviderResult(
handle.Descriptor.Id,
"succeeded",
"skipped",
lastUpdated,
threshold,
Documents: 0,
Claims: 0,
Error: null));
}
}
catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested)
@@ -280,7 +269,7 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
await handle.Connector.ValidateAsync(VexConnectorSettings.Empty, cancellationToken).ConfigureAwait(false);
}
private async Task EnsureProviderRegistrationAsync(VexConnectorDescriptor descriptor, IClientSessionHandle session, CancellationToken cancellationToken)
private async Task EnsureProviderRegistrationAsync(VexConnectorDescriptor descriptor, CancellationToken cancellationToken)
{
var existing = await _providerStore.FindAsync(descriptor.Id, cancellationToken, session).ConfigureAwait(false);
if (existing is not null)
@@ -292,48 +281,48 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
await _providerStore.SaveAsync(provider, cancellationToken, session).ConfigureAwait(false);
}
private async Task<ProviderRunResult> ExecuteRunAsync(
Guid runId,
ConnectorHandle handle,
DateTimeOffset? since,
bool force,
IClientSessionHandle session,
CancellationToken cancellationToken)
{
var providerId = handle.Descriptor.Id;
var startedAt = _timeProvider.GetUtcNow();
var stopwatch = Stopwatch.StartNew();
using var scope = _logger.BeginScope(new Dictionary<string, object?>(StringComparer.Ordinal)
{
["tenant"] = _defaultTenant,
["runId"] = runId,
["providerId"] = providerId,
["window.since"] = since?.ToString("O", CultureInfo.InvariantCulture),
["force"] = force,
});
private async Task<ProviderRunResult> ExecuteRunAsync(
Guid runId,
ConnectorHandle handle,
DateTimeOffset? since,
bool force,
IClientSessionHandle session,
CancellationToken cancellationToken)
{
var providerId = handle.Descriptor.Id;
var startedAt = _timeProvider.GetUtcNow();
var stopwatch = Stopwatch.StartNew();
using var scope = _logger.BeginScope(new Dictionary<string, object?>(StringComparer.Ordinal)
{
["tenant"] = _defaultTenant,
["runId"] = runId,
["providerId"] = providerId,
["window.since"] = since?.ToString("O", CultureInfo.InvariantCulture),
["force"] = force,
});
try
{
await ValidateConnectorAsync(handle, cancellationToken).ConfigureAwait(false);
await EnsureProviderRegistrationAsync(handle.Descriptor, session, cancellationToken).ConfigureAwait(false);
if (force)
{
var resetState = new VexConnectorState(providerId, null, ImmutableArray<string>.Empty);
await _stateRepository.SaveAsync(resetState, cancellationToken, session).ConfigureAwait(false);
}
var stateBeforeRun = await _stateRepository.GetAsync(providerId, cancellationToken, session).ConfigureAwait(false);
var resumeTokens = stateBeforeRun?.ResumeTokens ?? ImmutableDictionary<string, string>.Empty;
var context = new VexConnectorContext(
since,
VexConnectorSettings.Empty,
_rawStore,
_signatureVerifier,
_normalizerRouter,
_serviceProvider,
resumeTokens);
if (force)
{
var resetState = new VexConnectorState(providerId, null, ImmutableArray<string>.Empty);
await _stateRepository.SaveAsync(resetState, cancellationToken, session).ConfigureAwait(false);
}
var stateBeforeRun = await _stateRepository.GetAsync(providerId, cancellationToken, session).ConfigureAwait(false);
var resumeTokens = stateBeforeRun?.ResumeTokens ?? ImmutableDictionary<string, string>.Empty;
var context = new VexConnectorContext(
since,
VexConnectorSettings.Empty,
_rawStore,
_signatureVerifier,
_normalizerRouter,
_serviceProvider,
resumeTokens);
var documents = 0;
var claims = 0;
@@ -354,25 +343,25 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
stopwatch.Stop();
var completedAt = _timeProvider.GetUtcNow();
var stateAfterRun = await _stateRepository.GetAsync(providerId, cancellationToken, session).ConfigureAwait(false);
var checkpoint = stateAfterRun?.DocumentDigests.IsDefaultOrEmpty == false
? stateAfterRun.DocumentDigests[^1]
: lastDigest;
var result = new ProviderRunResult(
providerId,
"succeeded",
var stateAfterRun = await _stateRepository.GetAsync(providerId, cancellationToken, session).ConfigureAwait(false);
var checkpoint = stateAfterRun?.DocumentDigests.IsDefaultOrEmpty == false
? stateAfterRun.DocumentDigests[^1]
: lastDigest;
var result = new ProviderRunResult(
providerId,
"succeeded",
documents,
claims,
startedAt,
completedAt,
stopwatch.Elapsed,
lastDigest,
stateAfterRun?.LastUpdated,
checkpoint,
null,
since);
lastDigest,
stateAfterRun?.LastUpdated,
checkpoint,
null,
since);
_logger.LogInformation(
"Excititor ingest provider {ProviderId} completed: documents={Documents} claims={Claims} since={Since} duration={Duration}ms",

View File

@@ -6,7 +6,7 @@ using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Storage.Mongo;
using StellaOps.Excititor.Core.Storage;
namespace StellaOps.Excititor.WebService.Services;

View File

@@ -75,13 +75,7 @@ public sealed class AppendOnlyLinksetExtractionService
results.Add(result);
if (result.HadChanges && _eventPublisher is not null)
{
await _eventPublisher.PublishLinksetUpdatedAsync(
normalizedTenant,
result.Linkset,
cancellationToken);
}
await PublishIfNeededAsync(normalizedTenant, result, cancellationToken);
}
catch (Exception ex)
{
@@ -142,13 +136,7 @@ public sealed class AppendOnlyLinksetExtractionService
disagreement,
cancellationToken);
if (storeResult.HadChanges && _eventPublisher is not null)
{
await _eventPublisher.PublishLinksetUpdatedAsync(
normalizedTenant,
storeResult.Linkset,
cancellationToken);
}
await PublishIfNeededAsync(normalizedTenant, storeResult, cancellationToken);
return LinksetAppendResult.Succeeded(
normalizedTenant,
@@ -193,7 +181,7 @@ public sealed class AppendOnlyLinksetExtractionService
ProviderId: obs.ProviderId,
Status: stmt.Status.ToString().ToLowerInvariant(),
Confidence: null)))
.Distinct(VexLinksetObservationRefComparer.Instance)
.DistinctBy(refModel => $"{refModel.ProviderId}:{refModel.Status}:{refModel.ObservationId}", StringComparer.OrdinalIgnoreCase)
.ToList();
if (observationRefs.Count == 0)
@@ -263,6 +251,60 @@ public sealed class AppendOnlyLinksetExtractionService
return at >= 0 && at < key.Length - 1 ? key[(at + 1)..] : null;
}
/// <summary>
/// Publishes a linkset-updated event for an append result, but only when an event
/// publisher is configured and the append actually changed the stored linkset.
/// </summary>
/// <param name="tenant">Normalized tenant the linkset belongs to.</param>
/// <param name="result">Append outcome; <c>HadChanges</c> gates publication.</param>
/// <param name="cancellationToken">Cancellation token.</param>
private async Task PublishIfNeededAsync(string tenant, AppendLinksetResult result, CancellationToken cancellationToken)
{
// No-op when eventing is disabled or nothing was appended.
if (_eventPublisher is null || !result.HadChanges)
{
return;
}
var evt = ToEvent(tenant, result.Linkset);
await _eventPublisher.PublishAsync(evt, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Projects a <see cref="VexLinkset"/> into a <see cref="VexLinksetUpdatedEvent"/>.
/// Observation refs and disagreements are sorted with explicit comparers so the
/// emitted event payload has a stable, reproducible ordering regardless of the
/// order the store returned them in.
/// </summary>
/// <param name="tenant">Tenant the event is scoped to.</param>
/// <param name="linkset">Linkset snapshot to serialize into the event.</param>
private static VexLinksetUpdatedEvent ToEvent(string tenant, VexLinkset linkset)
{
// NOTE(review): extensions dictionary is intentionally left empty here —
// confirm downstream consumers do not expect observation metadata.
var observationRefs = linkset.Observations
.Select(o => new VexLinksetObservationRefCore(
o.ObservationId,
o.ProviderId,
o.Status,
o.Confidence,
ImmutableDictionary<string, string>.Empty))
.OrderBy(o => o.ProviderId, StringComparer.OrdinalIgnoreCase)
.ThenBy(o => o.Status, StringComparer.OrdinalIgnoreCase)
.ThenBy(o => o.ObservationId, StringComparer.Ordinal)
.ToImmutableArray();
// Same stable-ordering treatment for disagreements; null justifications sort first.
var disagreements = linkset.Disagreements
.OrderBy(d => d.ProviderId, StringComparer.OrdinalIgnoreCase)
.ThenBy(d => d.Status, StringComparer.OrdinalIgnoreCase)
.ThenBy(d => d.Justification ?? string.Empty, StringComparer.OrdinalIgnoreCase)
.ToImmutableArray();
return new VexLinksetUpdatedEvent(
VexLinksetUpdatedEventFactory.EventType,
tenant,
linkset.LinksetId,
linkset.VulnerabilityId,
linkset.ProductKey,
linkset.Scope,
observationRefs,
disagreements,
linkset.UpdatedAt);
}
/// <summary>
/// Overload of the publish gate for <see cref="LinksetAppendResult"/>, which may
/// carry a null linkset (e.g. a failed append); in that case nothing is published.
/// </summary>
/// <param name="tenant">Normalized tenant the linkset belongs to.</param>
/// <param name="result">Append outcome; requires <c>HadChanges</c> and a non-null linkset.</param>
/// <param name="cancellationToken">Cancellation token.</param>
private async Task PublishIfNeededAsync(string tenant, LinksetAppendResult result, CancellationToken cancellationToken)
{
if (_eventPublisher is null || !result.HadChanges || result.Linkset is null)
{
return;
}
var evt = ToEvent(tenant, result.Linkset);
await _eventPublisher.PublishAsync(evt, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Normalizes a required string input, throwing when it is null or whitespace
/// (delegates to the shared <see cref="VexObservation"/> guard helper).
/// </summary>
private static string Normalize(string value) =>
VexObservation.EnsureNotNullOrWhiteSpace(value, nameof(value));

View File

@@ -9,6 +9,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Options" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="../../../Aoc/__Libraries/StellaOps.Aoc/StellaOps.Aoc.csproj" />

View File

@@ -0,0 +1,7 @@
// Temporary stubs to allow legacy interfaces to compile while MongoDB is removed.
// These types are intentionally minimal; they do not perform any database operations.
namespace MongoDB.Driver;
/// <summary>
/// Minimal stand-in for the MongoDB driver's session handle so legacy method
/// signatures keep compiling while MongoDB support is being removed.
/// Carries no members and performs no database work; implementations only
/// need to satisfy synchronous and asynchronous disposal.
/// </summary>
public interface IClientSessionHandle : IAsyncDisposable, IDisposable
{
}

View File

@@ -0,0 +1,80 @@
using System;
using System.Collections.Generic;
using System.Collections.Immutable;
namespace StellaOps.Excititor.Core.Storage;
/// <summary>
/// Query envelope for listing raw VEX documents.
/// </summary>
/// <param name="Tenant">Tenant the listing is scoped to.</param>
/// <param name="ProviderIds">Provider identifiers to match. NOTE(review): presumably an empty collection means "no provider filter" — confirm against the store implementation.</param>
/// <param name="Digests">Content-addressed digests (sha256:...) to match; same empty-means-unfiltered assumption applies.</param>
/// <param name="Formats">Document formats to match.</param>
/// <param name="Since">Lower bound on retrieval time; inclusivity is defined by the store implementation.</param>
/// <param name="Until">Upper bound on retrieval time.</param>
/// <param name="Cursor">Pagination cursor returned by a previous page, or null for the first page.</param>
/// <param name="Limit">Maximum number of items to return in one page.</param>
public sealed record VexRawQuery(
string Tenant,
IReadOnlyCollection<string> ProviderIds,
IReadOnlyCollection<string> Digests,
IReadOnlyCollection<VexDocumentFormat> Formats,
DateTimeOffset? Since,
DateTimeOffset? Until,
VexRawCursor? Cursor,
int Limit);
/// <summary>
/// Stable pagination cursor based on retrieved-at and digest ordering.
/// </summary>
/// <param name="RetrievedAt">Retrieval timestamp of the last item on the previous page.</param>
/// <param name="Digest">Digest of the last item on the previous page; tie-breaker for identical timestamps.</param>
public sealed record VexRawCursor(DateTimeOffset RetrievedAt, string Digest);
/// <summary>
/// Lightweight summary used for list endpoints.
/// </summary>
/// <param name="Digest">Content-addressed digest identifying the document.</param>
/// <param name="ProviderId">Provider that supplied the document.</param>
/// <param name="Format">Detected VEX document format.</param>
/// <param name="SourceUri">Origin the document was fetched from.</param>
/// <param name="RetrievedAt">When the document was retrieved from the source.</param>
/// <param name="InlineContent">True when the payload is stored inline rather than in the blob table.</param>
/// <param name="Metadata">Provider/ingest metadata key-value pairs.</param>
public sealed record VexRawDocumentSummary(
string Digest,
string ProviderId,
VexDocumentFormat Format,
Uri SourceUri,
DateTimeOffset RetrievedAt,
bool InlineContent,
ImmutableDictionary<string, string> Metadata);
/// <summary>
/// Paged result for raw document listings.
/// </summary>
/// <param name="Items">Summaries for the current page.</param>
/// <param name="NextCursor">Cursor to request the following page; null when no further page exists.</param>
/// <param name="HasMore">True when additional items remain beyond this page.</param>
public sealed record VexRawDocumentPage(
IReadOnlyList<VexRawDocumentSummary> Items,
VexRawCursor? NextCursor,
bool HasMore);
/// <summary>
/// Stored raw VEX document with canonical content and metadata.
/// </summary>
/// <param name="Digest">Content-addressed digest (sha256:...) and primary key.</param>
/// <param name="Tenant">Tenant the document belongs to.</param>
/// <param name="ProviderId">Provider that supplied the document.</param>
/// <param name="Format">Detected VEX document format.</param>
/// <param name="SourceUri">Origin the document was fetched from.</param>
/// <param name="RetrievedAt">When the document was retrieved from the source.</param>
/// <param name="Metadata">Provider/ingest metadata key-value pairs.</param>
/// <param name="Content">Raw document bytes.</param>
/// <param name="InlineContent">True when the payload is stored inline rather than in the blob table.</param>
/// <param name="SupersedesDigest">Digest of the prior document this one supersedes, if any.</param>
/// <param name="ETag">HTTP ETag observed when fetching, if any.</param>
/// <param name="RecordedAt">When the record was persisted; null until assigned by the store.</param>
public sealed record VexRawRecord(
string Digest,
string Tenant,
string ProviderId,
VexDocumentFormat Format,
Uri SourceUri,
DateTimeOffset RetrievedAt,
ImmutableDictionary<string, string> Metadata,
ReadOnlyMemory<byte> Content,
bool InlineContent,
string? SupersedesDigest = null,
string? ETag = null,
DateTimeOffset? RecordedAt = null);
/// <summary>
/// Append-only raw document store abstraction (backed by Postgres for Excititor).
/// Extends <see cref="IVexRawDocumentSink"/> with read/query operations.
/// </summary>
public interface IVexRawStore : IVexRawDocumentSink
{
/// <summary>
/// Finds a raw document by digest.
/// </summary>
/// <param name="digest">Content-addressed digest (sha256:...)</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The stored record, or null when no document has that digest.</returns>
ValueTask<VexRawRecord?> FindByDigestAsync(string digest, CancellationToken cancellationToken);
/// <summary>
/// Lists raw documents using deterministic ordering.
/// </summary>
/// <param name="query">Query filters and pagination cursor.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>A page of summaries plus the cursor for the next page, if any.</returns>
ValueTask<VexRawDocumentPage> QueryAsync(VexRawQuery query, CancellationToken cancellationToken);
}

View File

@@ -0,0 +1,38 @@
namespace StellaOps.Excititor.Core.Storage;
/// <summary>
/// Storage options for Excititor persistence (Postgres-backed, legacy name retained for compatibility).
/// Intended to be bound from configuration via the options pattern.
/// </summary>
public class VexStorageOptions
{
/// <summary>
/// Default tenant to apply when no tenant header is supplied.
/// </summary>
public string DefaultTenant { get; set; } = "default";
/// <summary>
/// Inline content threshold in bytes; larger payloads are stored in the blob table.
/// Defaults to 256 KiB.
/// </summary>
public int InlineThresholdBytes { get; set; } = 256 * 1024;
}
/// <summary>
/// Legacy alias preserved while migrating off MongoDB-specific naming.
/// Existing configuration bound to the old Mongo option names keeps working;
/// new code should bind <see cref="VexStorageOptions"/> directly.
/// </summary>
[System.Obsolete("Use VexStorageOptions; retained for backwards compatibility during Mongo removal.")]
public sealed class VexMongoStorageOptions : VexStorageOptions
{
/// <summary>
/// Historical bucket name (unused in Postgres mode).
/// </summary>
public string RawBucketName { get; set; } = "vex-raw";
/// <summary>
/// Backwards-compatible inline threshold property.
/// Reads and writes forward to <see cref="VexStorageOptions.InlineThresholdBytes"/>.
/// </summary>
public int GridFsInlineThresholdBytes
{
get => InlineThresholdBytes;
set => InlineThresholdBytes = value;
}
}

View File

@@ -1,308 +1,92 @@
-- VEX Schema Migration 001: Initial Schema
-- Creates the vex schema for VEX statements and dependency graphs
-- VEX Schema Migration 001: Append-only linksets (no Mongo, no consensus)
-- This migration defines an append-only Postgres backend for Excititor linksets,
-- observations, disagreements, and mutation logs. All operations are additive and
-- preserve deterministic ordering for audit/replay.
-- Create schema
CREATE SCHEMA IF NOT EXISTS vex;
-- Projects table
CREATE TABLE IF NOT EXISTS vex.projects (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
name TEXT NOT NULL,
display_name TEXT,
description TEXT,
repository_url TEXT,
default_branch TEXT,
settings JSONB NOT NULL DEFAULT '{}',
metadata JSONB NOT NULL DEFAULT '{}',
-- Drop legacy tables that carried mutable/consensus state
DROP TABLE IF EXISTS vex.linkset_mutations CASCADE;
DROP TABLE IF EXISTS vex.linkset_disagreements CASCADE;
DROP TABLE IF EXISTS vex.linkset_observations CASCADE;
DROP TABLE IF EXISTS vex.linksets CASCADE;
DROP TABLE IF EXISTS vex.observations CASCADE;
DROP TABLE IF EXISTS vex.consensus_holds CASCADE;
DROP TABLE IF EXISTS vex.consensus CASCADE;
DROP TABLE IF EXISTS vex.statements CASCADE;
DROP TABLE IF EXISTS vex.graph_edges CASCADE;
DROP TABLE IF EXISTS vex.graph_nodes CASCADE;
DROP TABLE IF EXISTS vex.graph_revisions CASCADE;
DROP TABLE IF EXISTS vex.projects CASCADE;
DROP TABLE IF EXISTS vex.linkset_events CASCADE;
DROP TABLE IF EXISTS vex.evidence_manifests CASCADE;
DROP TABLE IF EXISTS vex.cvss_receipts CASCADE;
DROP TABLE IF EXISTS vex.attestations CASCADE;
DROP TABLE IF EXISTS vex.timeline_events CASCADE;
DROP TABLE IF EXISTS vex.unknown_items CASCADE;
DROP TABLE IF EXISTS vex.unknowns_snapshots CASCADE;
-- Core linkset table (append-only semantics; updated_at is refreshed on append)
CREATE TABLE vex.linksets (
linkset_id TEXT PRIMARY KEY,
tenant TEXT NOT NULL,
vulnerability_id TEXT NOT NULL,
product_key TEXT NOT NULL,
scope JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
created_by TEXT,
UNIQUE(tenant_id, name)
UNIQUE (tenant, vulnerability_id, product_key)
);
CREATE INDEX idx_projects_tenant ON vex.projects(tenant_id);
CREATE INDEX idx_linksets_updated ON vex.linksets (tenant, updated_at DESC);
-- Graph revisions table
CREATE TABLE IF NOT EXISTS vex.graph_revisions (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
project_id UUID NOT NULL REFERENCES vex.projects(id) ON DELETE CASCADE,
revision_id TEXT NOT NULL UNIQUE,
parent_revision_id TEXT,
sbom_digest TEXT NOT NULL,
feed_snapshot_id TEXT,
policy_version TEXT,
node_count INT NOT NULL DEFAULT 0,
edge_count INT NOT NULL DEFAULT 0,
metadata JSONB NOT NULL DEFAULT '{}',
-- Observation references recorded per linkset (immutable; deduplicated)
CREATE TABLE vex.linkset_observations (
id BIGSERIAL PRIMARY KEY,
linkset_id TEXT NOT NULL REFERENCES vex.linksets(linkset_id) ON DELETE CASCADE,
observation_id TEXT NOT NULL,
provider_id TEXT NOT NULL,
status TEXT NOT NULL CHECK (status IN ('affected', 'not_affected', 'fixed', 'under_investigation')),
confidence NUMERIC(4,3),
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
created_by TEXT
UNIQUE (linkset_id, observation_id, provider_id, status)
);
CREATE INDEX idx_graph_revisions_project ON vex.graph_revisions(project_id);
CREATE INDEX idx_graph_revisions_revision ON vex.graph_revisions(revision_id);
CREATE INDEX idx_graph_revisions_created ON vex.graph_revisions(project_id, created_at DESC);
CREATE INDEX idx_linkset_observations_linkset ON vex.linkset_observations (linkset_id);
CREATE INDEX idx_linkset_observations_provider ON vex.linkset_observations (linkset_id, provider_id);
CREATE INDEX idx_linkset_observations_status ON vex.linkset_observations (linkset_id, status);
-- Graph nodes table (BIGSERIAL for high volume)
CREATE TABLE IF NOT EXISTS vex.graph_nodes (
-- Disagreements/conflicts recorded per linkset (immutable; deduplicated)
CREATE TABLE vex.linkset_disagreements (
id BIGSERIAL PRIMARY KEY,
graph_revision_id UUID NOT NULL REFERENCES vex.graph_revisions(id) ON DELETE CASCADE,
node_key TEXT NOT NULL,
node_type TEXT NOT NULL,
purl TEXT,
name TEXT,
version TEXT,
attributes JSONB NOT NULL DEFAULT '{}',
UNIQUE(graph_revision_id, node_key)
);
CREATE INDEX idx_graph_nodes_revision ON vex.graph_nodes(graph_revision_id);
CREATE INDEX idx_graph_nodes_key ON vex.graph_nodes(graph_revision_id, node_key);
CREATE INDEX idx_graph_nodes_purl ON vex.graph_nodes(purl);
CREATE INDEX idx_graph_nodes_type ON vex.graph_nodes(graph_revision_id, node_type);
-- Graph edges table (BIGSERIAL for high volume)
CREATE TABLE IF NOT EXISTS vex.graph_edges (
id BIGSERIAL PRIMARY KEY,
graph_revision_id UUID NOT NULL REFERENCES vex.graph_revisions(id) ON DELETE CASCADE,
from_node_id BIGINT NOT NULL REFERENCES vex.graph_nodes(id) ON DELETE CASCADE,
to_node_id BIGINT NOT NULL REFERENCES vex.graph_nodes(id) ON DELETE CASCADE,
edge_type TEXT NOT NULL,
attributes JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_graph_edges_revision ON vex.graph_edges(graph_revision_id);
CREATE INDEX idx_graph_edges_from ON vex.graph_edges(from_node_id);
CREATE INDEX idx_graph_edges_to ON vex.graph_edges(to_node_id);
-- VEX statements table
CREATE TABLE IF NOT EXISTS vex.statements (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
project_id UUID REFERENCES vex.projects(id),
graph_revision_id UUID REFERENCES vex.graph_revisions(id),
vulnerability_id TEXT NOT NULL,
product_id TEXT,
status TEXT NOT NULL CHECK (status IN (
'not_affected', 'affected', 'fixed', 'under_investigation'
)),
justification TEXT CHECK (justification IN (
'component_not_present', 'vulnerable_code_not_present',
'vulnerable_code_not_in_execute_path', 'vulnerable_code_cannot_be_controlled_by_adversary',
'inline_mitigations_already_exist'
)),
impact_statement TEXT,
action_statement TEXT,
action_statement_timestamp TIMESTAMPTZ,
first_issued TIMESTAMPTZ NOT NULL DEFAULT NOW(),
last_updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
source TEXT,
source_url TEXT,
evidence JSONB NOT NULL DEFAULT '{}',
provenance JSONB NOT NULL DEFAULT '{}',
metadata JSONB NOT NULL DEFAULT '{}',
created_by TEXT
);
CREATE INDEX idx_statements_tenant ON vex.statements(tenant_id);
CREATE INDEX idx_statements_project ON vex.statements(project_id);
CREATE INDEX idx_statements_revision ON vex.statements(graph_revision_id);
CREATE INDEX idx_statements_vuln ON vex.statements(vulnerability_id);
CREATE INDEX idx_statements_status ON vex.statements(tenant_id, status);
-- VEX observations table
CREATE TABLE IF NOT EXISTS vex.observations (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
statement_id UUID REFERENCES vex.statements(id) ON DELETE CASCADE,
vulnerability_id TEXT NOT NULL,
product_id TEXT NOT NULL,
observed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
observer TEXT NOT NULL,
observation_type TEXT NOT NULL,
confidence NUMERIC(3,2),
details JSONB NOT NULL DEFAULT '{}',
linkset_id TEXT NOT NULL REFERENCES vex.linksets(linkset_id) ON DELETE CASCADE,
provider_id TEXT NOT NULL,
status TEXT NOT NULL,
justification TEXT,
confidence NUMERIC(4,3),
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
UNIQUE(tenant_id, vulnerability_id, product_id, observer, observation_type)
UNIQUE (linkset_id, provider_id, status, justification)
);
CREATE INDEX idx_observations_tenant ON vex.observations(tenant_id);
CREATE INDEX idx_observations_statement ON vex.observations(statement_id);
CREATE INDEX idx_observations_vuln ON vex.observations(vulnerability_id, product_id);
CREATE INDEX idx_linkset_disagreements_linkset ON vex.linkset_disagreements (linkset_id);
-- Linksets table
CREATE TABLE IF NOT EXISTS vex.linksets (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
name TEXT NOT NULL,
description TEXT,
source_type TEXT NOT NULL,
source_url TEXT,
enabled BOOLEAN NOT NULL DEFAULT TRUE,
priority INT NOT NULL DEFAULT 0,
filter JSONB NOT NULL DEFAULT '{}',
metadata JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
UNIQUE(tenant_id, name)
-- Append-only mutation log for deterministic replay/audit
CREATE TABLE vex.linkset_mutations (
sequence_number BIGSERIAL PRIMARY KEY,
linkset_id TEXT NOT NULL REFERENCES vex.linksets(linkset_id) ON DELETE CASCADE,
mutation_type TEXT NOT NULL CHECK (mutation_type IN ('linkset_created', 'observation_added', 'disagreement_added')),
observation_id TEXT,
provider_id TEXT,
status TEXT,
confidence NUMERIC(4,3),
justification TEXT,
occurred_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_linksets_tenant ON vex.linksets(tenant_id);
CREATE INDEX idx_linksets_enabled ON vex.linksets(tenant_id, enabled, priority DESC);
CREATE INDEX idx_linkset_mutations_linkset ON vex.linkset_mutations (linkset_id, sequence_number);
-- Linkset events table
CREATE TABLE IF NOT EXISTS vex.linkset_events (
id BIGSERIAL PRIMARY KEY,
linkset_id UUID NOT NULL REFERENCES vex.linksets(id) ON DELETE CASCADE,
event_type TEXT NOT NULL,
statement_count INT NOT NULL DEFAULT 0,
error_message TEXT,
metadata JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_linkset_events_linkset ON vex.linkset_events(linkset_id);
CREATE INDEX idx_linkset_events_created ON vex.linkset_events(created_at);
-- Consensus table (VEX consensus state)
CREATE TABLE IF NOT EXISTS vex.consensus (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
vulnerability_id TEXT NOT NULL,
product_id TEXT NOT NULL,
consensus_status TEXT NOT NULL,
contributing_statements UUID[] NOT NULL DEFAULT '{}',
confidence NUMERIC(3,2),
computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
metadata JSONB NOT NULL DEFAULT '{}',
UNIQUE(tenant_id, vulnerability_id, product_id)
);
CREATE INDEX idx_consensus_tenant ON vex.consensus(tenant_id);
CREATE INDEX idx_consensus_vuln ON vex.consensus(vulnerability_id, product_id);
-- Consensus holds table
CREATE TABLE IF NOT EXISTS vex.consensus_holds (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
consensus_id UUID NOT NULL REFERENCES vex.consensus(id) ON DELETE CASCADE,
hold_type TEXT NOT NULL,
reason TEXT NOT NULL,
held_by TEXT NOT NULL,
held_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
released_at TIMESTAMPTZ,
released_by TEXT,
metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_consensus_holds_consensus ON vex.consensus_holds(consensus_id);
CREATE INDEX idx_consensus_holds_active ON vex.consensus_holds(consensus_id, released_at)
WHERE released_at IS NULL;
-- Unknown snapshots table
CREATE TABLE IF NOT EXISTS vex.unknowns_snapshots (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
project_id UUID REFERENCES vex.projects(id),
graph_revision_id UUID REFERENCES vex.graph_revisions(id),
snapshot_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
unknown_count INT NOT NULL DEFAULT 0,
metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_unknowns_snapshots_tenant ON vex.unknowns_snapshots(tenant_id);
CREATE INDEX idx_unknowns_snapshots_project ON vex.unknowns_snapshots(project_id);
-- Unknown items table
CREATE TABLE IF NOT EXISTS vex.unknown_items (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
snapshot_id UUID NOT NULL REFERENCES vex.unknowns_snapshots(id) ON DELETE CASCADE,
vulnerability_id TEXT NOT NULL,
product_id TEXT,
reason TEXT NOT NULL,
metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_unknown_items_snapshot ON vex.unknown_items(snapshot_id);
CREATE INDEX idx_unknown_items_vuln ON vex.unknown_items(vulnerability_id);
-- Evidence manifests table
CREATE TABLE IF NOT EXISTS vex.evidence_manifests (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
statement_id UUID REFERENCES vex.statements(id) ON DELETE CASCADE,
manifest_type TEXT NOT NULL,
content_hash TEXT NOT NULL,
content JSONB NOT NULL,
source TEXT,
collected_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_evidence_manifests_tenant ON vex.evidence_manifests(tenant_id);
CREATE INDEX idx_evidence_manifests_statement ON vex.evidence_manifests(statement_id);
-- CVSS receipts table
CREATE TABLE IF NOT EXISTS vex.cvss_receipts (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
statement_id UUID REFERENCES vex.statements(id) ON DELETE CASCADE,
vulnerability_id TEXT NOT NULL,
cvss_version TEXT NOT NULL,
vector_string TEXT NOT NULL,
base_score NUMERIC(3,1) NOT NULL,
environmental_score NUMERIC(3,1),
temporal_score NUMERIC(3,1),
computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_cvss_receipts_tenant ON vex.cvss_receipts(tenant_id);
CREATE INDEX idx_cvss_receipts_statement ON vex.cvss_receipts(statement_id);
CREATE INDEX idx_cvss_receipts_vuln ON vex.cvss_receipts(vulnerability_id);
-- Attestations table
CREATE TABLE IF NOT EXISTS vex.attestations (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
statement_id UUID REFERENCES vex.statements(id),
subject_digest TEXT NOT NULL,
predicate_type TEXT NOT NULL,
predicate JSONB NOT NULL,
signature TEXT,
signature_algorithm TEXT,
signed_by TEXT,
signed_at TIMESTAMPTZ,
verified BOOLEAN NOT NULL DEFAULT FALSE,
verified_at TIMESTAMPTZ,
metadata JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_attestations_tenant ON vex.attestations(tenant_id);
CREATE INDEX idx_attestations_statement ON vex.attestations(statement_id);
CREATE INDEX idx_attestations_subject ON vex.attestations(subject_digest);
-- Timeline events table
CREATE TABLE IF NOT EXISTS vex.timeline_events (
id BIGSERIAL PRIMARY KEY,
tenant_id TEXT NOT NULL,
project_id UUID REFERENCES vex.projects(id),
statement_id UUID REFERENCES vex.statements(id),
event_type TEXT NOT NULL,
event_data JSONB NOT NULL DEFAULT '{}',
actor TEXT,
correlation_id TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_timeline_events_tenant ON vex.timeline_events(tenant_id);
CREATE INDEX idx_timeline_events_project ON vex.timeline_events(project_id);
CREATE INDEX idx_timeline_events_statement ON vex.timeline_events(statement_id);
CREATE INDEX idx_timeline_events_created ON vex.timeline_events(tenant_id, created_at);
CREATE INDEX idx_timeline_events_correlation ON vex.timeline_events(correlation_id);
-- Update timestamp function
CREATE OR REPLACE FUNCTION vex.update_updated_at()
-- Refresh updated_at whenever linkset rows change
CREATE OR REPLACE FUNCTION vex.touch_updated_at()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
@@ -310,15 +94,6 @@ BEGIN
END;
$$ LANGUAGE plpgsql;
-- Triggers
CREATE TRIGGER trg_projects_updated_at
BEFORE UPDATE ON vex.projects
FOR EACH ROW EXECUTE FUNCTION vex.update_updated_at();
CREATE TRIGGER trg_linksets_updated_at
CREATE TRIGGER trg_linksets_touch_updated_at
BEFORE UPDATE ON vex.linksets
FOR EACH ROW EXECUTE FUNCTION vex.update_updated_at();
CREATE TRIGGER trg_statements_updated_at
BEFORE UPDATE ON vex.statements
FOR EACH ROW EXECUTE FUNCTION vex.update_updated_at();
FOR EACH ROW EXECUTE FUNCTION vex.touch_updated_at();

View File

@@ -0,0 +1,43 @@
-- VEX Raw Store Migration 002: Postgres-backed raw document and blob storage (Mongo/BSON removed)

-- Raw documents (append-only)
CREATE TABLE IF NOT EXISTS vex.vex_raw_documents (
    digest TEXT PRIMARY KEY,
    tenant TEXT NOT NULL,
    provider_id TEXT NOT NULL,
    format TEXT NOT NULL CHECK (format IN ('openvex','csaf','cyclonedx','custom','unknown')),
    source_uri TEXT NOT NULL,
    etag TEXT NULL,
    retrieved_at TIMESTAMPTZ NOT NULL,
    recorded_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    supersedes_digest TEXT NULL REFERENCES vex.vex_raw_documents(digest),
    content_json JSONB NOT NULL,
    content_size_bytes INT NOT NULL,
    metadata_json JSONB NOT NULL,
    provenance_json JSONB NOT NULL,
    inline_payload BOOLEAN NOT NULL DEFAULT TRUE
);

-- PostgreSQL table-level UNIQUE constraints accept column names only, not
-- expressions, so `UNIQUE (tenant, provider_id, source_uri, COALESCE(etag, ''))`
-- would fail at CREATE TABLE. The same rule -- one row per source, treating a
-- missing etag as the empty string -- is enforced with a unique expression index.
CREATE UNIQUE INDEX IF NOT EXISTS ux_vex_raw_documents_source
    ON vex.vex_raw_documents (tenant, provider_id, source_uri, COALESCE(etag, ''));

-- Read-path indexes: tenant-scoped listing by recency, per-provider listing,
-- supersession chains, and JSONB containment queries on metadata/provenance.
CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_tenant_retrieved ON vex.vex_raw_documents (tenant, retrieved_at DESC, digest);
CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_provider ON vex.vex_raw_documents (tenant, provider_id, retrieved_at DESC);
CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_supersedes ON vex.vex_raw_documents (tenant, supersedes_digest);
CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_metadata ON vex.vex_raw_documents USING GIN (metadata_json);
CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_provenance ON vex.vex_raw_documents USING GIN (provenance_json);

-- Large payloads stored separately when inline threshold exceeded
CREATE TABLE IF NOT EXISTS vex.vex_raw_blobs (
    digest TEXT PRIMARY KEY REFERENCES vex.vex_raw_documents(digest) ON DELETE CASCADE,
    payload BYTEA NOT NULL,
    payload_hash TEXT NOT NULL
);

-- Optional attachment support (kept for parity with prior GridFS usage)
CREATE TABLE IF NOT EXISTS vex.vex_raw_attachments (
    digest TEXT REFERENCES vex.vex_raw_documents(digest) ON DELETE CASCADE,
    name TEXT NOT NULL,
    media_type TEXT NOT NULL,
    payload BYTEA NOT NULL,
    payload_hash TEXT NOT NULL,
    PRIMARY KEY (digest, name)
);

View File

@@ -0,0 +1,858 @@
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Excititor.Core.Observations;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Excititor.Storage.Postgres.Repositories;
/// <summary>
/// PostgreSQL implementation of <see cref="IAppendOnlyLinksetStore"/> backed by append-only tables.
/// Uses deterministic ordering and mutation logs for audit/replay.
/// </summary>
public sealed class PostgresAppendOnlyLinksetStore : RepositoryBase<ExcititorDataSource>, IAppendOnlyLinksetStore, IVexLinksetStore
{
private const string MutationCreated = "linkset_created";
private const string MutationObservationAdded = "observation_added";
private const string MutationDisagreementAdded = "disagreement_added";
private static readonly JsonSerializerOptions JsonOptions = new()
{
PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
WriteIndented = false
};
/// <summary>
/// Creates the store over the tenant-aware Excititor data source.
/// </summary>
/// <param name="dataSource">Postgres data source used to open per-tenant connections.</param>
/// <param name="logger">Logger forwarded to the repository base.</param>
public PostgresAppendOnlyLinksetStore(
ExcititorDataSource dataSource,
ILogger<PostgresAppendOnlyLinksetStore> logger)
: base(dataSource, logger)
{
}
/// <summary>
/// Inserts a brand-new linkset together with all of its observations and
/// disagreements inside a single transaction.
/// </summary>
/// <param name="linkset">Linkset snapshot to persist.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>
/// True when the linkset row was created; false (with the transaction rolled
/// back and nothing written) when a linkset with the same id already exists.
/// </returns>
public async ValueTask<bool> InsertAsync(
    VexLinkset linkset,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(linkset);

    await using var connection = await DataSource
        .OpenConnectionAsync(linkset.Tenant, "writer", cancellationToken)
        .ConfigureAwait(false);
    await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);

    var mutationSequence = new List<long>();
    var isNew = await EnsureLinksetAsync(
        connection,
        linkset.LinksetId,
        linkset.Tenant,
        linkset.VulnerabilityId,
        linkset.ProductKey,
        linkset.Scope,
        mutationSequence,
        cancellationToken).ConfigureAwait(false);

    if (!isNew)
    {
        // Strict insert semantics: an existing row means we write nothing at all.
        await transaction.RollbackAsync(cancellationToken).ConfigureAwait(false);
        return false;
    }

    foreach (var observation in linkset.Observations)
    {
        await InsertObservationAsync(connection, linkset.LinksetId, observation, mutationSequence, cancellationToken)
            .ConfigureAwait(false);
    }

    foreach (var disagreement in linkset.Disagreements)
    {
        await InsertDisagreementAsync(connection, linkset.LinksetId, disagreement, mutationSequence, cancellationToken)
            .ConfigureAwait(false);
    }

    // Refresh updated_at before committing so readers see a consistent timestamp.
    await TouchLinksetAsync(connection, linkset.LinksetId, cancellationToken).ConfigureAwait(false);
    await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    return true;
}
/// <summary>
/// Ensures the linkset exists and appends any observations/disagreements that
/// are not yet recorded, all inside a single transaction.
/// </summary>
/// <param name="linkset">Linkset snapshot whose entries should be appended.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>True when the linkset row itself was created by this call.</returns>
public async ValueTask<bool> UpsertAsync(
    VexLinkset linkset,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(linkset);

    var linksetId = linkset.LinksetId;
    await using var connection = await DataSource
        .OpenConnectionAsync(linkset.Tenant, "writer", cancellationToken)
        .ConfigureAwait(false);
    await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);

    var mutationSequence = new List<long>();
    var createdNow = await EnsureLinksetAsync(
        connection,
        linksetId,
        linkset.Tenant,
        linkset.VulnerabilityId,
        linkset.ProductKey,
        linkset.Scope,
        mutationSequence,
        cancellationToken).ConfigureAwait(false);

    foreach (var observation in linkset.Observations)
    {
        await InsertObservationAsync(connection, linksetId, observation, mutationSequence, cancellationToken)
            .ConfigureAwait(false);
    }

    foreach (var disagreement in linkset.Disagreements)
    {
        await InsertDisagreementAsync(connection, linksetId, disagreement, mutationSequence, cancellationToken)
            .ConfigureAwait(false);
    }

    // Only bump updated_at when this call actually changed something.
    if (createdNow || mutationSequence.Count > 0)
    {
        await TouchLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    }

    await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    return createdNow;
}
/// <summary>
/// Returns the stored linkset for (tenant, vulnerability, product), creating an
/// empty one with an unknown product scope when none exists yet.
/// </summary>
/// <param name="tenant">Tenant the linkset belongs to.</param>
/// <param name="vulnerabilityId">Vulnerability identifier.</param>
/// <param name="productKey">Product key.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The existing or freshly created linkset.</returns>
/// <exception cref="InvalidOperationException">When the linkset cannot be read back after creation.</exception>
public async ValueTask<VexLinkset> GetOrCreateAsync(
    string tenant,
    string vulnerabilityId,
    string productKey,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(vulnerabilityId);
    ArgumentNullException.ThrowIfNull(productKey);

    var linksetId = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey);

    await using var connection = await DataSource
        .OpenConnectionAsync(tenant, "writer", cancellationToken)
        .ConfigureAwait(false);

    // Fast path: the linkset already exists.
    if (await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false) is { } existing)
    {
        return existing;
    }

    // Create an empty linkset; scope is unknown until the first real append.
    await EnsureLinksetAsync(
        connection,
        linksetId,
        tenant,
        vulnerabilityId,
        productKey,
        VexProductScope.Unknown(productKey),
        new List<long>(),
        cancellationToken).ConfigureAwait(false);

    return await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false)
        ?? throw new InvalidOperationException($"Failed to create linkset {linksetId}.");
}
/// <summary>
/// Convenience overload: appends a single observation by delegating to the batch variant.
/// </summary>
public async ValueTask<AppendLinksetResult> AppendObservationAsync(
    string tenant,
    string vulnerabilityId,
    string productKey,
    VexLinksetObservationRefModel observation,
    VexProductScope scope,
    CancellationToken cancellationToken)
{
    var single = new[] { observation };
    var result = await AppendObservationsBatchAsync(tenant, vulnerabilityId, productKey, single, scope, cancellationToken)
        .ConfigureAwait(false);
    return result;
}
/// <summary>
/// Appends a batch of observation references to the (tenant, vulnerability, product)
/// linkset, creating the linkset row first when it does not exist. Duplicate
/// observations are ignored by the insert helper and do not count as changes.
/// </summary>
/// <returns>
/// Created when the linkset row was inserted, Updated when only observations were added,
/// NoChange when nothing changed; each carries the re-read linkset and the latest
/// mutation-log sequence number.
/// </returns>
public async ValueTask<AppendLinksetResult> AppendObservationsBatchAsync(
string tenant,
string vulnerabilityId,
string productKey,
IEnumerable<VexLinksetObservationRefModel> observations,
VexProductScope scope,
CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(tenant);
ArgumentNullException.ThrowIfNull(vulnerabilityId);
ArgumentNullException.ThrowIfNull(productKey);
ArgumentNullException.ThrowIfNull(observations);
ArgumentNullException.ThrowIfNull(scope);
var linksetId = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey);
// Materialize up front so the caller's enumerable is evaluated exactly once.
var observationList = observations.ToList();
await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken)
.ConfigureAwait(false);
await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
var sequenceNumbers = new List<long>();
var wasCreated = await EnsureLinksetAsync(connection, linksetId, tenant, vulnerabilityId, productKey, scope, sequenceNumbers, cancellationToken)
.ConfigureAwait(false);
// Count only rows the database actually inserted (duplicates return false).
var observationsAdded = 0;
foreach (var obs in observationList)
{
var added = await InsertObservationAsync(connection, linksetId, obs, sequenceNumbers, cancellationToken)
.ConfigureAwait(false);
if (added)
{
observationsAdded++;
}
}
if (wasCreated || observationsAdded > 0)
{
await TouchLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
}
await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
// Re-read after commit to return the persisted state and latest sequence number.
var linkset = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false)
?? throw new InvalidOperationException($"Linkset {linksetId} not found after append.");
var sequenceNumber = await GetLatestSequenceAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
if (observationsAdded == 0 && !wasCreated)
{
return AppendLinksetResult.NoChange(linkset, sequenceNumber);
}
if (wasCreated)
{
return AppendLinksetResult.Created(linkset, observationsAdded, sequenceNumber);
}
return AppendLinksetResult.Updated(linkset, observationsAdded, disagreementsAdded: 0, sequenceNumber);
}
/// <summary>
/// Appends a single disagreement to the (tenant, vulnerability, product) linkset,
/// creating the linkset (with an Unknown scope) when it does not exist yet.
/// A duplicate disagreement on an existing linkset is reported as NoChange.
/// </summary>
public async ValueTask<AppendLinksetResult> AppendDisagreementAsync(
string tenant,
string vulnerabilityId,
string productKey,
VexObservationDisagreement disagreement,
CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(tenant);
ArgumentNullException.ThrowIfNull(vulnerabilityId);
ArgumentNullException.ThrowIfNull(productKey);
ArgumentNullException.ThrowIfNull(disagreement);
var linksetId = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey);
await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken)
.ConfigureAwait(false);
await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
var sequenceNumbers = new List<long>();
var wasCreated = await EnsureLinksetAsync(
connection,
linksetId,
tenant,
vulnerabilityId,
productKey,
VexProductScope.Unknown(productKey),
sequenceNumbers,
cancellationToken).ConfigureAwait(false);
// InsertDisagreementAsync returns false when the row already existed (conflict no-op).
var disagreementsAdded = await InsertDisagreementAsync(connection, linksetId, disagreement, sequenceNumbers, cancellationToken)
.ConfigureAwait(false)
? 1
: 0;
if (wasCreated || disagreementsAdded > 0)
{
await TouchLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
}
await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
// Re-read after commit to return the persisted state and latest sequence number.
var linkset = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false)
?? throw new InvalidOperationException($"Linkset {linksetId} not found after append.");
var sequenceNumber = await GetLatestSequenceAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
if (disagreementsAdded == 0 && !wasCreated)
{
return AppendLinksetResult.NoChange(linkset, sequenceNumber);
}
if (wasCreated)
{
return AppendLinksetResult.Created(linkset, observationsAdded: 0, sequenceNumber);
}
return AppendLinksetResult.Updated(linkset, observationsAdded: 0, disagreementsAdded, sequenceNumber);
}
/// <summary>Loads a single linkset by its identifier, or null when it does not exist.</summary>
public async ValueTask<VexLinkset?> GetByIdAsync(
    string tenant,
    string linksetId,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(linksetId);

    await using var connection = await DataSource
        .OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    var linkset = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    return linkset;
}
/// <summary>Loads a linkset by its natural key (tenant, vulnerability, product), or null.</summary>
public async ValueTask<VexLinkset?> GetByKeyAsync(
    string tenant,
    string vulnerabilityId,
    string productKey,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(vulnerabilityId);
    ArgumentNullException.ThrowIfNull(productKey);

    // Linkset ids are deterministic, so the by-key lookup reduces to a by-id lookup.
    var id = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey);
    return await GetByIdAsync(tenant, id, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Returns up to <paramref name="limit"/> linksets for a vulnerability, newest first.
/// </summary>
public async ValueTask<IReadOnlyList<VexLinkset>> FindByVulnerabilityAsync(
    string tenant,
    string vulnerabilityId,
    int limit,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(vulnerabilityId);

    await using var connection = await DataSource
        .OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);

    // Binds the filter, tenant and page-size parameters for the shared id query.
    void Bind(NpgsqlCommand cmd)
    {
        AddParameter(cmd, "vulnerability_id", vulnerabilityId);
        AddParameter(cmd, "tenant", tenant);
        AddParameter(cmd, "limit", limit);
    }

    var ids = await GetLinksetIdsAsync(connection, "vulnerability_id = @vulnerability_id", Bind, cancellationToken)
        .ConfigureAwait(false);
    return await ReadLinksetsAsync(connection, ids, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Returns up to <paramref name="limit"/> linksets for a product key, newest first.
/// </summary>
public async ValueTask<IReadOnlyList<VexLinkset>> FindByProductKeyAsync(
    string tenant,
    string productKey,
    int limit,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(productKey);

    await using var connection = await DataSource
        .OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);

    // Binds the filter, tenant and page-size parameters for the shared id query.
    void Bind(NpgsqlCommand cmd)
    {
        AddParameter(cmd, "product_key", productKey);
        AddParameter(cmd, "tenant", tenant);
        AddParameter(cmd, "limit", limit);
    }

    var ids = await GetLinksetIdsAsync(connection, "product_key = @product_key", Bind, cancellationToken)
        .ConfigureAwait(false);
    return await ReadLinksetsAsync(connection, ids, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Returns up to <paramref name="limit"/> linksets that have at least one recorded
/// disagreement, newest first.
/// </summary>
public async ValueTask<IReadOnlyList<VexLinkset>> FindWithConflictsAsync(
    string tenant,
    int limit,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);

    const string sql = """
        SELECT DISTINCT ls.linkset_id, ls.updated_at
        FROM vex.linksets ls
        JOIN vex.linkset_disagreements d ON d.linkset_id = ls.linkset_id
        WHERE ls.tenant = @tenant
        ORDER BY ls.updated_at DESC, ls.linkset_id
        LIMIT @limit;
        """;

    await using var connection = await DataSource
        .OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "tenant", tenant);
    AddParameter(command, "limit", limit);

    var conflicted = new List<string>();
    await using (var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false))
    {
        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            conflicted.Add(reader.GetString(0));
        }
    }

    return await ReadLinksetsAsync(connection, conflicted, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Returns up to <paramref name="limit"/> linksets containing at least one observation
/// from the given provider, newest first.
/// </summary>
public async ValueTask<IReadOnlyList<VexLinkset>> FindByProviderAsync(
    string tenant,
    string providerId,
    int limit,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(providerId);

    const string sql = """
        SELECT DISTINCT ls.linkset_id, ls.updated_at
        FROM vex.linksets ls
        JOIN vex.linkset_observations o ON o.linkset_id = ls.linkset_id
        WHERE ls.tenant = @tenant AND o.provider_id = @provider_id
        ORDER BY ls.updated_at DESC, ls.linkset_id
        LIMIT @limit;
        """;

    await using var connection = await DataSource
        .OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "tenant", tenant);
    AddParameter(command, "provider_id", providerId);
    AddParameter(command, "limit", limit);

    var matches = new List<string>();
    await using (var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false))
    {
        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            matches.Add(reader.GetString(0));
        }
    }

    return await ReadLinksetsAsync(connection, matches, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Always returns false: this store is append-only and never removes linksets.
/// </summary>
public ValueTask<bool> DeleteAsync(
    string tenant,
    string linksetId,
    CancellationToken cancellationToken) => ValueTask.FromResult(false);
/// <summary>Counts all linksets stored for the tenant.</summary>
public async ValueTask<long> CountAsync(string tenant, CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);

    const string sql = "SELECT COUNT(*) FROM vex.linksets WHERE tenant = @tenant;";
    await using var connection = await DataSource
        .OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "tenant", tenant);

    var scalar = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    // COUNT(*) arrives as bigint; keep the conversion fallback for boxed variants.
    return scalar switch
    {
        long value => value,
        _ => Convert.ToInt64(scalar),
    };
}
/// <summary>Counts the tenant's linksets that have at least one recorded disagreement.</summary>
public async ValueTask<long> CountWithConflictsAsync(string tenant, CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);

    const string sql = """
        SELECT COUNT(DISTINCT ls.linkset_id)
        FROM vex.linksets ls
        JOIN vex.linkset_disagreements d ON d.linkset_id = ls.linkset_id
        WHERE ls.tenant = @tenant;
        """;

    await using var connection = await DataSource
        .OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "tenant", tenant);

    var scalar = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    return scalar switch
    {
        long value => value,
        _ => Convert.ToInt64(scalar),
    };
}
/// <summary>
/// Returns the full append-only mutation log for a linkset, ordered by sequence number.
/// The tenant routes the read connection; the SQL filters on linkset_id only
/// (linkset ids embed the tenant via VexLinkset.CreateLinksetId — see the write paths).
/// </summary>
public async ValueTask<IReadOnlyList<LinksetMutationEvent>> GetMutationLogAsync(
string tenant,
string linksetId,
CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(tenant);
ArgumentNullException.ThrowIfNull(linksetId);
const string sql = """
SELECT sequence_number, mutation_type, occurred_at, observation_id, provider_id, status, confidence, justification
FROM vex.linkset_mutations
WHERE linkset_id = @linkset_id
ORDER BY sequence_number;
""";
await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "linkset_id", linksetId);
var mutations = new List<LinksetMutationEvent>();
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
{
// Columns 3-7 are optional detail fields whose presence depends on the mutation type.
mutations.Add(new LinksetMutationEvent(
sequenceNumber: reader.GetInt64(0),
mutationType: reader.GetString(1),
timestamp: reader.GetFieldValue<DateTimeOffset>(2),
observationId: GetNullableString(reader, 3),
providerId: GetNullableString(reader, 4),
status: GetNullableString(reader, 5),
confidence: reader.IsDBNull(6) ? null : reader.GetDouble(6),
justification: GetNullableString(reader, 7)));
}
return mutations;
}
/// <summary>
/// Inserts the linkset row if it does not exist (ON CONFLICT DO NOTHING) and, when the
/// insert actually happens, records a "created" mutation whose sequence number is
/// appended to <paramref name="sequenceNumbers"/>.
/// </summary>
/// <returns>True when this call created the row; false when it already existed.</returns>
private async Task<bool> EnsureLinksetAsync(
NpgsqlConnection connection,
string linksetId,
string tenant,
string vulnerabilityId,
string productKey,
VexProductScope scope,
List<long> sequenceNumbers,
CancellationToken cancellationToken)
{
const string sql = """
INSERT INTO vex.linksets (linkset_id, tenant, vulnerability_id, product_key, scope)
VALUES (@linkset_id, @tenant, @vulnerability_id, @product_key, @scope::jsonb)
ON CONFLICT (linkset_id) DO NOTHING
RETURNING linkset_id;
""";
await using var command = CreateCommand(sql, connection);
AddParameter(command, "linkset_id", linksetId);
AddParameter(command, "tenant", tenant);
AddParameter(command, "vulnerability_id", vulnerabilityId);
AddParameter(command, "product_key", productKey);
AddJsonbParameter(command, "scope", SerializeScope(scope));
// RETURNING yields a value only when the insert was not suppressed by the conflict clause.
var inserted = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
if (inserted is not null)
{
var seq = await InsertMutationAsync(connection, linksetId, MutationCreated, null, null, null, null, null, cancellationToken)
.ConfigureAwait(false);
sequenceNumbers.Add(seq);
return true;
}
return false;
}
/// <summary>
/// Inserts an observation reference for the linkset; duplicates are silently skipped
/// (ON CONFLICT DO NOTHING). A successful insert also records an "observation added"
/// mutation and appends its sequence number to <paramref name="sequenceNumbers"/>.
/// </summary>
/// <returns>True when a new row was inserted; false for a duplicate.</returns>
private async Task<bool> InsertObservationAsync(
NpgsqlConnection connection,
string linksetId,
VexLinksetObservationRefModel observation,
List<long> sequenceNumbers,
CancellationToken cancellationToken)
{
const string sql = """
INSERT INTO vex.linkset_observations (
linkset_id, observation_id, provider_id, status, confidence)
VALUES (@linkset_id, @observation_id, @provider_id, @status, @confidence)
ON CONFLICT DO NOTHING
RETURNING id;
""";
await using var command = CreateCommand(sql, connection);
AddParameter(command, "linkset_id", linksetId);
AddParameter(command, "observation_id", observation.ObservationId);
AddParameter(command, "provider_id", observation.ProviderId);
AddParameter(command, "status", observation.Status);
AddParameter(command, "confidence", observation.Confidence);
// RETURNING yields a value only when the insert was not suppressed by the conflict clause.
var inserted = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
if (inserted is not null)
{
var seq = await InsertMutationAsync(
connection,
linksetId,
MutationObservationAdded,
observation.ObservationId,
observation.ProviderId,
observation.Status,
observation.Confidence,
null,
cancellationToken).ConfigureAwait(false);
sequenceNumbers.Add(seq);
return true;
}
return false;
}
/// <summary>
/// Inserts a disagreement row for the linkset; duplicates are silently skipped
/// (ON CONFLICT DO NOTHING). A successful insert also records a "disagreement added"
/// mutation and appends its sequence number to <paramref name="sequenceNumbers"/>.
/// </summary>
/// <returns>True when a new row was inserted; false for a duplicate.</returns>
private async Task<bool> InsertDisagreementAsync(
NpgsqlConnection connection,
string linksetId,
VexObservationDisagreement disagreement,
List<long> sequenceNumbers,
CancellationToken cancellationToken)
{
const string sql = """
INSERT INTO vex.linkset_disagreements (
linkset_id, provider_id, status, justification, confidence)
VALUES (@linkset_id, @provider_id, @status, @justification, @confidence)
ON CONFLICT DO NOTHING
RETURNING id;
""";
await using var command = CreateCommand(sql, connection);
AddParameter(command, "linkset_id", linksetId);
AddParameter(command, "provider_id", disagreement.ProviderId);
AddParameter(command, "status", disagreement.Status);
AddParameter(command, "justification", disagreement.Justification);
AddParameter(command, "confidence", disagreement.Confidence);
// RETURNING yields a value only when the insert was not suppressed by the conflict clause.
var inserted = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
if (inserted is not null)
{
var seq = await InsertMutationAsync(
connection,
linksetId,
MutationDisagreementAdded,
null,
disagreement.ProviderId,
disagreement.Status,
disagreement.Confidence,
disagreement.Justification,
cancellationToken).ConfigureAwait(false);
sequenceNumbers.Add(seq);
return true;
}
return false;
}
/// <summary>
/// Appends one entry to the vex.linkset_mutations log and returns its database-assigned
/// sequence number. The nullable arguments are optional detail columns whose relevance
/// depends on <paramref name="mutationType"/>.
/// </summary>
/// <exception cref="InvalidOperationException">The insert returned no sequence number.</exception>
private async Task<long> InsertMutationAsync(
NpgsqlConnection connection,
string linksetId,
string mutationType,
string? observationId,
string? providerId,
string? status,
double? confidence,
string? justification,
CancellationToken cancellationToken)
{
const string sql = """
INSERT INTO vex.linkset_mutations (
linkset_id, mutation_type, observation_id, provider_id, status, confidence, justification)
VALUES (@linkset_id, @mutation_type, @observation_id, @provider_id, @status, @confidence, @justification)
RETURNING sequence_number;
""";
await using var command = CreateCommand(sql, connection);
AddParameter(command, "linkset_id", linksetId);
AddParameter(command, "mutation_type", mutationType);
AddParameter(command, "observation_id", observationId);
AddParameter(command, "provider_id", providerId);
AddParameter(command, "status", status);
AddParameter(command, "confidence", confidence);
AddParameter(command, "justification", justification);
var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false)
?? throw new InvalidOperationException("Failed to insert mutation log entry.");
return Convert.ToInt64(result);
}
/// <summary>Stamps the linkset's updated_at column with the database clock (NOW()).</summary>
private static async Task TouchLinksetAsync(
NpgsqlConnection connection,
string linksetId,
CancellationToken cancellationToken)
{
const string sql = "UPDATE vex.linksets SET updated_at = NOW() WHERE linkset_id = @linkset_id;";
// NOTE(review): builds the command directly (new NpgsqlCommand + AddWithValue) instead of
// the CreateCommand/AddParameter helpers used elsewhere — presumably because this method
// is static; confirm the inferred parameter type is acceptable here.
await using var command = new NpgsqlCommand(sql, connection);
command.Parameters.AddWithValue("linkset_id", linksetId);
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Returns the highest mutation sequence number recorded for the linkset, or 0 when
/// no mutations exist yet.
/// </summary>
private async Task<long> GetLatestSequenceAsync(
    NpgsqlConnection connection,
    string linksetId,
    CancellationToken cancellationToken)
{
    const string sql = "SELECT COALESCE(MAX(sequence_number), 0) FROM vex.linkset_mutations WHERE linkset_id = @linkset_id;";
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    var scalar = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    return scalar switch
    {
        long sequence => sequence,
        _ => Convert.ToInt64(scalar),
    };
}
/// <summary>
/// Runs a filtered id query over vex.linksets and returns the matching linkset ids,
/// newest first, capped by the @limit parameter the caller binds.
/// </summary>
/// <param name="predicate">
/// SQL fragment interpolated directly into the WHERE clause. Must remain a compile-time
/// constant from this class — never user input, since interpolation bypasses
/// parameterization.
/// </param>
/// <param name="configure">Binds @tenant, @limit and any parameters the predicate uses.</param>
private async Task<IReadOnlyList<string>> GetLinksetIdsAsync(
NpgsqlConnection connection,
string predicate,
Action<NpgsqlCommand> configure,
CancellationToken cancellationToken)
{
var sql = $"""
SELECT linkset_id
FROM vex.linksets
WHERE {predicate} AND tenant = @tenant
ORDER BY updated_at DESC, linkset_id
LIMIT @limit;
""";
await using var command = CreateCommand(sql, connection);
configure(command);
var ids = new List<string>();
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
{
ids.Add(reader.GetString(0));
}
return ids;
}
/// <summary>
/// Materializes each id via ReadLinksetAsync, silently skipping ids whose rows are gone.
/// </summary>
private async Task<IReadOnlyList<VexLinkset>> ReadLinksetsAsync(
    NpgsqlConnection connection,
    IReadOnlyList<string> linksetIds,
    CancellationToken cancellationToken)
{
    var loaded = new List<VexLinkset>();
    foreach (var linksetId in linksetIds)
    {
        var hit = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
        if (hit is null)
        {
            continue;
        }
        loaded.Add(hit);
    }
    return loaded;
}
/// <summary>
/// Loads a single linkset row plus its observations and disagreements, or null when the
/// id does not exist. The reader is closed before the follow-up queries because they
/// reuse the same connection.
/// </summary>
private async Task<VexLinkset?> ReadLinksetAsync(
    NpgsqlConnection connection,
    string linksetId,
    CancellationToken cancellationToken)
{
    const string sql = """
        SELECT linkset_id, tenant, vulnerability_id, product_key, scope::text, created_at, updated_at
        FROM vex.linksets
        WHERE linkset_id = @linkset_id;
        """;
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
    if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        return null;
    }

    var id = reader.GetString(0);
    var tenant = reader.GetString(1);
    var vulnerabilityId = reader.GetString(2);
    var productKey = reader.GetString(3);
    var scopeJson = reader.GetString(4);
    var createdAt = reader.GetFieldValue<DateTimeOffset>(5);
    var updatedAt = reader.GetFieldValue<DateTimeOffset>(6);
    // A missing or blank scope payload degrades to an Unknown scope for the product.
    var scope = DeserializeScope(scopeJson) ?? VexProductScope.Unknown(productKey);

    // Fix: ConfigureAwait(false) was missing here, unlike every other await in this class.
    await reader.CloseAsync().ConfigureAwait(false);

    var observations = await ReadObservationsAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    var disagreements = await ReadDisagreementsAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    return new VexLinkset(
        id,
        tenant,
        vulnerabilityId,
        productKey,
        scope,
        observations,
        disagreements,
        createdAt,
        updatedAt);
}
/// <summary>
/// Loads all observation references for a linkset in a deterministic order
/// (provider, status, observation id).
/// </summary>
private async Task<IReadOnlyList<VexLinksetObservationRefModel>> ReadObservationsAsync(
    NpgsqlConnection connection,
    string linksetId,
    CancellationToken cancellationToken)
{
    const string sql = """
        SELECT observation_id, provider_id, status, confidence
        FROM vex.linkset_observations
        WHERE linkset_id = @linkset_id
        ORDER BY provider_id, status, observation_id;
        """;
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);

    var rows = new List<VexLinksetObservationRefModel>();
    await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
    while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        var observationId = reader.GetString(0);
        var providerId = reader.GetString(1);
        var status = reader.GetString(2);
        double? confidence = reader.IsDBNull(3) ? null : reader.GetDouble(3);
        rows.Add(new VexLinksetObservationRefModel(observationId, providerId, status, confidence));
    }
    return rows;
}
/// <summary>
/// Loads all recorded disagreements for a linkset in a deterministic order
/// (provider, status, justification, row id).
/// </summary>
private async Task<IReadOnlyList<VexObservationDisagreement>> ReadDisagreementsAsync(
    NpgsqlConnection connection,
    string linksetId,
    CancellationToken cancellationToken)
{
    const string sql = """
        SELECT provider_id, status, justification, confidence
        FROM vex.linkset_disagreements
        WHERE linkset_id = @linkset_id
        ORDER BY provider_id, status, COALESCE(justification, ''), id;
        """;
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);

    var rows = new List<VexObservationDisagreement>();
    await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
    while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        var providerId = reader.GetString(0);
        var status = reader.GetString(1);
        var justification = GetNullableString(reader, 2);
        double? confidence = reader.IsDBNull(3) ? null : reader.GetDouble(3);
        rows.Add(new VexObservationDisagreement(providerId, status, justification, confidence));
    }
    return rows;
}
/// <summary>Serializes the product scope to JSON for the jsonb scope column.</summary>
private static string? SerializeScope(VexProductScope scope)
    => JsonSerializer.Serialize(scope, JsonOptions);
/// <summary>Deserializes a scope payload; null or blank input yields null.</summary>
private static VexProductScope? DeserializeScope(string? json)
    => string.IsNullOrWhiteSpace(json)
        ? null
        : JsonSerializer.Deserialize<VexProductScope>(json, JsonOptions);
}

View File

@@ -0,0 +1,441 @@
using System;
using System.Buffers;
using System.Collections.Immutable;
using System.Linq;
using System.Text;
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using Npgsql;
using NpgsqlTypes;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Excititor.Storage.Postgres.Repositories;
/// <summary>
/// PostgreSQL-backed implementation of <see cref="IVexRawStore"/> replacing Mongo/GridFS.
/// </summary>
public sealed class PostgresVexRawStore : RepositoryBase<ExcititorDataSource>, IVexRawStore
{
// Payloads at or below this size are stored inline in the documents table;
// larger payloads spill into the vex_raw_blobs side table.
private readonly int _inlineThreshold;

/// <summary>
/// Creates the store. The inline threshold comes from the configured
/// InlineThresholdBytes, defaulting to 256 KiB and clamped to at least 1 byte.
/// </summary>
public PostgresVexRawStore(
    ExcititorDataSource dataSource,
    IOptions<VexStorageOptions> options,
    ILogger<PostgresVexRawStore> logger)
    : base(dataSource, logger)
{
    // Idiom fix: use the throw helper this file already relies on elsewhere.
    ArgumentNullException.ThrowIfNull(options);
    _inlineThreshold = Math.Max(1, options.Value?.InlineThresholdBytes ?? 256 * 1024);
}
/// <summary>
/// Persists a raw VEX document: canonicalizes its JSON, resolves the digest, and writes
/// the document row — plus a separate blob row when the payload exceeds the inline
/// threshold — all in one transaction. Re-storing a known digest is a no-op
/// (ON CONFLICT (digest) DO NOTHING on both tables).
/// </summary>
public async ValueTask StoreAsync(VexRawDocument document, CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(document);
var canonicalContent = CanonicalizeJson(document.Content);
// EnsureDigest (defined elsewhere) presumably derives/validates the digest from the
// canonical bytes — confirm against its implementation.
var digest = EnsureDigest(document.Digest, canonicalContent);
var metadata = document.Metadata ?? ImmutableDictionary<string, string>.Empty;
// Tenant comes from the "tenant" metadata entry, defaulting to "default".
var tenant = ResolveTenant(metadata);
var format = document.Format.ToString().ToLowerInvariant();
var providerId = document.ProviderId;
var sourceUri = document.SourceUri.ToString();
var retrievedAt = document.RetrievedAt.UtcDateTime;
// Inline payloads live only in content_json; larger ones also get a blob row below.
var inline = canonicalContent.Length <= _inlineThreshold;
await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken)
.ConfigureAwait(false);
await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
var metadataJson = JsonSerializer.Serialize(metadata, JsonSerializerOptions);
// Provenance is currently stored as a clone of metadata; callers may slice it as needed.
var provenanceJson = metadataJson;
var contentJson = GetJsonString(canonicalContent);
const string insertDocumentSql = """
INSERT INTO vex.vex_raw_documents (
digest,
tenant,
provider_id,
format,
source_uri,
etag,
retrieved_at,
supersedes_digest,
content_json,
content_size_bytes,
metadata_json,
provenance_json,
inline_payload)
VALUES (
@digest,
@tenant,
@provider_id,
@format,
@source_uri,
@etag,
@retrieved_at,
@supersedes_digest,
@content_json::jsonb,
@content_size_bytes,
@metadata_json::jsonb,
@provenance_json::jsonb,
@inline_payload)
ON CONFLICT (digest) DO NOTHING;
""";
await using (var command = CreateCommand(insertDocumentSql, connection, transaction))
{
AddParameter(command, "digest", digest);
AddParameter(command, "tenant", tenant);
AddParameter(command, "provider_id", providerId);
AddParameter(command, "format", format);
AddParameter(command, "source_uri", sourceUri);
// etag and supersedes are optional pass-throughs from the metadata map.
AddParameter(command, "etag", metadata.TryGetValue("etag", out var etag) ? etag : null);
AddParameter(command, "retrieved_at", retrievedAt);
AddParameter(command, "supersedes_digest", metadata.TryGetValue("supersedes", out var supersedes) ? supersedes : null);
AddJsonbParameter(command, "content_json", contentJson);
AddParameter(command, "content_size_bytes", canonicalContent.Length);
AddJsonbParameter(command, "metadata_json", metadataJson);
AddJsonbParameter(command, "provenance_json", provenanceJson);
AddParameter(command, "inline_payload", inline);
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
}
if (!inline)
{
// Out-of-line payload: store the canonical bytes; payload_hash reuses the document digest.
const string insertBlobSql = """
INSERT INTO vex.vex_raw_blobs (digest, payload, payload_hash)
VALUES (@digest, @payload, @payload_hash)
ON CONFLICT (digest) DO NOTHING;
""";
await using var blobCommand = CreateCommand(insertBlobSql, connection, transaction);
AddParameter(blobCommand, "digest", digest);
blobCommand.Parameters.Add(new NpgsqlParameter("payload", NpgsqlDbType.Bytea)
{
Value = canonicalContent.ToArray()
});
AddParameter(blobCommand, "payload_hash", digest);
await blobCommand.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
}
await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Loads a stored raw document by digest, or null when unknown. For out-of-line
/// payloads the blob row supplies the content bytes; inline payloads are re-encoded
/// from the content_json column.
/// </summary>
public async ValueTask<VexRawRecord?> FindByDigestAsync(string digest, CancellationToken cancellationToken)
{
ArgumentException.ThrowIfNullOrWhiteSpace(digest);
const string sql = """
SELECT d.digest,
d.tenant,
d.provider_id,
d.format,
d.source_uri,
d.retrieved_at,
d.metadata_json,
d.inline_payload,
d.content_json,
d.supersedes_digest,
d.etag,
d.recorded_at,
b.payload
FROM vex.vex_raw_documents d
LEFT JOIN vex.vex_raw_blobs b ON b.digest = d.digest
WHERE d.digest = @digest;
""";
// NOTE(review): the digest alone carries no tenant, so the lookup runs under the
// hard-coded "public" tenant — confirm this matches the data source's routing rules.
await using var connection = await DataSource.OpenConnectionAsync("public", "reader", cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "digest", digest);
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
{
return null;
}
var tenant = reader.GetString(1);
var providerId = reader.GetString(2);
var format = ParseFormat(reader.GetString(3));
var sourceUri = new Uri(reader.GetString(4));
var retrievedAt = reader.GetFieldValue<DateTime>(5);
var metadata = ParseMetadata(reader.GetString(6));
var inline = reader.GetFieldValue<bool>(7);
var contentJson = reader.GetString(8);
var supersedes = reader.IsDBNull(9) ? null : reader.GetString(9);
var etag = reader.IsDBNull(10) ? null : reader.GetString(10);
var recordedAt = reader.IsDBNull(11) ? (DateTimeOffset?)null : reader.GetFieldValue<DateTimeOffset>(11);
// Prefer the blob payload when the document was stored out of line and the blob row
// exists; otherwise fall back to re-encoding the inline JSON column.
ReadOnlyMemory<byte> contentBytes;
if (!inline && !reader.IsDBNull(12))
{
contentBytes = (byte[])reader.GetValue(12);
}
else
{
contentBytes = Encoding.UTF8.GetBytes(contentJson);
}
// retrieved_at is wrapped with a zero offset; assumes the column stores UTC — TODO confirm.
return new VexRawRecord(
digest,
tenant,
providerId,
format,
sourceUri,
new DateTimeOffset(retrievedAt, TimeSpan.Zero),
metadata,
contentBytes,
inline,
supersedes,
etag,
recordedAt);
}
/// <summary>
/// Pages raw-document summaries for a tenant, newest first, with optional provider /
/// digest / format / time-window filters and keyset pagination on (retrieved_at, digest).
/// </summary>
public async ValueTask<VexRawDocumentPage> QueryAsync(VexRawQuery query, CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(query);
var conditions = new List<string> { "tenant = @tenant" };
if (query.ProviderIds.Count > 0)
{
conditions.Add("provider_id = ANY(@providers)");
}
if (query.Digests.Count > 0)
{
conditions.Add("digest = ANY(@digests)");
}
if (query.Formats.Count > 0)
{
conditions.Add("format = ANY(@formats)");
}
if (query.Since is not null)
{
conditions.Add("retrieved_at >= @since");
}
if (query.Until is not null)
{
conditions.Add("retrieved_at <= @until");
}
if (query.Cursor is not null)
{
// Keyset pagination: strictly older than the cursor row, ties broken by digest.
conditions.Add("(retrieved_at < @cursor_retrieved_at OR (retrieved_at = @cursor_retrieved_at AND digest < @cursor_digest))");
}
var sql = $"""
SELECT digest, provider_id, format, source_uri, retrieved_at, metadata_json, inline_payload
FROM vex.vex_raw_documents
WHERE {string.Join(" AND ", conditions)}
ORDER BY retrieved_at DESC, digest DESC
LIMIT @limit;
""";
await using var connection = await DataSource.OpenConnectionAsync(query.Tenant, "reader", cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "tenant", query.Tenant);
// Array parameters are bound even when their condition was not added (empty arrays);
// presumably tolerated by the parameter binding — confirm unused parameters are allowed.
AddArray(command, "providers", query.ProviderIds);
AddArray(command, "digests", query.Digests);
AddArray(command, "formats", query.Formats.Select(static f => f.ToString().ToLowerInvariant()).ToArray());
if (query.Since is not null)
{
AddParameter(command, "since", query.Since.Value.UtcDateTime);
}
if (query.Until is not null)
{
AddParameter(command, "until", query.Until.Value.UtcDateTime);
}
if (query.Cursor is not null)
{
AddParameter(command, "cursor_retrieved_at", query.Cursor.RetrievedAt.UtcDateTime);
AddParameter(command, "cursor_digest", query.Cursor.Digest);
}
AddParameter(command, "limit", query.Limit);
var summaries = new List<VexRawDocumentSummary>();
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
{
var digest = reader.GetString(0);
var providerId = reader.GetString(1);
var format = ParseFormat(reader.GetString(2));
var sourceUri = new Uri(reader.GetString(3));
var retrievedAt = reader.GetFieldValue<DateTime>(4);
var metadata = ParseMetadata(reader.GetString(5));
var inline = reader.GetFieldValue<bool>(6);
summaries.Add(new VexRawDocumentSummary(
digest,
providerId,
format,
sourceUri,
new DateTimeOffset(retrievedAt, TimeSpan.Zero),
inline,
metadata));
}
// Heuristic: a full page reports more-available even when exactly Limit rows remained.
var hasMore = summaries.Count == query.Limit;
var nextCursor = hasMore && summaries.Count > 0
? new VexRawCursor(summaries[^1].RetrievedAt, summaries[^1].Digest)
: null;
return new VexRawDocumentPage(summaries, nextCursor, hasMore);
}
/// <summary>
/// Binds a text[] parameter; an empty collection binds an empty array rather than NULL.
/// </summary>
private static void AddArray(NpgsqlCommand command, string name, IReadOnlyCollection<string> values)
{
    var payload = values.Count == 0 ? Array.Empty<string>() : values.ToArray();
    var parameter = new NpgsqlParameter
    {
        ParameterName = name,
        NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Text,
        Value = payload,
    };
    command.Parameters.Add(parameter);
}
/// <summary>
/// Reads the tenant from the "tenant" metadata entry (trimmed); falls back to
/// "default" when the entry is absent or blank.
/// </summary>
private static string ResolveTenant(IReadOnlyDictionary<string, string> metadata)
{
    metadata.TryGetValue("tenant", out var tenant);
    return string.IsNullOrWhiteSpace(tenant) ? "default" : tenant.Trim();
}
/// <summary>Parses the stored lowercase format name; unrecognized values map to Unknown.</summary>
private static VexDocumentFormat ParseFormat(string value)
{
    if (Enum.TryParse<VexDocumentFormat>(value, ignoreCase: true, out var format))
    {
        return format;
    }
    return VexDocumentFormat.Unknown;
}
/// <summary>
/// Deserializes a flat JSON object into an immutable ordinal-keyed string map.
/// Blank input, malformed JSON, and non-object payloads all yield an empty map.
/// </summary>
private static ImmutableDictionary<string, string> ParseMetadata(string json)
{
    if (string.IsNullOrWhiteSpace(json))
    {
        return ImmutableDictionary<string, string>.Empty;
    }
    try
    {
        // JsonDocument is IDisposable; the original parse leaked it.
        using var doc = JsonDocument.Parse(json);
        if (doc.RootElement.ValueKind != JsonValueKind.Object)
        {
            // Previously handled implicitly by the bare catch around EnumerateObject.
            return ImmutableDictionary<string, string>.Empty;
        }

        var builder = ImmutableDictionary.CreateBuilder<string, string>(StringComparer.Ordinal);
        foreach (var property in doc.RootElement.EnumerateObject())
        {
            // Non-string values are flattened via JsonElement.ToString().
            builder[property.Name] = property.Value.ToString();
        }
        return builder.ToImmutable();
    }
    catch (JsonException)
    {
        // Metadata is best-effort; malformed payloads are treated as absent.
        return ImmutableDictionary<string, string>.Empty;
    }
}
private static byte[] CanonicalizeJson(ReadOnlyMemory<byte> content)
{
    // Re-serialize with deterministic ordering (see WriteCanonical) so that the
    // same logical document always yields the same bytes and therefore the same digest.
    using var document = JsonDocument.Parse(content);
    var output = new ArrayBufferWriter<byte>();
    var options = new JsonWriterOptions { Indented = false };
    using (var writer = new Utf8JsonWriter(output, options))
    {
        WriteCanonical(writer, document.RootElement);
    }
    return output.WrittenSpan.ToArray();
}
// Recursively re-emits a JSON element with object properties sorted by ordinal
// name, producing a stable byte representation for digest computation.
private static void WriteCanonical(Utf8JsonWriter writer, JsonElement element)
{
    switch (element.ValueKind)
    {
        case JsonValueKind.Object:
        {
            writer.WriteStartObject();
            var ordered = element.EnumerateObject().OrderBy(static p => p.Name, StringComparer.Ordinal);
            foreach (var property in ordered)
            {
                writer.WritePropertyName(property.Name);
                WriteCanonical(writer, property.Value);
            }
            writer.WriteEndObject();
            break;
        }
        case JsonValueKind.Array:
            writer.WriteStartArray();
            foreach (var child in element.EnumerateArray())
            {
                WriteCanonical(writer, child);
            }
            writer.WriteEndArray();
            break;
        case JsonValueKind.String:
            writer.WriteStringValue(element.GetString());
            break;
        case JsonValueKind.Number:
            // Prefer exact integer emission; fall back to double, then raw text
            // for numbers that fit neither (e.g. high-precision decimals).
            if (element.TryGetInt64(out var asLong))
            {
                writer.WriteNumberValue(asLong);
            }
            else if (element.TryGetDouble(out var asDouble))
            {
                writer.WriteNumberValue(asDouble);
            }
            else
            {
                writer.WriteRawValue(element.GetRawText());
            }
            break;
        case JsonValueKind.True:
        case JsonValueKind.False:
            writer.WriteBooleanValue(element.ValueKind == JsonValueKind.True);
            break;
        case JsonValueKind.Null:
        case JsonValueKind.Undefined:
            writer.WriteNullValue();
            break;
        default:
            writer.WriteRawValue(element.GetRawText());
            break;
    }
}
/// <summary>
/// Returns <paramref name="digest"/> unchanged when it already carries a
/// "sha256:" prefix; otherwise computes the SHA-256 of the canonical content
/// and formats it as "sha256:" followed by lowercase hex.
/// </summary>
private static string EnsureDigest(string digest, ReadOnlyMemory<byte> canonicalContent)
{
    if (!string.IsNullOrWhiteSpace(digest) && digest.StartsWith("sha256:", StringComparison.OrdinalIgnoreCase))
    {
        return digest;
    }

    // SHA256.HashData hashes the span directly. The original stackalloc +
    // TryHashData fallback was dead code (a 32-byte destination always fits
    // a SHA-256 digest) and re-hashed via an extra ToArray() allocation.
    var hash = System.Security.Cryptography.SHA256.HashData(canonicalContent.Span);
    return "sha256:" + Convert.ToHexString(hash).ToLowerInvariant();
}
private static string GetJsonString(ReadOnlyMemory<byte> canonicalContent)
{
    // Decode the canonical UTF-8 payload back into a string.
    return Encoding.UTF8.GetString(canonicalContent.Span);
}
// Shared serializer settings: camelCase member names and dictionary keys, to
// match the casing of JSON payloads this store reads and writes.
private static readonly JsonSerializerOptions JsonSerializerOptions = new()
{
    PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
    DictionaryKeyPolicy = JsonNamingPolicy.CamelCase
};
}

View File

@@ -1,5 +1,7 @@
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Excititor.Core.Observations;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.Storage.Postgres.Repositories;
using StellaOps.Infrastructure.Postgres;
using StellaOps.Infrastructure.Postgres.Options;
@@ -24,10 +26,14 @@ public static class ServiceCollectionExtensions
string sectionName = "Postgres:Excititor")
{
services.Configure<PostgresOptions>(sectionName, configuration.GetSection(sectionName));
services.Configure<VexStorageOptions>(configuration.GetSection("Excititor:Storage"));
services.AddSingleton<ExcititorDataSource>();
// Register repositories
services.AddScoped<IVexStatementRepository, VexStatementRepository>();
services.AddScoped<IAppendOnlyLinksetStore, PostgresAppendOnlyLinksetStore>();
services.AddScoped<IVexLinksetStore, PostgresAppendOnlyLinksetStore>();
services.AddScoped<IVexRawStore, PostgresVexRawStore>();
return services;
}
@@ -47,6 +53,9 @@ public static class ServiceCollectionExtensions
// Register repositories
services.AddScoped<IVexStatementRepository, VexStatementRepository>();
services.AddScoped<IAppendOnlyLinksetStore, PostgresAppendOnlyLinksetStore>();
services.AddScoped<IVexLinksetStore, PostgresAppendOnlyLinksetStore>();
services.AddScoped<IVexRawStore, PostgresVexRawStore>();
return services;
}

View File

@@ -15,6 +15,7 @@
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\StellaOps.Excititor.Core\StellaOps.Excititor.Core.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
</ItemGroup>

View File

@@ -11,6 +11,6 @@
<ProjectReference Include="../../__Libraries/StellaOps.Excititor.Storage.Mongo/StellaOps.Excititor.Storage.Mongo.csproj" />
<ProjectReference Include="../../__Libraries/StellaOps.Excititor.Core/StellaOps.Excititor.Core.csproj" />
<ProjectReference Include="../../__Libraries/StellaOps.Excititor.Policy/StellaOps.Excititor.Policy.csproj" />
<ProjectReference Include="../../../Concelier/__Libraries/StellaOps.Concelier.Storage.Mongo/StellaOps.Concelier.Storage.Mongo.csproj" />
<ProjectReference Include="../../../Concelier/__Libraries/StellaOps.Concelier.Models/StellaOps.Concelier.Models.csproj" />
</ItemGroup>
</Project>

View File

@@ -15,6 +15,8 @@ public sealed class ExcititorPostgresFixture : PostgresIntegrationFixture, IColl
=> typeof(ExcititorDataSource).Assembly;
protected override string GetModuleName() => "Excititor";
protected override string? GetResourcePrefix() => "Migrations";
}
/// <summary>

View File

@@ -0,0 +1,136 @@
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Excititor.Core.Observations;
using StellaOps.Excititor.Storage.Postgres;
using StellaOps.Excititor.Storage.Postgres.Repositories;
using StellaOps.Infrastructure.Postgres.Options;
using Xunit;
namespace StellaOps.Excititor.Storage.Postgres.Tests;
// Integration tests for PostgresAppendOnlyLinksetStore, run against a live
// PostgreSQL instance supplied by ExcititorPostgresFixture.
[Collection(ExcititorPostgresCollection.Name)]
public sealed class PostgresAppendOnlyLinksetStoreTests : IAsyncLifetime
{
private readonly ExcititorPostgresFixture _fixture;
private readonly PostgresAppendOnlyLinksetStore _store;
private readonly ExcititorDataSource _dataSource;
public PostgresAppendOnlyLinksetStoreTests(ExcititorPostgresFixture fixture)
{
_fixture = fixture;
// AutoMigrate is disabled here because InitializeAsync applies migrations explicitly.
var options = Options.Create(new PostgresOptions
{
ConnectionString = fixture.ConnectionString,
SchemaName = fixture.SchemaName,
AutoMigrate = false
});
_dataSource = new ExcititorDataSource(options, NullLogger<ExcititorDataSource>.Instance);
_store = new PostgresAppendOnlyLinksetStore(_dataSource, NullLogger<PostgresAppendOnlyLinksetStore>.Instance);
}
// Applies the Excititor schema migrations, then truncates all tables so each
// test starts from an empty database.
public async Task InitializeAsync()
{
await _fixture.Fixture.RunMigrationsFromAssemblyAsync(
typeof(ExcititorDataSource).Assembly,
moduleName: "Excititor",
resourcePrefix: "Migrations",
cancellationToken: CancellationToken.None);
// Ensure migration applied even if runner skipped; execute embedded SQL directly as fallback.
var resourceName = typeof(ExcititorDataSource).Assembly
.GetManifestResourceNames()
.FirstOrDefault(n => n.EndsWith("001_initial_schema.sql", StringComparison.OrdinalIgnoreCase));
await using var stream = resourceName is null
? null
: typeof(ExcititorDataSource).Assembly.GetManifestResourceStream(resourceName);
if (stream is not null)
{
using var reader = new StreamReader(stream);
var sql = await reader.ReadToEndAsync();
await _fixture.Fixture.ExecuteSqlAsync(sql);
}
await _fixture.TruncateAllTablesAsync();
}
// Dispose the data source so pooled connections are released between test classes.
public async Task DisposeAsync()
{
await _dataSource.DisposeAsync();
}
// Re-appending an identical observation must be a no-op: no duplicate entry,
// no change reported, and the sequence number must not advance.
[Fact]
public async Task AppendObservation_CreatesLinksetAndDedupes()
{
var tenant = "tenant-a";
var vuln = "CVE-2025-1234";
var product = "pkg:nuget/demo@1.0.0";
var scope = VexProductScope.Unknown(product);
var observation = new VexLinksetObservationRefModel("obs-1", "provider-a", "not_affected", 0.9);
var first = await _store.AppendObservationAsync(tenant, vuln, product, observation, scope, CancellationToken.None);
first.WasCreated.Should().BeTrue();
first.ObservationsAdded.Should().Be(1);
first.SequenceNumber.Should().BeGreaterThan(0);
first.Linkset.Observations.Should().HaveCount(1);
var second = await _store.AppendObservationAsync(tenant, vuln, product, observation, scope, CancellationToken.None);
second.HadChanges.Should().BeFalse();
second.Linkset.Observations.Should().HaveCount(1);
second.SequenceNumber.Should().Be(first.SequenceNumber);
var mutations = await _store.GetMutationLogAsync(tenant, first.Linkset.LinksetId, CancellationToken.None);
mutations.Select(m => m.SequenceNumber).Should().BeInAscendingOrder();
mutations.Should().HaveCount(2); // created + observation
}
// A batch append must store every distinct observation and surface them in the
// store's canonical ordering (asserted below as provider, then status, then id).
[Fact]
public async Task AppendBatch_AppendsMultipleAndMaintainsOrder()
{
var tenant = "tenant-b";
var vuln = "CVE-2025-2000";
var product = "pkg:maven/demo/demo@2.0.0";
var scope = VexProductScope.Unknown(product);
var observations = new[]
{
new VexLinksetObservationRefModel("obs-2", "provider-b", "affected", 0.7),
new VexLinksetObservationRefModel("obs-1", "provider-a", "affected", 0.8),
new VexLinksetObservationRefModel("obs-3", "provider-a", "fixed", 0.9)
};
var result = await _store.AppendObservationsBatchAsync(tenant, vuln, product, observations, scope, CancellationToken.None);
result.Linkset.Observations.Should().HaveCount(3);
result.Linkset.Observations
.Select(o => $"{o.ProviderId}:{o.Status}:{o.ObservationId}")
.Should()
.ContainInOrder(
"provider-a:affected:obs-1",
"provider-a:fixed:obs-3",
"provider-b:affected:obs-2");
result.SequenceNumber.Should().BeGreaterThan(0);
}
// Recording a disagreement must flag the linkset as conflicted and make it
// discoverable through both the conflict listing and the conflict count.
[Fact]
public async Task AppendDisagreement_RegistersConflictAndCounts()
{
var tenant = "tenant-c";
var vuln = "CVE-2025-3000";
var product = "pkg:deb/debian/demo@1.2.3";
var disagreement = new VexObservationDisagreement("provider-c", "not_affected", "component_not_present", 0.6);
var result = await _store.AppendDisagreementAsync(tenant, vuln, product, disagreement, CancellationToken.None);
result.Linkset.HasConflicts.Should().BeTrue();
result.SequenceNumber.Should().BeGreaterThan(0);
var conflicts = await _store.FindWithConflictsAsync(tenant, limit: 10, CancellationToken.None);
conflicts.Should().ContainSingle(ls => ls.LinksetId == result.Linkset.LinksetId);
var conflictCount = await _store.CountWithConflictsAsync(tenant, CancellationToken.None);
conflictCount.Should().Be(1);
}
}

View File

@@ -10,9 +10,16 @@
<IsTestProject>true</IsTestProject>
</PropertyGroup>
<ItemGroup>
<PackageReference Remove="Microsoft.NET.Test.Sdk" />
<PackageReference Remove="xunit" />
<PackageReference Remove="xunit.runner.visualstudio" />
<PackageReference Remove="coverlet.collector" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="FluentAssertions" Version="6.12.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.11.1" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.0" />
<PackageReference Include="Moq" Version="4.20.70" />
<PackageReference Include="xunit" Version="2.9.2" />
<PackageReference Include="xunit.runner.visualstudio" Version="2.8.2">
@@ -27,6 +34,7 @@
<ItemGroup>
<ProjectReference Include="..\..\__Libraries\StellaOps.Excititor.Storage.Postgres\StellaOps.Excititor.Storage.Postgres.csproj" />
<ProjectReference Include="..\..\__Libraries\StellaOps.Excititor.Core\StellaOps.Excititor.Core.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres.Testing\StellaOps.Infrastructure.Postgres.Testing.csproj" />
</ItemGroup>