This commit is contained in:
StellaOps Bot
2025-12-09 00:20:52 +02:00
parent 3d01bf9edc
commit bc0762e97d
261 changed files with 14033 additions and 4427 deletions

View File

@@ -75,13 +75,7 @@ public sealed class AppendOnlyLinksetExtractionService
results.Add(result);
if (result.HadChanges && _eventPublisher is not null)
{
await _eventPublisher.PublishLinksetUpdatedAsync(
normalizedTenant,
result.Linkset,
cancellationToken);
}
await PublishIfNeededAsync(normalizedTenant, result, cancellationToken);
}
catch (Exception ex)
{
@@ -142,13 +136,7 @@ public sealed class AppendOnlyLinksetExtractionService
disagreement,
cancellationToken);
if (storeResult.HadChanges && _eventPublisher is not null)
{
await _eventPublisher.PublishLinksetUpdatedAsync(
normalizedTenant,
storeResult.Linkset,
cancellationToken);
}
await PublishIfNeededAsync(normalizedTenant, storeResult, cancellationToken);
return LinksetAppendResult.Succeeded(
normalizedTenant,
@@ -193,7 +181,7 @@ public sealed class AppendOnlyLinksetExtractionService
ProviderId: obs.ProviderId,
Status: stmt.Status.ToString().ToLowerInvariant(),
Confidence: null)))
.Distinct(VexLinksetObservationRefComparer.Instance)
.DistinctBy(refModel => $"{refModel.ProviderId}:{refModel.Status}:{refModel.ObservationId}", StringComparer.OrdinalIgnoreCase)
.ToList();
if (observationRefs.Count == 0)
@@ -263,6 +251,60 @@ public sealed class AppendOnlyLinksetExtractionService
return at >= 0 && at < key.Length - 1 ? key[(at + 1)..] : null;
}
/// <summary>
/// Publishes a linkset-updated event for an append result, but only when a
/// publisher is configured and the append actually changed stored state.
/// </summary>
private async Task PublishIfNeededAsync(string tenant, AppendLinksetResult result, CancellationToken cancellationToken)
{
    // No-op when nothing changed or eventing is disabled.
    if (!result.HadChanges || _eventPublisher is null)
    {
        return;
    }

    await _eventPublisher
        .PublishAsync(ToEvent(tenant, result.Linkset), cancellationToken)
        .ConfigureAwait(false);
}
/// <summary>
/// Projects a <see cref="VexLinkset"/> into its update-event payload with
/// deterministic ordering (provider, then status, then id/justification) so the
/// serialized event is stable for audit and replay.
/// </summary>
private static VexLinksetUpdatedEvent ToEvent(string tenant, VexLinkset linkset)
{
    // Observation refs carry an empty metadata dictionary; ordering is
    // case-insensitive except for the observation id, which is ordinal.
    var orderedObservations = linkset.Observations
        .Select(observation => new VexLinksetObservationRefCore(
            observation.ObservationId,
            observation.ProviderId,
            observation.Status,
            observation.Confidence,
            ImmutableDictionary<string, string>.Empty))
        .OrderBy(item => item.ProviderId, StringComparer.OrdinalIgnoreCase)
        .ThenBy(item => item.Status, StringComparer.OrdinalIgnoreCase)
        .ThenBy(item => item.ObservationId, StringComparer.Ordinal)
        .ToImmutableArray();

    // Disagreements sort on the same provider/status axes; a null justification
    // sorts as the empty string.
    var orderedDisagreements = linkset.Disagreements
        .OrderBy(item => item.ProviderId, StringComparer.OrdinalIgnoreCase)
        .ThenBy(item => item.Status, StringComparer.OrdinalIgnoreCase)
        .ThenBy(item => item.Justification ?? string.Empty, StringComparer.OrdinalIgnoreCase)
        .ToImmutableArray();

    return new VexLinksetUpdatedEvent(
        VexLinksetUpdatedEventFactory.EventType,
        tenant,
        linkset.LinksetId,
        linkset.VulnerabilityId,
        linkset.ProductKey,
        linkset.Scope,
        orderedObservations,
        orderedDisagreements,
        linkset.UpdatedAt);
}
/// <summary>
/// Publishes a linkset-updated event for a <see cref="LinksetAppendResult"/>.
/// Skips publishing when eventing is disabled, nothing changed, or the result
/// carries no materialized linkset.
/// </summary>
private async Task PublishIfNeededAsync(string tenant, LinksetAppendResult result, CancellationToken cancellationToken)
{
    if (_eventPublisher is null)
    {
        return;
    }

    // Unlike the AppendLinksetResult overload, Linkset may be null here.
    if (!result.HadChanges || result.Linkset is null)
    {
        return;
    }

    var updatedEvent = ToEvent(tenant, result.Linkset);
    await _eventPublisher.PublishAsync(updatedEvent, cancellationToken).ConfigureAwait(false);
}
// Normalizes a required string; delegates validation (non-null, non-whitespace) to the core helper.
private static string Normalize(string value)
{
    return VexObservation.EnsureNotNullOrWhiteSpace(value, nameof(value));
}

View File

@@ -9,6 +9,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Options" Version="10.0.0" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="../../../Aoc/__Libraries/StellaOps.Aoc/StellaOps.Aoc.csproj" />

View File

@@ -0,0 +1,7 @@
// Temporary stubs to allow legacy interfaces to compile while MongoDB is removed.
// These types are intentionally minimal; they do not perform any database operations.
namespace MongoDB.Driver;
/// <summary>
/// Minimal stand-in for the MongoDB driver's session handle so legacy interfaces
/// keep compiling while MongoDB support is being removed. It intentionally has no
/// members and performs no database operations; disposal is a no-op contract.
/// </summary>
public interface IClientSessionHandle : IAsyncDisposable, IDisposable
{
}

View File

@@ -0,0 +1,80 @@
using System;
using System.Collections.Generic;
using System.Collections.Immutable;
namespace StellaOps.Excititor.Core.Storage;
/// <summary>
/// Query envelope for listing raw VEX documents.
/// </summary>
/// <param name="Tenant">Tenant the listing is scoped to.</param>
/// <param name="ProviderIds">Provider ids to match (presumably an empty collection means no provider filter — confirm against the store implementation).</param>
/// <param name="Digests">Content-addressed digests to match.</param>
/// <param name="Formats">Document formats to match.</param>
/// <param name="Since">Lower bound on retrieval time, or null for no lower bound; inclusivity is defined by the store.</param>
/// <param name="Until">Upper bound on retrieval time, or null for no upper bound.</param>
/// <param name="Cursor">Resume point from a previous page, or null for the first page.</param>
/// <param name="Limit">Maximum number of items to return per page.</param>
public sealed record VexRawQuery(
    string Tenant,
    IReadOnlyCollection<string> ProviderIds,
    IReadOnlyCollection<string> Digests,
    IReadOnlyCollection<VexDocumentFormat> Formats,
    DateTimeOffset? Since,
    DateTimeOffset? Until,
    VexRawCursor? Cursor,
    int Limit);
/// <summary>
/// Stable pagination cursor based on retrieved-at and digest ordering.
/// </summary>
/// <param name="RetrievedAt">Retrieval timestamp of the last item on the previous page.</param>
/// <param name="Digest">Digest of the last item; acts as a tiebreaker when timestamps collide.</param>
public sealed record VexRawCursor(DateTimeOffset RetrievedAt, string Digest);
/// <summary>
/// Lightweight summary used for list endpoints (no payload bytes included).
/// </summary>
/// <param name="Digest">Content-addressed digest identifying the document.</param>
/// <param name="ProviderId">Provider that supplied the document.</param>
/// <param name="Format">Document format.</param>
/// <param name="SourceUri">Where the document was fetched from.</param>
/// <param name="RetrievedAt">When the document was retrieved.</param>
/// <param name="InlineContent">True when the content is stored inline rather than in the blob table.</param>
/// <param name="Metadata">Provider/ingest metadata key-value pairs.</param>
public sealed record VexRawDocumentSummary(
    string Digest,
    string ProviderId,
    VexDocumentFormat Format,
    Uri SourceUri,
    DateTimeOffset RetrievedAt,
    bool InlineContent,
    ImmutableDictionary<string, string> Metadata);
/// <summary>
/// Paged result for raw document listings.
/// </summary>
/// <param name="Items">Summaries for this page, in the store's deterministic order.</param>
/// <param name="NextCursor">Cursor to resume from, or null when no further pages exist.</param>
/// <param name="HasMore">True when additional items remain beyond this page.</param>
public sealed record VexRawDocumentPage(
    IReadOnlyList<VexRawDocumentSummary> Items,
    VexRawCursor? NextCursor,
    bool HasMore);
/// <summary>
/// Stored raw VEX document with canonical content and metadata.
/// </summary>
/// <param name="Digest">Content-addressed digest (e.g. "sha256:...").</param>
/// <param name="Tenant">Owning tenant.</param>
/// <param name="ProviderId">Provider that supplied the document.</param>
/// <param name="Format">Document format.</param>
/// <param name="SourceUri">Origin URI of the document.</param>
/// <param name="RetrievedAt">When the document was retrieved from the source.</param>
/// <param name="Metadata">Provider/ingest metadata key-value pairs.</param>
/// <param name="Content">Raw document bytes.</param>
/// <param name="InlineContent">True when content is stored inline rather than in the blob table.</param>
/// <param name="SupersedesDigest">Digest of the prior document revision this one replaces, if any.</param>
/// <param name="ETag">HTTP ETag observed at retrieval time, if any.</param>
/// <param name="RecordedAt">When the record was persisted; null presumably means the store assigns it — confirm in the store implementation.</param>
public sealed record VexRawRecord(
    string Digest,
    string Tenant,
    string ProviderId,
    VexDocumentFormat Format,
    Uri SourceUri,
    DateTimeOffset RetrievedAt,
    ImmutableDictionary<string, string> Metadata,
    ReadOnlyMemory<byte> Content,
    bool InlineContent,
    string? SupersedesDigest = null,
    string? ETag = null,
    DateTimeOffset? RecordedAt = null);
/// <summary>
/// Append-only raw document store abstraction (backed by Postgres for Excititor).
/// Extends <see cref="IVexRawDocumentSink"/> with read/query capabilities.
/// </summary>
public interface IVexRawStore : IVexRawDocumentSink
{
    /// <summary>
    /// Finds a raw document by digest.
    /// </summary>
    /// <param name="digest">Content-addressed digest (sha256:...)</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The stored record, or null when no document with that digest exists.</returns>
    ValueTask<VexRawRecord?> FindByDigestAsync(string digest, CancellationToken cancellationToken);
    /// <summary>
    /// Lists raw documents using deterministic ordering.
    /// </summary>
    /// <param name="query">Query filters and pagination cursor.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>One page of summaries plus the cursor for the next page.</returns>
    ValueTask<VexRawDocumentPage> QueryAsync(VexRawQuery query, CancellationToken cancellationToken);
}

View File

@@ -0,0 +1,38 @@
namespace StellaOps.Excititor.Core.Storage;
/// <summary>
/// Storage options for Excititor persistence (Postgres-backed, legacy name retained for compatibility).
/// Not sealed so the legacy <c>VexMongoStorageOptions</c> alias can derive from it.
/// </summary>
public class VexStorageOptions
{
    /// <summary>
    /// Default tenant to apply when no tenant header is supplied.
    /// </summary>
    public string DefaultTenant { get; set; } = "default";
    /// <summary>
    /// Inline content threshold in bytes; larger payloads are stored in the blob table.
    /// Defaults to 256 KiB.
    /// </summary>
    public int InlineThresholdBytes { get; set; } = 256 * 1024;
}
/// <summary>
/// Legacy alias preserved while migrating off MongoDB-specific naming.
/// Inherits all behavior from <see cref="VexStorageOptions"/>; the extra members
/// below exist only so old configuration bindings keep working.
/// </summary>
[System.Obsolete("Use VexStorageOptions; retained for backwards compatibility during Mongo removal.")]
public sealed class VexMongoStorageOptions : VexStorageOptions
{
    /// <summary>
    /// Historical bucket name (unused in Postgres mode).
    /// </summary>
    public string RawBucketName { get; set; } = "vex-raw";
    /// <summary>
    /// Backwards-compatible inline threshold property; reads and writes
    /// pass straight through to <see cref="VexStorageOptions.InlineThresholdBytes"/>.
    /// </summary>
    public int GridFsInlineThresholdBytes
    {
        get => InlineThresholdBytes;
        set => InlineThresholdBytes = value;
    }
}

View File

@@ -1,308 +1,92 @@
-- VEX Schema Migration 001: Initial Schema
-- Creates the vex schema for VEX statements and dependency graphs
-- VEX Schema Migration 001: Append-only linksets (no Mongo, no consensus)
-- This migration defines an append-only Postgres backend for Excititor linksets,
-- observations, disagreements, and mutation logs. All operations are additive and
-- preserve deterministic ordering for audit/replay.
-- Create schema
CREATE SCHEMA IF NOT EXISTS vex;
-- Projects table
CREATE TABLE IF NOT EXISTS vex.projects (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
name TEXT NOT NULL,
display_name TEXT,
description TEXT,
repository_url TEXT,
default_branch TEXT,
settings JSONB NOT NULL DEFAULT '{}',
metadata JSONB NOT NULL DEFAULT '{}',
-- Drop legacy tables that carried mutable/consensus state
DROP TABLE IF EXISTS vex.linkset_mutations CASCADE;
DROP TABLE IF EXISTS vex.linkset_disagreements CASCADE;
DROP TABLE IF EXISTS vex.linkset_observations CASCADE;
DROP TABLE IF EXISTS vex.linksets CASCADE;
DROP TABLE IF EXISTS vex.observations CASCADE;
DROP TABLE IF EXISTS vex.consensus_holds CASCADE;
DROP TABLE IF EXISTS vex.consensus CASCADE;
DROP TABLE IF EXISTS vex.statements CASCADE;
DROP TABLE IF EXISTS vex.graph_edges CASCADE;
DROP TABLE IF EXISTS vex.graph_nodes CASCADE;
DROP TABLE IF EXISTS vex.graph_revisions CASCADE;
DROP TABLE IF EXISTS vex.projects CASCADE;
DROP TABLE IF EXISTS vex.linkset_events CASCADE;
DROP TABLE IF EXISTS vex.evidence_manifests CASCADE;
DROP TABLE IF EXISTS vex.cvss_receipts CASCADE;
DROP TABLE IF EXISTS vex.attestations CASCADE;
DROP TABLE IF EXISTS vex.timeline_events CASCADE;
DROP TABLE IF EXISTS vex.unknown_items CASCADE;
DROP TABLE IF EXISTS vex.unknowns_snapshots CASCADE;
-- Core linkset table (append-only semantics; updated_at is refreshed on append)
CREATE TABLE vex.linksets (
linkset_id TEXT PRIMARY KEY,
tenant TEXT NOT NULL,
vulnerability_id TEXT NOT NULL,
product_key TEXT NOT NULL,
scope JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
created_by TEXT,
UNIQUE(tenant_id, name)
UNIQUE (tenant, vulnerability_id, product_key)
);
CREATE INDEX idx_projects_tenant ON vex.projects(tenant_id);
CREATE INDEX idx_linksets_updated ON vex.linksets (tenant, updated_at DESC);
-- Graph revisions table
CREATE TABLE IF NOT EXISTS vex.graph_revisions (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
project_id UUID NOT NULL REFERENCES vex.projects(id) ON DELETE CASCADE,
revision_id TEXT NOT NULL UNIQUE,
parent_revision_id TEXT,
sbom_digest TEXT NOT NULL,
feed_snapshot_id TEXT,
policy_version TEXT,
node_count INT NOT NULL DEFAULT 0,
edge_count INT NOT NULL DEFAULT 0,
metadata JSONB NOT NULL DEFAULT '{}',
-- Observation references recorded per linkset (immutable; deduplicated)
CREATE TABLE vex.linkset_observations (
id BIGSERIAL PRIMARY KEY,
linkset_id TEXT NOT NULL REFERENCES vex.linksets(linkset_id) ON DELETE CASCADE,
observation_id TEXT NOT NULL,
provider_id TEXT NOT NULL,
status TEXT NOT NULL CHECK (status IN ('affected', 'not_affected', 'fixed', 'under_investigation')),
confidence NUMERIC(4,3),
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
created_by TEXT
UNIQUE (linkset_id, observation_id, provider_id, status)
);
CREATE INDEX idx_graph_revisions_project ON vex.graph_revisions(project_id);
CREATE INDEX idx_graph_revisions_revision ON vex.graph_revisions(revision_id);
CREATE INDEX idx_graph_revisions_created ON vex.graph_revisions(project_id, created_at DESC);
CREATE INDEX idx_linkset_observations_linkset ON vex.linkset_observations (linkset_id);
CREATE INDEX idx_linkset_observations_provider ON vex.linkset_observations (linkset_id, provider_id);
CREATE INDEX idx_linkset_observations_status ON vex.linkset_observations (linkset_id, status);
-- Graph nodes table (BIGSERIAL for high volume)
CREATE TABLE IF NOT EXISTS vex.graph_nodes (
-- Disagreements/conflicts recorded per linkset (immutable; deduplicated)
CREATE TABLE vex.linkset_disagreements (
id BIGSERIAL PRIMARY KEY,
graph_revision_id UUID NOT NULL REFERENCES vex.graph_revisions(id) ON DELETE CASCADE,
node_key TEXT NOT NULL,
node_type TEXT NOT NULL,
purl TEXT,
name TEXT,
version TEXT,
attributes JSONB NOT NULL DEFAULT '{}',
UNIQUE(graph_revision_id, node_key)
);
CREATE INDEX idx_graph_nodes_revision ON vex.graph_nodes(graph_revision_id);
CREATE INDEX idx_graph_nodes_key ON vex.graph_nodes(graph_revision_id, node_key);
CREATE INDEX idx_graph_nodes_purl ON vex.graph_nodes(purl);
CREATE INDEX idx_graph_nodes_type ON vex.graph_nodes(graph_revision_id, node_type);
-- Graph edges table (BIGSERIAL for high volume)
CREATE TABLE IF NOT EXISTS vex.graph_edges (
id BIGSERIAL PRIMARY KEY,
graph_revision_id UUID NOT NULL REFERENCES vex.graph_revisions(id) ON DELETE CASCADE,
from_node_id BIGINT NOT NULL REFERENCES vex.graph_nodes(id) ON DELETE CASCADE,
to_node_id BIGINT NOT NULL REFERENCES vex.graph_nodes(id) ON DELETE CASCADE,
edge_type TEXT NOT NULL,
attributes JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_graph_edges_revision ON vex.graph_edges(graph_revision_id);
CREATE INDEX idx_graph_edges_from ON vex.graph_edges(from_node_id);
CREATE INDEX idx_graph_edges_to ON vex.graph_edges(to_node_id);
-- VEX statements table
CREATE TABLE IF NOT EXISTS vex.statements (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
project_id UUID REFERENCES vex.projects(id),
graph_revision_id UUID REFERENCES vex.graph_revisions(id),
vulnerability_id TEXT NOT NULL,
product_id TEXT,
status TEXT NOT NULL CHECK (status IN (
'not_affected', 'affected', 'fixed', 'under_investigation'
)),
justification TEXT CHECK (justification IN (
'component_not_present', 'vulnerable_code_not_present',
'vulnerable_code_not_in_execute_path', 'vulnerable_code_cannot_be_controlled_by_adversary',
'inline_mitigations_already_exist'
)),
impact_statement TEXT,
action_statement TEXT,
action_statement_timestamp TIMESTAMPTZ,
first_issued TIMESTAMPTZ NOT NULL DEFAULT NOW(),
last_updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
source TEXT,
source_url TEXT,
evidence JSONB NOT NULL DEFAULT '{}',
provenance JSONB NOT NULL DEFAULT '{}',
metadata JSONB NOT NULL DEFAULT '{}',
created_by TEXT
);
CREATE INDEX idx_statements_tenant ON vex.statements(tenant_id);
CREATE INDEX idx_statements_project ON vex.statements(project_id);
CREATE INDEX idx_statements_revision ON vex.statements(graph_revision_id);
CREATE INDEX idx_statements_vuln ON vex.statements(vulnerability_id);
CREATE INDEX idx_statements_status ON vex.statements(tenant_id, status);
-- VEX observations table
CREATE TABLE IF NOT EXISTS vex.observations (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
statement_id UUID REFERENCES vex.statements(id) ON DELETE CASCADE,
vulnerability_id TEXT NOT NULL,
product_id TEXT NOT NULL,
observed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
observer TEXT NOT NULL,
observation_type TEXT NOT NULL,
confidence NUMERIC(3,2),
details JSONB NOT NULL DEFAULT '{}',
linkset_id TEXT NOT NULL REFERENCES vex.linksets(linkset_id) ON DELETE CASCADE,
provider_id TEXT NOT NULL,
status TEXT NOT NULL,
justification TEXT,
confidence NUMERIC(4,3),
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
UNIQUE(tenant_id, vulnerability_id, product_id, observer, observation_type)
UNIQUE (linkset_id, provider_id, status, justification)
);
CREATE INDEX idx_observations_tenant ON vex.observations(tenant_id);
CREATE INDEX idx_observations_statement ON vex.observations(statement_id);
CREATE INDEX idx_observations_vuln ON vex.observations(vulnerability_id, product_id);
CREATE INDEX idx_linkset_disagreements_linkset ON vex.linkset_disagreements (linkset_id);
-- Linksets table
CREATE TABLE IF NOT EXISTS vex.linksets (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
name TEXT NOT NULL,
description TEXT,
source_type TEXT NOT NULL,
source_url TEXT,
enabled BOOLEAN NOT NULL DEFAULT TRUE,
priority INT NOT NULL DEFAULT 0,
filter JSONB NOT NULL DEFAULT '{}',
metadata JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
UNIQUE(tenant_id, name)
-- Append-only mutation log for deterministic replay/audit
CREATE TABLE vex.linkset_mutations (
sequence_number BIGSERIAL PRIMARY KEY,
linkset_id TEXT NOT NULL REFERENCES vex.linksets(linkset_id) ON DELETE CASCADE,
mutation_type TEXT NOT NULL CHECK (mutation_type IN ('linkset_created', 'observation_added', 'disagreement_added')),
observation_id TEXT,
provider_id TEXT,
status TEXT,
confidence NUMERIC(4,3),
justification TEXT,
occurred_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_linksets_tenant ON vex.linksets(tenant_id);
CREATE INDEX idx_linksets_enabled ON vex.linksets(tenant_id, enabled, priority DESC);
CREATE INDEX idx_linkset_mutations_linkset ON vex.linkset_mutations (linkset_id, sequence_number);
-- Linkset events table
CREATE TABLE IF NOT EXISTS vex.linkset_events (
id BIGSERIAL PRIMARY KEY,
linkset_id UUID NOT NULL REFERENCES vex.linksets(id) ON DELETE CASCADE,
event_type TEXT NOT NULL,
statement_count INT NOT NULL DEFAULT 0,
error_message TEXT,
metadata JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_linkset_events_linkset ON vex.linkset_events(linkset_id);
CREATE INDEX idx_linkset_events_created ON vex.linkset_events(created_at);
-- Consensus table (VEX consensus state)
CREATE TABLE IF NOT EXISTS vex.consensus (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
vulnerability_id TEXT NOT NULL,
product_id TEXT NOT NULL,
consensus_status TEXT NOT NULL,
contributing_statements UUID[] NOT NULL DEFAULT '{}',
confidence NUMERIC(3,2),
computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
metadata JSONB NOT NULL DEFAULT '{}',
UNIQUE(tenant_id, vulnerability_id, product_id)
);
CREATE INDEX idx_consensus_tenant ON vex.consensus(tenant_id);
CREATE INDEX idx_consensus_vuln ON vex.consensus(vulnerability_id, product_id);
-- Consensus holds table
CREATE TABLE IF NOT EXISTS vex.consensus_holds (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
consensus_id UUID NOT NULL REFERENCES vex.consensus(id) ON DELETE CASCADE,
hold_type TEXT NOT NULL,
reason TEXT NOT NULL,
held_by TEXT NOT NULL,
held_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
released_at TIMESTAMPTZ,
released_by TEXT,
metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_consensus_holds_consensus ON vex.consensus_holds(consensus_id);
CREATE INDEX idx_consensus_holds_active ON vex.consensus_holds(consensus_id, released_at)
WHERE released_at IS NULL;
-- Unknown snapshots table
CREATE TABLE IF NOT EXISTS vex.unknowns_snapshots (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
project_id UUID REFERENCES vex.projects(id),
graph_revision_id UUID REFERENCES vex.graph_revisions(id),
snapshot_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
unknown_count INT NOT NULL DEFAULT 0,
metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_unknowns_snapshots_tenant ON vex.unknowns_snapshots(tenant_id);
CREATE INDEX idx_unknowns_snapshots_project ON vex.unknowns_snapshots(project_id);
-- Unknown items table
CREATE TABLE IF NOT EXISTS vex.unknown_items (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
snapshot_id UUID NOT NULL REFERENCES vex.unknowns_snapshots(id) ON DELETE CASCADE,
vulnerability_id TEXT NOT NULL,
product_id TEXT,
reason TEXT NOT NULL,
metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_unknown_items_snapshot ON vex.unknown_items(snapshot_id);
CREATE INDEX idx_unknown_items_vuln ON vex.unknown_items(vulnerability_id);
-- Evidence manifests table
CREATE TABLE IF NOT EXISTS vex.evidence_manifests (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
statement_id UUID REFERENCES vex.statements(id) ON DELETE CASCADE,
manifest_type TEXT NOT NULL,
content_hash TEXT NOT NULL,
content JSONB NOT NULL,
source TEXT,
collected_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_evidence_manifests_tenant ON vex.evidence_manifests(tenant_id);
CREATE INDEX idx_evidence_manifests_statement ON vex.evidence_manifests(statement_id);
-- CVSS receipts table
CREATE TABLE IF NOT EXISTS vex.cvss_receipts (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
statement_id UUID REFERENCES vex.statements(id) ON DELETE CASCADE,
vulnerability_id TEXT NOT NULL,
cvss_version TEXT NOT NULL,
vector_string TEXT NOT NULL,
base_score NUMERIC(3,1) NOT NULL,
environmental_score NUMERIC(3,1),
temporal_score NUMERIC(3,1),
computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
metadata JSONB NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_cvss_receipts_tenant ON vex.cvss_receipts(tenant_id);
CREATE INDEX idx_cvss_receipts_statement ON vex.cvss_receipts(statement_id);
CREATE INDEX idx_cvss_receipts_vuln ON vex.cvss_receipts(vulnerability_id);
-- Attestations table
CREATE TABLE IF NOT EXISTS vex.attestations (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
statement_id UUID REFERENCES vex.statements(id),
subject_digest TEXT NOT NULL,
predicate_type TEXT NOT NULL,
predicate JSONB NOT NULL,
signature TEXT,
signature_algorithm TEXT,
signed_by TEXT,
signed_at TIMESTAMPTZ,
verified BOOLEAN NOT NULL DEFAULT FALSE,
verified_at TIMESTAMPTZ,
metadata JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_attestations_tenant ON vex.attestations(tenant_id);
CREATE INDEX idx_attestations_statement ON vex.attestations(statement_id);
CREATE INDEX idx_attestations_subject ON vex.attestations(subject_digest);
-- Timeline events table
CREATE TABLE IF NOT EXISTS vex.timeline_events (
id BIGSERIAL PRIMARY KEY,
tenant_id TEXT NOT NULL,
project_id UUID REFERENCES vex.projects(id),
statement_id UUID REFERENCES vex.statements(id),
event_type TEXT NOT NULL,
event_data JSONB NOT NULL DEFAULT '{}',
actor TEXT,
correlation_id TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_timeline_events_tenant ON vex.timeline_events(tenant_id);
CREATE INDEX idx_timeline_events_project ON vex.timeline_events(project_id);
CREATE INDEX idx_timeline_events_statement ON vex.timeline_events(statement_id);
CREATE INDEX idx_timeline_events_created ON vex.timeline_events(tenant_id, created_at);
CREATE INDEX idx_timeline_events_correlation ON vex.timeline_events(correlation_id);
-- Update timestamp function
CREATE OR REPLACE FUNCTION vex.update_updated_at()
-- Refresh updated_at whenever linkset rows change
CREATE OR REPLACE FUNCTION vex.touch_updated_at()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
@@ -310,15 +94,6 @@ BEGIN
END;
$$ LANGUAGE plpgsql;
-- Triggers
CREATE TRIGGER trg_projects_updated_at
BEFORE UPDATE ON vex.projects
FOR EACH ROW EXECUTE FUNCTION vex.update_updated_at();
CREATE TRIGGER trg_linksets_updated_at
CREATE TRIGGER trg_linksets_touch_updated_at
BEFORE UPDATE ON vex.linksets
FOR EACH ROW EXECUTE FUNCTION vex.update_updated_at();
CREATE TRIGGER trg_statements_updated_at
BEFORE UPDATE ON vex.statements
FOR EACH ROW EXECUTE FUNCTION vex.update_updated_at();
FOR EACH ROW EXECUTE FUNCTION vex.touch_updated_at();

View File

@@ -0,0 +1,43 @@
-- VEX Raw Store Migration 002: Postgres-backed raw document and blob storage (Mongo/BSON removed)

-- Raw documents (append-only)
CREATE TABLE IF NOT EXISTS vex.vex_raw_documents (
    digest TEXT PRIMARY KEY,
    tenant TEXT NOT NULL,
    provider_id TEXT NOT NULL,
    format TEXT NOT NULL CHECK (format IN ('openvex','csaf','cyclonedx','custom','unknown')),
    source_uri TEXT NOT NULL,
    etag TEXT NULL,
    retrieved_at TIMESTAMPTZ NOT NULL,
    recorded_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    supersedes_digest TEXT NULL REFERENCES vex.vex_raw_documents(digest),
    content_json JSONB NOT NULL,
    content_size_bytes INT NOT NULL,
    metadata_json JSONB NOT NULL,
    provenance_json JSONB NOT NULL,
    inline_payload BOOLEAN NOT NULL DEFAULT TRUE
);

-- Fix: PostgreSQL does not allow expressions (COALESCE) inside a table-level
-- UNIQUE constraint, so uniqueness of (tenant, provider, source, etag-or-empty)
-- is enforced with a unique expression index instead. Treating a NULL etag as
-- the empty string keeps "no etag" rows unique per source as intended.
CREATE UNIQUE INDEX IF NOT EXISTS uq_vex_raw_documents_source
    ON vex.vex_raw_documents (tenant, provider_id, source_uri, COALESCE(etag, ''));

CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_tenant_retrieved ON vex.vex_raw_documents (tenant, retrieved_at DESC, digest);
CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_provider ON vex.vex_raw_documents (tenant, provider_id, retrieved_at DESC);
CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_supersedes ON vex.vex_raw_documents (tenant, supersedes_digest);
CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_metadata ON vex.vex_raw_documents USING GIN (metadata_json);
CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_provenance ON vex.vex_raw_documents USING GIN (provenance_json);

-- Large payloads stored separately when inline threshold exceeded
CREATE TABLE IF NOT EXISTS vex.vex_raw_blobs (
    digest TEXT PRIMARY KEY REFERENCES vex.vex_raw_documents(digest) ON DELETE CASCADE,
    payload BYTEA NOT NULL,
    payload_hash TEXT NOT NULL
);

-- Optional attachment support (kept for parity with prior GridFS usage)
CREATE TABLE IF NOT EXISTS vex.vex_raw_attachments (
    digest TEXT REFERENCES vex.vex_raw_documents(digest) ON DELETE CASCADE,
    name TEXT NOT NULL,
    media_type TEXT NOT NULL,
    payload BYTEA NOT NULL,
    payload_hash TEXT NOT NULL,
    PRIMARY KEY (digest, name)
);

View File

@@ -0,0 +1,858 @@
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Excititor.Core.Observations;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Excititor.Storage.Postgres.Repositories;
/// <summary>
/// PostgreSQL implementation of <see cref="IAppendOnlyLinksetStore"/> backed by append-only tables.
/// Uses deterministic ordering and mutation logs for audit/replay.
/// </summary>
public sealed class PostgresAppendOnlyLinksetStore : RepositoryBase<ExcititorDataSource>, IAppendOnlyLinksetStore, IVexLinksetStore
{
// Mutation-log discriminators persisted for audit/replay (must match the
// CHECK constraint on the mutation table's mutation_type column).
private const string MutationCreated = "linkset_created";
private const string MutationObservationAdded = "observation_added";
private const string MutationDisagreementAdded = "disagreement_added";
// Serializer settings for JSONB columns: camelCase names, compact output.
private static readonly JsonSerializerOptions JsonOptions = new()
{
    PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
    WriteIndented = false
};
/// <summary>
/// Creates the store over the tenant-aware Excititor data source.
/// </summary>
/// <param name="dataSource">Connection factory scoped per tenant and role.</param>
/// <param name="logger">Logger forwarded to the repository base.</param>
public PostgresAppendOnlyLinksetStore(
    ExcititorDataSource dataSource,
    ILogger<PostgresAppendOnlyLinksetStore> logger)
    : base(dataSource, logger)
{
}
/// <summary>
/// Inserts a brand-new linkset with all of its observations and disagreements
/// in a single transaction. Returns false (after rolling back) when a linkset
/// with the same id already exists — strict insert semantics, nothing is persisted.
/// </summary>
public async ValueTask<bool> InsertAsync(
    VexLinkset linkset,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(linkset);

    var tenantId = linkset.Tenant;
    var id = linkset.LinksetId;

    await using var connection = await DataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken)
        .ConfigureAwait(false);
    await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);

    var mutationSequences = new List<long>();
    var isNew = await EnsureLinksetAsync(
        connection,
        id,
        tenantId,
        linkset.VulnerabilityId,
        linkset.ProductKey,
        linkset.Scope,
        mutationSequences,
        cancellationToken).ConfigureAwait(false);

    if (!isNew)
    {
        // Already present: insert must not touch existing rows.
        await transaction.RollbackAsync(cancellationToken).ConfigureAwait(false);
        return false;
    }

    foreach (var observationRef in linkset.Observations)
    {
        await InsertObservationAsync(connection, id, observationRef, mutationSequences, cancellationToken)
            .ConfigureAwait(false);
    }

    foreach (var conflict in linkset.Disagreements)
    {
        await InsertDisagreementAsync(connection, id, conflict, mutationSequences, cancellationToken)
            .ConfigureAwait(false);
    }

    // Refresh updated_at, then make everything durable at once.
    await TouchLinksetAsync(connection, id, cancellationToken).ConfigureAwait(false);
    await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    return true;
}
/// <summary>
/// Creates the linkset if missing and appends any observations/disagreements it
/// carries, all in one transaction. Returns true only when the linkset row was
/// newly created (appends to an existing linkset return false).
/// </summary>
public async ValueTask<bool> UpsertAsync(
    VexLinkset linkset,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(linkset);

    var tenantId = linkset.Tenant;
    var id = linkset.LinksetId;

    await using var connection = await DataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken)
        .ConfigureAwait(false);
    await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);

    var mutationSequences = new List<long>();
    var isNew = await EnsureLinksetAsync(
        connection,
        id,
        tenantId,
        linkset.VulnerabilityId,
        linkset.ProductKey,
        linkset.Scope,
        mutationSequences,
        cancellationToken).ConfigureAwait(false);

    foreach (var observationRef in linkset.Observations)
    {
        await InsertObservationAsync(connection, id, observationRef, mutationSequences, cancellationToken)
            .ConfigureAwait(false);
    }

    foreach (var conflict in linkset.Disagreements)
    {
        await InsertDisagreementAsync(connection, id, conflict, mutationSequences, cancellationToken)
            .ConfigureAwait(false);
    }

    // Only bump updated_at when the transaction actually changed something.
    if (isNew || mutationSequences.Count > 0)
    {
        await TouchLinksetAsync(connection, id, cancellationToken).ConfigureAwait(false);
    }

    await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    return isNew;
}
/// <summary>
/// Returns the existing linkset for (tenant, vulnerability, product), or creates
/// an empty one with an unknown product scope and returns it.
/// </summary>
/// <exception cref="InvalidOperationException">The freshly created linkset could not be read back.</exception>
public async ValueTask<VexLinkset> GetOrCreateAsync(
    string tenant,
    string vulnerabilityId,
    string productKey,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(vulnerabilityId);
    ArgumentNullException.ThrowIfNull(productKey);

    var linksetId = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey);

    // Writer connection: the miss path below may need to insert.
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken)
        .ConfigureAwait(false);

    var found = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    if (found is not null)
    {
        return found;
    }

    // Missing: create it (EnsureLinksetAsync records the creation mutation),
    // then read the canonical row back.
    var mutationSequences = new List<long>();
    await EnsureLinksetAsync(
        connection,
        linksetId,
        tenant,
        vulnerabilityId,
        productKey,
        VexProductScope.Unknown(productKey),
        mutationSequences,
        cancellationToken).ConfigureAwait(false);

    var created = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    return created ?? throw new InvalidOperationException($"Failed to create linkset {linksetId}.");
}
/// <summary>
/// Appends a single observation reference; thin convenience wrapper over
/// <see cref="AppendObservationsBatchAsync"/> with a one-element batch.
/// </summary>
public async ValueTask<AppendLinksetResult> AppendObservationAsync(
    string tenant,
    string vulnerabilityId,
    string productKey,
    VexLinksetObservationRefModel observation,
    VexProductScope scope,
    CancellationToken cancellationToken)
{
    var singleItemBatch = new[] { observation };
    return await AppendObservationsBatchAsync(
        tenant,
        vulnerabilityId,
        productKey,
        singleItemBatch,
        scope,
        cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Appends a batch of observation references to the linkset identified by
/// (tenant, vulnerabilityId, productKey), creating the linkset first when it
/// does not exist. All inserts run in a single transaction; the result reports
/// whether the linkset was created, updated, or left unchanged.
/// </summary>
/// <param name="tenant">Owning tenant.</param>
/// <param name="vulnerabilityId">Vulnerability identifier for the linkset key.</param>
/// <param name="productKey">Product key for the linkset key.</param>
/// <param name="observations">Observation refs to append; duplicates are skipped by InsertObservationAsync (returns false).</param>
/// <param name="scope">Product scope used only when the linkset is created.</param>
/// <param name="cancellationToken">Cancellation token.</param>
public async ValueTask<AppendLinksetResult> AppendObservationsBatchAsync(
    string tenant,
    string vulnerabilityId,
    string productKey,
    IEnumerable<VexLinksetObservationRefModel> observations,
    VexProductScope scope,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(vulnerabilityId);
    ArgumentNullException.ThrowIfNull(productKey);
    ArgumentNullException.ThrowIfNull(observations);
    ArgumentNullException.ThrowIfNull(scope);
    var linksetId = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey);
    // Materialize once: the sequence may be lazy and is enumerated below.
    var observationList = observations.ToList();
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken)
        .ConfigureAwait(false);
    await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
    var sequenceNumbers = new List<long>();
    var wasCreated = await EnsureLinksetAsync(connection, linksetId, tenant, vulnerabilityId, productKey, scope, sequenceNumbers, cancellationToken)
        .ConfigureAwait(false);
    var observationsAdded = 0;
    foreach (var obs in observationList)
    {
        var added = await InsertObservationAsync(connection, linksetId, obs, sequenceNumbers, cancellationToken)
            .ConfigureAwait(false);
        if (added)
        {
            observationsAdded++;
        }
    }
    // Only bump updated_at when something actually changed in this transaction.
    if (wasCreated || observationsAdded > 0)
    {
        await TouchLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    }
    await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    // Read-back happens after commit on the same connection, so the returned
    // snapshot reflects the durable state.
    // NOTE(review): the sequence number is read after commit, so a concurrent
    // writer could advance it between our inserts and this read — confirm this
    // is acceptable for replay semantics.
    var linkset = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false)
        ?? throw new InvalidOperationException($"Linkset {linksetId} not found after append.");
    var sequenceNumber = await GetLatestSequenceAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    if (observationsAdded == 0 && !wasCreated)
    {
        return AppendLinksetResult.NoChange(linkset, sequenceNumber);
    }
    if (wasCreated)
    {
        return AppendLinksetResult.Created(linkset, observationsAdded, sequenceNumber);
    }
    return AppendLinksetResult.Updated(linkset, observationsAdded, disagreementsAdded: 0, sequenceNumber);
}
/// <summary>
/// Appends a single disagreement to the linkset identified by
/// (tenant, vulnerabilityId, productKey), creating the linkset first when it
/// does not exist (with an unknown product scope). Runs in one transaction and
/// reports created/updated/no-change in the result.
/// </summary>
/// <param name="tenant">Owning tenant.</param>
/// <param name="vulnerabilityId">Vulnerability identifier for the linkset key.</param>
/// <param name="productKey">Product key for the linkset key.</param>
/// <param name="disagreement">Disagreement to record; duplicates are skipped by InsertDisagreementAsync (returns false).</param>
/// <param name="cancellationToken">Cancellation token.</param>
public async ValueTask<AppendLinksetResult> AppendDisagreementAsync(
    string tenant,
    string vulnerabilityId,
    string productKey,
    VexObservationDisagreement disagreement,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(vulnerabilityId);
    ArgumentNullException.ThrowIfNull(productKey);
    ArgumentNullException.ThrowIfNull(disagreement);
    var linksetId = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey);
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken)
        .ConfigureAwait(false);
    await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
    var sequenceNumbers = new List<long>();
    var wasCreated = await EnsureLinksetAsync(
        connection,
        linksetId,
        tenant,
        vulnerabilityId,
        productKey,
        VexProductScope.Unknown(productKey),
        sequenceNumbers,
        cancellationToken).ConfigureAwait(false);
    var disagreementsAdded = await InsertDisagreementAsync(connection, linksetId, disagreement, sequenceNumbers, cancellationToken)
        .ConfigureAwait(false)
        ? 1
        : 0;
    // Only bump updated_at when the transaction actually changed rows.
    if (wasCreated || disagreementsAdded > 0)
    {
        await TouchLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    }
    await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    // Read-back after commit returns the durable snapshot; see the batch append
    // for the note about sequence numbers read post-commit.
    var linkset = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false)
        ?? throw new InvalidOperationException($"Linkset {linksetId} not found after append.");
    var sequenceNumber = await GetLatestSequenceAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    if (disagreementsAdded == 0 && !wasCreated)
    {
        return AppendLinksetResult.NoChange(linkset, sequenceNumber);
    }
    if (wasCreated)
    {
        return AppendLinksetResult.Created(linkset, observationsAdded: 0, sequenceNumber);
    }
    return AppendLinksetResult.Updated(linkset, observationsAdded: 0, disagreementsAdded, sequenceNumber);
}
/// <summary>Loads a single linkset by its identifier, or null when it does not exist.</summary>
public async ValueTask<VexLinkset?> GetByIdAsync(
    string tenant,
    string linksetId,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(linksetId);

    await using var connection = await DataSource
        .OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);

    var linkset = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    return linkset;
}
/// <summary>
/// Resolves the deterministic linkset id for (tenant, vulnerability, product)
/// and delegates to <see cref="GetByIdAsync"/>.
/// </summary>
public async ValueTask<VexLinkset?> GetByKeyAsync(
    string tenant,
    string vulnerabilityId,
    string productKey,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(vulnerabilityId);
    ArgumentNullException.ThrowIfNull(productKey);

    // Linkset ids are derived deterministically from the composite key.
    var id = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey);
    return await GetByIdAsync(tenant, id, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Lists up to <paramref name="limit"/> linksets referencing the vulnerability,
/// most recently updated first.
/// </summary>
public async ValueTask<IReadOnlyList<VexLinkset>> FindByVulnerabilityAsync(
    string tenant,
    string vulnerabilityId,
    int limit,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(vulnerabilityId);

    await using var connection = await DataSource
        .OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);

    var ids = await GetLinksetIdsAsync(
        connection,
        "vulnerability_id = @vulnerability_id",
        command =>
        {
            AddParameter(command, "vulnerability_id", vulnerabilityId);
            AddParameter(command, "tenant", tenant);
            AddParameter(command, "limit", limit);
        },
        cancellationToken).ConfigureAwait(false);

    return await ReadLinksetsAsync(connection, ids, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Lists up to <paramref name="limit"/> linksets for the given product key,
/// most recently updated first.
/// </summary>
public async ValueTask<IReadOnlyList<VexLinkset>> FindByProductKeyAsync(
    string tenant,
    string productKey,
    int limit,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(productKey);

    await using var connection = await DataSource
        .OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);

    var ids = await GetLinksetIdsAsync(
        connection,
        "product_key = @product_key",
        command =>
        {
            AddParameter(command, "product_key", productKey);
            AddParameter(command, "tenant", tenant);
            AddParameter(command, "limit", limit);
        },
        cancellationToken).ConfigureAwait(false);

    return await ReadLinksetsAsync(connection, ids, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Lists linksets that have at least one recorded provider disagreement,
/// most recently updated first.
/// </summary>
public async ValueTask<IReadOnlyList<VexLinkset>> FindWithConflictsAsync(
    string tenant,
    int limit,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);

    // A linkset "has conflicts" when a disagreement row references it.
    const string sql = """
        SELECT DISTINCT ls.linkset_id, ls.updated_at
        FROM vex.linksets ls
        JOIN vex.linkset_disagreements d ON d.linkset_id = ls.linkset_id
        WHERE ls.tenant = @tenant
        ORDER BY ls.updated_at DESC, ls.linkset_id
        LIMIT @limit;
        """;

    await using var connection = await DataSource
        .OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "tenant", tenant);
    AddParameter(command, "limit", limit);

    var conflictedIds = new List<string>();
    await using (var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false))
    {
        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            conflictedIds.Add(reader.GetString(0));
        }
    }

    return await ReadLinksetsAsync(connection, conflictedIds, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Lists linksets carrying at least one observation from the given provider,
/// most recently updated first.
/// </summary>
public async ValueTask<IReadOnlyList<VexLinkset>> FindByProviderAsync(
    string tenant,
    string providerId,
    int limit,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(providerId);

    const string sql = """
        SELECT DISTINCT ls.linkset_id, ls.updated_at
        FROM vex.linksets ls
        JOIN vex.linkset_observations o ON o.linkset_id = ls.linkset_id
        WHERE ls.tenant = @tenant AND o.provider_id = @provider_id
        ORDER BY ls.updated_at DESC, ls.linkset_id
        LIMIT @limit;
        """;

    await using var connection = await DataSource
        .OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "tenant", tenant);
    AddParameter(command, "provider_id", providerId);
    AddParameter(command, "limit", limit);

    var matchedIds = new List<string>();
    await using (var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false))
    {
        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            matchedIds.Add(reader.GetString(0));
        }
    }

    return await ReadLinksetsAsync(connection, matchedIds, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Deletions are not supported by this append-only store.
/// </summary>
/// <returns>Always <c>false</c>, signalling that nothing was removed.</returns>
public ValueTask<bool> DeleteAsync(
    string tenant,
    string linksetId,
    CancellationToken cancellationToken)
{
    // Fix: validate arguments like every other public entry point in this class,
    // so a null tenant/linksetId fails fast instead of silently returning false.
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(linksetId);

    // Append-only store does not support deletions; signal no-op.
    return ValueTask.FromResult(false);
}
/// <summary>Counts all linkset rows for the tenant.</summary>
public async ValueTask<long> CountAsync(string tenant, CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);

    const string sql = "SELECT COUNT(*) FROM vex.linksets WHERE tenant = @tenant;";

    await using var connection = await DataSource
        .OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "tenant", tenant);

    var scalar = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    // COUNT(*) is reported as bigint (long); convert defensively otherwise.
    return scalar switch
    {
        long count => count,
        _ => Convert.ToInt64(scalar),
    };
}
/// <summary>Counts tenant linksets that have at least one disagreement recorded.</summary>
public async ValueTask<long> CountWithConflictsAsync(string tenant, CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);

    const string sql = """
        SELECT COUNT(DISTINCT ls.linkset_id)
        FROM vex.linksets ls
        JOIN vex.linkset_disagreements d ON d.linkset_id = ls.linkset_id
        WHERE ls.tenant = @tenant;
        """;

    await using var connection = await DataSource
        .OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "tenant", tenant);

    var scalar = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    // COUNT is reported as bigint (long); convert defensively otherwise.
    return scalar switch
    {
        long count => count,
        _ => Convert.ToInt64(scalar),
    };
}
/// <summary>
/// Returns the full, ordered mutation log (append-only audit trail) for a linkset:
/// one event per linkset creation, observation append, or disagreement append.
/// </summary>
/// <remarks>
/// NOTE(review): <paramref name="tenant"/> only routes the connection; the query
/// filters by linkset_id alone. Linkset ids embed the tenant via
/// VexLinkset.CreateLinksetId, but confirm cross-tenant ids cannot collide.
/// </remarks>
public async ValueTask<IReadOnlyList<LinksetMutationEvent>> GetMutationLogAsync(
    string tenant,
    string linksetId,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(tenant);
    ArgumentNullException.ThrowIfNull(linksetId);
    // Columns: 0 sequence_number, 1 mutation_type, 2 occurred_at, 3 observation_id,
    // 4 provider_id, 5 status, 6 confidence, 7 justification.
    const string sql = """
        SELECT sequence_number, mutation_type, occurred_at, observation_id, provider_id, status, confidence, justification
        FROM vex.linkset_mutations
        WHERE linkset_id = @linkset_id
        ORDER BY sequence_number;
        """;
    await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    var mutations = new List<LinksetMutationEvent>();
    await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
    while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        mutations.Add(new LinksetMutationEvent(
            sequenceNumber: reader.GetInt64(0),
            mutationType: reader.GetString(1),
            timestamp: reader.GetFieldValue<DateTimeOffset>(2),
            observationId: GetNullableString(reader, 3),
            providerId: GetNullableString(reader, 4),
            status: GetNullableString(reader, 5),
            confidence: reader.IsDBNull(6) ? null : reader.GetDouble(6),
            justification: GetNullableString(reader, 7)));
    }
    return mutations;
}
/// <summary>
/// Inserts the linkset row if it does not yet exist and, on creation, records a
/// "created" entry in the mutation log (sequence number appended to
/// <paramref name="sequenceNumbers"/>).
/// </summary>
/// <returns><c>true</c> when the row was inserted; <c>false</c> when it already existed.</returns>
private async Task<bool> EnsureLinksetAsync(
    NpgsqlConnection connection,
    string linksetId,
    string tenant,
    string vulnerabilityId,
    string productKey,
    VexProductScope scope,
    List<long> sequenceNumbers,
    CancellationToken cancellationToken)
{
    // ON CONFLICT DO NOTHING + RETURNING: the scalar is non-null only when this
    // call actually inserted the row, which makes the operation idempotent.
    const string sql = """
        INSERT INTO vex.linksets (linkset_id, tenant, vulnerability_id, product_key, scope)
        VALUES (@linkset_id, @tenant, @vulnerability_id, @product_key, @scope::jsonb)
        ON CONFLICT (linkset_id) DO NOTHING
        RETURNING linkset_id;
        """;
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    AddParameter(command, "tenant", tenant);
    AddParameter(command, "vulnerability_id", vulnerabilityId);
    AddParameter(command, "product_key", productKey);
    AddJsonbParameter(command, "scope", SerializeScope(scope));
    var inserted = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    if (inserted is not null)
    {
        // Row was created by this call: append a "created" mutation to the audit log.
        var seq = await InsertMutationAsync(connection, linksetId, MutationCreated, null, null, null, null, null, cancellationToken)
            .ConfigureAwait(false);
        sequenceNumbers.Add(seq);
        return true;
    }
    return false;
}
/// <summary>
/// Appends one observation reference to the linkset, logging an
/// "observation added" mutation when the row is new.
/// </summary>
/// <returns><c>true</c> when inserted; <c>false</c> when an identical row already existed.</returns>
private async Task<bool> InsertObservationAsync(
    NpgsqlConnection connection,
    string linksetId,
    VexLinksetObservationRefModel observation,
    List<long> sequenceNumbers,
    CancellationToken cancellationToken)
{
    // ON CONFLICT DO NOTHING + RETURNING: scalar is non-null only on a real insert,
    // so duplicate observation refs are silently skipped (append-only idempotency).
    const string sql = """
        INSERT INTO vex.linkset_observations (
            linkset_id, observation_id, provider_id, status, confidence)
        VALUES (@linkset_id, @observation_id, @provider_id, @status, @confidence)
        ON CONFLICT DO NOTHING
        RETURNING id;
        """;
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    AddParameter(command, "observation_id", observation.ObservationId);
    AddParameter(command, "provider_id", observation.ProviderId);
    AddParameter(command, "status", observation.Status);
    AddParameter(command, "confidence", observation.Confidence);
    var inserted = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    if (inserted is not null)
    {
        // Record the append in the mutation log and surface its sequence number.
        var seq = await InsertMutationAsync(
            connection,
            linksetId,
            MutationObservationAdded,
            observation.ObservationId,
            observation.ProviderId,
            observation.Status,
            observation.Confidence,
            null,
            cancellationToken).ConfigureAwait(false);
        sequenceNumbers.Add(seq);
        return true;
    }
    return false;
}
/// <summary>
/// Appends one provider disagreement to the linkset, logging a
/// "disagreement added" mutation when the row is new.
/// </summary>
/// <returns><c>true</c> when inserted; <c>false</c> when an identical row already existed.</returns>
private async Task<bool> InsertDisagreementAsync(
    NpgsqlConnection connection,
    string linksetId,
    VexObservationDisagreement disagreement,
    List<long> sequenceNumbers,
    CancellationToken cancellationToken)
{
    // ON CONFLICT DO NOTHING + RETURNING: scalar is non-null only on a real insert,
    // keeping repeated appends of the same disagreement idempotent.
    const string sql = """
        INSERT INTO vex.linkset_disagreements (
            linkset_id, provider_id, status, justification, confidence)
        VALUES (@linkset_id, @provider_id, @status, @justification, @confidence)
        ON CONFLICT DO NOTHING
        RETURNING id;
        """;
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    AddParameter(command, "provider_id", disagreement.ProviderId);
    AddParameter(command, "status", disagreement.Status);
    AddParameter(command, "justification", disagreement.Justification);
    AddParameter(command, "confidence", disagreement.Confidence);
    var inserted = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    if (inserted is not null)
    {
        // Record the append in the mutation log and surface its sequence number.
        var seq = await InsertMutationAsync(
            connection,
            linksetId,
            MutationDisagreementAdded,
            null,
            disagreement.ProviderId,
            disagreement.Status,
            disagreement.Confidence,
            disagreement.Justification,
            cancellationToken).ConfigureAwait(false);
        sequenceNumbers.Add(seq);
        return true;
    }
    return false;
}
/// <summary>
/// Writes one entry to the append-only mutation log and returns the
/// database-assigned sequence number.
/// </summary>
/// <exception cref="InvalidOperationException">When the insert returns no sequence number.</exception>
private async Task<long> InsertMutationAsync(
    NpgsqlConnection connection,
    string linksetId,
    string mutationType,
    string? observationId,
    string? providerId,
    string? status,
    double? confidence,
    string? justification,
    CancellationToken cancellationToken)
{
    const string sql = """
        INSERT INTO vex.linkset_mutations (
            linkset_id, mutation_type, observation_id, provider_id, status, confidence, justification)
        VALUES (@linkset_id, @mutation_type, @observation_id, @provider_id, @status, @confidence, @justification)
        RETURNING sequence_number;
        """;

    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    AddParameter(command, "mutation_type", mutationType);
    AddParameter(command, "observation_id", observationId);
    AddParameter(command, "provider_id", providerId);
    AddParameter(command, "status", status);
    AddParameter(command, "confidence", confidence);
    AddParameter(command, "justification", justification);

    var sequence = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    if (sequence is null)
    {
        throw new InvalidOperationException("Failed to insert mutation log entry.");
    }
    return Convert.ToInt64(sequence);
}
// Bumps updated_at so recency-ordered list queries surface this linkset first.
// NOTE(review): builds a raw NpgsqlCommand with AddWithValue instead of the
// CreateCommand/AddParameter helpers used everywhere else in this file —
// presumably because those helpers are not reachable from a static method;
// confirm, or drop `static` and use the helpers for consistency.
private static async Task TouchLinksetAsync(
    NpgsqlConnection connection,
    string linksetId,
    CancellationToken cancellationToken)
{
    const string sql = "UPDATE vex.linksets SET updated_at = NOW() WHERE linkset_id = @linkset_id;";
    await using var command = new NpgsqlCommand(sql, connection);
    command.Parameters.AddWithValue("linkset_id", linksetId);
    await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Returns the highest mutation sequence number recorded for the linkset,
/// or 0 when the log is empty (COALESCE in the query).
/// </summary>
private async Task<long> GetLatestSequenceAsync(
    NpgsqlConnection connection,
    string linksetId,
    CancellationToken cancellationToken)
{
    const string sql = "SELECT COALESCE(MAX(sequence_number), 0) FROM vex.linkset_mutations WHERE linkset_id = @linkset_id;";

    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);

    var scalar = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
    if (scalar is long sequence)
    {
        return sequence;
    }
    return Convert.ToInt64(scalar);
}
/// <summary>
/// Returns linkset ids for the current tenant matching <paramref name="predicate"/>,
/// most recently updated first, capped by the caller-bound @limit parameter.
/// The predicate is interpolated into the SQL text, so it must be a trusted,
/// compile-time constant — never user input.
/// </summary>
private async Task<IReadOnlyList<string>> GetLinksetIdsAsync(
    NpgsqlConnection connection,
    string predicate,
    Action<NpgsqlCommand> configure,
    CancellationToken cancellationToken)
{
    var sql = $"""
        SELECT linkset_id
        FROM vex.linksets
        WHERE {predicate} AND tenant = @tenant
        ORDER BY updated_at DESC, linkset_id
        LIMIT @limit;
        """;

    await using var command = CreateCommand(sql, connection);
    // The caller binds every parameter the predicate references, plus @tenant/@limit.
    configure(command);

    var linksetIds = new List<string>();
    await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
    while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        linksetIds.Add(reader.GetString(0));
    }

    return linksetIds;
}
/// <summary>
/// Hydrates each id into a full linkset, one at a time over the shared connection;
/// ids that no longer resolve are skipped.
/// </summary>
private async Task<IReadOnlyList<VexLinkset>> ReadLinksetsAsync(
    NpgsqlConnection connection,
    IReadOnlyList<string> linksetIds,
    CancellationToken cancellationToken)
{
    var linksets = new List<VexLinkset>(linksetIds.Count);
    foreach (var linksetId in linksetIds)
    {
        var loaded = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
        if (loaded is not null)
        {
            linksets.Add(loaded);
        }
    }
    return linksets;
}
/// <summary>
/// Loads a linkset row together with its observations and disagreements,
/// or null when no row exists for <paramref name="linksetId"/>.
/// </summary>
private async Task<VexLinkset?> ReadLinksetAsync(
    NpgsqlConnection connection,
    string linksetId,
    CancellationToken cancellationToken)
{
    const string sql = """
        SELECT linkset_id, tenant, vulnerability_id, product_key, scope::text, created_at, updated_at
        FROM vex.linksets
        WHERE linkset_id = @linkset_id;
        """;
    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);
    await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
    if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        return null;
    }
    var id = reader.GetString(0);
    var tenant = reader.GetString(1);
    var vulnerabilityId = reader.GetString(2);
    var productKey = reader.GetString(3);
    var scopeJson = reader.GetString(4);
    var createdAt = reader.GetFieldValue<DateTimeOffset>(5);
    var updatedAt = reader.GetFieldValue<DateTimeOffset>(6);
    // Fall back to an "unknown" scope when the stored JSON is empty or missing.
    var scope = DeserializeScope(scopeJson) ?? VexProductScope.Unknown(productKey);
    // The reader must be closed before issuing further commands on this connection.
    // Fix: flow ConfigureAwait(false) here like every other await in this class.
    await reader.CloseAsync().ConfigureAwait(false);
    var observations = await ReadObservationsAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    var disagreements = await ReadDisagreementsAsync(connection, linksetId, cancellationToken).ConfigureAwait(false);
    return new VexLinkset(
        id,
        tenant,
        vulnerabilityId,
        productKey,
        scope,
        observations,
        disagreements,
        createdAt,
        updatedAt);
}
/// <summary>
/// Loads the linkset's observation references in a deterministic order
/// (provider, status, observation id).
/// </summary>
private async Task<IReadOnlyList<VexLinksetObservationRefModel>> ReadObservationsAsync(
    NpgsqlConnection connection,
    string linksetId,
    CancellationToken cancellationToken)
{
    const string sql = """
        SELECT observation_id, provider_id, status, confidence
        FROM vex.linkset_observations
        WHERE linkset_id = @linkset_id
        ORDER BY provider_id, status, observation_id;
        """;

    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);

    var results = new List<VexLinksetObservationRefModel>();
    await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
    while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        // confidence is nullable in the schema.
        var confidence = reader.IsDBNull(3) ? (double?)null : reader.GetDouble(3);
        results.Add(new VexLinksetObservationRefModel(
            reader.GetString(0),
            reader.GetString(1),
            reader.GetString(2),
            confidence));
    }
    return results;
}
/// <summary>
/// Loads the linkset's disagreements in a deterministic order
/// (provider, status, justification, insert order).
/// </summary>
private async Task<IReadOnlyList<VexObservationDisagreement>> ReadDisagreementsAsync(
    NpgsqlConnection connection,
    string linksetId,
    CancellationToken cancellationToken)
{
    const string sql = """
        SELECT provider_id, status, justification, confidence
        FROM vex.linkset_disagreements
        WHERE linkset_id = @linkset_id
        ORDER BY provider_id, status, COALESCE(justification, ''), id;
        """;

    await using var command = CreateCommand(sql, connection);
    AddParameter(command, "linkset_id", linksetId);

    var results = new List<VexObservationDisagreement>();
    await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
    while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        // justification and confidence are nullable in the schema.
        var confidence = reader.IsDBNull(3) ? (double?)null : reader.GetDouble(3);
        results.Add(new VexObservationDisagreement(
            reader.GetString(0),
            reader.GetString(1),
            GetNullableString(reader, 2),
            confidence));
    }
    return results;
}
// Serializes the scope for storage in the linksets.scope jsonb column.
private static string? SerializeScope(VexProductScope scope)
    => JsonSerializer.Serialize(scope, JsonOptions);
// Parses a stored scope JSON document; null/blank input yields null so the
// caller can substitute a default scope.
private static VexProductScope? DeserializeScope(string? json)
{
    return string.IsNullOrWhiteSpace(json)
        ? null
        : JsonSerializer.Deserialize<VexProductScope>(json, JsonOptions);
}
}

View File

@@ -0,0 +1,441 @@
using System;
using System.Buffers;
using System.Collections.Immutable;
using System.Linq;
using System.Text;
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using Npgsql;
using NpgsqlTypes;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Excititor.Storage.Postgres.Repositories;
/// <summary>
/// PostgreSQL-backed implementation of <see cref="IVexRawStore"/> replacing Mongo/GridFS.
/// </summary>
public sealed class PostgresVexRawStore : RepositoryBase<ExcititorDataSource>, IVexRawStore
{
    // Size cutoff (bytes): canonical payloads at or below this are stored only in
    // content_json; larger payloads additionally get a bytea row in vex_raw_blobs.
    private readonly int _inlineThreshold;

    public PostgresVexRawStore(
        ExcititorDataSource dataSource,
        IOptions<VexStorageOptions> options,
        ILogger<PostgresVexRawStore> logger)
        : base(dataSource, logger)
    {
        if (options is null)
        {
            throw new ArgumentNullException(nameof(options));
        }
        // Default threshold is 256 KiB; clamp to at least 1 byte.
        _inlineThreshold = Math.Max(1, options.Value?.InlineThresholdBytes ?? 256 * 1024);
    }

    /// <summary>
    /// Persists a raw VEX document. Content is canonicalized (sorted object keys,
    /// compact formatting) before hashing and storage so equal documents share one
    /// digest; re-storing an existing digest is a no-op (ON CONFLICT DO NOTHING).
    /// </summary>
    public async ValueTask StoreAsync(VexRawDocument document, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(document);
        var canonicalContent = CanonicalizeJson(document.Content);
        // Keep a caller-supplied sha256 digest; otherwise compute one from the canonical bytes.
        var digest = EnsureDigest(document.Digest, canonicalContent);
        var metadata = document.Metadata ?? ImmutableDictionary<string, string>.Empty;
        var tenant = ResolveTenant(metadata);
        var format = document.Format.ToString().ToLowerInvariant();
        var providerId = document.ProviderId;
        var sourceUri = document.SourceUri.ToString();
        var retrievedAt = document.RetrievedAt.UtcDateTime;
        // Decide inline vs. blob storage from the canonical payload size.
        var inline = canonicalContent.Length <= _inlineThreshold;
        await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken)
            .ConfigureAwait(false);
        await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
        var metadataJson = JsonSerializer.Serialize(metadata, JsonSerializerOptions);
        // Provenance is currently stored as a clone of metadata; callers may slice it as needed.
        var provenanceJson = metadataJson;
        var contentJson = GetJsonString(canonicalContent);
        const string insertDocumentSql = """
            INSERT INTO vex.vex_raw_documents (
                digest,
                tenant,
                provider_id,
                format,
                source_uri,
                etag,
                retrieved_at,
                supersedes_digest,
                content_json,
                content_size_bytes,
                metadata_json,
                provenance_json,
                inline_payload)
            VALUES (
                @digest,
                @tenant,
                @provider_id,
                @format,
                @source_uri,
                @etag,
                @retrieved_at,
                @supersedes_digest,
                @content_json::jsonb,
                @content_size_bytes,
                @metadata_json::jsonb,
                @provenance_json::jsonb,
                @inline_payload)
            ON CONFLICT (digest) DO NOTHING;
            """;
        await using (var command = CreateCommand(insertDocumentSql, connection, transaction))
        {
            AddParameter(command, "digest", digest);
            AddParameter(command, "tenant", tenant);
            AddParameter(command, "provider_id", providerId);
            AddParameter(command, "format", format);
            AddParameter(command, "source_uri", sourceUri);
            // etag / supersedes ride along in metadata rather than dedicated model fields.
            AddParameter(command, "etag", metadata.TryGetValue("etag", out var etag) ? etag : null);
            AddParameter(command, "retrieved_at", retrievedAt);
            AddParameter(command, "supersedes_digest", metadata.TryGetValue("supersedes", out var supersedes) ? supersedes : null);
            AddJsonbParameter(command, "content_json", contentJson);
            AddParameter(command, "content_size_bytes", canonicalContent.Length);
            AddJsonbParameter(command, "metadata_json", metadataJson);
            AddJsonbParameter(command, "provenance_json", provenanceJson);
            AddParameter(command, "inline_payload", inline);
            await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }
        if (!inline)
        {
            // Large payloads also get a raw bytea copy, keyed by the same digest.
            const string insertBlobSql = """
                INSERT INTO vex.vex_raw_blobs (digest, payload, payload_hash)
                VALUES (@digest, @payload, @payload_hash)
                ON CONFLICT (digest) DO NOTHING;
                """;
            await using var blobCommand = CreateCommand(insertBlobSql, connection, transaction);
            AddParameter(blobCommand, "digest", digest);
            blobCommand.Parameters.Add(new NpgsqlParameter("payload", NpgsqlDbType.Bytea)
            {
                Value = canonicalContent.ToArray()
            });
            AddParameter(blobCommand, "payload_hash", digest);
            await blobCommand.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }
        await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    }

    /// <summary>
    /// Loads a raw document (and its blob payload, when not inline) by digest,
    /// or null when no row exists.
    /// </summary>
    /// <remarks>
    /// NOTE(review): the connection is opened with tenant "public", so this lookup
    /// is not routed by the document's tenant — confirm cross-tenant digest lookup
    /// is intentional.
    /// </remarks>
    public async ValueTask<VexRawRecord?> FindByDigestAsync(string digest, CancellationToken cancellationToken)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(digest);
        // Columns: 0 digest, 1 tenant, 2 provider_id, 3 format, 4 source_uri,
        // 5 retrieved_at, 6 metadata_json, 7 inline_payload, 8 content_json,
        // 9 supersedes_digest, 10 etag, 11 recorded_at, 12 payload (blob, may be NULL).
        const string sql = """
            SELECT d.digest,
                   d.tenant,
                   d.provider_id,
                   d.format,
                   d.source_uri,
                   d.retrieved_at,
                   d.metadata_json,
                   d.inline_payload,
                   d.content_json,
                   d.supersedes_digest,
                   d.etag,
                   d.recorded_at,
                   b.payload
            FROM vex.vex_raw_documents d
            LEFT JOIN vex.vex_raw_blobs b ON b.digest = d.digest
            WHERE d.digest = @digest;
            """;
        await using var connection = await DataSource.OpenConnectionAsync("public", "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "digest", digest);
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            return null;
        }
        var tenant = reader.GetString(1);
        var providerId = reader.GetString(2);
        var format = ParseFormat(reader.GetString(3));
        var sourceUri = new Uri(reader.GetString(4));
        var retrievedAt = reader.GetFieldValue<DateTime>(5);
        var metadata = ParseMetadata(reader.GetString(6));
        var inline = reader.GetFieldValue<bool>(7);
        var contentJson = reader.GetString(8);
        var supersedes = reader.IsDBNull(9) ? null : reader.GetString(9);
        var etag = reader.IsDBNull(10) ? null : reader.GetString(10);
        var recordedAt = reader.IsDBNull(11) ? (DateTimeOffset?)null : reader.GetFieldValue<DateTimeOffset>(11);
        ReadOnlyMemory<byte> contentBytes;
        if (!inline && !reader.IsDBNull(12))
        {
            // Non-inline documents carry their canonical bytes in the blob table.
            contentBytes = (byte[])reader.GetValue(12);
        }
        else
        {
            // Inline documents (or a missing blob row) fall back to content_json.
            contentBytes = Encoding.UTF8.GetBytes(contentJson);
        }
        return new VexRawRecord(
            digest,
            tenant,
            providerId,
            format,
            sourceUri,
            new DateTimeOffset(retrievedAt, TimeSpan.Zero),
            metadata,
            contentBytes,
            inline,
            supersedes,
            etag,
            recordedAt);
    }

    /// <summary>
    /// Pages raw-document summaries for a tenant using keyset pagination ordered by
    /// (retrieved_at DESC, digest DESC); all filters are optional.
    /// </summary>
    public async ValueTask<VexRawDocumentPage> QueryAsync(VexRawQuery query, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(query);
        var conditions = new List<string> { "tenant = @tenant" };
        if (query.ProviderIds.Count > 0)
        {
            conditions.Add("provider_id = ANY(@providers)");
        }
        if (query.Digests.Count > 0)
        {
            conditions.Add("digest = ANY(@digests)");
        }
        if (query.Formats.Count > 0)
        {
            conditions.Add("format = ANY(@formats)");
        }
        if (query.Since is not null)
        {
            conditions.Add("retrieved_at >= @since");
        }
        if (query.Until is not null)
        {
            conditions.Add("retrieved_at <= @until");
        }
        if (query.Cursor is not null)
        {
            // Keyset cursor: rows strictly "after" (older than) the last page's tail.
            conditions.Add("(retrieved_at < @cursor_retrieved_at OR (retrieved_at = @cursor_retrieved_at AND digest < @cursor_digest))");
        }
        var sql = $"""
            SELECT digest, provider_id, format, source_uri, retrieved_at, metadata_json, inline_payload
            FROM vex.vex_raw_documents
            WHERE {string.Join(" AND ", conditions)}
            ORDER BY retrieved_at DESC, digest DESC
            LIMIT @limit;
            """;
        await using var connection = await DataSource.OpenConnectionAsync(query.Tenant, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "tenant", query.Tenant);
        // Array parameters are always bound (possibly empty) even when the
        // corresponding condition was not added; unused parameters are harmless.
        AddArray(command, "providers", query.ProviderIds);
        AddArray(command, "digests", query.Digests);
        AddArray(command, "formats", query.Formats.Select(static f => f.ToString().ToLowerInvariant()).ToArray());
        if (query.Since is not null)
        {
            AddParameter(command, "since", query.Since.Value.UtcDateTime);
        }
        if (query.Until is not null)
        {
            AddParameter(command, "until", query.Until.Value.UtcDateTime);
        }
        if (query.Cursor is not null)
        {
            AddParameter(command, "cursor_retrieved_at", query.Cursor.RetrievedAt.UtcDateTime);
            AddParameter(command, "cursor_digest", query.Cursor.Digest);
        }
        AddParameter(command, "limit", query.Limit);
        var summaries = new List<VexRawDocumentSummary>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            var digest = reader.GetString(0);
            var providerId = reader.GetString(1);
            var format = ParseFormat(reader.GetString(2));
            var sourceUri = new Uri(reader.GetString(3));
            var retrievedAt = reader.GetFieldValue<DateTime>(4);
            var metadata = ParseMetadata(reader.GetString(5));
            var inline = reader.GetFieldValue<bool>(6);
            summaries.Add(new VexRawDocumentSummary(
                digest,
                providerId,
                format,
                sourceUri,
                new DateTimeOffset(retrievedAt, TimeSpan.Zero),
                inline,
                metadata));
        }
        // Heuristic: a full page implies there may be more rows; emit a cursor at the tail.
        var hasMore = summaries.Count == query.Limit;
        var nextCursor = hasMore && summaries.Count > 0
            ? new VexRawCursor(summaries[^1].RetrievedAt, summaries[^1].Digest)
            : null;
        return new VexRawDocumentPage(summaries, nextCursor, hasMore);
    }

    // Binds a text[] parameter; an empty collection binds an empty array so the
    // parameter always exists even when the related WHERE condition is absent.
    private static void AddArray(NpgsqlCommand command, string name, IReadOnlyCollection<string> values)
    {
        command.Parameters.Add(new NpgsqlParameter
        {
            ParameterName = name,
            NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Text,
            Value = values.Count == 0 ? Array.Empty<string>() : values.ToArray()
        });
    }

    // Reads the tenant from document metadata; falls back to "default" when absent/blank.
    private static string ResolveTenant(IReadOnlyDictionary<string, string> metadata)
    {
        if (metadata.TryGetValue("tenant", out var tenant) && !string.IsNullOrWhiteSpace(tenant))
        {
            return tenant.Trim();
        }
        return "default";
    }

    // Case-insensitive enum parse; unrecognized values map to Unknown.
    private static VexDocumentFormat ParseFormat(string value)
        => Enum.TryParse<VexDocumentFormat>(value, ignoreCase: true, out var parsed)
            ? parsed
            : VexDocumentFormat.Unknown;

    // Best-effort metadata parse: blank or malformed JSON yields an empty map
    // rather than failing the read.
    // NOTE(review): JsonDocument is IDisposable — consider `using var doc = ...`
    // so its pooled buffers are returned.
    private static ImmutableDictionary<string, string> ParseMetadata(string json)
    {
        if (string.IsNullOrWhiteSpace(json))
        {
            return ImmutableDictionary<string, string>.Empty;
        }
        try
        {
            var doc = JsonDocument.Parse(json);
            var builder = ImmutableDictionary.CreateBuilder<string, string>(StringComparer.Ordinal);
            foreach (var property in doc.RootElement.EnumerateObject())
            {
                builder[property.Name] = property.Value.ToString();
            }
            return builder.ToImmutable();
        }
        catch
        {
            return ImmutableDictionary<string, string>.Empty;
        }
    }

    // Re-serializes arbitrary JSON into a canonical compact form (object keys
    // sorted ordinally at every level) so identical documents hash identically.
    private static byte[] CanonicalizeJson(ReadOnlyMemory<byte> content)
    {
        using var jsonDocument = JsonDocument.Parse(content);
        var buffer = new ArrayBufferWriter<byte>();
        using (var writer = new Utf8JsonWriter(buffer, new JsonWriterOptions { Indented = false }))
        {
            WriteCanonical(writer, jsonDocument.RootElement);
        }
        return buffer.WrittenMemory.ToArray();
    }

    // Recursive canonical writer: objects emit properties in ordinal name order;
    // arrays preserve element order; scalars round-trip with minimal formatting.
    private static void WriteCanonical(Utf8JsonWriter writer, JsonElement element)
    {
        switch (element.ValueKind)
        {
            case JsonValueKind.Object:
                writer.WriteStartObject();
                foreach (var property in element.EnumerateObject().OrderBy(static p => p.Name, StringComparer.Ordinal))
                {
                    writer.WritePropertyName(property.Name);
                    WriteCanonical(writer, property.Value);
                }
                writer.WriteEndObject();
                break;
            case JsonValueKind.Array:
                writer.WriteStartArray();
                foreach (var item in element.EnumerateArray())
                {
                    WriteCanonical(writer, item);
                }
                writer.WriteEndArray();
                break;
            case JsonValueKind.String:
                writer.WriteStringValue(element.GetString());
                break;
            case JsonValueKind.Number:
                // Prefer lossless integer form, then double; otherwise emit the
                // source text verbatim (e.g. very large decimals).
                if (element.TryGetInt64(out var l))
                {
                    writer.WriteNumberValue(l);
                }
                else if (element.TryGetDouble(out var d))
                {
                    writer.WriteNumberValue(d);
                }
                else
                {
                    writer.WriteRawValue(element.GetRawText());
                }
                break;
            case JsonValueKind.True:
                writer.WriteBooleanValue(true);
                break;
            case JsonValueKind.False:
                writer.WriteBooleanValue(false);
                break;
            case JsonValueKind.Null:
            case JsonValueKind.Undefined:
                writer.WriteNullValue();
                break;
            default:
                writer.WriteRawValue(element.GetRawText());
                break;
        }
    }

    // Returns the given digest if it is already a sha256:* value; otherwise hashes
    // the canonical bytes and formats the result as "sha256:<lowercase-hex>".
    private static string EnsureDigest(string digest, ReadOnlyMemory<byte> canonicalContent)
    {
        if (!string.IsNullOrWhiteSpace(digest) && digest.StartsWith("sha256:", StringComparison.OrdinalIgnoreCase))
        {
            return digest;
        }
        Span<byte> hash = stackalloc byte[32];
        if (!System.Security.Cryptography.SHA256.TryHashData(canonicalContent.Span, hash, out _))
        {
            // Defensive fallback; with a 32-byte destination TryHashData should not fail.
            hash = System.Security.Cryptography.SHA256.HashData(canonicalContent.ToArray());
        }
        return "sha256:" + Convert.ToHexString(hash).ToLowerInvariant();
    }

    // Decodes the canonical UTF-8 bytes back into the JSON text stored in content_json.
    private static string GetJsonString(ReadOnlyMemory<byte> canonicalContent)
        => Encoding.UTF8.GetString(canonicalContent.Span);

    // Serializer settings for metadata/provenance persistence (camelCase keys).
    private static readonly JsonSerializerOptions JsonSerializerOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
        DictionaryKeyPolicy = JsonNamingPolicy.CamelCase
    };
}

View File

@@ -1,5 +1,7 @@
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Excititor.Core.Observations;
using StellaOps.Excititor.Core.Storage;
using StellaOps.Excititor.Storage.Postgres.Repositories;
using StellaOps.Infrastructure.Postgres;
using StellaOps.Infrastructure.Postgres.Options;
@@ -24,10 +26,14 @@ public static class ServiceCollectionExtensions
string sectionName = "Postgres:Excititor")
{
services.Configure<PostgresOptions>(sectionName, configuration.GetSection(sectionName));
services.Configure<VexStorageOptions>(configuration.GetSection("Excititor:Storage"));
services.AddSingleton<ExcititorDataSource>();
// Register repositories
services.AddScoped<IVexStatementRepository, VexStatementRepository>();
services.AddScoped<IAppendOnlyLinksetStore, PostgresAppendOnlyLinksetStore>();
services.AddScoped<IVexLinksetStore, PostgresAppendOnlyLinksetStore>();
services.AddScoped<IVexRawStore, PostgresVexRawStore>();
return services;
}
@@ -47,6 +53,9 @@ public static class ServiceCollectionExtensions
// Register repositories
services.AddScoped<IVexStatementRepository, VexStatementRepository>();
services.AddScoped<IAppendOnlyLinksetStore, PostgresAppendOnlyLinksetStore>();
services.AddScoped<IVexLinksetStore, PostgresAppendOnlyLinksetStore>();
services.AddScoped<IVexRawStore, PostgresVexRawStore>();
return services;
}

View File

@@ -15,6 +15,7 @@
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\StellaOps.Excititor.Core\StellaOps.Excititor.Core.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
</ItemGroup>