Build fixes and code structure improvements. New but essential UI functionality. CI improvements. Documentation improvements. AI module improvements.

This commit is contained in:
StellaOps Bot
2025-12-26 21:54:17 +02:00
parent 335ff7da16
commit c2b9cd8d1f
3717 changed files with 264714 additions and 48202 deletions

View File

@@ -0,0 +1,21 @@
using Microsoft.EntityFrameworkCore;
namespace StellaOps.Concelier.Persistence.EfCore.Context;
/// <summary>
/// EF Core <see cref="DbContext"/> for the Concelier module.
/// Currently a stub; entity mappings will be scaffolded from the PostgreSQL database.
/// </summary>
public class ConcelierDbContext : DbContext
{
    /// <summary>
    /// Initializes the context with externally configured options
    /// (provider, connection string, etc.).
    /// </summary>
    /// <param name="options">EF Core options for this context type.</param>
    public ConcelierDbContext(DbContextOptions<ConcelierDbContext> options)
        : base(options)
    {
    }

    /// <summary>
    /// Applies module-wide model conventions.
    /// </summary>
    /// <param name="modelBuilder">Builder used to configure the EF model.</param>
    protected override void OnModelCreating(ModelBuilder modelBuilder)
    {
        // All Concelier tables live in the "vuln" schema (matches the SQL migrations).
        modelBuilder.HasDefaultSchema("vuln");

        base.OnModelCreating(modelBuilder);
    }
}

View File

@@ -0,0 +1,118 @@
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Concelier.Persistence.Postgres.Repositories;
using StellaOps.Concelier.Persistence.Postgres.Advisories;
using StellaOps.Concelier.Persistence.Postgres;
using StellaOps.Infrastructure.Postgres.Options;
using StellaOps.Concelier.Core.Linksets;
using StorageContracts = StellaOps.Concelier.Storage;
using AdvisoryContracts = StellaOps.Concelier.Storage.Advisories;
using ExportingContracts = StellaOps.Concelier.Storage.Exporting;
using JpFlagsContracts = StellaOps.Concelier.Storage.JpFlags;
using PsirtContracts = StellaOps.Concelier.Storage.PsirtFlags;
using HistoryContracts = StellaOps.Concelier.Storage.ChangeHistory;
using StellaOps.Concelier.Merge.Backport;
namespace StellaOps.Concelier.Persistence.Extensions;
/// <summary>
/// Extension methods for configuring Concelier persistence services.
/// Both overloads differ only in how <see cref="PostgresOptions"/> is bound;
/// the repository/store registrations are shared via <see cref="AddConcelierCoreServices"/>.
/// </summary>
public static class ConcelierPersistenceExtensions
{
    /// <summary>
    /// Adds Concelier PostgreSQL persistence services, binding options from configuration.
    /// </summary>
    /// <param name="services">Service collection.</param>
    /// <param name="configuration">Configuration root.</param>
    /// <param name="sectionName">Configuration section name for PostgreSQL options.</param>
    /// <returns>Service collection for chaining.</returns>
    public static IServiceCollection AddConcelierPersistence(
        this IServiceCollection services,
        IConfiguration configuration,
        string sectionName = "Postgres:Concelier")
    {
        // NOTE(review): this overload registers a *named* options instance whose name
        // equals sectionName. Consumers resolving plain IOptions<PostgresOptions>
        // (the unnamed instance) will not see these values — confirm ConcelierDataSource
        // requests the named instance; otherwise drop the name argument.
        services.Configure<PostgresOptions>(sectionName, configuration.GetSection(sectionName));

        return AddConcelierCoreServices(services);
    }

    /// <summary>
    /// Adds Concelier PostgreSQL persistence services with explicit options.
    /// </summary>
    /// <param name="services">Service collection.</param>
    /// <param name="configureOptions">Options configuration action.</param>
    /// <returns>Service collection for chaining.</returns>
    public static IServiceCollection AddConcelierPersistence(
        this IServiceCollection services,
        Action<PostgresOptions> configureOptions)
    {
        services.Configure(configureOptions);

        return AddConcelierCoreServices(services);
    }

    /// <summary>
    /// Registers the data source plus every repository/store implementation.
    /// Shared by both public overloads so the registration list exists exactly once.
    /// </summary>
    /// <param name="services">Service collection.</param>
    /// <returns>Service collection for chaining.</returns>
    private static IServiceCollection AddConcelierCoreServices(IServiceCollection services)
    {
        services.AddSingleton<ConcelierDataSource>();

        // Register repositories
        services.AddScoped<IAdvisoryRepository, AdvisoryRepository>();
        services.AddScoped<IPostgresAdvisoryStore, PostgresAdvisoryStore>();
        services.AddScoped<ISourceRepository, SourceRepository>();
        services.AddScoped<IAdvisoryAliasRepository, AdvisoryAliasRepository>();
        services.AddScoped<IAdvisoryCvssRepository, AdvisoryCvssRepository>();
        services.AddScoped<IAdvisoryAffectedRepository, AdvisoryAffectedRepository>();
        services.AddScoped<IAdvisoryReferenceRepository, AdvisoryReferenceRepository>();
        services.AddScoped<IAdvisoryCreditRepository, AdvisoryCreditRepository>();
        services.AddScoped<IAdvisoryWeaknessRepository, AdvisoryWeaknessRepository>();
        services.AddScoped<IKevFlagRepository, KevFlagRepository>();
        services.AddScoped<StellaOps.Concelier.Persistence.Postgres.Repositories.ISourceStateRepository, SourceStateRepository>();
        services.AddScoped<AdvisoryContracts.IAdvisoryStore, PostgresAdvisoryStore>();
        services.AddScoped<IDocumentRepository, DocumentRepository>();
        services.AddScoped<StorageContracts.ISourceStateRepository, PostgresSourceStateAdapter>();
        services.AddScoped<IFeedSnapshotRepository, FeedSnapshotRepository>();
        services.AddScoped<IAdvisorySnapshotRepository, AdvisorySnapshotRepository>();
        services.AddScoped<IMergeEventRepository, MergeEventRepository>();
        services.AddScoped<IAdvisoryLinksetStore, AdvisoryLinksetCacheRepository>();
        services.AddScoped<IAdvisoryLinksetLookup>(sp => sp.GetRequiredService<IAdvisoryLinksetStore>());
        services.AddScoped<StorageContracts.IDocumentStore, PostgresDocumentStore>();
        services.AddScoped<StorageContracts.IDtoStore, PostgresDtoStore>();
        services.AddScoped<ExportingContracts.IExportStateStore, PostgresExportStateStore>();
        services.AddScoped<PsirtContracts.IPsirtFlagStore, PostgresPsirtFlagStore>();
        services.AddScoped<JpFlagsContracts.IJpFlagStore, PostgresJpFlagStore>();
        services.AddScoped<HistoryContracts.IChangeHistoryStore, PostgresChangeHistoryStore>();

        // Provenance scope services (backport integration)
        services.AddScoped<IProvenanceScopeRepository, ProvenanceScopeRepository>();
        services.AddScoped<IProvenanceScopeStore, PostgresProvenanceScopeStore>();

        return services;
    }
}

View File

@@ -0,0 +1,728 @@
-- Concelier/Vuln Schema: Consolidated Initial Schema
-- Consolidated from migrations 001-017 (pre_1.0 archived)
-- Creates the complete vuln and concelier schemas for vulnerability advisory management
BEGIN;
-- ============================================================================
-- SECTION 1: Schema and Extension Creation
-- ============================================================================
CREATE SCHEMA IF NOT EXISTS vuln;
CREATE SCHEMA IF NOT EXISTS concelier;
-- pg_trgm powers the trigram GIN index on advisory_affected.purl (Section 3).
CREATE EXTENSION IF NOT EXISTS pg_trgm;
-- ============================================================================
-- SECTION 2: Helper Functions
-- ============================================================================
-- Trigger function: stamps updated_at with NOW() on every UPDATE.
CREATE OR REPLACE FUNCTION vuln.update_updated_at()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- NOTE(review): byte-for-byte identical to update_updated_at above. Triggers in
-- later sections reference both names, so both must exist; consider consolidating
-- to a single function (and repointing the triggers) in a follow-up migration.
CREATE OR REPLACE FUNCTION vuln.update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger function: rebuilds the weighted full-text search vector for an advisory.
-- Weighting: primary_vuln_id (A) > title (B) > summary (C) > description (D).
CREATE OR REPLACE FUNCTION vuln.update_advisory_search_vector()
RETURNS TRIGGER AS $$
BEGIN
NEW.search_vector =
setweight(to_tsvector('english', COALESCE(NEW.primary_vuln_id, '')), 'A') ||
setweight(to_tsvector('english', COALESCE(NEW.title, '')), 'B') ||
setweight(to_tsvector('english', COALESCE(NEW.summary, '')), 'C') ||
setweight(to_tsvector('english', COALESCE(NEW.description, '')), 'D');
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- ============================================================================
-- SECTION 3: Core vuln Tables
-- ============================================================================
-- All DDL below is guarded (IF NOT EXISTS / DROP TRIGGER IF EXISTS) so the
-- script is re-runnable; previously only some indexes carried the guard while
-- tables did, making the script fail part-way on a second run.
-- Sources table (feed sources): one row per configured advisory feed.
CREATE TABLE IF NOT EXISTS vuln.sources (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
key TEXT NOT NULL UNIQUE,
name TEXT NOT NULL,
source_type TEXT NOT NULL,
url TEXT,
priority INT NOT NULL DEFAULT 0,
enabled BOOLEAN NOT NULL DEFAULT TRUE,
config JSONB NOT NULL DEFAULT '{}',
metadata JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_sources_enabled ON vuln.sources(enabled, priority DESC);
-- CREATE TRIGGER has no IF NOT EXISTS; drop-then-create keeps the script idempotent.
DROP TRIGGER IF EXISTS trg_sources_updated_at ON vuln.sources;
CREATE TRIGGER trg_sources_updated_at
BEFORE UPDATE ON vuln.sources
FOR EACH ROW EXECUTE FUNCTION vuln.update_updated_at();
-- Feed snapshots table: one row per fetched snapshot of a source feed.
CREATE TABLE IF NOT EXISTS vuln.feed_snapshots (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
source_id UUID NOT NULL REFERENCES vuln.sources(id),
snapshot_id TEXT NOT NULL,
advisory_count INT NOT NULL DEFAULT 0,
checksum TEXT,
metadata JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
UNIQUE(source_id, snapshot_id)
);
CREATE INDEX IF NOT EXISTS idx_feed_snapshots_source ON vuln.feed_snapshots(source_id);
CREATE INDEX IF NOT EXISTS idx_feed_snapshots_created ON vuln.feed_snapshots(created_at);
-- Advisory snapshots table: per-advisory content hash within a feed snapshot.
CREATE TABLE IF NOT EXISTS vuln.advisory_snapshots (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
feed_snapshot_id UUID NOT NULL REFERENCES vuln.feed_snapshots(id),
advisory_key TEXT NOT NULL,
content_hash TEXT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
UNIQUE(feed_snapshot_id, advisory_key)
);
CREATE INDEX IF NOT EXISTS idx_advisory_snapshots_feed ON vuln.advisory_snapshots(feed_snapshot_id);
CREATE INDEX IF NOT EXISTS idx_advisory_snapshots_key ON vuln.advisory_snapshots(advisory_key);
-- Advisories table with generated columns extracted from the provenance JSONB.
CREATE TABLE IF NOT EXISTS vuln.advisories (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
advisory_key TEXT NOT NULL UNIQUE,
primary_vuln_id TEXT NOT NULL,
source_id UUID REFERENCES vuln.sources(id),
title TEXT,
summary TEXT,
description TEXT,
severity TEXT CHECK (severity IN ('critical', 'high', 'medium', 'low', 'unknown')),
published_at TIMESTAMPTZ,
modified_at TIMESTAMPTZ,
withdrawn_at TIMESTAMPTZ,
provenance JSONB NOT NULL DEFAULT '{}',
raw_payload JSONB,
-- Populated by trg_advisories_search_vector below; never written directly.
search_vector TSVECTOR,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Generated columns for provenance (indexable without JSONB operators)
provenance_source_key TEXT GENERATED ALWAYS AS (provenance->>'source_key') STORED,
provenance_feed_id TEXT GENERATED ALWAYS AS (provenance->>'feed_id') STORED,
provenance_ingested_at TIMESTAMPTZ GENERATED ALWAYS AS ((provenance->>'ingested_at')::TIMESTAMPTZ) STORED
);
CREATE INDEX IF NOT EXISTS idx_advisories_vuln_id ON vuln.advisories(primary_vuln_id);
CREATE INDEX IF NOT EXISTS idx_advisories_source ON vuln.advisories(source_id);
CREATE INDEX IF NOT EXISTS idx_advisories_severity ON vuln.advisories(severity);
CREATE INDEX IF NOT EXISTS idx_advisories_published ON vuln.advisories(published_at);
CREATE INDEX IF NOT EXISTS idx_advisories_modified ON vuln.advisories(modified_at);
CREATE INDEX IF NOT EXISTS idx_advisories_search ON vuln.advisories USING GIN(search_vector);
CREATE INDEX IF NOT EXISTS ix_advisories_provenance_source ON vuln.advisories(provenance_source_key) WHERE provenance_source_key IS NOT NULL;
CREATE INDEX IF NOT EXISTS ix_advisories_provenance_feed ON vuln.advisories(provenance_feed_id) WHERE provenance_feed_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS ix_advisories_provenance_ingested ON vuln.advisories(provenance_ingested_at DESC) WHERE provenance_ingested_at IS NOT NULL;
CREATE INDEX IF NOT EXISTS ix_advisories_severity_ingested ON vuln.advisories(severity, provenance_ingested_at DESC) WHERE provenance_ingested_at IS NOT NULL;
DROP TRIGGER IF EXISTS trg_advisories_search_vector ON vuln.advisories;
CREATE TRIGGER trg_advisories_search_vector
BEFORE INSERT OR UPDATE ON vuln.advisories
FOR EACH ROW EXECUTE FUNCTION vuln.update_advisory_search_vector();
DROP TRIGGER IF EXISTS trg_advisories_updated_at ON vuln.advisories;
CREATE TRIGGER trg_advisories_updated_at
BEFORE UPDATE ON vuln.advisories
FOR EACH ROW EXECUTE FUNCTION vuln.update_updated_at();
-- Advisory aliases table (CVE/GHSA/etc. identifiers mapped to an advisory).
CREATE TABLE IF NOT EXISTS vuln.advisory_aliases (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
advisory_id UUID NOT NULL REFERENCES vuln.advisories(id) ON DELETE CASCADE,
alias_type TEXT NOT NULL,
alias_value TEXT NOT NULL,
is_primary BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
UNIQUE(advisory_id, alias_type, alias_value)
);
CREATE INDEX IF NOT EXISTS idx_advisory_aliases_advisory ON vuln.advisory_aliases(advisory_id);
CREATE INDEX IF NOT EXISTS idx_advisory_aliases_value ON vuln.advisory_aliases(alias_type, alias_value);
-- Partial index optimizes the common "lookup by CVE id" path.
CREATE INDEX IF NOT EXISTS idx_advisory_aliases_cve ON vuln.advisory_aliases(alias_value) WHERE alias_type = 'CVE';
-- Advisory CVSS scores table (one row per advisory/version/source combination).
CREATE TABLE IF NOT EXISTS vuln.advisory_cvss (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
advisory_id UUID NOT NULL REFERENCES vuln.advisories(id) ON DELETE CASCADE,
cvss_version TEXT NOT NULL,
vector_string TEXT NOT NULL,
base_score NUMERIC(3,1) NOT NULL,
base_severity TEXT,
exploitability_score NUMERIC(3,1),
impact_score NUMERIC(3,1),
source TEXT,
is_primary BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- NOTE(review): source is nullable; rows with NULL source are NOT deduplicated
-- by this constraint (SQL NULLs compare unequal) — confirm that is intended.
UNIQUE(advisory_id, cvss_version, source)
);
CREATE INDEX IF NOT EXISTS idx_advisory_cvss_advisory ON vuln.advisory_cvss(advisory_id);
CREATE INDEX IF NOT EXISTS idx_advisory_cvss_score ON vuln.advisory_cvss(base_score DESC);
-- Advisory affected packages with generated columns parsed from the PURL.
-- All index/trigger DDL below carries IF NOT EXISTS / DROP-IF-EXISTS guards so
-- the script is re-runnable (the tables already used IF NOT EXISTS).
CREATE TABLE IF NOT EXISTS vuln.advisory_affected (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
advisory_id UUID NOT NULL REFERENCES vuln.advisories(id) ON DELETE CASCADE,
ecosystem TEXT NOT NULL,
package_name TEXT NOT NULL,
purl TEXT,
version_range JSONB NOT NULL DEFAULT '{}',
versions_affected TEXT[],
versions_fixed TEXT[],
database_specific JSONB,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Generated columns for PURL parsing
-- e.g. 'pkg:npm/lodash@4.17.21' -> purl_type 'npm', purl_name 'lodash'
purl_type TEXT GENERATED ALWAYS AS (
CASE WHEN purl IS NOT NULL AND purl LIKE 'pkg:%'
THEN split_part(split_part(purl, ':', 2), '/', 1) ELSE NULL END
) STORED,
-- NOTE(review): split_part with a negative field position requires PostgreSQL 14+;
-- confirm the minimum supported server version. Also verify behavior for
-- namespaced purls (e.g. pkg:npm/%40scope/name) matches expectations.
purl_name TEXT GENERATED ALWAYS AS (
CASE WHEN purl IS NOT NULL AND purl LIKE 'pkg:%'
THEN split_part(split_part(split_part(purl, ':', 2), '@', 1), '/', -1) ELSE NULL END
) STORED
);
CREATE INDEX IF NOT EXISTS idx_advisory_affected_advisory ON vuln.advisory_affected(advisory_id);
CREATE INDEX IF NOT EXISTS idx_advisory_affected_ecosystem ON vuln.advisory_affected(ecosystem, package_name);
CREATE INDEX IF NOT EXISTS idx_advisory_affected_purl ON vuln.advisory_affected(purl);
-- Trigram index supports substring/fuzzy purl search (requires pg_trgm, Section 1).
CREATE INDEX IF NOT EXISTS idx_advisory_affected_purl_trgm ON vuln.advisory_affected USING GIN(purl gin_trgm_ops);
CREATE INDEX IF NOT EXISTS ix_advisory_affected_purl_type ON vuln.advisory_affected(purl_type) WHERE purl_type IS NOT NULL;
CREATE INDEX IF NOT EXISTS ix_advisory_affected_purl_name ON vuln.advisory_affected(purl_name) WHERE purl_name IS NOT NULL;
CREATE INDEX IF NOT EXISTS ix_advisory_affected_ecosystem_type ON vuln.advisory_affected(ecosystem, purl_type) WHERE purl_type IS NOT NULL;
-- Advisory references table (external links: patches, write-ups, vendor pages).
CREATE TABLE IF NOT EXISTS vuln.advisory_references (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
advisory_id UUID NOT NULL REFERENCES vuln.advisories(id) ON DELETE CASCADE,
ref_type TEXT NOT NULL,
url TEXT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_advisory_references_advisory ON vuln.advisory_references(advisory_id);
-- Advisory credits table (researcher/reporter acknowledgements).
CREATE TABLE IF NOT EXISTS vuln.advisory_credits (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
advisory_id UUID NOT NULL REFERENCES vuln.advisories(id) ON DELETE CASCADE,
name TEXT NOT NULL,
contact TEXT,
credit_type TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_advisory_credits_advisory ON vuln.advisory_credits(advisory_id);
-- Advisory weaknesses table (CWE classifications).
CREATE TABLE IF NOT EXISTS vuln.advisory_weaknesses (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
advisory_id UUID NOT NULL REFERENCES vuln.advisories(id) ON DELETE CASCADE,
cwe_id TEXT NOT NULL,
description TEXT,
source TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
UNIQUE(advisory_id, cwe_id)
);
CREATE INDEX IF NOT EXISTS idx_advisory_weaknesses_advisory ON vuln.advisory_weaknesses(advisory_id);
CREATE INDEX IF NOT EXISTS idx_advisory_weaknesses_cwe ON vuln.advisory_weaknesses(cwe_id);
-- KEV flags table (CISA Known Exploited Vulnerabilities catalog entries).
CREATE TABLE IF NOT EXISTS vuln.kev_flags (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
advisory_id UUID NOT NULL REFERENCES vuln.advisories(id) ON DELETE CASCADE,
cve_id TEXT NOT NULL,
vendor_project TEXT,
product TEXT,
vulnerability_name TEXT,
date_added DATE NOT NULL,
due_date DATE,
known_ransomware_use BOOLEAN NOT NULL DEFAULT FALSE,
notes TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
UNIQUE(advisory_id, cve_id)
);
CREATE INDEX IF NOT EXISTS idx_kev_flags_advisory ON vuln.kev_flags(advisory_id);
CREATE INDEX IF NOT EXISTS idx_kev_flags_cve ON vuln.kev_flags(cve_id);
CREATE INDEX IF NOT EXISTS idx_kev_flags_date ON vuln.kev_flags(date_added);
-- Source states table: per-source sync cursor and error bookkeeping (1:1 with sources).
CREATE TABLE IF NOT EXISTS vuln.source_states (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
source_id UUID NOT NULL REFERENCES vuln.sources(id) UNIQUE,
cursor TEXT,
last_sync_at TIMESTAMPTZ,
last_success_at TIMESTAMPTZ,
last_error TEXT,
sync_count BIGINT NOT NULL DEFAULT 0,
error_count INT NOT NULL DEFAULT 0,
metadata JSONB NOT NULL DEFAULT '{}',
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_source_states_source ON vuln.source_states(source_id);
DROP TRIGGER IF EXISTS trg_source_states_updated_at ON vuln.source_states;
CREATE TRIGGER trg_source_states_updated_at
BEFORE UPDATE ON vuln.source_states
FOR EACH ROW EXECUTE FUNCTION vuln.update_updated_at();
-- ============================================================================
-- SECTION 4: Partitioned Merge Events Table
-- ============================================================================
-- Append-only log of advisory merge events, range-partitioned by month on
-- created_at (keeps per-partition indexes small; old months can be dropped).
CREATE TABLE IF NOT EXISTS vuln.merge_events (
id BIGSERIAL,
advisory_id UUID NOT NULL,
source_id UUID,
event_type TEXT NOT NULL,
old_value JSONB,
new_value JSONB,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- created_at is part of the PK because unique constraints on a partitioned
-- table must include the partition key.
PRIMARY KEY (id, created_at)
) PARTITION BY RANGE (created_at);
-- Pre-create monthly partitions from 12 months back through 3 months ahead.
DO $$
DECLARE
v_start DATE;
v_end DATE;
v_partition_name TEXT;
BEGIN
v_start := date_trunc('month', NOW() - INTERVAL '12 months')::DATE;
WHILE v_start <= date_trunc('month', NOW() + INTERVAL '3 months')::DATE LOOP
v_end := (v_start + INTERVAL '1 month')::DATE;
v_partition_name := 'merge_events_' || to_char(v_start, 'YYYY_MM');
-- Existence is checked via pg_class so re-running the script is safe.
IF NOT EXISTS (
SELECT 1 FROM pg_class c JOIN pg_namespace n ON c.relnamespace = n.oid
WHERE n.nspname = 'vuln' AND c.relname = v_partition_name
) THEN
EXECUTE format(
'CREATE TABLE vuln.%I PARTITION OF vuln.merge_events FOR VALUES FROM (%L) TO (%L)',
v_partition_name, v_start, v_end
);
END IF;
v_start := v_end;
END LOOP;
END $$;
-- Catch-all partition for rows outside the pre-created monthly ranges.
-- NOTE(review): if rows accumulate here for a future month, creating that
-- month's partition later will fail until those rows are moved — confirm a
-- partition-maintenance job exists.
CREATE TABLE IF NOT EXISTS vuln.merge_events_default PARTITION OF vuln.merge_events DEFAULT;
CREATE INDEX IF NOT EXISTS ix_merge_events_part_advisory ON vuln.merge_events(advisory_id);
CREATE INDEX IF NOT EXISTS ix_merge_events_part_source ON vuln.merge_events(source_id) WHERE source_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS ix_merge_events_part_event_type ON vuln.merge_events(event_type);
-- BRIN fits the monotonically increasing created_at and is far smaller than a btree.
CREATE INDEX IF NOT EXISTS brin_merge_events_part_created ON vuln.merge_events USING BRIN(created_at) WITH (pages_per_range = 128);
COMMENT ON TABLE vuln.merge_events IS 'Advisory merge event log. Partitioned monthly by created_at.';
-- ============================================================================
-- SECTION 5: LNM Linkset Cache
-- ============================================================================
-- Cached link-not-merge (LNM) linkset per tenant/advisory/source.
CREATE TABLE IF NOT EXISTS vuln.lnm_linkset_cache (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id TEXT NOT NULL,
source TEXT NOT NULL,
advisory_id TEXT NOT NULL,
observations TEXT[] NOT NULL DEFAULT '{}',
normalized JSONB,
conflicts JSONB,
provenance JSONB,
confidence DOUBLE PRECISION,
built_by_job_id TEXT,
-- No DEFAULT: callers supply created_at explicitly.
created_at TIMESTAMPTZ NOT NULL,
CONSTRAINT uq_lnm_linkset_cache UNIQUE (tenant_id, advisory_id, source)
);
-- Composite index matches tenant-scoped, newest-first listing queries.
CREATE INDEX IF NOT EXISTS idx_lnm_linkset_cache_order ON vuln.lnm_linkset_cache(tenant_id, created_at DESC, advisory_id, source);
-- ============================================================================
-- SECTION 6: Sync Ledger and Site Policy
-- ============================================================================
-- One row per imported bundle; (site_id, cursor) and bundle_hash are both
-- unique, so replayed bundles are rejected at the constraint level.
CREATE TABLE IF NOT EXISTS vuln.sync_ledger (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
site_id TEXT NOT NULL,
cursor TEXT NOT NULL,
bundle_hash TEXT NOT NULL,
items_count INT NOT NULL DEFAULT 0,
signed_at TIMESTAMPTZ NOT NULL,
imported_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT uq_sync_ledger_site_cursor UNIQUE (site_id, cursor),
CONSTRAINT uq_sync_ledger_bundle UNIQUE (bundle_hash)
);
CREATE INDEX IF NOT EXISTS idx_sync_ledger_site ON vuln.sync_ledger(site_id);
CREATE INDEX IF NOT EXISTS idx_sync_ledger_site_time ON vuln.sync_ledger(site_id, signed_at DESC);
COMMENT ON TABLE vuln.sync_ledger IS 'Federation sync cursor tracking per remote site';
-- Per-remote-site federation policy: source allow/deny lists, size limits,
-- and signature requirements.
CREATE TABLE IF NOT EXISTS vuln.site_policy (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
site_id TEXT NOT NULL UNIQUE,
display_name TEXT,
allowed_sources TEXT[] NOT NULL DEFAULT '{}',
denied_sources TEXT[] NOT NULL DEFAULT '{}',
max_bundle_size_mb INT NOT NULL DEFAULT 100,
max_items_per_bundle INT NOT NULL DEFAULT 10000,
require_signature BOOLEAN NOT NULL DEFAULT TRUE,
allowed_signers TEXT[] NOT NULL DEFAULT '{}',
enabled BOOLEAN NOT NULL DEFAULT TRUE,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_site_policy_enabled ON vuln.site_policy(enabled) WHERE enabled = TRUE;
-- NOTE(review): unguarded CREATE TRIGGER — fails if the trigger already exists
-- (unlike the IF NOT EXISTS DDL above); consider DROP TRIGGER IF EXISTS first.
CREATE TRIGGER trg_site_policy_updated
BEFORE UPDATE ON vuln.site_policy
FOR EACH ROW EXECUTE FUNCTION vuln.update_timestamp();
-- ============================================================================
-- SECTION 7: Advisory Canonical and Source Edge
-- ============================================================================
-- Canonical (deduplicated) advisory record keyed by merge_hash; one row per
-- merged CVE/affects combination.
CREATE TABLE IF NOT EXISTS vuln.advisory_canonical (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
cve TEXT NOT NULL,
affects_key TEXT NOT NULL,
version_range JSONB,
weakness TEXT[] NOT NULL DEFAULT '{}',
merge_hash TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'active' CHECK (status IN ('active', 'stub', 'withdrawn')),
-- Broader severity domain than vuln.advisories: also allows 'none'.
severity TEXT CHECK (severity IN ('critical', 'high', 'medium', 'low', 'none', 'unknown')),
epss_score NUMERIC(5,4),
exploit_known BOOLEAN NOT NULL DEFAULT FALSE,
title TEXT,
summary TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT uq_advisory_canonical_merge_hash UNIQUE (merge_hash)
);
CREATE INDEX IF NOT EXISTS idx_advisory_canonical_cve ON vuln.advisory_canonical(cve);
CREATE INDEX IF NOT EXISTS idx_advisory_canonical_affects ON vuln.advisory_canonical(affects_key);
CREATE INDEX IF NOT EXISTS idx_advisory_canonical_merge_hash ON vuln.advisory_canonical(merge_hash);
CREATE INDEX IF NOT EXISTS idx_advisory_canonical_status ON vuln.advisory_canonical(status) WHERE status = 'active';
CREATE INDEX IF NOT EXISTS idx_advisory_canonical_severity ON vuln.advisory_canonical(severity) WHERE severity IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_advisory_canonical_exploit ON vuln.advisory_canonical(exploit_known) WHERE exploit_known = TRUE;
CREATE INDEX IF NOT EXISTS idx_advisory_canonical_updated ON vuln.advisory_canonical(updated_at DESC);
-- NOTE(review): unguarded CREATE TRIGGER (no IF NOT EXISTS) — fails on re-run.
CREATE TRIGGER trg_advisory_canonical_updated
BEFORE UPDATE ON vuln.advisory_canonical
FOR EACH ROW EXECUTE FUNCTION vuln.update_timestamp();
-- Edge linking a canonical advisory to each source document that contributed
-- to it; RESTRICT on sources prevents deleting a source that still has edges.
CREATE TABLE IF NOT EXISTS vuln.advisory_source_edge (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
canonical_id UUID NOT NULL REFERENCES vuln.advisory_canonical(id) ON DELETE CASCADE,
source_id UUID NOT NULL REFERENCES vuln.sources(id) ON DELETE RESTRICT,
source_advisory_id TEXT NOT NULL,
source_doc_hash TEXT NOT NULL,
vendor_status TEXT CHECK (vendor_status IN ('affected', 'not_affected', 'fixed', 'under_investigation')),
-- Lower rank = higher precedence when sources disagree (default 100).
precedence_rank INT NOT NULL DEFAULT 100,
dsse_envelope JSONB,
raw_payload JSONB,
fetched_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT uq_advisory_source_edge_unique UNIQUE (canonical_id, source_id, source_doc_hash)
);
CREATE INDEX IF NOT EXISTS idx_source_edge_canonical ON vuln.advisory_source_edge(canonical_id);
CREATE INDEX IF NOT EXISTS idx_source_edge_source ON vuln.advisory_source_edge(source_id);
CREATE INDEX IF NOT EXISTS idx_source_edge_advisory_id ON vuln.advisory_source_edge(source_advisory_id);
CREATE INDEX IF NOT EXISTS idx_source_edge_canonical_source ON vuln.advisory_source_edge(canonical_id, source_id);
CREATE INDEX IF NOT EXISTS idx_source_edge_fetched ON vuln.advisory_source_edge(fetched_at DESC);
-- jsonb_path_ops: smaller GIN index, supports @> containment queries only.
CREATE INDEX IF NOT EXISTS idx_source_edge_dsse_gin ON vuln.advisory_source_edge USING GIN(dsse_envelope jsonb_path_ops);
-- ============================================================================
-- SECTION 8: Interest Score and SBOM Registry
-- ============================================================================
-- Computed relevance score (0..1) per canonical advisory, 1:1 via unique constraint.
CREATE TABLE IF NOT EXISTS vuln.interest_score (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
canonical_id UUID NOT NULL REFERENCES vuln.advisory_canonical(id) ON DELETE CASCADE,
score NUMERIC(3,2) NOT NULL CHECK (score >= 0 AND score <= 1),
reasons JSONB NOT NULL DEFAULT '[]',
last_seen_in_build UUID,
computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT uq_interest_score_canonical UNIQUE (canonical_id)
);
CREATE INDEX IF NOT EXISTS idx_interest_score_score ON vuln.interest_score(score DESC);
CREATE INDEX IF NOT EXISTS idx_interest_score_computed ON vuln.interest_score(computed_at DESC);
-- Partial indexes for the high-interest (>= 0.7) and low-interest (< 0.2) bands.
CREATE INDEX IF NOT EXISTS idx_interest_score_high ON vuln.interest_score(canonical_id) WHERE score >= 0.7;
CREATE INDEX IF NOT EXISTS idx_interest_score_low ON vuln.interest_score(canonical_id) WHERE score < 0.2;
-- Registry of known SBOM documents, deduplicated by content digest.
CREATE TABLE IF NOT EXISTS vuln.sbom_registry (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
digest TEXT NOT NULL,
format TEXT NOT NULL CHECK (format IN ('cyclonedx', 'spdx')),
spec_version TEXT NOT NULL,
primary_name TEXT,
primary_version TEXT,
component_count INT NOT NULL DEFAULT 0,
affected_count INT NOT NULL DEFAULT 0,
source TEXT NOT NULL,
tenant_id TEXT,
registered_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
last_matched_at TIMESTAMPTZ,
CONSTRAINT uq_sbom_registry_digest UNIQUE (digest)
);
CREATE INDEX IF NOT EXISTS idx_sbom_registry_tenant ON vuln.sbom_registry(tenant_id) WHERE tenant_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_sbom_registry_primary ON vuln.sbom_registry(primary_name, primary_version);
CREATE INDEX IF NOT EXISTS idx_sbom_registry_registered ON vuln.sbom_registry(registered_at DESC);
CREATE INDEX IF NOT EXISTS idx_sbom_registry_affected ON vuln.sbom_registry(affected_count DESC) WHERE affected_count > 0;
-- Match results between an SBOM component (by purl) and a canonical advisory,
-- with the method used and a confidence in [0, 1].
CREATE TABLE IF NOT EXISTS vuln.sbom_canonical_match (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
sbom_id UUID NOT NULL REFERENCES vuln.sbom_registry(id) ON DELETE CASCADE,
canonical_id UUID NOT NULL REFERENCES vuln.advisory_canonical(id) ON DELETE CASCADE,
purl TEXT NOT NULL,
match_method TEXT NOT NULL CHECK (match_method IN ('exact_purl', 'purl_version_range', 'cpe', 'name_version')),
confidence NUMERIC(3,2) NOT NULL DEFAULT 1.0 CHECK (confidence >= 0 AND confidence <= 1),
is_reachable BOOLEAN NOT NULL DEFAULT FALSE,
is_deployed BOOLEAN NOT NULL DEFAULT FALSE,
matched_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT uq_sbom_canonical_match UNIQUE (sbom_id, canonical_id, purl)
);
CREATE INDEX IF NOT EXISTS idx_sbom_match_sbom ON vuln.sbom_canonical_match(sbom_id);
CREATE INDEX IF NOT EXISTS idx_sbom_match_canonical ON vuln.sbom_canonical_match(canonical_id);
CREATE INDEX IF NOT EXISTS idx_sbom_match_purl ON vuln.sbom_canonical_match(purl);
CREATE INDEX IF NOT EXISTS idx_sbom_match_reachable ON vuln.sbom_canonical_match(canonical_id) WHERE is_reachable = TRUE;
-- Reverse lookup: purl (plus parsed type/namespace/name) -> canonical advisories.
CREATE TABLE IF NOT EXISTS vuln.purl_canonical_index (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
purl TEXT NOT NULL,
purl_type TEXT NOT NULL,
purl_namespace TEXT,
purl_name TEXT NOT NULL,
canonical_id UUID NOT NULL REFERENCES vuln.advisory_canonical(id) ON DELETE CASCADE,
version_constraint TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT uq_purl_canonical UNIQUE (purl, canonical_id)
);
CREATE INDEX IF NOT EXISTS idx_purl_index_lookup ON vuln.purl_canonical_index(purl_type, purl_namespace, purl_name);
CREATE INDEX IF NOT EXISTS idx_purl_index_canonical ON vuln.purl_canonical_index(canonical_id);
-- ============================================================================
-- SECTION 9: Provenance Scope
-- ============================================================================
-- Backport/patch provenance per canonical advisory and distro release
-- (one row per canonical_id + distro_release pair).
CREATE TABLE IF NOT EXISTS vuln.provenance_scope (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
canonical_id UUID NOT NULL REFERENCES vuln.advisory_canonical(id) ON DELETE CASCADE,
distro_release TEXT NOT NULL,
backport_semver TEXT,
patch_id TEXT,
patch_origin TEXT CHECK (patch_origin IN ('upstream', 'distro', 'vendor')),
-- NOTE(review): evidence_ref is a bare UUID with no FK — confirm which table
-- it points to and whether a foreign key should be added.
evidence_ref UUID,
confidence NUMERIC(3,2) NOT NULL DEFAULT 0.5 CHECK (confidence >= 0 AND confidence <= 1),
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT uq_provenance_scope_canonical_distro UNIQUE (canonical_id, distro_release)
);
CREATE INDEX IF NOT EXISTS idx_provenance_scope_canonical ON vuln.provenance_scope(canonical_id);
CREATE INDEX IF NOT EXISTS idx_provenance_scope_distro ON vuln.provenance_scope(distro_release);
CREATE INDEX IF NOT EXISTS idx_provenance_scope_patch ON vuln.provenance_scope(patch_id) WHERE patch_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_provenance_scope_high_confidence ON vuln.provenance_scope(confidence DESC) WHERE confidence >= 0.7;
CREATE INDEX IF NOT EXISTS idx_provenance_scope_origin ON vuln.provenance_scope(patch_origin) WHERE patch_origin IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_provenance_scope_updated ON vuln.provenance_scope(updated_at DESC);
-- NOTE(review): unguarded CREATE TRIGGER (no IF NOT EXISTS) — fails on re-run.
CREATE TRIGGER trg_provenance_scope_updated
BEFORE UPDATE ON vuln.provenance_scope
FOR EACH ROW EXECUTE FUNCTION vuln.update_timestamp();
-- ============================================================================
-- SECTION 10: Concelier Schema Tables
-- ============================================================================
CREATE TABLE IF NOT EXISTS concelier.source_documents (
id UUID NOT NULL,
source_id UUID NOT NULL,
source_name TEXT NOT NULL,
uri TEXT NOT NULL,
sha256 TEXT NOT NULL,
status TEXT NOT NULL,
content_type TEXT,
headers_json JSONB,
metadata_json JSONB,
etag TEXT,
last_modified TIMESTAMPTZ,
payload BYTEA NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
expires_at TIMESTAMPTZ,
CONSTRAINT pk_source_documents PRIMARY KEY (source_name, uri)
);
CREATE INDEX IF NOT EXISTS idx_source_documents_source_id ON concelier.source_documents(source_id);
CREATE INDEX IF NOT EXISTS idx_source_documents_status ON concelier.source_documents(status);
-- Parsed DTO payloads keyed by source document (one DTO per document;
-- PK is document_id, the id column is informational only).
CREATE TABLE IF NOT EXISTS concelier.dtos (
    id UUID NOT NULL,
    document_id UUID NOT NULL,
    source_name TEXT NOT NULL,
    format TEXT NOT NULL,                 -- DTO serialization format
    payload_json JSONB NOT NULL,
    schema_version TEXT NOT NULL DEFAULT '',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    validated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    CONSTRAINT pk_concelier_dtos PRIMARY KEY (document_id)
);
CREATE INDEX IF NOT EXISTS idx_concelier_dtos_source ON concelier.dtos(source_name, created_at DESC);
-- Exporter cursor/digest state, keyed by an opaque exporter id.
CREATE TABLE IF NOT EXISTS concelier.export_states (
    id TEXT NOT NULL,
    export_cursor TEXT NOT NULL,          -- position marker for incremental export
    last_full_digest TEXT,
    last_delta_digest TEXT,
    base_export_id TEXT,
    base_digest TEXT,
    target_repository TEXT,
    files JSONB NOT NULL,                 -- manifest of exported files
    exporter_version TEXT NOT NULL,
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    CONSTRAINT pk_concelier_export_states PRIMARY KEY (id)
);
-- Vendor PSIRT markers per advisory; one row per (advisory, vendor).
CREATE TABLE IF NOT EXISTS concelier.psirt_flags (
    advisory_id TEXT NOT NULL,
    vendor TEXT NOT NULL,
    source_name TEXT NOT NULL,
    external_id TEXT,
    recorded_at TIMESTAMPTZ NOT NULL,
    CONSTRAINT pk_concelier_psirt_flags PRIMARY KEY (advisory_id, vendor)
);
CREATE INDEX IF NOT EXISTS idx_concelier_psirt_source ON concelier.psirt_flags(source_name, recorded_at DESC);
-- Japan-specific advisory flags.
-- NOTE(review): PK is advisory_key alone, so only one JP flag per advisory
-- regardless of source_name/category — confirm this is intended.
CREATE TABLE IF NOT EXISTS concelier.jp_flags (
    advisory_key TEXT NOT NULL,
    source_name TEXT NOT NULL,
    category TEXT NOT NULL,
    vendor_status TEXT,
    created_at TIMESTAMPTZ NOT NULL,
    CONSTRAINT pk_concelier_jp_flags PRIMARY KEY (advisory_key)
);
-- Append-only advisory change log: each row stores the new snapshot, the
-- previous one (if any), and a computed change set, all as JSONB.
CREATE TABLE IF NOT EXISTS concelier.change_history (
    id UUID NOT NULL,
    source_name TEXT NOT NULL,
    advisory_key TEXT NOT NULL,
    document_id UUID NOT NULL,
    document_hash TEXT NOT NULL,
    snapshot_hash TEXT NOT NULL,
    previous_snapshot_hash TEXT,
    snapshot JSONB NOT NULL,
    previous_snapshot JSONB,
    changes JSONB NOT NULL,
    created_at TIMESTAMPTZ NOT NULL,
    CONSTRAINT pk_concelier_change_history PRIMARY KEY (id)
);
CREATE INDEX IF NOT EXISTS idx_concelier_change_history_advisory ON concelier.change_history(advisory_key, created_at DESC);
-- ============================================================================
-- SECTION 11: Helper Functions for Canonical Operations
-- ============================================================================
-- Fetch a single canonical advisory by its deterministic merge hash.
-- Returns NULL-filled row when no match (single-row SQL function semantics).
CREATE OR REPLACE FUNCTION vuln.get_canonical_by_hash(p_merge_hash TEXT)
RETURNS vuln.advisory_canonical
LANGUAGE sql STABLE
AS $$
    SELECT * FROM vuln.advisory_canonical WHERE merge_hash = p_merge_hash;
$$;
-- All source edges for a canonical advisory, best-precedence first,
-- ties broken by most recently fetched.
CREATE OR REPLACE FUNCTION vuln.get_source_edges(p_canonical_id UUID)
RETURNS SETOF vuln.advisory_source_edge
LANGUAGE sql STABLE
AS $$
    SELECT * FROM vuln.advisory_source_edge
    WHERE canonical_id = p_canonical_id
    ORDER BY precedence_rank ASC, fetched_at DESC;
$$;
-- Insert or merge a canonical advisory keyed by merge_hash.
-- On conflict, enrichment fields are merged:
--   * severity/epss_score/title/summary: new value wins only when non-NULL
--     (COALESCE keeps the existing value otherwise);
--   * exploit_known: sticky OR — once TRUE it stays TRUE.
-- Returns the id of the inserted/updated row.
CREATE OR REPLACE FUNCTION vuln.upsert_canonical(
    p_cve TEXT, p_affects_key TEXT, p_version_range JSONB, p_weakness TEXT[],
    p_merge_hash TEXT, p_severity TEXT DEFAULT NULL, p_epss_score NUMERIC DEFAULT NULL,
    p_exploit_known BOOLEAN DEFAULT FALSE, p_title TEXT DEFAULT NULL, p_summary TEXT DEFAULT NULL
)
RETURNS UUID
LANGUAGE plpgsql
AS $$
DECLARE v_id UUID;
BEGIN
    INSERT INTO vuln.advisory_canonical (
        cve, affects_key, version_range, weakness, merge_hash,
        severity, epss_score, exploit_known, title, summary
    ) VALUES (
        p_cve, p_affects_key, p_version_range, p_weakness, p_merge_hash,
        p_severity, p_epss_score, p_exploit_known, p_title, p_summary
    )
    ON CONFLICT (merge_hash) DO UPDATE SET
        severity = COALESCE(EXCLUDED.severity, vuln.advisory_canonical.severity),
        epss_score = COALESCE(EXCLUDED.epss_score, vuln.advisory_canonical.epss_score),
        exploit_known = EXCLUDED.exploit_known OR vuln.advisory_canonical.exploit_known,
        title = COALESCE(EXCLUDED.title, vuln.advisory_canonical.title),
        summary = COALESCE(EXCLUDED.summary, vuln.advisory_canonical.summary),
        updated_at = NOW()
    RETURNING id INTO v_id;
    RETURN v_id;
END;
$$;
-- Attach (or refresh) a source edge linking a canonical advisory to the
-- source document it was derived from. Conflict key is
-- (canonical_id, source_id, source_doc_hash). On conflict:
--   * vendor_status/dsse_envelope/raw_payload: new value wins only when non-NULL;
--   * precedence_rank: LEAST keeps the best (lowest) rank ever seen.
-- Returns the id of the inserted/updated edge.
CREATE OR REPLACE FUNCTION vuln.add_source_edge(
    p_canonical_id UUID, p_source_id UUID, p_source_advisory_id TEXT, p_source_doc_hash TEXT,
    p_vendor_status TEXT DEFAULT NULL, p_precedence_rank INT DEFAULT 100,
    p_dsse_envelope JSONB DEFAULT NULL, p_raw_payload JSONB DEFAULT NULL, p_fetched_at TIMESTAMPTZ DEFAULT NOW()
)
RETURNS UUID
LANGUAGE plpgsql
AS $$
DECLARE v_id UUID;
BEGIN
    INSERT INTO vuln.advisory_source_edge (
        canonical_id, source_id, source_advisory_id, source_doc_hash,
        vendor_status, precedence_rank, dsse_envelope, raw_payload, fetched_at
    ) VALUES (
        p_canonical_id, p_source_id, p_source_advisory_id, p_source_doc_hash,
        p_vendor_status, p_precedence_rank, p_dsse_envelope, p_raw_payload, p_fetched_at
    )
    ON CONFLICT (canonical_id, source_id, source_doc_hash) DO UPDATE SET
        vendor_status = COALESCE(EXCLUDED.vendor_status, vuln.advisory_source_edge.vendor_status),
        precedence_rank = LEAST(EXCLUDED.precedence_rank, vuln.advisory_source_edge.precedence_rank),
        dsse_envelope = COALESCE(EXCLUDED.dsse_envelope, vuln.advisory_source_edge.dsse_envelope),
        raw_payload = COALESCE(EXCLUDED.raw_payload, vuln.advisory_source_edge.raw_payload)
    RETURNING id INTO v_id;
    RETURN v_id;
END;
$$;
-- Count active canonical advisories whose CVE id belongs to the given year,
-- matched by the 'CVE-<year>-' prefix.
CREATE OR REPLACE FUNCTION vuln.count_canonicals_by_cve_year(p_year INT)
RETURNS BIGINT
LANGUAGE sql STABLE
AS $$
    SELECT COUNT(*)
    FROM vuln.advisory_canonical ac
    WHERE ac.status = 'active'
      AND ac.cve LIKE format('CVE-%s-%%', p_year);
$$;
COMMIT;

View File

@@ -0,0 +1,261 @@
-- Vulnerability Schema Migration 001: Initial Schema
-- Creates the vuln schema for advisories and vulnerability data
-- Create schema
CREATE SCHEMA IF NOT EXISTS vuln;
-- Enable pg_trgm for fuzzy text search (used by the trigram GIN index on purl below)
CREATE EXTENSION IF NOT EXISTS pg_trgm;
-- Sources table (feed sources)
CREATE TABLE IF NOT EXISTS vuln.sources (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    key TEXT NOT NULL UNIQUE,             -- stable machine-readable source key
    name TEXT NOT NULL,
    source_type TEXT NOT NULL,
    url TEXT,
    priority INT NOT NULL DEFAULT 0,
    enabled BOOLEAN NOT NULL DEFAULT TRUE,
    config JSONB NOT NULL DEFAULT '{}',
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- FIX: IF NOT EXISTS added so re-running this migration is idempotent,
-- matching the CREATE TABLE IF NOT EXISTS statements it accompanies.
CREATE INDEX IF NOT EXISTS idx_sources_enabled ON vuln.sources(enabled, priority DESC);
-- Feed snapshots table: one row per fetched snapshot of a source feed.
CREATE TABLE IF NOT EXISTS vuln.feed_snapshots (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    source_id UUID NOT NULL REFERENCES vuln.sources(id),
    snapshot_id TEXT NOT NULL,            -- source-assigned snapshot identifier
    advisory_count INT NOT NULL DEFAULT 0,
    checksum TEXT,
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(source_id, snapshot_id)
);
-- FIX: IF NOT EXISTS on all indexes so the migration can be re-run safely.
CREATE INDEX IF NOT EXISTS idx_feed_snapshots_source ON vuln.feed_snapshots(source_id);
CREATE INDEX IF NOT EXISTS idx_feed_snapshots_created ON vuln.feed_snapshots(created_at);
-- Advisory snapshots table (point-in-time advisory content hashes per feed snapshot)
CREATE TABLE IF NOT EXISTS vuln.advisory_snapshots (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    feed_snapshot_id UUID NOT NULL REFERENCES vuln.feed_snapshots(id),
    advisory_key TEXT NOT NULL,
    content_hash TEXT NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(feed_snapshot_id, advisory_key)
);
CREATE INDEX IF NOT EXISTS idx_advisory_snapshots_feed ON vuln.advisory_snapshots(feed_snapshot_id);
CREATE INDEX IF NOT EXISTS idx_advisory_snapshots_key ON vuln.advisory_snapshots(advisory_key);
-- Advisories table (main vulnerability data).
-- search_vector is maintained by trg_advisories_search_vector (defined below).
CREATE TABLE IF NOT EXISTS vuln.advisories (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    advisory_key TEXT NOT NULL UNIQUE,
    primary_vuln_id TEXT NOT NULL,        -- e.g. the leading CVE/GHSA identifier
    source_id UUID REFERENCES vuln.sources(id),
    title TEXT,
    summary TEXT,
    description TEXT,
    severity TEXT CHECK (severity IN ('critical', 'high', 'medium', 'low', 'unknown')),
    published_at TIMESTAMPTZ,
    modified_at TIMESTAMPTZ,
    withdrawn_at TIMESTAMPTZ,
    provenance JSONB NOT NULL DEFAULT '{}',
    raw_payload JSONB,
    search_vector TSVECTOR,               -- populated by trigger, never written directly
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- FIX: IF NOT EXISTS on all indexes so the migration can be re-run safely,
-- consistent with the CREATE TABLE IF NOT EXISTS statements above.
CREATE INDEX IF NOT EXISTS idx_advisories_vuln_id ON vuln.advisories(primary_vuln_id);
CREATE INDEX IF NOT EXISTS idx_advisories_source ON vuln.advisories(source_id);
CREATE INDEX IF NOT EXISTS idx_advisories_severity ON vuln.advisories(severity);
CREATE INDEX IF NOT EXISTS idx_advisories_published ON vuln.advisories(published_at);
CREATE INDEX IF NOT EXISTS idx_advisories_modified ON vuln.advisories(modified_at);
CREATE INDEX IF NOT EXISTS idx_advisories_search ON vuln.advisories USING GIN(search_vector);
-- Advisory aliases table (CVE, GHSA, etc.)
CREATE TABLE IF NOT EXISTS vuln.advisory_aliases (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    advisory_id UUID NOT NULL REFERENCES vuln.advisories(id) ON DELETE CASCADE,
    alias_type TEXT NOT NULL,
    alias_value TEXT NOT NULL,
    is_primary BOOLEAN NOT NULL DEFAULT FALSE,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(advisory_id, alias_type, alias_value)
);
CREATE INDEX IF NOT EXISTS idx_advisory_aliases_advisory ON vuln.advisory_aliases(advisory_id);
CREATE INDEX IF NOT EXISTS idx_advisory_aliases_value ON vuln.advisory_aliases(alias_type, alias_value);
-- Partial index for the common CVE-only lookup path.
CREATE INDEX IF NOT EXISTS idx_advisory_aliases_cve ON vuln.advisory_aliases(alias_value)
    WHERE alias_type = 'CVE';
-- Advisory CVSS scores table (one row per advisory/version/source combination).
CREATE TABLE IF NOT EXISTS vuln.advisory_cvss (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    advisory_id UUID NOT NULL REFERENCES vuln.advisories(id) ON DELETE CASCADE,
    cvss_version TEXT NOT NULL,
    vector_string TEXT NOT NULL,
    base_score NUMERIC(3,1) NOT NULL,
    base_severity TEXT,
    exploitability_score NUMERIC(3,1),
    impact_score NUMERIC(3,1),
    source TEXT,
    is_primary BOOLEAN NOT NULL DEFAULT FALSE,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(advisory_id, cvss_version, source)
);
-- FIX: IF NOT EXISTS on all indexes so re-running the migration is idempotent.
CREATE INDEX IF NOT EXISTS idx_advisory_cvss_advisory ON vuln.advisory_cvss(advisory_id);
CREATE INDEX IF NOT EXISTS idx_advisory_cvss_score ON vuln.advisory_cvss(base_score DESC);
-- Advisory affected packages table.
CREATE TABLE IF NOT EXISTS vuln.advisory_affected (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    advisory_id UUID NOT NULL REFERENCES vuln.advisories(id) ON DELETE CASCADE,
    ecosystem TEXT NOT NULL,
    package_name TEXT NOT NULL,
    purl TEXT,
    version_range JSONB NOT NULL DEFAULT '{}',
    versions_affected TEXT[],
    versions_fixed TEXT[],
    database_specific JSONB,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_advisory_affected_advisory ON vuln.advisory_affected(advisory_id);
CREATE INDEX IF NOT EXISTS idx_advisory_affected_ecosystem ON vuln.advisory_affected(ecosystem, package_name);
CREATE INDEX IF NOT EXISTS idx_advisory_affected_purl ON vuln.advisory_affected(purl);
-- Trigram index for fuzzy purl matching (requires pg_trgm, enabled above).
CREATE INDEX IF NOT EXISTS idx_advisory_affected_purl_trgm ON vuln.advisory_affected USING GIN(purl gin_trgm_ops);
-- Advisory references table (external links).
CREATE TABLE IF NOT EXISTS vuln.advisory_references (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    advisory_id UUID NOT NULL REFERENCES vuln.advisories(id) ON DELETE CASCADE,
    ref_type TEXT NOT NULL,
    url TEXT NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_advisory_references_advisory ON vuln.advisory_references(advisory_id);
-- Advisory credits table (researcher acknowledgements).
CREATE TABLE IF NOT EXISTS vuln.advisory_credits (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    advisory_id UUID NOT NULL REFERENCES vuln.advisories(id) ON DELETE CASCADE,
    name TEXT NOT NULL,
    contact TEXT,
    credit_type TEXT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_advisory_credits_advisory ON vuln.advisory_credits(advisory_id);
-- Advisory weaknesses table (CWE classifications).
CREATE TABLE IF NOT EXISTS vuln.advisory_weaknesses (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    advisory_id UUID NOT NULL REFERENCES vuln.advisories(id) ON DELETE CASCADE,
    cwe_id TEXT NOT NULL,
    description TEXT,
    source TEXT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(advisory_id, cwe_id)
);
CREATE INDEX IF NOT EXISTS idx_advisory_weaknesses_advisory ON vuln.advisory_weaknesses(advisory_id);
CREATE INDEX IF NOT EXISTS idx_advisory_weaknesses_cwe ON vuln.advisory_weaknesses(cwe_id);
-- KEV flags table (Known Exploited Vulnerabilities).
CREATE TABLE IF NOT EXISTS vuln.kev_flags (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    advisory_id UUID NOT NULL REFERENCES vuln.advisories(id) ON DELETE CASCADE,
    cve_id TEXT NOT NULL,
    vendor_project TEXT,
    product TEXT,
    vulnerability_name TEXT,
    date_added DATE NOT NULL,
    due_date DATE,
    known_ransomware_use BOOLEAN NOT NULL DEFAULT FALSE,
    notes TEXT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(advisory_id, cve_id)
);
-- FIX: IF NOT EXISTS on all indexes so re-running the migration is idempotent.
CREATE INDEX IF NOT EXISTS idx_kev_flags_advisory ON vuln.kev_flags(advisory_id);
CREATE INDEX IF NOT EXISTS idx_kev_flags_cve ON vuln.kev_flags(cve_id);
CREATE INDEX IF NOT EXISTS idx_kev_flags_date ON vuln.kev_flags(date_added);
-- Source states table (per-source sync cursor tracking, one row per source).
CREATE TABLE IF NOT EXISTS vuln.source_states (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    source_id UUID NOT NULL REFERENCES vuln.sources(id) UNIQUE,
    cursor TEXT,
    last_sync_at TIMESTAMPTZ,
    last_success_at TIMESTAMPTZ,
    last_error TEXT,
    sync_count BIGINT NOT NULL DEFAULT 0,
    error_count INT NOT NULL DEFAULT 0,
    metadata JSONB NOT NULL DEFAULT '{}',
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_source_states_source ON vuln.source_states(source_id);
-- Merge events table (advisory merge audit log; converted to a partitioned
-- table in migration 006).
CREATE TABLE IF NOT EXISTS vuln.merge_events (
    id BIGSERIAL PRIMARY KEY,
    advisory_id UUID NOT NULL REFERENCES vuln.advisories(id),
    source_id UUID REFERENCES vuln.sources(id),
    event_type TEXT NOT NULL,
    old_value JSONB,
    new_value JSONB,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_merge_events_advisory ON vuln.merge_events(advisory_id);
CREATE INDEX IF NOT EXISTS idx_merge_events_created ON vuln.merge_events(created_at);
-- Function to update the advisories search vector. Weights: vuln id (A) >
-- title (B) > summary (C) > description (D).
CREATE OR REPLACE FUNCTION vuln.update_advisory_search_vector()
RETURNS TRIGGER AS $$
BEGIN
    NEW.search_vector =
        setweight(to_tsvector('english', COALESCE(NEW.primary_vuln_id, '')), 'A') ||
        setweight(to_tsvector('english', COALESCE(NEW.title, '')), 'B') ||
        setweight(to_tsvector('english', COALESCE(NEW.summary, '')), 'C') ||
        setweight(to_tsvector('english', COALESCE(NEW.description, '')), 'D');
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- FIX: DROP TRIGGER IF EXISTS before each CREATE TRIGGER so re-running the
-- migration is idempotent (plain CREATE TRIGGER errors if the trigger exists),
-- matching the IF NOT EXISTS style used for tables and indexes above.
DROP TRIGGER IF EXISTS trg_advisories_search_vector ON vuln.advisories;
CREATE TRIGGER trg_advisories_search_vector
    BEFORE INSERT OR UPDATE ON vuln.advisories
    FOR EACH ROW EXECUTE FUNCTION vuln.update_advisory_search_vector();
-- Update timestamp function: keeps updated_at current on UPDATE.
CREATE OR REPLACE FUNCTION vuln.update_updated_at()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- updated_at triggers for the mutable tables.
DROP TRIGGER IF EXISTS trg_sources_updated_at ON vuln.sources;
CREATE TRIGGER trg_sources_updated_at
    BEFORE UPDATE ON vuln.sources
    FOR EACH ROW EXECUTE FUNCTION vuln.update_updated_at();
DROP TRIGGER IF EXISTS trg_advisories_updated_at ON vuln.advisories;
CREATE TRIGGER trg_advisories_updated_at
    BEFORE UPDATE ON vuln.advisories
    FOR EACH ROW EXECUTE FUNCTION vuln.update_updated_at();
DROP TRIGGER IF EXISTS trg_source_states_updated_at ON vuln.source_states;
CREATE TRIGGER trg_source_states_updated_at
    BEFORE UPDATE ON vuln.source_states
    FOR EACH ROW EXECUTE FUNCTION vuln.update_updated_at();

View File

@@ -0,0 +1,21 @@
-- Link-Not-Merge linkset cache (PostgreSQL)
-- Stores deterministic cache entries for advisory linksets per tenant/source/advisory.
CREATE TABLE IF NOT EXISTS vuln.lnm_linkset_cache (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    source TEXT NOT NULL,
    advisory_id TEXT NOT NULL,
    observations TEXT[] NOT NULL DEFAULT '{}',   -- observation ids contributing to the linkset
    normalized JSONB,                            -- normalized linkset payload
    conflicts JSONB,                             -- recorded field-level conflicts
    provenance JSONB,
    confidence DOUBLE PRECISION,
    built_by_job_id TEXT,                        -- job that materialized this entry
    created_at TIMESTAMPTZ NOT NULL,
    -- One cache entry per tenant/advisory/source combination.
    CONSTRAINT uq_lnm_linkset_cache UNIQUE (tenant_id, advisory_id, source)
);
-- Supports tenant-scoped, newest-first listing with a stable tiebreak.
CREATE INDEX IF NOT EXISTS idx_lnm_linkset_cache_order
    ON vuln.lnm_linkset_cache (tenant_id, created_at DESC, advisory_id, source);

View File

@@ -0,0 +1,23 @@
-- Concelier Postgres Migration 004: Source documents and payload storage (Mongo replacement)
-- FIX: ensure the target schema exists before creating objects in it.
-- Previously the schema was only created in migration 005, so running 004
-- first failed with "schema concelier does not exist".
CREATE SCHEMA IF NOT EXISTS concelier;
CREATE TABLE IF NOT EXISTS concelier.source_documents (
    id UUID NOT NULL,
    source_id UUID NOT NULL,
    source_name TEXT NOT NULL,
    uri TEXT NOT NULL,
    sha256 TEXT NOT NULL,                 -- content hash of payload
    status TEXT NOT NULL,
    content_type TEXT,
    headers_json JSONB,                   -- captured HTTP response headers
    metadata_json JSONB,
    etag TEXT,                            -- HTTP caching validators
    last_modified TIMESTAMPTZ,
    payload BYTEA NOT NULL,               -- raw document bytes
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    expires_at TIMESTAMPTZ,               -- optional retention cutoff
    -- Natural key: a document is identified by its source and URI.
    CONSTRAINT pk_source_documents PRIMARY KEY (source_name, uri)
);
CREATE INDEX IF NOT EXISTS idx_source_documents_source_id ON concelier.source_documents(source_id);
CREATE INDEX IF NOT EXISTS idx_source_documents_status ON concelier.source_documents(status);

View File

@@ -0,0 +1,69 @@
-- Concelier Migration 005: Postgres equivalents for DTO, export, PSIRT/JP flags, and change history.
-- NOTE(review): these definitions mirror the SECTION 10 tables in the combined
-- bootstrap script; keep both in sync when altering either.
CREATE SCHEMA IF NOT EXISTS concelier;
-- Parsed DTO payloads, one per source document (PK is document_id).
CREATE TABLE IF NOT EXISTS concelier.dtos (
    id UUID NOT NULL,
    document_id UUID NOT NULL,
    source_name TEXT NOT NULL,
    format TEXT NOT NULL,
    payload_json JSONB NOT NULL,
    schema_version TEXT NOT NULL DEFAULT '',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    validated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    CONSTRAINT pk_concelier_dtos PRIMARY KEY (document_id)
);
CREATE INDEX IF NOT EXISTS idx_concelier_dtos_source ON concelier.dtos(source_name, created_at DESC);
-- Exporter cursor/digest state, keyed by an opaque exporter id.
CREATE TABLE IF NOT EXISTS concelier.export_states (
    id TEXT NOT NULL,
    export_cursor TEXT NOT NULL,
    last_full_digest TEXT,
    last_delta_digest TEXT,
    base_export_id TEXT,
    base_digest TEXT,
    target_repository TEXT,
    files JSONB NOT NULL,                 -- manifest of exported files
    exporter_version TEXT NOT NULL,
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    CONSTRAINT pk_concelier_export_states PRIMARY KEY (id)
);
-- Vendor PSIRT markers per advisory; one row per (advisory, vendor).
CREATE TABLE IF NOT EXISTS concelier.psirt_flags (
    advisory_id TEXT NOT NULL,
    vendor TEXT NOT NULL,
    source_name TEXT NOT NULL,
    external_id TEXT,
    recorded_at TIMESTAMPTZ NOT NULL,
    CONSTRAINT pk_concelier_psirt_flags PRIMARY KEY (advisory_id, vendor)
);
CREATE INDEX IF NOT EXISTS idx_concelier_psirt_source ON concelier.psirt_flags(source_name, recorded_at DESC);
-- Japan-specific advisory flags.
-- NOTE(review): PK is advisory_key alone — only one JP flag per advisory
-- regardless of source_name/category; confirm this is intended.
CREATE TABLE IF NOT EXISTS concelier.jp_flags (
    advisory_key TEXT NOT NULL,
    source_name TEXT NOT NULL,
    category TEXT NOT NULL,
    vendor_status TEXT,
    created_at TIMESTAMPTZ NOT NULL,
    CONSTRAINT pk_concelier_jp_flags PRIMARY KEY (advisory_key)
);
-- Append-only advisory change log with current/previous snapshots and a diff.
CREATE TABLE IF NOT EXISTS concelier.change_history (
    id UUID NOT NULL,
    source_name TEXT NOT NULL,
    advisory_key TEXT NOT NULL,
    document_id UUID NOT NULL,
    document_hash TEXT NOT NULL,
    snapshot_hash TEXT NOT NULL,
    previous_snapshot_hash TEXT,
    snapshot JSONB NOT NULL,
    previous_snapshot JSONB,
    changes JSONB NOT NULL,
    created_at TIMESTAMPTZ NOT NULL,
    CONSTRAINT pk_concelier_change_history PRIMARY KEY (id)
);
CREATE INDEX IF NOT EXISTS idx_concelier_change_history_advisory
    ON concelier.change_history(advisory_key, created_at DESC);

View File

@@ -0,0 +1,136 @@
-- Vuln Schema Migration 006: Partition Merge Events Table
-- Sprint: SPRINT_3422_0001_0001 - Time-Based Partitioning
-- Category: C (infrastructure change, requires maintenance window)
--
-- Purpose: Convert vuln.merge_events to a partitioned table for improved
-- query performance on time-range queries and easier data lifecycle management.
--
-- Partition strategy: Monthly by created_at
BEGIN;
-- ============================================================================
-- Step 1: Create partitioned merge_events table
-- ============================================================================
-- Mirrors the vuln.merge_events columns but declared PARTITION BY RANGE.
-- The PK must include the partition key (created_at) per PostgreSQL rules,
-- and the FKs to advisories/sources are intentionally omitted (see Step 6).
CREATE TABLE IF NOT EXISTS vuln.merge_events_partitioned (
    id BIGSERIAL,
    advisory_id UUID NOT NULL,
    source_id UUID,
    event_type TEXT NOT NULL,
    old_value JSONB,
    new_value JSONB,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    PRIMARY KEY (id, created_at)
) PARTITION BY RANGE (created_at);
-- ============================================================================
-- Step 2: Create initial partitions (past 12 months + 3 months ahead)
-- ============================================================================
DO $$
DECLARE
    v_start DATE;
    v_end DATE;
    v_partition_name TEXT;
BEGIN
    -- Start from 12 months ago (merge events accumulate fast)
    v_start := date_trunc('month', NOW() - INTERVAL '12 months')::DATE;
    -- Create monthly partitions named merge_events_YYYY_MM until 3 months ahead.
    WHILE v_start <= date_trunc('month', NOW() + INTERVAL '3 months')::DATE LOOP
        v_end := (v_start + INTERVAL '1 month')::DATE;
        v_partition_name := 'merge_events_' || to_char(v_start, 'YYYY_MM');
        -- Guarded by a pg_class lookup so the loop is idempotent on re-run.
        IF NOT EXISTS (
            SELECT 1 FROM pg_class c
            JOIN pg_namespace n ON c.relnamespace = n.oid
            WHERE n.nspname = 'vuln' AND c.relname = v_partition_name
        ) THEN
            EXECUTE format(
                'CREATE TABLE vuln.%I PARTITION OF vuln.merge_events_partitioned
                 FOR VALUES FROM (%L) TO (%L)',
                v_partition_name, v_start, v_end
            );
            RAISE NOTICE 'Created partition vuln.%', v_partition_name;
        END IF;
        v_start := v_end;
    END LOOP;
END
$$;
-- Create default partition for any data outside defined ranges
CREATE TABLE IF NOT EXISTS vuln.merge_events_default
    PARTITION OF vuln.merge_events_partitioned DEFAULT;
-- ============================================================================
-- Step 3: Create indexes on partitioned table
-- ============================================================================
-- Indexes declared on the parent are propagated to every partition.
CREATE INDEX IF NOT EXISTS ix_merge_events_part_advisory
    ON vuln.merge_events_partitioned (advisory_id);
CREATE INDEX IF NOT EXISTS ix_merge_events_part_source
    ON vuln.merge_events_partitioned (source_id)
    WHERE source_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS ix_merge_events_part_event_type
    ON vuln.merge_events_partitioned (event_type);
-- BRIN index for time-range queries (cheap for append-mostly timestamps).
CREATE INDEX IF NOT EXISTS brin_merge_events_part_created
    ON vuln.merge_events_partitioned USING BRIN (created_at)
    WITH (pages_per_range = 128);
-- ============================================================================
-- Step 4: Migrate data from old table to partitioned table
-- ============================================================================
-- ON CONFLICT DO NOTHING makes the copy re-runnable without duplicating rows.
-- NOTE(review): migration 006b repeats this copy and swap with preconditions;
-- after this script has run, 006b's "partitioned table exists" check will fail
-- by design — run one or the other, not both.
INSERT INTO vuln.merge_events_partitioned (
    id, advisory_id, source_id, event_type, old_value, new_value, created_at
)
SELECT
    id, advisory_id, source_id, event_type, old_value, new_value, created_at
FROM vuln.merge_events
ON CONFLICT DO NOTHING;
-- ============================================================================
-- Step 5: Swap tables
-- ============================================================================
-- Drop foreign key constraint first (advisory_id references advisories)
ALTER TABLE vuln.merge_events DROP CONSTRAINT IF EXISTS merge_events_advisory_id_fkey;
ALTER TABLE vuln.merge_events DROP CONSTRAINT IF EXISTS merge_events_source_id_fkey;
-- Rename old table to backup
ALTER TABLE IF EXISTS vuln.merge_events RENAME TO merge_events_old;
-- Rename partitioned table to production name
ALTER TABLE vuln.merge_events_partitioned RENAME TO merge_events;
-- Update the identity sequence so new inserts continue after the migrated max id.
-- BUGFIX: ALTER TABLE ... RENAME does not rename an owned sequence, so after the
-- swap the new table's sequence is still vuln.merge_events_partitioned_id_seq,
-- while vuln.merge_events_id_seq (the name used before) now belongs to
-- merge_events_old. Resolve the correct sequence via pg_get_serial_sequence.
DO $$
DECLARE
    v_max_id BIGINT;
    v_seq TEXT;
BEGIN
    SELECT COALESCE(MAX(id), 0) INTO v_max_id FROM vuln.merge_events;
    v_seq := pg_get_serial_sequence('vuln.merge_events', 'id');
    IF v_seq IS NOT NULL THEN
        -- false = next nextval() returns exactly v_max_id + 1
        PERFORM setval(v_seq, v_max_id + 1, false);
    END IF;
END
$$;
-- ============================================================================
-- Step 6: Add comment about partitioning strategy
-- ============================================================================
COMMENT ON TABLE vuln.merge_events IS
    'Advisory merge event log. Partitioned monthly by created_at. FK to advisories removed for partition support.';
COMMIT;
-- ============================================================================
-- Cleanup (run manually after validation)
-- ============================================================================
-- After confirming the migration is successful, drop the old table:
-- DROP TABLE IF EXISTS vuln.merge_events_old;

View File

@@ -0,0 +1,148 @@
-- Vuln Schema Migration 006b: Complete merge_events Partition Migration
-- Sprint: SPRINT_3422_0001_0001 - Time-Based Partitioning
-- Task: 3.3 - Migrate data from existing table
-- Category: C (data migration, requires maintenance window)
--
-- IMPORTANT: Run this during maintenance window AFTER 006_partition_merge_events.sql
-- Prerequisites:
-- 1. Stop concelier/vuln services (pause advisory merge operations)
-- 2. Verify partitioned table exists: \d+ vuln.merge_events_partitioned
--
-- Execution time depends on data volume. For large tables (>1M rows), consider
-- batched migration (see bottom of file).
BEGIN;
-- ============================================================================
-- Step 1: Verify partitioned table exists
-- ============================================================================
-- Fails fast if 006 has not created vuln.merge_events_partitioned.
-- NOTE(review): 006 as written also performs the swap, after which this check
-- raises — this script assumes a 006 variant that only created the table.
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM pg_class c
        JOIN pg_namespace n ON c.relnamespace = n.oid
        WHERE n.nspname = 'vuln' AND c.relname = 'merge_events_partitioned'
    ) THEN
        RAISE EXCEPTION 'Partitioned table vuln.merge_events_partitioned does not exist. Run 006_partition_merge_events.sql first.';
    END IF;
END
$$;
-- ============================================================================
-- Step 2: Record row counts for verification
-- ============================================================================
DO $$
DECLARE
    v_source_count BIGINT;
BEGIN
    SELECT COUNT(*) INTO v_source_count FROM vuln.merge_events;
    RAISE NOTICE 'Source table row count: %', v_source_count;
END
$$;
-- ============================================================================
-- Step 3: Migrate data from old table to partitioned table
-- ============================================================================
-- ON CONFLICT DO NOTHING keeps the copy re-runnable without duplicates.
INSERT INTO vuln.merge_events_partitioned (
    id, advisory_id, source_id, event_type, old_value, new_value, created_at
)
SELECT
    id, advisory_id, source_id, event_type, old_value, new_value, created_at
FROM vuln.merge_events
ON CONFLICT DO NOTHING;
-- ============================================================================
-- Step 4: Verify row counts match
-- ============================================================================
-- WARNING (not EXCEPTION) on mismatch so the operator can inspect conflicts
-- without aborting the whole transaction.
DO $$
DECLARE
    v_source_count BIGINT;
    v_target_count BIGINT;
BEGIN
    SELECT COUNT(*) INTO v_source_count FROM vuln.merge_events;
    SELECT COUNT(*) INTO v_target_count FROM vuln.merge_events_partitioned;
    IF v_source_count <> v_target_count THEN
        RAISE WARNING 'Row count mismatch: source=% target=%. Check for conflicts.', v_source_count, v_target_count;
    ELSE
        RAISE NOTICE 'Row counts match: % rows migrated successfully', v_target_count;
    END IF;
END
$$;
-- ============================================================================
-- Step 5: Drop foreign key constraints referencing this table
-- ============================================================================
-- Drop FK constraints first (advisory_id references advisories)
ALTER TABLE vuln.merge_events DROP CONSTRAINT IF EXISTS merge_events_advisory_id_fkey;
ALTER TABLE vuln.merge_events DROP CONSTRAINT IF EXISTS merge_events_source_id_fkey;
-- ============================================================================
-- Step 6: Swap tables
-- ============================================================================
-- Rename old table to backup
ALTER TABLE IF EXISTS vuln.merge_events RENAME TO merge_events_old;
-- Rename partitioned table to production name
ALTER TABLE vuln.merge_events_partitioned RENAME TO merge_events;
-- Update the identity sequence so new inserts continue after the migrated max id.
-- BUGFIX: the renamed table's owned sequence keeps its original
-- merge_events_partitioned_id_seq name (RENAME does not touch sequences), so
-- the old lookup of 'vuln.merge_events_id_seq' adjusted the WRONG sequence
-- (the one now owned by merge_events_old). pg_get_serial_sequence resolves the
-- sequence actually backing the new table's id column.
DO $$
DECLARE
    v_max_id BIGINT;
    v_seq TEXT;
BEGIN
    SELECT COALESCE(MAX(id), 0) INTO v_max_id FROM vuln.merge_events;
    v_seq := pg_get_serial_sequence('vuln.merge_events', 'id');
    IF v_seq IS NOT NULL THEN
        PERFORM setval(v_seq, v_max_id + 1, false);
    END IF;
END
$$;
-- ============================================================================
-- Step 7: Add comment about partitioning strategy
-- ============================================================================
-- BUGFIX: COMMENT ON ... IS requires a string constant; the original
-- concatenated NOW()::TEXT into the literal, which is a syntax error in
-- PostgreSQL. Build the statement dynamically with EXECUTE format(%L).
DO $$
BEGIN
    EXECUTE format(
        'COMMENT ON TABLE vuln.merge_events IS %L',
        'Advisory merge event log. Partitioned monthly by created_at. '
        || 'FK to advisories removed for partition support. Migrated: ' || NOW()::TEXT
    );
END
$$;
COMMIT;
-- ============================================================================
-- Cleanup (run manually after validation)
-- ============================================================================
-- After confirming the migration is successful (wait 24-48h), drop the old table:
-- DROP TABLE IF EXISTS vuln.merge_events_old;
-- ============================================================================
-- Batched Migration Alternative (for very large tables)
-- ============================================================================
-- If the table has >10M rows, consider this batched approach instead:
--
-- DO $$
-- DECLARE
-- v_batch_size INT := 100000;
-- v_offset INT := 0;
-- v_migrated INT := 0;
-- BEGIN
-- LOOP
-- INSERT INTO vuln.merge_events_partitioned
-- SELECT * FROM vuln.merge_events
-- ORDER BY id
-- OFFSET v_offset LIMIT v_batch_size
-- ON CONFLICT DO NOTHING;
--
-- GET DIAGNOSTICS v_migrated = ROW_COUNT;
-- EXIT WHEN v_migrated < v_batch_size;
-- v_offset := v_offset + v_batch_size;
-- RAISE NOTICE 'Migrated % rows total', v_offset;
-- COMMIT;
-- END LOOP;
-- END
-- $$;

View File

@@ -0,0 +1,141 @@
-- Vuln Schema Migration 007: Generated Columns for Advisory Hot Fields
-- Sprint: SPRINT_3423_0001_0001 - Generated Columns
-- Category: A (safe, can run at startup)
--
-- Purpose: Extract frequently queried fields from JSONB provenance column
-- as generated columns for efficient indexing and filtering.
--
-- Performance Impact: Queries filtering on these fields will use B-tree indexes
-- instead of JSONB operators, improving query time by 10-50x for dashboard queries.
BEGIN;
-- ============================================================================
-- Step 1: Add generated columns to vuln.advisories
-- ============================================================================
-- STORED generated columns materialize hot JSONB fields so B-tree indexes can
-- serve the filters below instead of JSONB operators on the GIN index.
-- Extract source_key from provenance for fast source filtering
ALTER TABLE vuln.advisories
    ADD COLUMN IF NOT EXISTS provenance_source_key TEXT
    GENERATED ALWAYS AS (provenance->>'source_key') STORED;
-- Extract feed_id from provenance for feed correlation
ALTER TABLE vuln.advisories
    ADD COLUMN IF NOT EXISTS provenance_feed_id TEXT
    GENERATED ALWAYS AS (provenance->>'feed_id') STORED;
-- Extract ingestion timestamp from provenance for freshness queries.
-- NOTE(review): the ::TIMESTAMPTZ cast will fail the whole row write if
-- provenance->>'ingested_at' is present but not a valid timestamp — confirm
-- upstream always writes ISO-8601 or NULL.
ALTER TABLE vuln.advisories
    ADD COLUMN IF NOT EXISTS provenance_ingested_at TIMESTAMPTZ
    GENERATED ALWAYS AS ((provenance->>'ingested_at')::TIMESTAMPTZ) STORED;
-- ============================================================================
-- Step 2: Create indexes on generated columns
-- ============================================================================
-- Partial (IS NOT NULL) indexes keep size down for sparsely populated fields.
CREATE INDEX IF NOT EXISTS ix_advisories_provenance_source
    ON vuln.advisories (provenance_source_key)
    WHERE provenance_source_key IS NOT NULL;
CREATE INDEX IF NOT EXISTS ix_advisories_provenance_feed
    ON vuln.advisories (provenance_feed_id)
    WHERE provenance_feed_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS ix_advisories_provenance_ingested
    ON vuln.advisories (provenance_ingested_at DESC)
    WHERE provenance_ingested_at IS NOT NULL;
-- Composite index for common dashboard query pattern
CREATE INDEX IF NOT EXISTS ix_advisories_severity_ingested
    ON vuln.advisories (severity, provenance_ingested_at DESC)
    WHERE provenance_ingested_at IS NOT NULL;
-- ============================================================================
-- Step 3: Add generated columns to vuln.advisory_affected for PURL parsing
-- ============================================================================
-- Extract namespace from purl for namespace-based filtering
-- purl format: pkg:type/namespace/name@version
ALTER TABLE vuln.advisory_affected
    ADD COLUMN IF NOT EXISTS purl_type TEXT
    GENERATED ALWAYS AS (
        CASE
            WHEN purl IS NOT NULL AND purl LIKE 'pkg:%'
            -- everything after 'pkg:' up to the first '/' is the type
            THEN split_part(split_part(purl, ':', 2), '/', 1)
            ELSE NULL
        END
    ) STORED;
-- Extract package name (without namespace) for faster lookups.
-- NOTE(review): split_part with a negative index requires PostgreSQL 14+;
-- and purls with a literal '@' in the namespace (e.g. unencoded npm scopes
-- like pkg:npm/@scope/name@1.0) would be truncated at the first '@' —
-- assumes namespaces are percent-encoded per the purl spec. Confirm both.
ALTER TABLE vuln.advisory_affected
    ADD COLUMN IF NOT EXISTS purl_name TEXT
    GENERATED ALWAYS AS (
        CASE
            WHEN purl IS NOT NULL AND purl LIKE 'pkg:%'
            -- strip 'pkg:', drop '@version', keep last '/'-separated segment
            THEN split_part(split_part(split_part(purl, ':', 2), '@', 1), '/', -1)
            ELSE NULL
        END
    ) STORED;
-- ============================================================================
-- Step 4: Create indexes on advisory_affected generated columns
-- ============================================================================
CREATE INDEX IF NOT EXISTS ix_advisory_affected_purl_type
    ON vuln.advisory_affected (purl_type)
    WHERE purl_type IS NOT NULL;
CREATE INDEX IF NOT EXISTS ix_advisory_affected_purl_name
    ON vuln.advisory_affected (purl_name)
    WHERE purl_name IS NOT NULL;
-- Composite index for ecosystem + purl_type queries
CREATE INDEX IF NOT EXISTS ix_advisory_affected_ecosystem_type
    ON vuln.advisory_affected (ecosystem, purl_type)
    WHERE purl_type IS NOT NULL;
-- ============================================================================
-- Step 5: Add comment documenting the generated columns
-- ============================================================================
COMMENT ON COLUMN vuln.advisories.provenance_source_key IS
    'Generated: Extracted from provenance->>''source_key'' for fast source filtering';
COMMENT ON COLUMN vuln.advisories.provenance_feed_id IS
    'Generated: Extracted from provenance->>''feed_id'' for feed correlation';
COMMENT ON COLUMN vuln.advisories.provenance_ingested_at IS
    'Generated: Extracted from provenance->>''ingested_at'' for freshness queries';
COMMENT ON COLUMN vuln.advisory_affected.purl_type IS
    'Generated: Extracted package type from purl (npm, maven, pypi, etc.)';
COMMENT ON COLUMN vuln.advisory_affected.purl_name IS
    'Generated: Extracted package name from purl (without namespace/version)';
COMMIT;
-- ============================================================================
-- Example Queries (showing performance improvement)
-- ============================================================================
/*
-- Before (uses GIN index, slower):
SELECT * FROM vuln.advisories
WHERE provenance->>'source_key' = 'nvd'
AND severity = 'critical'
ORDER BY (provenance->>'ingested_at')::timestamptz DESC
LIMIT 100;
-- After (uses B-tree indexes, faster):
SELECT * FROM vuln.advisories
WHERE provenance_source_key = 'nvd'
AND severity = 'critical'
ORDER BY provenance_ingested_at DESC
LIMIT 100;
-- Package type filtering (new capability):
SELECT * FROM vuln.advisory_affected
WHERE purl_type = 'npm'
AND ecosystem = 'npm';
*/

View File

@@ -0,0 +1,63 @@
-- Concelier Migration 008: Sync Ledger for Federation
-- Sprint: SPRINT_8200_0014_0001_DB_sync_ledger_schema
-- Task: SYNC-8200-002
-- Creates sync_ledger and site_policy tables for federation cursor tracking

-- Helper function for updated_at triggers.
-- Shared by later migrations (009+); CREATE OR REPLACE keeps it idempotent.
CREATE OR REPLACE FUNCTION vuln.update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Track federation sync state per remote site
CREATE TABLE IF NOT EXISTS vuln.sync_ledger (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    site_id TEXT NOT NULL, -- Remote site identifier (e.g., "site-us-west", "airgap-dc2")
    cursor TEXT NOT NULL, -- Opaque cursor (usually ISO8601 timestamp#sequence)
    bundle_hash TEXT NOT NULL, -- SHA256 of imported bundle
    items_count INT NOT NULL DEFAULT 0, -- Number of items in bundle
    signed_at TIMESTAMPTZ NOT NULL, -- When bundle was signed by remote
    imported_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- A cursor may only be imported once per site; a bundle only once globally.
    CONSTRAINT uq_sync_ledger_site_cursor UNIQUE (site_id, cursor),
    CONSTRAINT uq_sync_ledger_bundle UNIQUE (bundle_hash)
);
CREATE INDEX IF NOT EXISTS idx_sync_ledger_site ON vuln.sync_ledger(site_id);
CREATE INDEX IF NOT EXISTS idx_sync_ledger_site_time ON vuln.sync_ledger(site_id, signed_at DESC);
COMMENT ON TABLE vuln.sync_ledger IS 'Federation sync cursor tracking per remote site';
COMMENT ON COLUMN vuln.sync_ledger.cursor IS 'Position marker for incremental sync (monotonically increasing)';
COMMENT ON COLUMN vuln.sync_ledger.site_id IS 'Remote site identifier for federation sync';
COMMENT ON COLUMN vuln.sync_ledger.bundle_hash IS 'SHA256 hash of imported bundle for deduplication';

-- Site federation policies
CREATE TABLE IF NOT EXISTS vuln.site_policy (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    site_id TEXT NOT NULL UNIQUE,
    display_name TEXT,
    allowed_sources TEXT[] NOT NULL DEFAULT '{}', -- Empty = allow all
    denied_sources TEXT[] NOT NULL DEFAULT '{}',
    max_bundle_size_mb INT NOT NULL DEFAULT 100,
    max_items_per_bundle INT NOT NULL DEFAULT 10000,
    require_signature BOOLEAN NOT NULL DEFAULT TRUE,
    allowed_signers TEXT[] NOT NULL DEFAULT '{}', -- Key IDs or issuers
    enabled BOOLEAN NOT NULL DEFAULT TRUE,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_site_policy_enabled ON vuln.site_policy(enabled) WHERE enabled = TRUE;
COMMENT ON TABLE vuln.site_policy IS 'Per-site federation governance policies';
COMMENT ON COLUMN vuln.site_policy.allowed_sources IS 'Source keys to allow; empty array allows all sources';
COMMENT ON COLUMN vuln.site_policy.denied_sources IS 'Source keys to deny; takes precedence over allowed';
COMMENT ON COLUMN vuln.site_policy.allowed_signers IS 'Signing key IDs or issuer patterns allowed for bundle verification';

-- Trigger for automatic updated_at.
-- FIX: drop first so the migration is idempotent like the IF NOT EXISTS
-- statements above -- plain CREATE TRIGGER errors on re-run.
DROP TRIGGER IF EXISTS trg_site_policy_updated ON vuln.site_policy;
CREATE TRIGGER trg_site_policy_updated
    BEFORE UPDATE ON vuln.site_policy
    FOR EACH ROW EXECUTE FUNCTION vuln.update_timestamp();

View File

@@ -0,0 +1,61 @@
-- Concelier Migration 009: Advisory Canonical Table
-- Sprint: SPRINT_8200_0012_0002_DB_canonical_source_edge_schema
-- Task: SCHEMA-8200-003
-- Creates deduplicated canonical advisories with merge_hash

-- Deduplicated canonical advisory records
CREATE TABLE IF NOT EXISTS vuln.advisory_canonical (
    -- Identity
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    -- Merge key components (used to compute merge_hash)
    cve TEXT NOT NULL,
    affects_key TEXT NOT NULL, -- normalized purl or cpe
    version_range JSONB, -- structured: { introduced, fixed, last_affected }
    weakness TEXT[] NOT NULL DEFAULT '{}', -- sorted CWE array
    -- Computed identity
    merge_hash TEXT NOT NULL, -- SHA256 of normalized (cve|affects|range|weakness|lineage)
    -- Metadata
    status TEXT NOT NULL DEFAULT 'active' CHECK (status IN ('active', 'stub', 'withdrawn')),
    severity TEXT CHECK (severity IN ('critical', 'high', 'medium', 'low', 'none', 'unknown')),
    epss_score NUMERIC(5,4), -- EPSS probability (0.0000-1.0000)
    exploit_known BOOLEAN NOT NULL DEFAULT FALSE,
    -- Content (for stub degradation)
    title TEXT,
    summary TEXT,
    -- Audit
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints: merge_hash is the deduplication key for the whole table.
    CONSTRAINT uq_advisory_canonical_merge_hash UNIQUE (merge_hash)
);

-- Primary lookup indexes
CREATE INDEX IF NOT EXISTS idx_advisory_canonical_cve ON vuln.advisory_canonical(cve);
CREATE INDEX IF NOT EXISTS idx_advisory_canonical_affects ON vuln.advisory_canonical(affects_key);
CREATE INDEX IF NOT EXISTS idx_advisory_canonical_merge_hash ON vuln.advisory_canonical(merge_hash);
-- Filtered indexes for common queries
CREATE INDEX IF NOT EXISTS idx_advisory_canonical_status ON vuln.advisory_canonical(status) WHERE status = 'active';
CREATE INDEX IF NOT EXISTS idx_advisory_canonical_severity ON vuln.advisory_canonical(severity) WHERE severity IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_advisory_canonical_exploit ON vuln.advisory_canonical(exploit_known) WHERE exploit_known = TRUE;
-- Time-based index for incremental queries
CREATE INDEX IF NOT EXISTS idx_advisory_canonical_updated ON vuln.advisory_canonical(updated_at DESC);

-- Trigger for automatic updated_at (vuln.update_timestamp() is created by
-- migration 008, which must run before this one).
-- FIX: drop first so the migration is idempotent like the IF NOT EXISTS
-- statements above -- plain CREATE TRIGGER errors on re-run.
DROP TRIGGER IF EXISTS trg_advisory_canonical_updated ON vuln.advisory_canonical;
CREATE TRIGGER trg_advisory_canonical_updated
    BEFORE UPDATE ON vuln.advisory_canonical
    FOR EACH ROW EXECUTE FUNCTION vuln.update_timestamp();

-- Comments
COMMENT ON TABLE vuln.advisory_canonical IS 'Deduplicated canonical advisories with semantic merge_hash';
COMMENT ON COLUMN vuln.advisory_canonical.merge_hash IS 'Deterministic hash of (cve, affects_key, version_range, weakness, patch_lineage)';
COMMENT ON COLUMN vuln.advisory_canonical.affects_key IS 'Normalized PURL or CPE identifying the affected package';
COMMENT ON COLUMN vuln.advisory_canonical.status IS 'active=full record, stub=minimal for low interest, withdrawn=no longer valid';
COMMENT ON COLUMN vuln.advisory_canonical.epss_score IS 'EPSS exploit prediction probability (0.0000-1.0000)';

View File

@@ -0,0 +1,64 @@
-- Concelier Migration 010: Advisory Source Edge Table
-- Sprint: SPRINT_8200_0012_0002_DB_canonical_source_edge_schema
-- Task: SCHEMA-8200-004
-- Creates source edge linking canonical advisories to source documents

-- Source edge linking canonical advisory to source documents
CREATE TABLE IF NOT EXISTS vuln.advisory_source_edge (
    -- Identity
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    -- Relationships
    -- Edges are deleted with their canonical (CASCADE); a source cannot be
    -- deleted while edges still reference it (RESTRICT).
    canonical_id UUID NOT NULL REFERENCES vuln.advisory_canonical(id) ON DELETE CASCADE,
    source_id UUID NOT NULL REFERENCES vuln.sources(id) ON DELETE RESTRICT,
    -- Source document
    source_advisory_id TEXT NOT NULL, -- vendor's advisory ID (DSA-5678, RHSA-2024:1234)
    source_doc_hash TEXT NOT NULL, -- SHA256 of raw source document
    -- VEX-style status (NULL allowed: CHECK constraints pass on NULL, for
    -- sources that do not express a status)
    vendor_status TEXT CHECK (vendor_status IN (
        'affected', 'not_affected', 'fixed', 'under_investigation'
    )),
    -- Precedence (lower = higher priority)
    precedence_rank INT NOT NULL DEFAULT 100,
    -- DSSE signature envelope
    dsse_envelope JSONB, -- { payloadType, payload, signatures[] }
    -- Content snapshot
    raw_payload JSONB, -- original advisory document
    -- Audit
    fetched_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints: one edge per (canonical, source, document version), so a
    -- re-fetch of an unchanged document cannot create a duplicate edge.
    CONSTRAINT uq_advisory_source_edge_unique
        UNIQUE (canonical_id, source_id, source_doc_hash)
);

-- Primary lookup indexes
CREATE INDEX IF NOT EXISTS idx_source_edge_canonical ON vuln.advisory_source_edge(canonical_id);
CREATE INDEX IF NOT EXISTS idx_source_edge_source ON vuln.advisory_source_edge(source_id);
CREATE INDEX IF NOT EXISTS idx_source_edge_advisory_id ON vuln.advisory_source_edge(source_advisory_id);
-- Join optimization index
CREATE INDEX IF NOT EXISTS idx_source_edge_canonical_source ON vuln.advisory_source_edge(canonical_id, source_id);
-- Time-based index for incremental queries
CREATE INDEX IF NOT EXISTS idx_source_edge_fetched ON vuln.advisory_source_edge(fetched_at DESC);
-- GIN index for JSONB queries on dsse_envelope
-- jsonb_path_ops is smaller/faster than the default opclass but only supports
-- containment (@>) queries, not key-existence operators.
CREATE INDEX IF NOT EXISTS idx_source_edge_dsse_gin ON vuln.advisory_source_edge
    USING GIN (dsse_envelope jsonb_path_ops);

-- Comments
COMMENT ON TABLE vuln.advisory_source_edge IS 'Links canonical advisories to source documents with signatures';
COMMENT ON COLUMN vuln.advisory_source_edge.canonical_id IS 'Reference to deduplicated canonical advisory';
COMMENT ON COLUMN vuln.advisory_source_edge.source_id IS 'Reference to feed source';
COMMENT ON COLUMN vuln.advisory_source_edge.source_advisory_id IS 'Vendor advisory ID (e.g., DSA-5678, RHSA-2024:1234)';
COMMENT ON COLUMN vuln.advisory_source_edge.precedence_rank IS 'Source priority: vendor=10, distro=20, osv=30, nvd=40';
COMMENT ON COLUMN vuln.advisory_source_edge.dsse_envelope IS 'DSSE envelope with signature over raw_payload';
COMMENT ON COLUMN vuln.advisory_source_edge.vendor_status IS 'VEX-style status from source';

View File

@@ -0,0 +1,116 @@
-- Concelier Migration 011: Canonical Helper Functions
-- Sprint: SPRINT_8200_0012_0002_DB_canonical_source_edge_schema
-- Task: SCHEMA-8200-005
-- Creates helper functions for canonical advisory operations

-- Function to get canonical by merge_hash (most common lookup).
-- STABLE: result depends only on table contents within the statement, so the
-- planner may inline/cache it inside larger queries.
CREATE OR REPLACE FUNCTION vuln.get_canonical_by_hash(p_merge_hash TEXT)
RETURNS vuln.advisory_canonical
LANGUAGE sql STABLE
AS $$
    SELECT * FROM vuln.advisory_canonical
    WHERE merge_hash = p_merge_hash;
$$;

-- Function to get all source edges for a canonical.
-- Ordered best-first: lowest precedence_rank wins, newest fetch breaks ties.
CREATE OR REPLACE FUNCTION vuln.get_source_edges(p_canonical_id UUID)
RETURNS SETOF vuln.advisory_source_edge
LANGUAGE sql STABLE
AS $$
    SELECT * FROM vuln.advisory_source_edge
    WHERE canonical_id = p_canonical_id
    ORDER BY precedence_rank ASC, fetched_at DESC;
$$;

-- Function to upsert canonical with merge_hash dedup.
-- On conflict the merge-key columns (cve, affects_key, version_range,
-- weakness) are deliberately not updated: a matching merge_hash implies
-- matching key components. Metadata is merged instead -- COALESCE keeps the
-- existing value when the incoming one is NULL, and exploit_known is sticky
-- (once TRUE it stays TRUE via OR).
CREATE OR REPLACE FUNCTION vuln.upsert_canonical(
    p_cve TEXT,
    p_affects_key TEXT,
    p_version_range JSONB,
    p_weakness TEXT[],
    p_merge_hash TEXT,
    p_severity TEXT DEFAULT NULL,
    p_epss_score NUMERIC DEFAULT NULL,
    p_exploit_known BOOLEAN DEFAULT FALSE,
    p_title TEXT DEFAULT NULL,
    p_summary TEXT DEFAULT NULL
)
RETURNS UUID
LANGUAGE plpgsql
AS $$
DECLARE
    v_id UUID;
BEGIN
    INSERT INTO vuln.advisory_canonical (
        cve, affects_key, version_range, weakness, merge_hash,
        severity, epss_score, exploit_known, title, summary
    )
    VALUES (
        p_cve, p_affects_key, p_version_range, p_weakness, p_merge_hash,
        p_severity, p_epss_score, p_exploit_known, p_title, p_summary
    )
    ON CONFLICT (merge_hash) DO UPDATE SET
        severity = COALESCE(EXCLUDED.severity, vuln.advisory_canonical.severity),
        epss_score = COALESCE(EXCLUDED.epss_score, vuln.advisory_canonical.epss_score),
        exploit_known = EXCLUDED.exploit_known OR vuln.advisory_canonical.exploit_known,
        title = COALESCE(EXCLUDED.title, vuln.advisory_canonical.title),
        summary = COALESCE(EXCLUDED.summary, vuln.advisory_canonical.summary),
        updated_at = NOW()
    -- RETURNING fires on both the insert and the update path, so v_id is
    -- always the surviving row's id.
    RETURNING id INTO v_id;
    RETURN v_id;
END;
$$;

-- Function to add source edge with dedup.
-- On conflict: keeps the best (lowest) precedence_rank seen so far via LEAST,
-- and fills status/envelope/payload only when previously NULL.
CREATE OR REPLACE FUNCTION vuln.add_source_edge(
    p_canonical_id UUID,
    p_source_id UUID,
    p_source_advisory_id TEXT,
    p_source_doc_hash TEXT,
    p_vendor_status TEXT DEFAULT NULL,
    p_precedence_rank INT DEFAULT 100,
    p_dsse_envelope JSONB DEFAULT NULL,
    p_raw_payload JSONB DEFAULT NULL,
    p_fetched_at TIMESTAMPTZ DEFAULT NOW()
)
RETURNS UUID
LANGUAGE plpgsql
AS $$
DECLARE
    v_id UUID;
BEGIN
    INSERT INTO vuln.advisory_source_edge (
        canonical_id, source_id, source_advisory_id, source_doc_hash,
        vendor_status, precedence_rank, dsse_envelope, raw_payload, fetched_at
    )
    VALUES (
        p_canonical_id, p_source_id, p_source_advisory_id, p_source_doc_hash,
        p_vendor_status, p_precedence_rank, p_dsse_envelope, p_raw_payload, p_fetched_at
    )
    ON CONFLICT (canonical_id, source_id, source_doc_hash) DO UPDATE SET
        vendor_status = COALESCE(EXCLUDED.vendor_status, vuln.advisory_source_edge.vendor_status),
        precedence_rank = LEAST(EXCLUDED.precedence_rank, vuln.advisory_source_edge.precedence_rank),
        dsse_envelope = COALESCE(EXCLUDED.dsse_envelope, vuln.advisory_source_edge.dsse_envelope),
        raw_payload = COALESCE(EXCLUDED.raw_payload, vuln.advisory_source_edge.raw_payload)
    RETURNING id INTO v_id;
    RETURN v_id;
END;
$$;

-- Function to count active canonicals by CVE prefix.
-- NOTE(review): the LIKE prefix scan only uses idx_advisory_canonical_cve if
-- the column collation supports it (C collation or a text_pattern_ops index)
-- -- verify on the target database.
CREATE OR REPLACE FUNCTION vuln.count_canonicals_by_cve_year(p_year INT)
RETURNS BIGINT
LANGUAGE sql STABLE
AS $$
    SELECT COUNT(*) FROM vuln.advisory_canonical
    WHERE cve LIKE 'CVE-' || p_year::TEXT || '-%'
    AND status = 'active';
$$;

-- Comments
COMMENT ON FUNCTION vuln.get_canonical_by_hash(TEXT) IS 'Lookup canonical advisory by merge_hash';
COMMENT ON FUNCTION vuln.get_source_edges(UUID) IS 'Get all source edges for a canonical, ordered by precedence';
COMMENT ON FUNCTION vuln.upsert_canonical IS 'Insert or update canonical advisory with merge_hash deduplication';
COMMENT ON FUNCTION vuln.add_source_edge IS 'Add source edge with deduplication by (canonical, source, doc_hash)';

View File

@@ -0,0 +1,144 @@
-- Concelier Migration 012: Populate advisory_canonical table
-- Sprint: SPRINT_8200_0012_0002_DB_canonical_source_edge_schema
-- Task: SCHEMA-8200-012
-- Populates advisory_canonical from existing advisories with placeholder merge_hash
-- NOTE: merge_hash will be backfilled by application-side MergeHashBackfillService

-- Populate advisory_canonical from existing advisories.
-- Each advisory + affected package combination becomes a canonical record.
-- The LEFT JOIN below also emits one row per advisory that has no affected
-- packages (aa is all-NULL -> merge_hash suffix '_noaffects').
INSERT INTO vuln.advisory_canonical (
    id,
    cve,
    affects_key,
    version_range,
    weakness,
    merge_hash,
    status,
    severity,
    epss_score,
    exploit_known,
    title,
    summary,
    created_at,
    updated_at
)
SELECT
    gen_random_uuid() AS id,
    COALESCE(
        -- Try to get CVE from aliases
        (SELECT alias_value FROM vuln.advisory_aliases
         WHERE advisory_id = a.id AND alias_type = 'CVE'
         ORDER BY is_primary DESC LIMIT 1),
        -- Fall back to primary_vuln_id
        a.primary_vuln_id
    ) AS cve,
    COALESCE(
        -- Prefer PURL if available
        aa.purl,
        -- Otherwise construct from ecosystem/package
        CASE
            WHEN aa.ecosystem IS NOT NULL AND aa.package_name IS NOT NULL
            THEN 'pkg:' || lower(aa.ecosystem) || '/' || aa.package_name
            ELSE 'unknown:' || a.id::text
        END
    ) AS affects_key,
    aa.version_range AS version_range,
    -- Aggregate CWE IDs into sorted array
    COALESCE(
        (SELECT array_agg(DISTINCT upper(w.cwe_id) ORDER BY upper(w.cwe_id))
         FROM vuln.advisory_weaknesses w
         WHERE w.advisory_id = a.id),
        '{}'::text[]
    ) AS weakness,
    -- Placeholder merge_hash - will be backfilled by application
    'PLACEHOLDER_' || a.id::text || '_' || COALESCE(aa.id::text, 'noaffects') AS merge_hash,
    CASE
        WHEN a.withdrawn_at IS NOT NULL THEN 'withdrawn'
        ELSE 'active'
    END AS status,
    a.severity,
    -- EPSS score if available from KEV
    -- NOTE(review): 0.95 is a heuristic stand-in for ransomware-linked KEV
    -- entries, not an actual EPSS value -- confirm before downstream use.
    (SELECT CASE WHEN kf.known_ransomware_use THEN 0.95 ELSE NULL END
     FROM vuln.kev_flags kf
     WHERE kf.advisory_id = a.id
     LIMIT 1) AS epss_score,
    -- exploit_known from KEV flags
    EXISTS(SELECT 1 FROM vuln.kev_flags kf WHERE kf.advisory_id = a.id) AS exploit_known,
    a.title,
    a.summary,
    a.created_at,
    NOW() AS updated_at
FROM vuln.advisories a
LEFT JOIN vuln.advisory_affected aa ON aa.advisory_id = a.id
WHERE NOT EXISTS (
    -- Skip if already migrated (idempotent).
    -- UUIDs are fixed-length, so the LIKE prefix cannot match another
    -- advisory's rows; any existing placeholder skips the whole advisory.
    SELECT 1 FROM vuln.advisory_canonical c
    WHERE c.merge_hash LIKE 'PLACEHOLDER_' || a.id::text || '%'
)
ON CONFLICT (merge_hash) DO NOTHING;

-- Handle advisories without affected packages.
-- NOTE(review): the LEFT JOIN above already produces the
-- 'PLACEHOLDER_<id>_noaffects' row for these advisories, so this statement
-- is normally a no-op (same merge_hash -> NOT EXISTS / ON CONFLICT skip).
-- It also derives affects_key from primary_vuln_id rather than a.id, which
-- would differ from the first INSERT if it ever fired -- confirm intent.
INSERT INTO vuln.advisory_canonical (
    id,
    cve,
    affects_key,
    version_range,
    weakness,
    merge_hash,
    status,
    severity,
    exploit_known,
    title,
    summary,
    created_at,
    updated_at
)
SELECT
    gen_random_uuid() AS id,
    COALESCE(
        (SELECT alias_value FROM vuln.advisory_aliases
         WHERE advisory_id = a.id AND alias_type = 'CVE'
         ORDER BY is_primary DESC LIMIT 1),
        a.primary_vuln_id
    ) AS cve,
    'unknown:' || a.primary_vuln_id AS affects_key,
    NULL AS version_range,
    COALESCE(
        (SELECT array_agg(DISTINCT upper(w.cwe_id) ORDER BY upper(w.cwe_id))
         FROM vuln.advisory_weaknesses w
         WHERE w.advisory_id = a.id),
        '{}'::text[]
    ) AS weakness,
    'PLACEHOLDER_' || a.id::text || '_noaffects' AS merge_hash,
    CASE
        WHEN a.withdrawn_at IS NOT NULL THEN 'withdrawn'
        ELSE 'active'
    END AS status,
    a.severity,
    EXISTS(SELECT 1 FROM vuln.kev_flags kf WHERE kf.advisory_id = a.id) AS exploit_known,
    a.title,
    a.summary,
    a.created_at,
    NOW() AS updated_at
FROM vuln.advisories a
WHERE NOT EXISTS (
    SELECT 1 FROM vuln.advisory_affected aa WHERE aa.advisory_id = a.id
)
AND NOT EXISTS (
    SELECT 1 FROM vuln.advisory_canonical c
    WHERE c.merge_hash LIKE 'PLACEHOLDER_' || a.id::text || '%'
)
ON CONFLICT (merge_hash) DO NOTHING;

-- Log migration progress
DO $$
DECLARE
    canonical_count BIGINT;
    placeholder_count BIGINT;
BEGIN
    SELECT COUNT(*) INTO canonical_count FROM vuln.advisory_canonical;
    SELECT COUNT(*) INTO placeholder_count FROM vuln.advisory_canonical WHERE merge_hash LIKE 'PLACEHOLDER_%';
    RAISE NOTICE 'Migration 012 complete: % canonical records, % with placeholder hash (need backfill)',
        canonical_count, placeholder_count;
END $$;

View File

@@ -0,0 +1,129 @@
-- Concelier Migration 013: Populate advisory_source_edge table
-- Sprint: SPRINT_8200_0012_0002_DB_canonical_source_edge_schema
-- Task: SCHEMA-8200-013
-- Creates source edges from existing advisory snapshots and provenance data

-- Create source edges from advisory snapshots.
-- The feed_snapshots join carries no columns but acts as a filter: only
-- advisory snapshots that belong to an existing feed snapshot produce edges.
INSERT INTO vuln.advisory_source_edge (
    id,
    canonical_id,
    source_id,
    source_advisory_id,
    source_doc_hash,
    vendor_status,
    precedence_rank,
    dsse_envelope,
    raw_payload,
    fetched_at,
    created_at
)
SELECT
    gen_random_uuid() AS id,
    c.id AS canonical_id,
    a.source_id AS source_id,
    a.advisory_key AS source_advisory_id,
    snap.content_hash AS source_doc_hash,
    CASE
        WHEN a.withdrawn_at IS NOT NULL THEN 'not_affected'
        ELSE 'affected'
    END AS vendor_status,
    COALESCE(s.priority, 100) AS precedence_rank,
    NULL AS dsse_envelope, -- DSSE signatures added later
    a.raw_payload AS raw_payload,
    snap.created_at AS fetched_at,
    NOW() AS created_at
FROM vuln.advisory_canonical c
JOIN vuln.advisories a ON (
    -- Match by CVE, either directly or through an alias
    c.cve = a.primary_vuln_id
    OR EXISTS (
        SELECT 1 FROM vuln.advisory_aliases al
        WHERE al.advisory_id = a.id AND al.alias_value = c.cve
    )
)
JOIN vuln.advisory_snapshots snap ON snap.advisory_key = a.advisory_key
JOIN vuln.feed_snapshots fs ON fs.id = snap.feed_snapshot_id
LEFT JOIN vuln.sources s ON s.id = a.source_id
WHERE a.source_id IS NOT NULL
AND NOT EXISTS (
    -- Skip if already migrated (idempotent)
    SELECT 1 FROM vuln.advisory_source_edge e
    WHERE e.canonical_id = c.id
    AND e.source_id = a.source_id
    AND e.source_doc_hash = snap.content_hash
)
ON CONFLICT (canonical_id, source_id, source_doc_hash) DO NOTHING;

-- Create source edges directly from advisories (for those without snapshots)
INSERT INTO vuln.advisory_source_edge (
    id,
    canonical_id,
    source_id,
    source_advisory_id,
    source_doc_hash,
    vendor_status,
    precedence_rank,
    dsse_envelope,
    raw_payload,
    fetched_at,
    created_at
)
SELECT
    gen_random_uuid() AS id,
    c.id AS canonical_id,
    a.source_id AS source_id,
    a.advisory_key AS source_advisory_id,
    -- Generate hash from raw_payload if available, otherwise use advisory_key.
    -- FIX: PostgreSQL has no text -> bytea cast, so the original
    -- `a.raw_payload::text::bytea` / `a.advisory_key::bytea` expressions would
    -- error at runtime. convert_to() performs the encoding explicitly and
    -- still propagates NULL raw_payload into the COALESCE fallback.
    COALESCE(
        encode(sha256(convert_to(a.raw_payload::text, 'UTF8')), 'hex'),
        encode(sha256(convert_to(a.advisory_key, 'UTF8')), 'hex')
    ) AS source_doc_hash,
    CASE
        WHEN a.withdrawn_at IS NOT NULL THEN 'not_affected'
        ELSE 'affected'
    END AS vendor_status,
    COALESCE(s.priority, 100) AS precedence_rank,
    NULL AS dsse_envelope,
    a.raw_payload AS raw_payload,
    a.created_at AS fetched_at,
    NOW() AS created_at
FROM vuln.advisory_canonical c
JOIN vuln.advisories a ON (
    c.cve = a.primary_vuln_id
    OR EXISTS (
        SELECT 1 FROM vuln.advisory_aliases al
        WHERE al.advisory_id = a.id AND al.alias_value = c.cve
    )
)
LEFT JOIN vuln.sources s ON s.id = a.source_id
WHERE a.source_id IS NOT NULL
AND NOT EXISTS (
    -- Only for advisories without snapshots
    SELECT 1 FROM vuln.advisory_snapshots snap
    WHERE snap.advisory_key = a.advisory_key
)
AND NOT EXISTS (
    SELECT 1 FROM vuln.advisory_source_edge e
    WHERE e.canonical_id = c.id AND e.source_id = a.source_id
)
ON CONFLICT (canonical_id, source_id, source_doc_hash) DO NOTHING;

-- Log migration progress
DO $$
DECLARE
    edge_count BIGINT;
    canonical_with_edges BIGINT;
    avg_edges NUMERIC;
BEGIN
    SELECT COUNT(*) INTO edge_count FROM vuln.advisory_source_edge;
    SELECT COUNT(DISTINCT canonical_id) INTO canonical_with_edges FROM vuln.advisory_source_edge;
    IF canonical_with_edges > 0 THEN
        avg_edges := edge_count::numeric / canonical_with_edges;
    ELSE
        avg_edges := 0;
    END IF;
    -- FIX: RAISE placeholders are bare '%'; printf-style '%.2f' is not
    -- supported (it printed a literal '.2f'), so round the value instead.
    RAISE NOTICE 'Migration 013 complete: % source edges, % canonicals with edges, avg % edges/canonical',
        edge_count, canonical_with_edges, round(avg_edges, 2);
END $$;

View File

@@ -0,0 +1,165 @@
-- Concelier Migration 014: Verification queries for canonical migration
-- Sprint: SPRINT_8200_0012_0002_DB_canonical_source_edge_schema
-- Task: SCHEMA-8200-014
-- Verification queries to compare record counts and data integrity

-- Verification Report
DO $$
DECLARE
    -- Source counts
    advisory_count BIGINT;
    affected_count BIGINT;
    alias_count BIGINT;
    weakness_count BIGINT;
    kev_count BIGINT;
    snapshot_count BIGINT;
    source_count BIGINT;
    -- Target counts
    canonical_count BIGINT;
    canonical_active BIGINT;
    canonical_withdrawn BIGINT;
    canonical_placeholder BIGINT;
    edge_count BIGINT;
    edge_unique_sources BIGINT;
    edge_with_payload BIGINT;
    -- Integrity checks
    orphan_edges BIGINT;
    missing_sources BIGINT;
    duplicate_hashes BIGINT;
    avg_edges_per_canonical NUMERIC;
BEGIN
    -- Source table counts
    SELECT COUNT(*) INTO advisory_count FROM vuln.advisories;
    SELECT COUNT(*) INTO affected_count FROM vuln.advisory_affected;
    SELECT COUNT(*) INTO alias_count FROM vuln.advisory_aliases;
    SELECT COUNT(*) INTO weakness_count FROM vuln.advisory_weaknesses;
    SELECT COUNT(*) INTO kev_count FROM vuln.kev_flags;
    SELECT COUNT(*) INTO snapshot_count FROM vuln.advisory_snapshots;
    SELECT COUNT(*) INTO source_count FROM vuln.sources WHERE enabled = true;
    -- Target table counts
    SELECT COUNT(*) INTO canonical_count FROM vuln.advisory_canonical;
    SELECT COUNT(*) INTO canonical_active FROM vuln.advisory_canonical WHERE status = 'active';
    SELECT COUNT(*) INTO canonical_withdrawn FROM vuln.advisory_canonical WHERE status = 'withdrawn';
    SELECT COUNT(*) INTO canonical_placeholder FROM vuln.advisory_canonical WHERE merge_hash LIKE 'PLACEHOLDER_%';
    SELECT COUNT(*) INTO edge_count FROM vuln.advisory_source_edge;
    SELECT COUNT(DISTINCT source_id) INTO edge_unique_sources FROM vuln.advisory_source_edge;
    SELECT COUNT(*) INTO edge_with_payload FROM vuln.advisory_source_edge WHERE raw_payload IS NOT NULL;
    -- Integrity checks
    SELECT COUNT(*) INTO orphan_edges
    FROM vuln.advisory_source_edge e
    WHERE NOT EXISTS (SELECT 1 FROM vuln.advisory_canonical c WHERE c.id = e.canonical_id);
    SELECT COUNT(*) INTO missing_sources
    FROM vuln.advisory_source_edge e
    WHERE NOT EXISTS (SELECT 1 FROM vuln.sources s WHERE s.id = e.source_id);
    SELECT COUNT(*) INTO duplicate_hashes
    FROM (
        SELECT merge_hash, COUNT(*) as cnt
        FROM vuln.advisory_canonical
        GROUP BY merge_hash
        HAVING COUNT(*) > 1
    ) dups;
    IF canonical_count > 0 THEN
        avg_edges_per_canonical := edge_count::numeric / canonical_count;
    ELSE
        avg_edges_per_canonical := 0;
    END IF;
    -- Report
    RAISE NOTICE '============================================';
    RAISE NOTICE 'CANONICAL MIGRATION VERIFICATION REPORT';
    RAISE NOTICE '============================================';
    RAISE NOTICE '';
    RAISE NOTICE 'SOURCE TABLE COUNTS:';
    RAISE NOTICE '  Advisories:        %', advisory_count;
    RAISE NOTICE '  Affected packages: %', affected_count;
    RAISE NOTICE '  Aliases:           %', alias_count;
    RAISE NOTICE '  Weaknesses (CWE):  %', weakness_count;
    RAISE NOTICE '  KEV flags:         %', kev_count;
    RAISE NOTICE '  Snapshots:         %', snapshot_count;
    RAISE NOTICE '  Enabled sources:   %', source_count;
    RAISE NOTICE '';
    RAISE NOTICE 'TARGET TABLE COUNTS:';
    RAISE NOTICE '  Canonicals:        % (active: %, withdrawn: %)', canonical_count, canonical_active, canonical_withdrawn;
    RAISE NOTICE '  Placeholder hashes:% (need backfill)', canonical_placeholder;
    RAISE NOTICE '  Source edges:      %', edge_count;
    RAISE NOTICE '  Unique sources:    %', edge_unique_sources;
    RAISE NOTICE '  Edges with payload:%', edge_with_payload;
    RAISE NOTICE '';
    RAISE NOTICE 'METRICS:';
    -- FIX: RAISE placeholders are bare '%'; '%.2f' is not supported (it
    -- printed a literal '.2f'), so round the value instead.
    RAISE NOTICE '  Avg edges/canonical: %', round(avg_edges_per_canonical, 2);
    RAISE NOTICE '';
    RAISE NOTICE 'INTEGRITY CHECKS:';
    RAISE NOTICE '  Orphan edges:      % %', orphan_edges, CASE WHEN orphan_edges = 0 THEN '(OK)' ELSE '(FAIL)' END;
    RAISE NOTICE '  Missing sources:   % %', missing_sources, CASE WHEN missing_sources = 0 THEN '(OK)' ELSE '(FAIL)' END;
    RAISE NOTICE '  Duplicate hashes:  % %', duplicate_hashes, CASE WHEN duplicate_hashes = 0 THEN '(OK)' ELSE '(FAIL)' END;
    RAISE NOTICE '';
    -- Surface integrity failures as a WARNING so they stand out in logs.
    -- FIX: the original comment claimed the migration would fail here but it
    -- only emitted a NOTICE; switch to RAISE EXCEPTION if a hard abort is
    -- actually desired.
    IF orphan_edges > 0 OR missing_sources > 0 OR duplicate_hashes > 0 THEN
        RAISE WARNING 'VERIFICATION FAILED - Please investigate integrity issues';
    ELSE
        RAISE NOTICE 'VERIFICATION PASSED - Migration completed successfully';
    END IF;
    RAISE NOTICE '============================================';
END $$;
-- Additional verification queries (run individually for debugging)
-- Find CVEs that weren't migrated
-- SELECT a.primary_vuln_id, a.advisory_key, a.created_at
-- FROM vuln.advisories a
-- WHERE NOT EXISTS (
-- SELECT 1 FROM vuln.advisory_canonical c WHERE c.cve = a.primary_vuln_id
-- )
-- LIMIT 20;
-- Find canonicals without source edges
-- SELECT c.cve, c.affects_key, c.created_at
-- FROM vuln.advisory_canonical c
-- WHERE NOT EXISTS (
-- SELECT 1 FROM vuln.advisory_source_edge e WHERE e.canonical_id = c.id
-- )
-- LIMIT 20;
-- Distribution of edges per canonical
-- SELECT
-- CASE
-- WHEN edge_count = 0 THEN '0'
-- WHEN edge_count = 1 THEN '1'
-- WHEN edge_count BETWEEN 2 AND 5 THEN '2-5'
-- WHEN edge_count BETWEEN 6 AND 10 THEN '6-10'
-- ELSE '10+'
-- END AS edge_range,
-- COUNT(*) AS canonical_count
-- FROM (
-- SELECT c.id, COALESCE(e.edge_count, 0) AS edge_count
-- FROM vuln.advisory_canonical c
-- LEFT JOIN (
-- SELECT canonical_id, COUNT(*) AS edge_count
-- FROM vuln.advisory_source_edge
-- GROUP BY canonical_id
-- ) e ON e.canonical_id = c.id
-- ) sub
-- GROUP BY edge_range
-- ORDER BY edge_range;
-- Top CVEs by source coverage
-- SELECT
-- c.cve,
-- c.severity,
-- c.exploit_known,
-- COUNT(e.id) AS source_count
-- FROM vuln.advisory_canonical c
-- LEFT JOIN vuln.advisory_source_edge e ON e.canonical_id = c.id
-- GROUP BY c.id, c.cve, c.severity, c.exploit_known
-- ORDER BY source_count DESC
-- LIMIT 20;

View File

@@ -0,0 +1,45 @@
-- Concelier Migration 015: Interest Score Table
-- Sprint: SPRINT_8200_0013_0002_CONCEL_interest_scoring
-- Task: ISCORE-8200-000
-- Creates interest score table for org-specific advisory prioritization

-- Interest score table for canonical advisories
CREATE TABLE IF NOT EXISTS vuln.interest_score (
    -- Identity (uses canonical_id as natural key)
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    -- Scores die with their canonical advisory (CASCADE)
    canonical_id UUID NOT NULL REFERENCES vuln.advisory_canonical(id) ON DELETE CASCADE,
    -- Score data: NUMERIC(3,2) constrains to two decimal places within [0, 1]
    score NUMERIC(3,2) NOT NULL CHECK (score >= 0 AND score <= 1),
    reasons JSONB NOT NULL DEFAULT '[]',
    -- Context
    last_seen_in_build UUID, -- Last SBOM/build that referenced this advisory
    -- Audit
    computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints: at most one score per canonical advisory
    CONSTRAINT uq_interest_score_canonical UNIQUE (canonical_id)
);

-- Primary index for score-based queries (desc for "top N" queries)
CREATE INDEX IF NOT EXISTS idx_interest_score_score ON vuln.interest_score(score DESC);
-- Index for stale score detection
CREATE INDEX IF NOT EXISTS idx_interest_score_computed ON vuln.interest_score(computed_at DESC);
-- Partial index for high-interest advisories (score >= 0.7)
CREATE INDEX IF NOT EXISTS idx_interest_score_high ON vuln.interest_score(canonical_id)
    WHERE score >= 0.7;
-- Partial index for low-interest advisories (score < 0.2) for degradation queries
CREATE INDEX IF NOT EXISTS idx_interest_score_low ON vuln.interest_score(canonical_id)
    WHERE score < 0.2;

-- Comments
COMMENT ON TABLE vuln.interest_score IS 'Per-canonical interest scores based on org signals (SBOM, VEX, runtime)';
COMMENT ON COLUMN vuln.interest_score.score IS 'Interest score 0.00-1.00 computed from weighted factors';
COMMENT ON COLUMN vuln.interest_score.reasons IS 'Array of reason codes: in_sbom, reachable, deployed, no_vex_na, recent';
COMMENT ON COLUMN vuln.interest_score.last_seen_in_build IS 'UUID of last SBOM/build where this advisory was relevant';
COMMENT ON COLUMN vuln.interest_score.computed_at IS 'Timestamp of last score computation';

View File

@@ -0,0 +1,112 @@
-- Concelier Migration 016: SBOM Registry Tables
-- Sprint: SPRINT_8200_0013_0003_SCAN_sbom_intersection_scoring
-- Task: SBOM-8200-002
-- Creates SBOM registry and advisory match tables for interest scoring

-- SBOM registration table
CREATE TABLE IF NOT EXISTS vuln.sbom_registry (
    -- Identity
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    -- SBOM identification
    digest TEXT NOT NULL, -- SHA-256 of SBOM content
    format TEXT NOT NULL CHECK (format IN ('cyclonedx', 'spdx')),
    spec_version TEXT NOT NULL, -- Format spec version (e.g., "1.6" for CycloneDX)
    -- Primary component info
    primary_name TEXT, -- Main component name (e.g., image name)
    primary_version TEXT, -- Main component version
    -- Statistics
    component_count INT NOT NULL DEFAULT 0,
    affected_count INT NOT NULL DEFAULT 0, -- Cached count of matching advisories
    -- Source tracking
    source TEXT NOT NULL, -- scanner, upload, api, etc.
    tenant_id TEXT, -- Optional multi-tenant support
    -- Audit
    registered_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    last_matched_at TIMESTAMPTZ,
    -- Constraints: digest deduplicates re-uploads of the same SBOM
    CONSTRAINT uq_sbom_registry_digest UNIQUE (digest)
);

-- Indexes for sbom_registry
CREATE INDEX IF NOT EXISTS idx_sbom_registry_tenant ON vuln.sbom_registry(tenant_id) WHERE tenant_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_sbom_registry_primary ON vuln.sbom_registry(primary_name, primary_version);
CREATE INDEX IF NOT EXISTS idx_sbom_registry_registered ON vuln.sbom_registry(registered_at DESC);
CREATE INDEX IF NOT EXISTS idx_sbom_registry_affected ON vuln.sbom_registry(affected_count DESC) WHERE affected_count > 0;

-- Junction table for SBOM-to-canonical advisory matches
CREATE TABLE IF NOT EXISTS vuln.sbom_canonical_match (
    -- Identity
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    -- Foreign keys: matches die with either side (CASCADE)
    sbom_id UUID NOT NULL REFERENCES vuln.sbom_registry(id) ON DELETE CASCADE,
    canonical_id UUID NOT NULL REFERENCES vuln.advisory_canonical(id) ON DELETE CASCADE,
    -- Match details
    purl TEXT NOT NULL, -- The PURL that matched
    match_method TEXT NOT NULL CHECK (match_method IN ('exact_purl', 'purl_version_range', 'cpe', 'name_version')),
    confidence NUMERIC(3,2) NOT NULL DEFAULT 1.0 CHECK (confidence >= 0 AND confidence <= 1),
    -- Runtime context
    is_reachable BOOLEAN NOT NULL DEFAULT FALSE,
    is_deployed BOOLEAN NOT NULL DEFAULT FALSE,
    -- Audit
    matched_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints: one match row per (sbom, advisory, matching purl)
    CONSTRAINT uq_sbom_canonical_match UNIQUE (sbom_id, canonical_id, purl)
);

-- Indexes for sbom_canonical_match
CREATE INDEX IF NOT EXISTS idx_sbom_match_sbom ON vuln.sbom_canonical_match(sbom_id);
CREATE INDEX IF NOT EXISTS idx_sbom_match_canonical ON vuln.sbom_canonical_match(canonical_id);
CREATE INDEX IF NOT EXISTS idx_sbom_match_purl ON vuln.sbom_canonical_match(purl);
CREATE INDEX IF NOT EXISTS idx_sbom_match_reachable ON vuln.sbom_canonical_match(canonical_id) WHERE is_reachable = TRUE;

-- PURL cache table for fast PURL-to-canonical lookups
CREATE TABLE IF NOT EXISTS vuln.purl_canonical_index (
    -- Identity
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    -- Lookup key: the full PURL plus its decomposed parts for partial lookups
    purl TEXT NOT NULL, -- Normalized PURL
    purl_type TEXT NOT NULL, -- Extracted type: npm, pypi, maven, etc.
    purl_namespace TEXT, -- Extracted namespace
    purl_name TEXT NOT NULL, -- Extracted name
    -- Target
    canonical_id UUID NOT NULL REFERENCES vuln.advisory_canonical(id) ON DELETE CASCADE,
    -- Version matching
    version_constraint TEXT, -- Version range from advisory
    -- Audit
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints
    CONSTRAINT uq_purl_canonical UNIQUE (purl, canonical_id)
);

-- Indexes for purl_canonical_index
CREATE INDEX IF NOT EXISTS idx_purl_index_lookup ON vuln.purl_canonical_index(purl_type, purl_namespace, purl_name);
CREATE INDEX IF NOT EXISTS idx_purl_index_canonical ON vuln.purl_canonical_index(canonical_id);

-- Comments
COMMENT ON TABLE vuln.sbom_registry IS 'Registered SBOMs for interest score learning';
COMMENT ON COLUMN vuln.sbom_registry.digest IS 'SHA-256 digest of SBOM content for deduplication';
COMMENT ON COLUMN vuln.sbom_registry.format IS 'SBOM format: cyclonedx or spdx';
COMMENT ON COLUMN vuln.sbom_registry.affected_count IS 'Cached count of canonical advisories affecting this SBOM';
COMMENT ON TABLE vuln.sbom_canonical_match IS 'Junction table linking SBOMs to affected canonical advisories';
COMMENT ON COLUMN vuln.sbom_canonical_match.match_method IS 'How the match was determined: exact_purl, purl_version_range, cpe, name_version';
COMMENT ON COLUMN vuln.sbom_canonical_match.is_reachable IS 'Whether the vulnerable code path is reachable from entrypoints';
COMMENT ON COLUMN vuln.sbom_canonical_match.is_deployed IS 'Whether the component is deployed in production';
COMMENT ON TABLE vuln.purl_canonical_index IS 'Fast lookup index from PURLs to canonical advisories';

View File

@@ -0,0 +1,56 @@
-- Concelier Migration 017: Provenance Scope Table
-- Sprint: SPRINT_8200_0015_0001_CONCEL_backport_integration
-- Task: BACKPORT-8200-000
-- Creates distro-specific backport and patch provenance per canonical
-- Distro-specific provenance for canonical advisories
-- One row per (canonical advisory, distro release) pair, recording whether
-- and how the distro patched the vulnerability independently of upstream.
CREATE TABLE IF NOT EXISTS vuln.provenance_scope (
    -- Identity
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    canonical_id UUID NOT NULL REFERENCES vuln.advisory_canonical(id) ON DELETE CASCADE,
    -- Distro context
    distro_release TEXT NOT NULL,            -- e.g., 'debian:bookworm', 'rhel:9.2', 'ubuntu:22.04'
    -- Patch provenance
    backport_semver TEXT,                    -- distro's backported version if different from upstream
    patch_id TEXT,                           -- upstream commit SHA or patch identifier
    -- CHECK permits NULL (origin unknown) as well as the three listed values.
    patch_origin TEXT CHECK (patch_origin IN ('upstream', 'distro', 'vendor')),
    -- Evidence linkage
    -- Soft reference (no FK constraint) so this migration does not depend on
    -- the proofchain schema being present.
    evidence_ref UUID,                       -- FK to proofchain.proof_entries (if available)
    confidence NUMERIC(3,2) NOT NULL DEFAULT 0.5 CHECK (confidence >= 0 AND confidence <= 1),
    -- Audit
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints
    CONSTRAINT uq_provenance_scope_canonical_distro UNIQUE (canonical_id, distro_release)
);
-- Primary lookup indexes
CREATE INDEX IF NOT EXISTS idx_provenance_scope_canonical ON vuln.provenance_scope(canonical_id);
CREATE INDEX IF NOT EXISTS idx_provenance_scope_distro ON vuln.provenance_scope(distro_release);
CREATE INDEX IF NOT EXISTS idx_provenance_scope_patch ON vuln.provenance_scope(patch_id) WHERE patch_id IS NOT NULL;
-- Filtered indexes for common queries
CREATE INDEX IF NOT EXISTS idx_provenance_scope_high_confidence ON vuln.provenance_scope(confidence DESC) WHERE confidence >= 0.7;
CREATE INDEX IF NOT EXISTS idx_provenance_scope_origin ON vuln.provenance_scope(patch_origin) WHERE patch_origin IS NOT NULL;
-- Time-based index for incremental queries
CREATE INDEX IF NOT EXISTS idx_provenance_scope_updated ON vuln.provenance_scope(updated_at DESC);
-- Trigger for automatic updated_at
-- Relies on vuln.update_timestamp() being defined by an earlier migration.
CREATE TRIGGER trg_provenance_scope_updated
    BEFORE UPDATE ON vuln.provenance_scope
    FOR EACH ROW EXECUTE FUNCTION vuln.update_timestamp();
-- Comments
COMMENT ON TABLE vuln.provenance_scope IS 'Distro-specific backport and patch provenance per canonical advisory';
COMMENT ON COLUMN vuln.provenance_scope.distro_release IS 'Linux distribution release identifier (e.g., debian:bookworm, rhel:9.2)';
COMMENT ON COLUMN vuln.provenance_scope.backport_semver IS 'Distro version containing backport (may differ from upstream fixed version)';
COMMENT ON COLUMN vuln.provenance_scope.patch_id IS 'Upstream commit SHA or patch identifier for lineage tracking';
COMMENT ON COLUMN vuln.provenance_scope.patch_origin IS 'Source of the patch: upstream project, distro maintainer, or vendor';
COMMENT ON COLUMN vuln.provenance_scope.evidence_ref IS 'Reference to BackportProofService evidence in proofchain';
COMMENT ON COLUMN vuln.provenance_scope.confidence IS 'Confidence score from BackportProofService (0.0-1.0)';

View File

@@ -0,0 +1,51 @@
using StellaOps.Concelier.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Advisories;
/// <summary>
/// PostgreSQL advisory storage interface.
/// This interface mirrors the IAdvisoryStore contract but without legacy-specific parameters,
/// and additionally threads an optional source identifier through upserts.
/// </summary>
/// <remarks>
/// Used by connectors when configured to write to PostgreSQL storage.
/// </remarks>
public interface IPostgresAdvisoryStore
{
    /// <summary>
    /// Upserts an advisory and all its child entities.
    /// </summary>
    /// <param name="advisory">The advisory domain model to store.</param>
    /// <param name="sourceId">Optional source ID to associate with the advisory; pass null when the source is unknown.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task UpsertAsync(Advisory advisory, Guid? sourceId, CancellationToken cancellationToken);
    /// <summary>
    /// Finds an advisory by its key.
    /// </summary>
    /// <param name="advisoryKey">The advisory key.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The advisory if found, null otherwise.</returns>
    Task<Advisory?> FindAsync(string advisoryKey, CancellationToken cancellationToken);
    /// <summary>
    /// Gets the most recently modified advisories.
    /// </summary>
    /// <param name="limit">Maximum number of advisories to return.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>List of recent advisories.</returns>
    Task<IReadOnlyList<Advisory>> GetRecentAsync(int limit, CancellationToken cancellationToken);
    /// <summary>
    /// Streams all advisories for bulk operations.
    /// </summary>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Async enumerable of advisories; enumeration may issue multiple queries.</returns>
    IAsyncEnumerable<Advisory> StreamAsync(CancellationToken cancellationToken);
    /// <summary>
    /// Gets the count of advisories in the store.
    /// </summary>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Total count of advisories.</returns>
    Task<long> CountAsync(CancellationToken cancellationToken);
}

View File

@@ -0,0 +1,318 @@
using System.Runtime.CompilerServices;
using System.Text.Json;
using Microsoft.Extensions.Logging;
using StellaOps.Concelier.Models;
using StellaOps.Concelier.Persistence.Postgres.Conversion;
using AdvisoryContracts = StellaOps.Concelier.Storage.Advisories;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Concelier.Persistence.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Advisories;
/// <summary>
/// PostgreSQL implementation of advisory storage.
/// Uses the AdvisoryConverter to transform domain models to relational entities
/// and the AdvisoryRepository to persist them.
/// </summary>
/// <remarks>
/// Tasks: PG-T5b.2.1, PG-T5b.2.2, PG-T5b.2.3 - Enables importers to write to PostgreSQL.
/// </remarks>
public sealed class PostgresAdvisoryStore : IPostgresAdvisoryStore, AdvisoryContracts.IAdvisoryStore
{
    private readonly IAdvisoryRepository _advisoryRepository;
    private readonly IAdvisoryAliasRepository _aliasRepository;
    private readonly IAdvisoryCvssRepository _cvssRepository;
    private readonly IAdvisoryAffectedRepository _affectedRepository;
    private readonly IAdvisoryReferenceRepository _referenceRepository;
    private readonly IAdvisoryCreditRepository _creditRepository;
    private readonly IAdvisoryWeaknessRepository _weaknessRepository;
    private readonly IKevFlagRepository _kevFlagRepository;
    private readonly AdvisoryConverter _converter;
    private readonly ILogger<PostgresAdvisoryStore> _logger;

    // camelCase to match the canonical JSON wire format used for RawPayload.
    private static readonly JsonSerializerOptions JsonOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
        WriteIndented = false
    };

    public PostgresAdvisoryStore(
        IAdvisoryRepository advisoryRepository,
        IAdvisoryAliasRepository aliasRepository,
        IAdvisoryCvssRepository cvssRepository,
        IAdvisoryAffectedRepository affectedRepository,
        IAdvisoryReferenceRepository referenceRepository,
        IAdvisoryCreditRepository creditRepository,
        IAdvisoryWeaknessRepository weaknessRepository,
        IKevFlagRepository kevFlagRepository,
        ILogger<PostgresAdvisoryStore> logger)
    {
        _advisoryRepository = advisoryRepository ?? throw new ArgumentNullException(nameof(advisoryRepository));
        _aliasRepository = aliasRepository ?? throw new ArgumentNullException(nameof(aliasRepository));
        _cvssRepository = cvssRepository ?? throw new ArgumentNullException(nameof(cvssRepository));
        _affectedRepository = affectedRepository ?? throw new ArgumentNullException(nameof(affectedRepository));
        _referenceRepository = referenceRepository ?? throw new ArgumentNullException(nameof(referenceRepository));
        _creditRepository = creditRepository ?? throw new ArgumentNullException(nameof(creditRepository));
        _weaknessRepository = weaknessRepository ?? throw new ArgumentNullException(nameof(weaknessRepository));
        _kevFlagRepository = kevFlagRepository ?? throw new ArgumentNullException(nameof(kevFlagRepository));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
        _converter = new AdvisoryConverter();
    }

    /// <inheritdoc />
    public async Task UpsertAsync(Advisory advisory, Guid? sourceId, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(advisory);
        _logger.LogDebug("Upserting advisory {AdvisoryKey} to PostgreSQL", advisory.AdvisoryKey);

        // Convert domain model to PostgreSQL entities.
        var result = _converter.ConvertFromDomain(advisory, sourceId);

        // Use the repository's atomic upsert which handles all child tables in a transaction.
        await _advisoryRepository.UpsertAsync(
            result.Advisory,
            result.Aliases,
            result.Cvss,
            result.Affected,
            result.References,
            result.Credits,
            result.Weaknesses,
            result.KevFlags,
            cancellationToken).ConfigureAwait(false);

        _logger.LogDebug(
            "Upserted advisory {AdvisoryKey} with {ChildCount} child entities",
            advisory.AdvisoryKey,
            result.TotalChildEntities);
    }

    /// <inheritdoc cref="AdvisoryContracts.IAdvisoryStore.UpsertAsync" />
    Task AdvisoryContracts.IAdvisoryStore.UpsertAsync(Advisory advisory, CancellationToken cancellationToken)
        => UpsertAsync(advisory, sourceId: null, cancellationToken);

    /// <inheritdoc />
    public async Task<Advisory?> FindAsync(string advisoryKey, CancellationToken cancellationToken)
    {
        ArgumentException.ThrowIfNullOrEmpty(advisoryKey);
        var entity = await _advisoryRepository.GetByKeyAsync(advisoryKey, cancellationToken).ConfigureAwait(false);
        if (entity is null)
        {
            return null;
        }
        return await ReconstructAdvisoryAsync(entity, cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    Task<Advisory?> AdvisoryContracts.IAdvisoryStore.FindAsync(string advisoryKey, CancellationToken cancellationToken)
        => FindAsync(advisoryKey, cancellationToken);

    /// <inheritdoc />
    public async Task<IReadOnlyList<Advisory>> GetRecentAsync(int limit, CancellationToken cancellationToken)
    {
        // MinValue cursor = "everything"; the repository applies the limit.
        var entities = await _advisoryRepository.GetModifiedSinceAsync(
            DateTimeOffset.MinValue,
            limit,
            cancellationToken).ConfigureAwait(false);

        var advisories = new List<Advisory>(entities.Count);
        foreach (var entity in entities)
        {
            var advisory = await ReconstructAdvisoryAsync(entity, cancellationToken).ConfigureAwait(false);
            advisories.Add(advisory);
        }
        return advisories;
    }

    /// <inheritdoc />
    Task<IReadOnlyList<Advisory>> AdvisoryContracts.IAdvisoryStore.GetRecentAsync(int limit, CancellationToken cancellationToken)
        => GetRecentAsync(limit, cancellationToken);

    /// <inheritdoc />
    public async IAsyncEnumerable<Advisory> StreamAsync([EnumeratorCancellation] CancellationToken cancellationToken)
    {
        const int batchSize = 100;
        var since = DateTimeOffset.MinValue;

        while (true)
        {
            cancellationToken.ThrowIfCancellationRequested();

            var entities = await _advisoryRepository.GetModifiedSinceAsync(
                since,
                batchSize,
                cancellationToken).ConfigureAwait(false);

            if (entities.Count == 0)
            {
                break;
            }

            foreach (var entity in entities)
            {
                cancellationToken.ThrowIfCancellationRequested();
                yield return await ReconstructAdvisoryAsync(entity, cancellationToken).ConfigureAwait(false);
            }

            if (entities.Count < batchSize)
            {
                break;
            }

            // BUG FIX: the previous implementation always queried from
            // DateTimeOffset.MinValue (a local "offset" was incremented but
            // never used), so any table holding >= batchSize rows caused the
            // same first page to be re-fetched and yielded forever. Advance
            // the cursor past the last row of this page instead.
            var lastEntity = entities[entities.Count - 1];
            var nextSince = lastEntity.ModifiedAt ?? lastEntity.UpdatedAt;
            if (nextSince <= since)
            {
                // Safety valve: no forward progress possible (e.g. a page of
                // rows sharing one timestamp); stop rather than loop forever.
                // NOTE(review): rows sharing the boundary timestamp may be
                // skipped depending on GetModifiedSinceAsync's comparison
                // semantics (> vs >=) — confirm against the repository query.
                break;
            }

            since = nextSince;
        }
    }

    /// <inheritdoc />
    IAsyncEnumerable<Advisory> AdvisoryContracts.IAdvisoryStore.StreamAsync(CancellationToken cancellationToken)
        => StreamAsync(cancellationToken);

    /// <inheritdoc />
    public Task<long> CountAsync(CancellationToken cancellationToken)
    {
        return _advisoryRepository.CountAsync(cancellationToken);
    }

    /// <summary>
    /// Reconstructs an Advisory domain model from a PostgreSQL entity.
    /// Prefers the stored raw JSON payload (lossless); falls back to
    /// rebuilding from the relational child tables, which is lossy
    /// (some provenance/detail fields are not persisted relationally).
    /// </summary>
    private async Task<Advisory> ReconstructAdvisoryAsync(AdvisoryEntity entity, CancellationToken cancellationToken)
    {
        // If raw payload is available, deserialize from it for full fidelity.
        if (!string.IsNullOrEmpty(entity.RawPayload))
        {
            try
            {
                var advisory = JsonSerializer.Deserialize<Advisory>(entity.RawPayload, JsonOptions);
                if (advisory is not null)
                {
                    return advisory;
                }
            }
            catch (JsonException ex)
            {
                _logger.LogWarning(ex, "Failed to deserialize raw payload for advisory {AdvisoryKey}, reconstructing from entities", entity.AdvisoryKey);
            }
        }

        // Reconstruct from child entities.
        var aliases = await _aliasRepository.GetByAdvisoryAsync(entity.Id, cancellationToken).ConfigureAwait(false);
        var cvss = await _cvssRepository.GetByAdvisoryAsync(entity.Id, cancellationToken).ConfigureAwait(false);
        var affected = await _affectedRepository.GetByAdvisoryAsync(entity.Id, cancellationToken).ConfigureAwait(false);
        var references = await _referenceRepository.GetByAdvisoryAsync(entity.Id, cancellationToken).ConfigureAwait(false);
        var credits = await _creditRepository.GetByAdvisoryAsync(entity.Id, cancellationToken).ConfigureAwait(false);
        var weaknesses = await _weaknessRepository.GetByAdvisoryAsync(entity.Id, cancellationToken).ConfigureAwait(false);

        // Convert entities back to domain models.
        var aliasStrings = aliases.Select(a => a.AliasValue).ToArray();
        var creditModels = credits.Select(c => new AdvisoryCredit(
            c.Name,
            c.CreditType,
            c.Contact is not null ? new[] { c.Contact } : Array.Empty<string>(),
            AdvisoryProvenance.Empty)).ToArray();
        var referenceModels = references.Select(r => new AdvisoryReference(
            r.Url,
            r.RefType,
            null,
            null,
            AdvisoryProvenance.Empty)).ToArray();
        var cvssModels = cvss.Select(c => new CvssMetric(
            c.CvssVersion,
            c.VectorString,
            (double)c.BaseScore,
            c.BaseSeverity ?? "unknown",
            new AdvisoryProvenance(c.Source ?? "unknown", "cvss", c.VectorString, c.CreatedAt))).ToArray();
        var weaknessModels = weaknesses.Select(w => new AdvisoryWeakness(
            "CWE",
            w.CweId,
            w.Description,
            null,
            w.Source is not null ? new[] { new AdvisoryProvenance(w.Source, "cwe", w.CweId, w.CreatedAt) } : Array.Empty<AdvisoryProvenance>())).ToArray();

        // Convert affected packages; version ranges are stored as JSON.
        var affectedModels = affected.Select(a =>
        {
            IEnumerable<AffectedVersionRange> versionRanges = Array.Empty<AffectedVersionRange>();
            if (!string.IsNullOrEmpty(a.VersionRange) && a.VersionRange != "{}")
            {
                try
                {
                    versionRanges = JsonSerializer.Deserialize<AffectedVersionRange[]>(a.VersionRange, JsonOptions)
                        ?? Array.Empty<AffectedVersionRange>();
                }
                catch (JsonException)
                {
                    // Fallback to empty: a malformed range must not block reconstruction.
                }
            }
            return new AffectedPackage(
                MapEcosystemToType(a.Ecosystem),
                a.PackageName,
                null,
                versionRanges);
        }).ToArray();

        // Parse provenance if available.
        IEnumerable<AdvisoryProvenance> provenance = Array.Empty<AdvisoryProvenance>();
        if (!string.IsNullOrEmpty(entity.Provenance) && entity.Provenance != "[]" && entity.Provenance != "{}")
        {
            try
            {
                provenance = JsonSerializer.Deserialize<AdvisoryProvenance[]>(entity.Provenance, JsonOptions)
                    ?? Array.Empty<AdvisoryProvenance>();
            }
            catch (JsonException)
            {
                // Fallback to empty
            }
        }

        return new Advisory(
            entity.AdvisoryKey,
            entity.Title ?? entity.AdvisoryKey,
            entity.Summary,
            null,
            entity.PublishedAt,
            entity.ModifiedAt,
            entity.Severity,
            false, // exploit-known flag is not persisted relationally; lost on fallback reconstruction
            aliasStrings,
            creditModels,
            referenceModels,
            affectedModels,
            cvssModels,
            provenance,
            entity.Description,
            weaknessModels,
            null);
    }

    /// <summary>
    /// Maps a persisted ecosystem label back to the domain package "type".
    /// Inverse (mostly) of AdvisoryConverter.MapTypeToEcosystem.
    /// </summary>
    private static string MapEcosystemToType(string ecosystem)
    {
        return ecosystem.ToLowerInvariant() switch
        {
            "npm" => "semver",
            "pypi" => "semver",
            "maven" => "semver",
            "nuget" => "semver",
            "cargo" => "semver",
            "go" => "semver",
            "rubygems" => "semver",
            "composer" => "semver",
            "hex" => "semver",
            "pub" => "semver",
            "rpm" => "rpm",
            "deb" => "deb",
            // NOTE(review): asymmetric round-trip — the converter maps type
            // "apk" to ecosystem "apk", but this maps it back to "semver".
            // Confirm whether "apk" is a valid domain package type before
            // changing this.
            "apk" => "semver",
            "cpe" => "cpe",
            "vendor" => "vendor",
            "ics" => "ics-vendor",
            "generic" => "semver",
            _ => "semver"
        };
    }
}

View File

@@ -0,0 +1,50 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Connections;
using StellaOps.Infrastructure.Postgres.Options;
namespace StellaOps.Concelier.Persistence.Postgres;
/// <summary>
/// PostgreSQL data source for the Concelier (vulnerability) module.
/// Manages connections for advisory ingestion, merging, and vulnerability data.
/// </summary>
/// <remarks>
/// The Concelier module stores global vulnerability data that is not tenant-scoped.
/// Advisories and their metadata are shared across all tenants.
/// </remarks>
public sealed class ConcelierDataSource : DataSourceBase
{
    /// <summary>
    /// Default schema name for Concelier/vulnerability tables.
    /// </summary>
    public const string DefaultSchemaName = "vuln";
    /// <summary>
    /// Creates a new Concelier data source.
    /// </summary>
    /// <param name="options">Shared PostgreSQL options; the schema name is defaulted to <see cref="DefaultSchemaName"/> when unset.</param>
    /// <param name="logger">Logger for connection diagnostics.</param>
    public ConcelierDataSource(IOptions<PostgresOptions> options, ILogger<ConcelierDataSource> logger)
        : base(CreateOptions(options.Value), logger)
    {
    }
    /// <inheritdoc />
    protected override string ModuleName => "Concelier";
    /// <inheritdoc />
    protected override void ConfigureDataSourceBuilder(NpgsqlDataSourceBuilder builder)
    {
        base.ConfigureDataSourceBuilder(builder);
        // No Concelier-specific builder configuration yet; only the base
        // settings apply. (A previous comment mentioned full-text search
        // vector support, but nothing is configured here.)
    }
    // Ensures a schema name is set, falling back to the module default.
    private static PostgresOptions CreateOptions(PostgresOptions baseOptions)
    {
        // NOTE(review): this mutates the PostgresOptions instance resolved
        // from IOptions<T>, which is typically a shared singleton — other
        // modules reading the same options will observe SchemaName = "vuln".
        // Consider cloning the options before overriding. TODO confirm.
        if (string.IsNullOrWhiteSpace(baseOptions.SchemaName))
        {
            baseOptions.SchemaName = DefaultSchemaName;
        }
        return baseOptions;
    }
}

View File

@@ -0,0 +1,125 @@
using System;
using System.Text.Json;
using StellaOps.Concelier.Documents;
using StellaOps.Concelier.Documents.IO;
using Contracts = StellaOps.Concelier.Storage.Contracts;
using LegacyContracts = StellaOps.Concelier.Storage;
namespace StellaOps.Concelier.Persistence.Postgres;
/// <summary>
/// Bidirectional mapping helpers between the legacy storage records and the
/// Postgres-native storage contracts. Pure in-memory translation; no I/O.
/// </summary>
internal static class ContractsMappingExtensions
{
    // Relaxed extended JSON keeps document payloads round-trippable when
    // converting between DocumentObject and System.Text.Json documents.
    private static readonly JsonWriterSettings RelaxedJsonSettings = new()
    {
        OutputMode = JsonOutputMode.RelaxedExtendedJson
    };

    /// <summary>Maps a legacy document record to the new storage contract. Field-for-field copy.</summary>
    internal static Contracts.StorageDocument ToStorageDocument(this LegacyContracts.DocumentRecord record) =>
        new(
            record.Id,
            record.SourceName,
            record.Uri,
            record.CreatedAt,
            record.Sha256,
            record.Status,
            record.ContentType,
            record.Headers,
            record.Metadata,
            record.Etag,
            record.LastModified,
            record.PayloadId,
            record.ExpiresAt,
            record.Payload,
            record.FetchedAt);

    /// <summary>Maps a new-contract storage document back to the legacy record. Field-for-field copy.</summary>
    internal static LegacyContracts.DocumentRecord ToLegacyDocumentRecord(this Contracts.StorageDocument record) =>
        new(
            record.Id,
            record.SourceName,
            record.Uri,
            record.CreatedAt,
            record.Sha256,
            record.Status,
            record.ContentType,
            record.Headers,
            record.Metadata,
            record.Etag,
            record.LastModified,
            record.PayloadId,
            record.ExpiresAt,
            record.Payload,
            record.FetchedAt);

    /// <summary>
    /// Maps a legacy DTO record to the new contract, converting the
    /// DocumentObject payload into a <see cref="JsonDocument"/>.
    /// </summary>
    internal static Contracts.StorageDto ToStorageDto(this LegacyContracts.DtoRecord record)
    {
        var payloadDocument = JsonDocument.Parse(record.Payload.ToJson(RelaxedJsonSettings));
        return new Contracts.StorageDto(
            record.Id,
            record.DocumentId,
            record.SourceName,
            record.Format,
            payloadDocument,
            record.CreatedAt,
            record.SchemaVersion,
            record.ValidatedAt);
    }

    /// <summary>
    /// Maps a new-contract DTO back to the legacy record, converting the
    /// <see cref="JsonDocument"/> payload into a DocumentObject.
    /// </summary>
    internal static LegacyContracts.DtoRecord ToLegacyDtoRecord(this Contracts.StorageDto record)
    {
        var payloadObject = DocumentObject.Parse(record.Payload.RootElement.GetRawText());
        return new LegacyContracts.DtoRecord(
            record.Id,
            record.DocumentId,
            record.SourceName,
            record.Format,
            payloadObject,
            record.CreatedAt,
            record.SchemaVersion,
            record.ValidatedAt);
    }

    /// <summary>
    /// Maps legacy source cursor state to the new contract. A null cursor is
    /// preserved as null; otherwise it is re-parsed as a JsonDocument.
    /// </summary>
    internal static Contracts.SourceCursorState ToStorageCursorState(this LegacyContracts.SourceStateRecord record)
    {
        JsonDocument? cursorDocument = null;
        if (record.Cursor is not null)
        {
            cursorDocument = JsonDocument.Parse(record.Cursor.ToJson(RelaxedJsonSettings));
        }

        return new Contracts.SourceCursorState(
            record.SourceName,
            record.Enabled,
            record.Paused,
            cursorDocument,
            record.LastSuccess,
            record.LastFailure,
            record.FailCount,
            record.BackoffUntil,
            record.UpdatedAt,
            record.LastFailureReason);
    }

    /// <summary>
    /// Maps new-contract source cursor state back to the legacy record,
    /// converting a non-null cursor back into a DocumentObject.
    /// </summary>
    internal static LegacyContracts.SourceStateRecord ToLegacySourceStateRecord(this Contracts.SourceCursorState record)
    {
        var cursorObject = record.Cursor is null
            ? null
            : DocumentObject.Parse(record.Cursor.RootElement.GetRawText());

        return new LegacyContracts.SourceStateRecord(
            record.SourceName,
            record.Enabled,
            record.Paused,
            cursorObject,
            record.LastSuccess,
            record.LastFailure,
            record.FailCount,
            record.BackoffUntil,
            record.UpdatedAt,
            record.LastFailureReason);
    }

    /// <summary>Converts a <see cref="JsonDocument"/> into a DocumentObject via raw JSON text.</summary>
    internal static DocumentObject ToDocumentObject(this JsonDocument document)
    {
        ArgumentNullException.ThrowIfNull(document);
        return DocumentObject.Parse(document.RootElement.GetRawText());
    }
}

View File

@@ -0,0 +1,56 @@
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Conversion;
/// <summary>
/// Result of converting an advisory document to PostgreSQL entities.
/// Contains the main advisory entity and all related child entities.
/// All child collections default to empty; they are never null.
/// </summary>
public sealed class AdvisoryConversionResult
{
    /// <summary>
    /// The main advisory entity.
    /// </summary>
    public required AdvisoryEntity Advisory { get; init; }
    /// <summary>
    /// Alias entities (CVE, GHSA, etc.).
    /// </summary>
    public IReadOnlyList<AdvisoryAliasEntity> Aliases { get; init; } = Array.Empty<AdvisoryAliasEntity>();
    /// <summary>
    /// CVSS score entities.
    /// </summary>
    public IReadOnlyList<AdvisoryCvssEntity> Cvss { get; init; } = Array.Empty<AdvisoryCvssEntity>();
    /// <summary>
    /// Affected package entities.
    /// </summary>
    public IReadOnlyList<AdvisoryAffectedEntity> Affected { get; init; } = Array.Empty<AdvisoryAffectedEntity>();
    /// <summary>
    /// Reference URL entities.
    /// </summary>
    public IReadOnlyList<AdvisoryReferenceEntity> References { get; init; } = Array.Empty<AdvisoryReferenceEntity>();
    /// <summary>
    /// Credit entities.
    /// </summary>
    public IReadOnlyList<AdvisoryCreditEntity> Credits { get; init; } = Array.Empty<AdvisoryCreditEntity>();
    /// <summary>
    /// Weakness (CWE) entities.
    /// </summary>
    public IReadOnlyList<AdvisoryWeaknessEntity> Weaknesses { get; init; } = Array.Empty<AdvisoryWeaknessEntity>();
    /// <summary>
    /// Known Exploited Vulnerabilities (KEV) flag entities.
    /// </summary>
    public IReadOnlyList<KevFlagEntity> KevFlags { get; init; } = Array.Empty<KevFlagEntity>();
    /// <summary>
    /// Total number of child entities across all collections
    /// (computed on each access; useful for diagnostics/logging).
    /// </summary>
    public int TotalChildEntities =>
        Aliases.Count + Cvss.Count + Affected.Count + References.Count + Credits.Count + Weaknesses.Count + KevFlags.Count;
}

View File

@@ -0,0 +1,258 @@
using System.Text.Json;
using StellaOps.Concelier.Models;
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Conversion;
/// <summary>
/// Translates <see cref="Advisory"/> domain models into the relational entity
/// graph persisted by the PostgreSQL advisory repositories.
/// </summary>
public sealed class AdvisoryConverter
{
    // Serializer for JSON columns (provenance, version ranges); camelCase,
    // compact output.
    private static readonly JsonSerializerOptions SerializerOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
        WriteIndented = false
    };

    // Domain package "type" (lower-cased) -> persisted ecosystem label.
    // Anything unrecognized falls back to "generic".
    private static readonly Dictionary<string, string> TypeToEcosystemMap = new(StringComparer.Ordinal)
    {
        ["npm"] = "npm",
        ["pypi"] = "pypi",
        ["maven"] = "maven",
        ["nuget"] = "nuget",
        ["cargo"] = "cargo",
        ["go"] = "go",
        ["rubygems"] = "rubygems",
        ["composer"] = "composer",
        ["hex"] = "hex",
        ["pub"] = "pub",
        ["rpm"] = "rpm",
        ["deb"] = "deb",
        ["apk"] = "apk",
        ["cpe"] = "cpe",
        ["semver"] = "generic",
        ["vendor"] = "vendor",
        ["ics-vendor"] = "ics",
    };

    /// <summary>
    /// Converts an <see cref="Advisory"/> domain model into an
    /// <see cref="AdvisoryConversionResult"/> holding the advisory row plus
    /// all child rows (aliases, CVSS, affected packages, references, credits,
    /// weaknesses, KEV flags). New surrogate ids and timestamps are generated
    /// on every call.
    /// </summary>
    /// <param name="advisory">Domain advisory to convert; must not be null.</param>
    /// <param name="sourceId">Optional source row id to associate with the advisory.</param>
    public AdvisoryConversionResult ConvertFromDomain(Advisory advisory, Guid? sourceId = null)
    {
        ArgumentNullException.ThrowIfNull(advisory);

        var advisoryId = Guid.NewGuid();
        var timestamp = DateTimeOffset.UtcNow;

        // Prefer a CVE alias as primary vulnerability id, then any alias,
        // then the advisory key itself.
        var primaryVulnId =
            advisory.Aliases.FirstOrDefault(a => a.StartsWith("CVE-", StringComparison.OrdinalIgnoreCase))
            ?? advisory.Aliases.FirstOrDefault()
            ?? advisory.AdvisoryKey;

        var advisoryEntity = new AdvisoryEntity
        {
            Id = advisoryId,
            AdvisoryKey = advisory.AdvisoryKey,
            PrimaryVulnId = primaryVulnId,
            SourceId = sourceId,
            Title = advisory.Title,
            Summary = advisory.Summary,
            Description = advisory.Description,
            Severity = advisory.Severity,
            PublishedAt = advisory.Published,
            ModifiedAt = advisory.Modified,
            WithdrawnAt = null,
            Provenance = JsonSerializer.Serialize(advisory.Provenance, SerializerOptions),
            // Full-fidelity payload kept for lossless round-trips on read.
            RawPayload = CanonicalJsonSerializer.Serialize(advisory),
            CreatedAt = timestamp,
            UpdatedAt = timestamp
        };

        // Aliases: the first CVE alias (and only that one) is marked primary.
        var aliasEntities = new List<AdvisoryAliasEntity>();
        var primaryAssigned = false;
        foreach (var alias in advisory.Aliases)
        {
            var aliasKind = DetermineAliasType(alias);
            var markPrimary = !primaryAssigned && aliasKind == "cve";
            primaryAssigned |= markPrimary;

            aliasEntities.Add(new AdvisoryAliasEntity
            {
                Id = Guid.NewGuid(),
                AdvisoryId = advisoryId,
                AliasType = aliasKind,
                AliasValue = alias,
                IsPrimary = markPrimary,
                CreatedAt = timestamp
            });
        }

        // CVSS: the first metric is treated as primary.
        var cvssEntities = advisory.CvssMetrics
            .Select((metric, index) => new AdvisoryCvssEntity
            {
                Id = Guid.NewGuid(),
                AdvisoryId = advisoryId,
                CvssVersion = metric.Version,
                VectorString = metric.Vector,
                BaseScore = (decimal)metric.BaseScore,
                BaseSeverity = metric.BaseSeverity,
                ExploitabilityScore = null,
                ImpactScore = null,
                Source = metric.Provenance.Source,
                IsPrimary = index == 0,
                CreatedAt = timestamp
            })
            .ToList();

        // Affected packages: version ranges are stored as serialized JSON.
        var affectedEntities = advisory.AffectedPackages
            .Select(package =>
            {
                var ecosystem = MapTypeToEcosystem(package.Type);
                return new AdvisoryAffectedEntity
                {
                    Id = Guid.NewGuid(),
                    AdvisoryId = advisoryId,
                    Ecosystem = ecosystem,
                    PackageName = package.Identifier,
                    Purl = BuildPurl(ecosystem, package.Identifier),
                    VersionRange = JsonSerializer.Serialize(package.VersionRanges, SerializerOptions),
                    VersionsAffected = null,
                    VersionsFixed = ExtractFixedVersions(package.VersionRanges),
                    DatabaseSpecific = null,
                    CreatedAt = timestamp
                };
            })
            .ToList();

        // References: unknown kinds default to "web".
        var referenceEntities = advisory.References
            .Select(reference => new AdvisoryReferenceEntity
            {
                Id = Guid.NewGuid(),
                AdvisoryId = advisoryId,
                RefType = reference.Kind ?? "web",
                Url = reference.Url,
                CreatedAt = timestamp
            })
            .ToList();

        // Credits: only the first contact is persisted.
        var creditEntities = advisory.Credits
            .Select(credit => new AdvisoryCreditEntity
            {
                Id = Guid.NewGuid(),
                AdvisoryId = advisoryId,
                Name = credit.DisplayName,
                Contact = credit.Contacts.FirstOrDefault(),
                CreditType = credit.Role,
                CreatedAt = timestamp
            })
            .ToList();

        // Weaknesses (CWEs): only the first provenance source is persisted.
        var weaknessEntities = advisory.Cwes
            .Select(weakness => new AdvisoryWeaknessEntity
            {
                Id = Guid.NewGuid(),
                AdvisoryId = advisoryId,
                CweId = weakness.Identifier,
                Description = weakness.Name,
                Source = weakness.Provenance.FirstOrDefault()?.Source,
                CreatedAt = timestamp
            })
            .ToList();

        // KEV flag: emitted only when the advisory is marked exploit-known
        // AND carries a CVE alias to key the flag on.
        var kevFlags = new List<KevFlagEntity>();
        var kevCveId = advisory.ExploitKnown
            ? advisory.Aliases.FirstOrDefault(a => a.StartsWith("CVE-", StringComparison.OrdinalIgnoreCase))
            : null;
        if (!string.IsNullOrWhiteSpace(kevCveId))
        {
            kevFlags.Add(new KevFlagEntity
            {
                Id = Guid.NewGuid(),
                AdvisoryId = advisoryId,
                CveId = kevCveId,
                VendorProject = null,
                Product = null,
                VulnerabilityName = advisory.Title,
                DateAdded = DateOnly.FromDateTime(timestamp.UtcDateTime),
                DueDate = null,
                KnownRansomwareUse = false,
                Notes = null,
                CreatedAt = timestamp
            });
        }

        return new AdvisoryConversionResult
        {
            Advisory = advisoryEntity,
            Aliases = aliasEntities,
            Cvss = cvssEntities,
            Affected = affectedEntities,
            References = referenceEntities,
            Credits = creditEntities,
            Weaknesses = weaknessEntities,
            KevFlags = kevFlags
        };
    }

    /// <summary>Classifies an alias by its well-known, case-insensitive prefix.</summary>
    private static string DetermineAliasType(string alias) => alias switch
    {
        _ when alias.StartsWith("CVE-", StringComparison.OrdinalIgnoreCase) => "cve",
        _ when alias.StartsWith("GHSA-", StringComparison.OrdinalIgnoreCase) => "ghsa",
        _ when alias.StartsWith("RUSTSEC-", StringComparison.OrdinalIgnoreCase) => "rustsec",
        _ when alias.StartsWith("GO-", StringComparison.OrdinalIgnoreCase) => "go",
        _ when alias.StartsWith("PYSEC-", StringComparison.OrdinalIgnoreCase) => "pysec",
        _ when alias.StartsWith("DSA-", StringComparison.OrdinalIgnoreCase) => "dsa",
        _ when alias.StartsWith("RHSA-", StringComparison.OrdinalIgnoreCase) => "rhsa",
        _ when alias.StartsWith("USN-", StringComparison.OrdinalIgnoreCase) => "usn",
        _ => "other"
    };

    /// <summary>Maps a domain package type onto a persisted ecosystem label ("generic" fallback).</summary>
    private static string MapTypeToEcosystem(string type) =>
        TypeToEcosystemMap.TryGetValue(type.ToLowerInvariant(), out var ecosystem)
            ? ecosystem
            : "generic";

    /// <summary>
    /// Builds a best-effort package URL for ecosystems with a direct mapping;
    /// returns null where none can be derived (maven without groupId, OS
    /// packages, CPE, vendor).
    /// </summary>
    private static string? BuildPurl(string ecosystem, string identifier) =>
        // NOTE(review): identifiers are not percent-encoded; scoped npm names
        // ("@scope/name") technically require "%40" per the purl spec —
        // confirm downstream normalization handles this.
        ecosystem switch
        {
            "npm" => $"pkg:npm/{identifier}",
            "pypi" => $"pkg:pypi/{identifier}",
            "maven" => identifier.Contains(':') ? $"pkg:maven/{identifier.Replace(':', '/')}" : null,
            "nuget" => $"pkg:nuget/{identifier}",
            "cargo" => $"pkg:cargo/{identifier}",
            "go" => $"pkg:golang/{identifier}",
            "rubygems" => $"pkg:gem/{identifier}",
            "composer" => $"pkg:composer/{identifier}",
            "hex" => $"pkg:hex/{identifier}",
            "pub" => $"pkg:pub/{identifier}",
            _ => null
        };

    /// <summary>
    /// Collects distinct fixed versions (first-occurrence order) from the
    /// given ranges; returns null when there are none.
    /// </summary>
    private static string[]? ExtractFixedVersions(IEnumerable<AffectedVersionRange> ranges)
    {
        var seen = new HashSet<string>();
        var fixedVersions = new List<string>();

        foreach (var range in ranges)
        {
            var fixedVersion = range.FixedVersion;
            if (string.IsNullOrEmpty(fixedVersion) || !seen.Add(fixedVersion))
            {
                continue;
            }
            fixedVersions.Add(fixedVersion);
        }

        return fixedVersions.Count > 0 ? fixedVersions.ToArray() : null;
    }
}

View File

@@ -0,0 +1,125 @@
using System.Text.Json;
using StellaOps.Concelier.Storage;
using Contracts = StellaOps.Concelier.Storage.Contracts;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Concelier.Persistence.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres;
/// <summary>
/// Postgres-backed implementation that satisfies the legacy IDocumentStore contract and the new Postgres-native storage contract.
/// </summary>
public sealed class PostgresDocumentStore : IDocumentStore, Contracts.IStorageDocumentStore
{
private readonly IDocumentRepository _repository;
private readonly ISourceRepository _sourceRepository;
private readonly JsonSerializerOptions _json = new(JsonSerializerDefaults.Web);
public PostgresDocumentStore(IDocumentRepository repository, ISourceRepository sourceRepository)
{
_repository = repository ?? throw new ArgumentNullException(nameof(repository));
_sourceRepository = sourceRepository ?? throw new ArgumentNullException(nameof(sourceRepository));
}
public async Task<DocumentRecord?> FindAsync(Guid id, CancellationToken cancellationToken)
{
var row = await _repository.FindAsync(id, cancellationToken).ConfigureAwait(false);
return row is null ? null : Map(row);
}
public async Task<DocumentRecord?> FindBySourceAndUriAsync(string sourceName, string uri, CancellationToken cancellationToken)
{
var row = await _repository.FindBySourceAndUriAsync(sourceName, uri, cancellationToken).ConfigureAwait(false);
return row is null ? null : Map(row);
}
public async Task<DocumentRecord> UpsertAsync(DocumentRecord record, CancellationToken cancellationToken)
{
var source = await EnsureSourceAsync(record.SourceName, cancellationToken).ConfigureAwait(false);
var entity = new DocumentRecordEntity(
Id: record.Id == Guid.Empty ? Guid.NewGuid() : record.Id,
SourceId: source.Id,
SourceName: record.SourceName,
Uri: record.Uri,
Sha256: record.Sha256,
Status: record.Status,
ContentType: record.ContentType,
HeadersJson: record.Headers is null ? null : JsonSerializer.Serialize(record.Headers, _json),
MetadataJson: record.Metadata is null ? null : JsonSerializer.Serialize(record.Metadata, _json),
Etag: record.Etag,
LastModified: record.LastModified,
Payload: record.Payload ?? Array.Empty<byte>(),
CreatedAt: record.CreatedAt,
UpdatedAt: DateTimeOffset.UtcNow,
ExpiresAt: record.ExpiresAt);
var saved = await _repository.UpsertAsync(entity, cancellationToken).ConfigureAwait(false);
return Map(saved);
}
public async Task UpdateStatusAsync(Guid id, string status, CancellationToken cancellationToken)
{
await _repository.UpdateStatusAsync(id, status, cancellationToken).ConfigureAwait(false);
}
async Task<Contracts.StorageDocument?> Contracts.IStorageDocumentStore.FindBySourceAndUriAsync(string sourceName, string uri, CancellationToken cancellationToken)
=> (await FindBySourceAndUriAsync(sourceName, uri, cancellationToken).ConfigureAwait(false))?.ToStorageDocument();
async Task<Contracts.StorageDocument?> Contracts.IStorageDocumentStore.FindAsync(Guid id, CancellationToken cancellationToken)
=> (await FindAsync(id, cancellationToken).ConfigureAwait(false))?.ToStorageDocument();
async Task<Contracts.StorageDocument> Contracts.IStorageDocumentStore.UpsertAsync(Contracts.StorageDocument record, CancellationToken cancellationToken)
=> (await UpsertAsync(record.ToLegacyDocumentRecord(), cancellationToken).ConfigureAwait(false)).ToStorageDocument();
Task Contracts.IStorageDocumentStore.UpdateStatusAsync(Guid id, string status, CancellationToken cancellationToken)
=> UpdateStatusAsync(id, status, cancellationToken);
    /// <summary>
    /// Maps a persisted row back to the legacy <see cref="DocumentRecord"/> shape.
    /// </summary>
    private DocumentRecord Map(DocumentRecordEntity row)
    {
        return new DocumentRecord(
            row.Id,
            row.SourceName,
            row.Uri,
            row.CreatedAt,
            row.Sha256,
            row.Status,
            row.ContentType,
            // JSON text columns are deserialized back into string dictionaries; null stays null.
            row.HeadersJson is null
                ? null
                : JsonSerializer.Deserialize<Dictionary<string, string>>(row.HeadersJson, _json),
            row.MetadataJson is null
                ? null
                : JsonSerializer.Deserialize<Dictionary<string, string>>(row.MetadataJson, _json),
            row.Etag,
            row.LastModified,
            // The payload is stored inline on the document row, so the payload id mirrors the
            // document id here — NOTE(review): confirm against the DocumentRecord contract.
            PayloadId: row.Id,
            ExpiresAt: row.ExpiresAt,
            Payload: row.Payload);
    }
    /// <summary>
    /// Returns the source row for <paramref name="sourceName"/>, creating a minimal
    /// placeholder row (key/name/type all set to the source name) when none exists.
    /// </summary>
    // NOTE(review): get-then-upsert is not atomic; concurrent callers may race here.
    // Presumably the underlying UpsertAsync resolves the conflict on the unique key — confirm.
    private async Task<SourceEntity> EnsureSourceAsync(string sourceName, CancellationToken cancellationToken)
    {
        var existing = await _sourceRepository.GetByKeyAsync(sourceName, cancellationToken).ConfigureAwait(false);
        if (existing is not null)
        {
            return existing;
        }
        var now = DateTimeOffset.UtcNow;
        return await _sourceRepository.UpsertAsync(new SourceEntity
        {
            Id = Guid.NewGuid(),
            Key = sourceName,
            Name = sourceName,
            SourceType = sourceName,
            Url = null,
            Priority = 0,
            Enabled = true,
            Config = "{}",
            Metadata = "{}",
            CreatedAt = now,
            UpdatedAt = now,
        }, cancellationToken).ConfigureAwait(false);
    }
}

View File

@@ -0,0 +1,19 @@
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents an affected package entry for an advisory (vuln.advisory_affected).
/// </summary>
public sealed class AdvisoryAffectedEntity
{
    /// <summary>Unique row identifier.</summary>
    public Guid Id { get; init; }

    /// <summary>Owning advisory identifier.</summary>
    public Guid AdvisoryId { get; init; }

    /// <summary>Package ecosystem the entry applies to.</summary>
    public required string Ecosystem { get; init; }

    /// <summary>Package name within the ecosystem.</summary>
    public required string PackageName { get; init; }

    /// <summary>Package URL (purl), when the source provides one.</summary>
    public string? Purl { get; init; }

    /// <summary>Structured version range as JSON text (jsonb column); defaults to an empty object.</summary>
    public string VersionRange { get; init; } = "{}";

    /// <summary>Explicit list of affected versions, when provided.</summary>
    public string[]? VersionsAffected { get; init; }

    /// <summary>Explicit list of fixed versions, when provided.</summary>
    public string[]? VersionsFixed { get; init; }

    /// <summary>Source-database-specific payload as JSON text (jsonb column).</summary>
    public string? DatabaseSpecific { get; init; }

    /// <summary>When the row was created.</summary>
    public DateTimeOffset CreatedAt { get; init; }
}

View File

@@ -0,0 +1,15 @@
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents an advisory alias (e.g., CVE, GHSA).
/// </summary>
public sealed class AdvisoryAliasEntity
{
    /// <summary>Unique row identifier.</summary>
    public Guid Id { get; init; }

    /// <summary>Owning advisory identifier.</summary>
    public Guid AdvisoryId { get; init; }

    /// <summary>Alias scheme (e.g., "CVE", "GHSA").</summary>
    public required string AliasType { get; init; }

    /// <summary>The alias value itself (e.g., "CVE-2024-1234").</summary>
    public required string AliasValue { get; init; }

    /// <summary>Whether this is the advisory's primary alias; queries sort primary aliases first.</summary>
    public bool IsPrimary { get; init; }

    /// <summary>When the row was created.</summary>
    public DateTimeOffset CreatedAt { get; init; }
}

View File

@@ -0,0 +1,85 @@
// -----------------------------------------------------------------------------
// AdvisoryCanonicalEntity.cs
// Sprint: SPRINT_8200_0012_0002_DB_canonical_source_edge_schema
// Task: SCHEMA-8200-007
// Description: Entity for deduplicated canonical advisory records
// -----------------------------------------------------------------------------
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents a deduplicated canonical advisory in the vuln schema.
/// Canonical advisories are identified by their semantic merge_hash.
/// </summary>
/// <remarks>
/// <see cref="VersionRange"/> is held as JSON text: repositories write it with a
/// <c>::jsonb</c> cast and read it back via <c>version_range::text</c>.
/// </remarks>
public sealed class AdvisoryCanonicalEntity
{
    /// <summary>
    /// Unique canonical advisory identifier.
    /// </summary>
    public required Guid Id { get; init; }

    /// <summary>
    /// CVE identifier (e.g., "CVE-2024-1234").
    /// </summary>
    public required string Cve { get; init; }

    /// <summary>
    /// Normalized PURL or CPE identifying the affected package.
    /// </summary>
    public required string AffectsKey { get; init; }

    /// <summary>
    /// Structured version range as JSON text (introduced, fixed, last_affected).
    /// </summary>
    public string? VersionRange { get; init; }

    /// <summary>
    /// Sorted CWE array (e.g., ["CWE-79", "CWE-89"]); defaults to empty.
    /// </summary>
    public string[] Weakness { get; init; } = [];

    /// <summary>
    /// Deterministic SHA256 hash of (cve, affects_key, version_range, weakness, patch_lineage).
    /// Used as the deduplication key for upserts.
    /// </summary>
    public required string MergeHash { get; init; }

    /// <summary>
    /// Status: active, stub, or withdrawn. Defaults to "active".
    /// </summary>
    public string Status { get; init; } = "active";

    /// <summary>
    /// Normalized severity: critical, high, medium, low, none, unknown.
    /// </summary>
    public string? Severity { get; init; }

    /// <summary>
    /// EPSS exploit prediction probability (0.0000-1.0000).
    /// </summary>
    public decimal? EpssScore { get; init; }

    /// <summary>
    /// Whether an exploit is known to exist.
    /// </summary>
    public bool ExploitKnown { get; init; }

    /// <summary>
    /// Advisory title (for stub degradation).
    /// </summary>
    public string? Title { get; init; }

    /// <summary>
    /// Advisory summary (for stub degradation).
    /// </summary>
    public string? Summary { get; init; }

    /// <summary>
    /// When the canonical record was created.
    /// </summary>
    public DateTimeOffset CreatedAt { get; init; }

    /// <summary>
    /// When the canonical record was last updated.
    /// </summary>
    public DateTimeOffset UpdatedAt { get; init; }
}

View File

@@ -0,0 +1,15 @@
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents a credit entry for an advisory (reporter, researcher, etc.).
/// </summary>
public sealed class AdvisoryCreditEntity
{
    /// <summary>Unique row identifier.</summary>
    public Guid Id { get; init; }

    /// <summary>Owning advisory identifier.</summary>
    public Guid AdvisoryId { get; init; }

    /// <summary>Name of the credited person or organization.</summary>
    public required string Name { get; init; }

    /// <summary>Contact information, when provided by the source.</summary>
    public string? Contact { get; init; }

    /// <summary>Type of credit (source-specific value, e.g. finder/reporter).</summary>
    public string? CreditType { get; init; }

    /// <summary>When the row was created.</summary>
    public DateTimeOffset CreatedAt { get; init; }
}

View File

@@ -0,0 +1,20 @@
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents a CVSS score for an advisory.
/// </summary>
public sealed class AdvisoryCvssEntity
{
    /// <summary>Unique row identifier.</summary>
    public Guid Id { get; init; }

    /// <summary>Owning advisory identifier.</summary>
    public Guid AdvisoryId { get; init; }

    /// <summary>CVSS specification version (e.g., "3.1").</summary>
    public required string CvssVersion { get; init; }

    /// <summary>Full CVSS vector string.</summary>
    public required string VectorString { get; init; }

    /// <summary>Base score.</summary>
    public decimal BaseScore { get; init; }

    /// <summary>Base severity label derived from the score, when provided.</summary>
    public string? BaseSeverity { get; init; }

    /// <summary>Exploitability sub-score, when provided.</summary>
    public decimal? ExploitabilityScore { get; init; }

    /// <summary>Impact sub-score, when provided.</summary>
    public decimal? ImpactScore { get; init; }

    /// <summary>Source that assigned this score.</summary>
    public string? Source { get; init; }

    /// <summary>Whether this is the advisory's primary CVSS entry.</summary>
    public bool IsPrimary { get; init; }

    /// <summary>When the row was created.</summary>
    public DateTimeOffset CreatedAt { get; init; }
}

View File

@@ -0,0 +1,82 @@
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents an advisory entity in the vuln schema.
/// </summary>
/// <remarks>
/// <see cref="Provenance"/> and <see cref="RawPayload"/> are JSON text columns;
/// <see cref="Provenance"/> defaults to an empty JSON object.
/// </remarks>
public sealed class AdvisoryEntity
{
    /// <summary>
    /// Unique advisory identifier.
    /// </summary>
    public required Guid Id { get; init; }

    /// <summary>
    /// Advisory key (unique identifier, e.g., "ghsa:GHSA-xxxx").
    /// </summary>
    public required string AdvisoryKey { get; init; }

    /// <summary>
    /// Primary vulnerability ID (CVE, GHSA, etc.).
    /// </summary>
    public required string PrimaryVulnId { get; init; }

    /// <summary>
    /// Source that provided this advisory, when known.
    /// </summary>
    public Guid? SourceId { get; init; }

    /// <summary>
    /// Advisory title.
    /// </summary>
    public string? Title { get; init; }

    /// <summary>
    /// Brief summary.
    /// </summary>
    public string? Summary { get; init; }

    /// <summary>
    /// Full description.
    /// </summary>
    public string? Description { get; init; }

    /// <summary>
    /// Severity level, when the source supplies one.
    /// </summary>
    public string? Severity { get; init; }

    /// <summary>
    /// When the advisory was published.
    /// </summary>
    public DateTimeOffset? PublishedAt { get; init; }

    /// <summary>
    /// When the advisory was last modified.
    /// </summary>
    public DateTimeOffset? ModifiedAt { get; init; }

    /// <summary>
    /// When the advisory was withdrawn (if applicable).
    /// </summary>
    public DateTimeOffset? WithdrawnAt { get; init; }

    /// <summary>
    /// Provenance information as JSON; defaults to an empty object.
    /// </summary>
    public string Provenance { get; init; } = "{}";

    /// <summary>
    /// Raw payload from the source as JSON.
    /// </summary>
    public string? RawPayload { get; init; }

    /// <summary>
    /// When the record was created.
    /// </summary>
    public DateTimeOffset CreatedAt { get; init; }

    /// <summary>
    /// When the record was last updated.
    /// </summary>
    public DateTimeOffset UpdatedAt { get; init; }
}

View File

@@ -0,0 +1,21 @@
using System;
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents a cached Link-Not-Merge linkset snapshot stored in PostgreSQL.
/// </summary>
public sealed class AdvisoryLinksetCacheEntity
{
    /// <summary>Unique row identifier.</summary>
    public Guid Id { get; init; }

    /// <summary>Tenant the linkset belongs to.</summary>
    public string TenantId { get; init; } = default!;

    /// <summary>Source key the linkset was built from.</summary>
    public string Source { get; init; } = default!;

    /// <summary>Advisory identifier the linkset covers.</summary>
    public string AdvisoryId { get; init; } = default!;

    /// <summary>Observation identifiers included in the linkset; defaults to empty.</summary>
    public string[] Observations { get; init; } = Array.Empty<string>();

    /// <summary>Normalized linkset content as JSON text.</summary>
    public string? NormalizedJson { get; init; }

    /// <summary>Recorded conflicts as JSON text.</summary>
    public string? ConflictsJson { get; init; }

    /// <summary>Provenance details as JSON text.</summary>
    public string? ProvenanceJson { get; init; }

    /// <summary>Confidence score for the linkset, when computed.</summary>
    public double? Confidence { get; init; }

    /// <summary>When the snapshot was created.</summary>
    public DateTimeOffset CreatedAt { get; init; }

    /// <summary>Identifier of the job that built this snapshot, when known.</summary>
    public string? BuiltByJobId { get; init; }
}

View File

@@ -0,0 +1,14 @@
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents an advisory reference URL.
/// </summary>
public sealed class AdvisoryReferenceEntity
{
    /// <summary>Unique row identifier.</summary>
    public Guid Id { get; init; }

    /// <summary>Owning advisory identifier.</summary>
    public Guid AdvisoryId { get; init; }

    /// <summary>Reference kind (source-specific value, e.g. advisory/fix/report).</summary>
    public required string RefType { get; init; }

    /// <summary>The reference URL.</summary>
    public required string Url { get; init; }

    /// <summary>When the row was created.</summary>
    public DateTimeOffset CreatedAt { get; init; }
}

View File

@@ -0,0 +1,14 @@
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents a snapshot of an advisory at a point in time.
/// </summary>
public sealed class AdvisorySnapshotEntity
{
    /// <summary>Unique row identifier.</summary>
    public Guid Id { get; init; }

    /// <summary>Feed snapshot this advisory snapshot belongs to.</summary>
    public Guid FeedSnapshotId { get; init; }

    /// <summary>Advisory key captured by the snapshot.</summary>
    public required string AdvisoryKey { get; init; }

    /// <summary>Content hash of the advisory at snapshot time (used for change detection).</summary>
    public required string ContentHash { get; init; }

    /// <summary>When the snapshot row was created.</summary>
    public DateTimeOffset CreatedAt { get; init; }
}

View File

@@ -0,0 +1,71 @@
// -----------------------------------------------------------------------------
// AdvisorySourceEdgeEntity.cs
// Sprint: SPRINT_8200_0012_0002_DB_canonical_source_edge_schema
// Task: SCHEMA-8200-008
// Description: Entity linking canonical advisory to source documents with DSSE
// -----------------------------------------------------------------------------
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents a link between a canonical advisory and its source document.
/// Stores DSSE signature envelopes and raw payload for provenance.
/// </summary>
/// <remarks>
/// <see cref="DsseEnvelope"/> and <see cref="RawPayload"/> are JSON text columns.
/// </remarks>
public sealed class AdvisorySourceEdgeEntity
{
    /// <summary>
    /// Unique source edge identifier.
    /// </summary>
    public required Guid Id { get; init; }

    /// <summary>
    /// Reference to the deduplicated canonical advisory.
    /// </summary>
    public required Guid CanonicalId { get; init; }

    /// <summary>
    /// Reference to the feed source.
    /// </summary>
    public required Guid SourceId { get; init; }

    /// <summary>
    /// Vendor's advisory ID (e.g., "DSA-5678", "RHSA-2024:1234").
    /// </summary>
    public required string SourceAdvisoryId { get; init; }

    /// <summary>
    /// SHA256 hash of the raw source document.
    /// </summary>
    public required string SourceDocHash { get; init; }

    /// <summary>
    /// VEX-style status: affected, not_affected, fixed, under_investigation.
    /// </summary>
    public string? VendorStatus { get; init; }

    /// <summary>
    /// Source priority: vendor=10, distro=20, osv=30, nvd=40, default=100.
    /// Lower value = higher priority.
    /// </summary>
    public int PrecedenceRank { get; init; } = 100;

    /// <summary>
    /// DSSE signature envelope as JSON ({ payloadType, payload, signatures[] }).
    /// </summary>
    public string? DsseEnvelope { get; init; }

    /// <summary>
    /// Original advisory document as JSON.
    /// </summary>
    public string? RawPayload { get; init; }

    /// <summary>
    /// When the source document was fetched.
    /// </summary>
    public DateTimeOffset FetchedAt { get; init; }

    /// <summary>
    /// When the edge record was created.
    /// </summary>
    public DateTimeOffset CreatedAt { get; init; }
}

View File

@@ -0,0 +1,15 @@
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents a CWE weakness linked to an advisory.
/// </summary>
public sealed class AdvisoryWeaknessEntity
{
    /// <summary>Unique row identifier.</summary>
    public Guid Id { get; init; }

    /// <summary>Owning advisory identifier.</summary>
    public Guid AdvisoryId { get; init; }

    /// <summary>CWE identifier (e.g., "CWE-79").</summary>
    public required string CweId { get; init; }

    /// <summary>Weakness description, when provided.</summary>
    public string? Description { get; init; }

    /// <summary>Source that reported the weakness.</summary>
    public string? Source { get; init; }

    /// <summary>When the row was created.</summary>
    public DateTimeOffset CreatedAt { get; init; }
}

View File

@@ -0,0 +1,18 @@
namespace StellaOps.Concelier.Persistence.Postgres.Models;

/// <summary>
/// Row shape for a raw source document persisted in PostgreSQL.
/// </summary>
/// <param name="Id">Unique document identifier.</param>
/// <param name="SourceId">Owning source row identifier.</param>
/// <param name="SourceName">Source key/name the document came from.</param>
/// <param name="Uri">Document URI at the source.</param>
/// <param name="Sha256">SHA-256 hash of the document content.</param>
/// <param name="Status">Processing status.</param>
/// <param name="ContentType">MIME content type, when known.</param>
/// <param name="HeadersJson">HTTP headers serialized as JSON text, when captured.</param>
/// <param name="MetadataJson">Arbitrary metadata serialized as JSON text, when captured.</param>
/// <param name="Etag">HTTP ETag, when known.</param>
/// <param name="LastModified">HTTP Last-Modified timestamp, when known.</param>
/// <param name="Payload">Raw document bytes (empty array when absent).</param>
/// <param name="CreatedAt">When the document was first stored.</param>
/// <param name="UpdatedAt">When the document row was last written.</param>
/// <param name="ExpiresAt">Optional expiry timestamp.</param>
public sealed record DocumentRecordEntity(
    Guid Id,
    Guid SourceId,
    string SourceName,
    string Uri,
    string Sha256,
    string Status,
    string? ContentType,
    string? HeadersJson,
    string? MetadataJson,
    string? Etag,
    DateTimeOffset? LastModified,
    byte[] Payload,
    DateTimeOffset CreatedAt,
    DateTimeOffset UpdatedAt,
    DateTimeOffset? ExpiresAt);

View File

@@ -0,0 +1,16 @@
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents a feed snapshot record.
/// </summary>
public sealed class FeedSnapshotEntity
{
    /// <summary>Unique row identifier.</summary>
    public Guid Id { get; init; }

    /// <summary>Source the snapshot was taken from.</summary>
    public Guid SourceId { get; init; }

    /// <summary>External snapshot identifier.</summary>
    public required string SnapshotId { get; init; }

    /// <summary>Number of advisories captured in the snapshot.</summary>
    public int AdvisoryCount { get; init; }

    /// <summary>Checksum of the snapshot content, when computed.</summary>
    public string? Checksum { get; init; }

    /// <summary>Snapshot metadata as JSON text; defaults to an empty object.</summary>
    public string Metadata { get; init; } = "{}";

    /// <summary>When the snapshot row was created.</summary>
    public DateTimeOffset CreatedAt { get; init; }
}

View File

@@ -0,0 +1,20 @@
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents a Known Exploited Vulnerability (KEV) flag entry.
/// </summary>
public sealed class KevFlagEntity
{
    /// <summary>Unique row identifier.</summary>
    public Guid Id { get; init; }

    /// <summary>Owning advisory identifier.</summary>
    public Guid AdvisoryId { get; init; }

    /// <summary>CVE identifier the KEV entry refers to.</summary>
    public required string CveId { get; init; }

    /// <summary>Vendor/project name from the KEV catalog, when provided.</summary>
    public string? VendorProject { get; init; }

    /// <summary>Affected product name, when provided.</summary>
    public string? Product { get; init; }

    /// <summary>Vulnerability name from the KEV catalog, when provided.</summary>
    public string? VulnerabilityName { get; init; }

    /// <summary>Date the entry was added to the KEV catalog.</summary>
    public DateOnly DateAdded { get; init; }

    /// <summary>Remediation due date, when set.</summary>
    public DateOnly? DueDate { get; init; }

    /// <summary>Whether the vulnerability is known to be used in ransomware campaigns.</summary>
    public bool KnownRansomwareUse { get; init; }

    /// <summary>Free-form notes, when provided.</summary>
    public string? Notes { get; init; }

    /// <summary>When the row was created.</summary>
    public DateTimeOffset CreatedAt { get; init; }
}

View File

@@ -0,0 +1,16 @@
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents a merge event audit record.
/// </summary>
public sealed class MergeEventEntity
{
    /// <summary>Row identifier; long-typed, presumably a DB identity column — confirm schema.</summary>
    public long Id { get; init; }

    /// <summary>Advisory the event applies to.</summary>
    public Guid AdvisoryId { get; init; }

    /// <summary>Source that triggered the event, when known.</summary>
    public Guid? SourceId { get; init; }

    /// <summary>Kind of merge event recorded.</summary>
    public required string EventType { get; init; }

    /// <summary>Value before the change, when captured.</summary>
    public string? OldValue { get; init; }

    /// <summary>Value after the change, when captured.</summary>
    public string? NewValue { get; init; }

    /// <summary>When the event was recorded.</summary>
    public DateTimeOffset CreatedAt { get; init; }
}

View File

@@ -0,0 +1,64 @@
// -----------------------------------------------------------------------------
// ProvenanceScopeEntity.cs
// Sprint: SPRINT_8200_0015_0001_CONCEL_backport_integration
// Task: BACKPORT-8200-001
// Description: Entity for distro-specific backport and patch provenance
// -----------------------------------------------------------------------------
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents distro-specific backport and patch provenance per canonical advisory.
/// </summary>
/// <remarks>
/// NOTE(review): rows appear to be scoped per (CanonicalId, DistroRelease) —
/// confirm the unique constraint against the schema, which is not visible here.
/// </remarks>
public sealed class ProvenanceScopeEntity
{
    /// <summary>
    /// Unique provenance scope identifier.
    /// </summary>
    public required Guid Id { get; init; }

    /// <summary>
    /// Reference to the canonical advisory.
    /// </summary>
    public required Guid CanonicalId { get; init; }

    /// <summary>
    /// Linux distribution release identifier (e.g., debian:bookworm, rhel:9.2, ubuntu:22.04).
    /// </summary>
    public required string DistroRelease { get; init; }

    /// <summary>
    /// Distro version containing the backport (may differ from the upstream fixed version).
    /// </summary>
    public string? BackportSemver { get; init; }

    /// <summary>
    /// Upstream commit SHA or patch identifier for lineage tracking.
    /// </summary>
    public string? PatchId { get; init; }

    /// <summary>
    /// Source of the patch: upstream, distro, or vendor.
    /// </summary>
    public string? PatchOrigin { get; init; }

    /// <summary>
    /// Reference to BackportProofService evidence in proofchain.
    /// </summary>
    public Guid? EvidenceRef { get; init; }

    /// <summary>
    /// Confidence score from BackportProofService (0.0-1.0); defaults to 0.5.
    /// </summary>
    public decimal Confidence { get; init; } = 0.5m;

    /// <summary>
    /// When the provenance scope record was created.
    /// </summary>
    public DateTimeOffset CreatedAt { get; init; }

    /// <summary>
    /// When the provenance scope record was last updated.
    /// </summary>
    public DateTimeOffset UpdatedAt { get; init; }
}

View File

@@ -0,0 +1,74 @@
// -----------------------------------------------------------------------------
// SitePolicyEntity.cs
// Sprint: SPRINT_8200_0014_0001_DB_sync_ledger_schema
// Task: SYNC-8200-005
// Description: Entity for per-site federation governance policies
// -----------------------------------------------------------------------------
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents a site federation policy for governance control.
/// </summary>
public sealed class SitePolicyEntity
{
/// <summary>
/// Unique policy identifier.
/// </summary>
public required Guid Id { get; init; }
/// <summary>
/// Remote site identifier this policy applies to.
/// </summary>
public required string SiteId { get; init; }
/// <summary>
/// Human-readable display name for the site.
/// </summary>
public string? DisplayName { get; init; }
/// <summary>
/// Source keys to allow (empty allows all sources).
/// </summary>
public string[] AllowedSources { get; init; } = [];
/// <summary>
/// Source keys to deny (takes precedence over allowed).
/// </summary>
public string[] DeniedSources { get; init; } = [];
/// <summary>
/// Maximum bundle size in megabytes.
/// </summary>
public int MaxBundleSizeMb { get; init; } = 100;
/// <summary>
/// Maximum items per bundle.
/// </summary>
public int MaxItemsPerBundle { get; init; } = 10000;
/// <summary>
/// Whether bundles must be cryptographically signed.
/// </summary>
public bool RequireSignature { get; init; } = true;
/// <summary>
/// Signing key IDs or issuer patterns allowed for bundle verification.
/// </summary>
public string[] AllowedSigners { get; init; } = [];
/// <summary>
/// Whether this site policy is enabled.
/// </summary>
public bool Enabled { get; init; } = true;
/// <summary>
/// When the policy was created.
/// </summary>
public DateTimeOffset CreatedAt { get; init; }
/// <summary>
/// When the policy was last updated.
/// </summary>
public DateTimeOffset UpdatedAt { get; init; }
}

View File

@@ -0,0 +1,62 @@
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents a vulnerability feed source entity.
/// </summary>
/// <remarks>
/// <see cref="Config"/> and <see cref="Metadata"/> are JSON text columns defaulting
/// to an empty object; sources are enabled by default.
/// </remarks>
public sealed class SourceEntity
{
    /// <summary>
    /// Unique source identifier.
    /// </summary>
    public required Guid Id { get; init; }

    /// <summary>
    /// Unique source key (e.g., "nvd", "ghsa", "osv").
    /// </summary>
    public required string Key { get; init; }

    /// <summary>
    /// Display name.
    /// </summary>
    public required string Name { get; init; }

    /// <summary>
    /// Source type (e.g., "nvd", "osv", "github").
    /// </summary>
    public required string SourceType { get; init; }

    /// <summary>
    /// Source URL.
    /// </summary>
    public string? Url { get; init; }

    /// <summary>
    /// Priority for merge precedence (higher = more authoritative).
    /// </summary>
    public int Priority { get; init; }

    /// <summary>
    /// Whether the source is enabled.
    /// </summary>
    public bool Enabled { get; init; } = true;

    /// <summary>
    /// Source-specific configuration as JSON.
    /// </summary>
    public string Config { get; init; } = "{}";

    /// <summary>
    /// Source metadata as JSON.
    /// </summary>
    public string Metadata { get; init; } = "{}";

    /// <summary>
    /// When the record was created.
    /// </summary>
    public DateTimeOffset CreatedAt { get; init; }

    /// <summary>
    /// When the record was last updated.
    /// </summary>
    public DateTimeOffset UpdatedAt { get; init; }
}

View File

@@ -0,0 +1,19 @@
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Tracks source ingestion cursors and metrics.
/// </summary>
public sealed class SourceStateEntity
{
public Guid Id { get; init; }
public Guid SourceId { get; init; }
public string? Cursor { get; init; }
public DateTimeOffset? LastSyncAt { get; init; }
public DateTimeOffset? LastSuccessAt { get; init; }
public string? LastError { get; init; }
public long SyncCount { get; init; }
public int ErrorCount { get; init; }
public string Metadata { get; init; } = "{}";
public DateTimeOffset UpdatedAt { get; init; }
}

View File

@@ -0,0 +1,49 @@
// -----------------------------------------------------------------------------
// SyncLedgerEntity.cs
// Sprint: SPRINT_8200_0014_0001_DB_sync_ledger_schema
// Task: SYNC-8200-004
// Description: Entity for tracking federation sync state per remote site
// -----------------------------------------------------------------------------
namespace StellaOps.Concelier.Persistence.Postgres.Models;
/// <summary>
/// Represents a sync ledger entry for federation cursor tracking.
/// </summary>
/// <remarks>
/// NOTE(review): <see cref="BundleHash"/> is documented for deduplication —
/// presumably unique per <see cref="SiteId"/>; confirm against the schema.
/// </remarks>
public sealed class SyncLedgerEntity
{
    /// <summary>
    /// Unique ledger entry identifier.
    /// </summary>
    public required Guid Id { get; init; }

    /// <summary>
    /// Remote site identifier (e.g., "site-us-west", "airgap-dc2").
    /// </summary>
    public required string SiteId { get; init; }

    /// <summary>
    /// Opaque cursor position (usually ISO8601 timestamp#sequence).
    /// </summary>
    public required string Cursor { get; init; }

    /// <summary>
    /// SHA256 hash of the imported bundle for deduplication.
    /// </summary>
    public required string BundleHash { get; init; }

    /// <summary>
    /// Number of items in the imported bundle.
    /// </summary>
    public int ItemsCount { get; init; }

    /// <summary>
    /// When the bundle was signed by the remote site.
    /// </summary>
    public DateTimeOffset SignedAt { get; init; }

    /// <summary>
    /// When the bundle was imported to this site.
    /// </summary>
    public DateTimeOffset ImportedAt { get; init; }
}

View File

@@ -0,0 +1,143 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for advisory affected packages (vuln.advisory_affected).
/// </summary>
public sealed class AdvisoryAffectedRepository : RepositoryBase<ConcelierDataSource>, IAdvisoryAffectedRepository
{
    // Concelier advisory tables are global (not tenant-scoped); queries run under the system tenant.
    private const string SystemTenantId = "_system";

    public AdvisoryAffectedRepository(ConcelierDataSource dataSource, ILogger<AdvisoryAffectedRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <summary>
    /// Replaces all affected-package rows for an advisory atomically
    /// (delete + re-insert inside a single transaction).
    /// </summary>
    /// <param name="advisoryId">Advisory whose rows are replaced; also stamped on each inserted row.</param>
    /// <param name="affected">Replacement entries; entries with an empty id get a generated one.</param>
    /// <param name="cancellationToken">Token used to cancel the operation.</param>
    public async Task ReplaceAsync(Guid advisoryId, IEnumerable<AdvisoryAffectedEntity> affected, CancellationToken cancellationToken = default)
    {
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        // Transaction disposal rolls back automatically if any insert fails before Commit.
        await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);

        const string deleteSql = "DELETE FROM vuln.advisory_affected WHERE advisory_id = @advisory_id";
        await using (var deleteCmd = CreateCommand(deleteSql, connection))
        {
            deleteCmd.Transaction = transaction;
            AddParameter(deleteCmd, "advisory_id", advisoryId);
            await deleteCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }

        const string insertSql = """
            INSERT INTO vuln.advisory_affected
                (id, advisory_id, ecosystem, package_name, purl, version_range, versions_affected,
                 versions_fixed, database_specific)
            VALUES
                (@id, @advisory_id, @ecosystem, @package_name, @purl, @version_range::jsonb,
                 @versions_affected, @versions_fixed, @database_specific::jsonb)
            """;
        foreach (var entry in affected)
        {
            await using var insertCmd = CreateCommand(insertSql, connection);
            insertCmd.Transaction = transaction;
            // Generate an id when the caller did not supply one, consistent with the
            // other upsert paths in this module (which map Guid.Empty -> Guid.NewGuid()).
            AddParameter(insertCmd, "id", entry.Id == Guid.Empty ? Guid.NewGuid() : entry.Id);
            AddParameter(insertCmd, "advisory_id", advisoryId);
            AddParameter(insertCmd, "ecosystem", entry.Ecosystem);
            AddParameter(insertCmd, "package_name", entry.PackageName);
            AddParameter(insertCmd, "purl", entry.Purl);
            AddJsonbParameter(insertCmd, "version_range", entry.VersionRange);
            AddTextArrayParameter(insertCmd, "versions_affected", entry.VersionsAffected);
            AddTextArrayParameter(insertCmd, "versions_fixed", entry.VersionsFixed);
            AddJsonbParameter(insertCmd, "database_specific", entry.DatabaseSpecific);
            await insertCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }
        await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    }

    /// <summary>
    /// Returns all affected-package rows for an advisory, ordered deterministically
    /// by ecosystem, package name, and purl.
    /// </summary>
    public Task<IReadOnlyList<AdvisoryAffectedEntity>> GetByAdvisoryAsync(Guid advisoryId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, advisory_id, ecosystem, package_name, purl, version_range::text,
                   versions_affected, versions_fixed, database_specific::text, created_at
            FROM vuln.advisory_affected
            WHERE advisory_id = @advisory_id
            ORDER BY ecosystem, package_name, purl NULLS LAST
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "advisory_id", advisoryId),
            MapAffected,
            cancellationToken);
    }

    /// <summary>
    /// Returns affected-package rows matching an exact purl, paginated via limit/offset.
    /// </summary>
    public Task<IReadOnlyList<AdvisoryAffectedEntity>> GetByPurlAsync(string purl, int limit = 100, int offset = 0, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, advisory_id, ecosystem, package_name, purl, version_range::text,
                   versions_affected, versions_fixed, database_specific::text, created_at
            FROM vuln.advisory_affected
            WHERE purl = @purl
            ORDER BY advisory_id, id
            LIMIT @limit OFFSET @offset
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "purl", purl);
                AddParameter(cmd, "limit", limit);
                AddParameter(cmd, "offset", offset);
            },
            MapAffected,
            cancellationToken);
    }

    /// <summary>
    /// Returns affected-package rows for a (ecosystem, package name) pair, paginated via limit/offset.
    /// </summary>
    public Task<IReadOnlyList<AdvisoryAffectedEntity>> GetByPackageNameAsync(string ecosystem, string packageName, int limit = 100, int offset = 0, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, advisory_id, ecosystem, package_name, purl, version_range::text,
                   versions_affected, versions_fixed, database_specific::text, created_at
            FROM vuln.advisory_affected
            WHERE ecosystem = @ecosystem AND package_name = @package_name
            ORDER BY advisory_id, id
            LIMIT @limit OFFSET @offset
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "ecosystem", ecosystem);
                AddParameter(cmd, "package_name", packageName);
                AddParameter(cmd, "limit", limit);
                AddParameter(cmd, "offset", offset);
            },
            MapAffected,
            cancellationToken);
    }

    // Maps one result row to an entity; ordinal positions match the SELECT column lists above.
    private static AdvisoryAffectedEntity MapAffected(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        AdvisoryId = reader.GetGuid(1),
        Ecosystem = reader.GetString(2),
        PackageName = reader.GetString(3),
        Purl = GetNullableString(reader, 4),
        VersionRange = reader.GetString(5),
        VersionsAffected = reader.IsDBNull(6) ? null : reader.GetFieldValue<string[]>(6),
        VersionsFixed = reader.IsDBNull(7) ? null : reader.GetFieldValue<string[]>(7),
        DatabaseSpecific = GetNullableString(reader, 8),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(9)
    };
}

View File

@@ -0,0 +1,100 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for advisory aliases (vuln.advisory_aliases).
/// </summary>
public sealed class AdvisoryAliasRepository : RepositoryBase<ConcelierDataSource>, IAdvisoryAliasRepository
{
    // Concelier advisory tables are global (not tenant-scoped); queries run under the system tenant.
    private const string SystemTenantId = "_system";

    public AdvisoryAliasRepository(ConcelierDataSource dataSource, ILogger<AdvisoryAliasRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <summary>
    /// Replaces all alias rows for an advisory atomically
    /// (delete + re-insert inside a single transaction).
    /// </summary>
    /// <param name="advisoryId">Advisory whose aliases are replaced; also stamped on each inserted row.</param>
    /// <param name="aliases">Replacement aliases; aliases with an empty id get a generated one.</param>
    /// <param name="cancellationToken">Token used to cancel the operation.</param>
    public async Task ReplaceAsync(Guid advisoryId, IEnumerable<AdvisoryAliasEntity> aliases, CancellationToken cancellationToken = default)
    {
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        // Transaction disposal rolls back automatically if any insert fails before Commit.
        await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);

        const string deleteSql = "DELETE FROM vuln.advisory_aliases WHERE advisory_id = @advisory_id";
        await using (var deleteCmd = CreateCommand(deleteSql, connection))
        {
            deleteCmd.Transaction = transaction;
            AddParameter(deleteCmd, "advisory_id", advisoryId);
            await deleteCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }

        const string insertSql = """
            INSERT INTO vuln.advisory_aliases
                (id, advisory_id, alias_type, alias_value, is_primary)
            VALUES
                (@id, @advisory_id, @alias_type, @alias_value, @is_primary)
            """;
        foreach (var alias in aliases)
        {
            await using var insertCmd = CreateCommand(insertSql, connection);
            insertCmd.Transaction = transaction;
            // Generate an id when the caller did not supply one, consistent with the
            // other upsert paths in this module (which map Guid.Empty -> Guid.NewGuid()).
            AddParameter(insertCmd, "id", alias.Id == Guid.Empty ? Guid.NewGuid() : alias.Id);
            AddParameter(insertCmd, "advisory_id", advisoryId);
            AddParameter(insertCmd, "alias_type", alias.AliasType);
            AddParameter(insertCmd, "alias_value", alias.AliasValue);
            AddParameter(insertCmd, "is_primary", alias.IsPrimary);
            await insertCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }
        await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    }

    /// <summary>
    /// Returns all aliases for an advisory, primary aliases first, then by type and value.
    /// </summary>
    public Task<IReadOnlyList<AdvisoryAliasEntity>> GetByAdvisoryAsync(Guid advisoryId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, advisory_id, alias_type, alias_value, is_primary, created_at
            FROM vuln.advisory_aliases
            WHERE advisory_id = @advisory_id
            ORDER BY is_primary DESC, alias_type, alias_value
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "advisory_id", advisoryId),
            MapAlias,
            cancellationToken);
    }

    /// <summary>
    /// Returns all alias rows matching an exact alias value (reverse lookup), primary first.
    /// </summary>
    public Task<IReadOnlyList<AdvisoryAliasEntity>> GetByAliasAsync(string aliasValue, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, advisory_id, alias_type, alias_value, is_primary, created_at
            FROM vuln.advisory_aliases
            WHERE alias_value = @alias_value
            ORDER BY is_primary DESC
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "alias_value", aliasValue),
            MapAlias,
            cancellationToken);
    }

    // Maps one result row to an entity; ordinal positions match the SELECT column lists above.
    private static AdvisoryAliasEntity MapAlias(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        AdvisoryId = reader.GetGuid(1),
        AliasType = reader.GetString(2),
        AliasValue = reader.GetString(3),
        IsPrimary = reader.GetBoolean(4),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(5)
    };
}

View File

@@ -0,0 +1,429 @@
// -----------------------------------------------------------------------------
// AdvisoryCanonicalRepository.cs
// Sprint: SPRINT_8200_0012_0002_DB_canonical_source_edge_schema
// Task: SCHEMA-8200-010
// Description: PostgreSQL repository for canonical advisory and source edge operations
// -----------------------------------------------------------------------------
using System.Runtime.CompilerServices;
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for canonical advisory and source edge operations.
/// </summary>
public sealed class AdvisoryCanonicalRepository : RepositoryBase<ConcelierDataSource>, IAdvisoryCanonicalRepository
{
    // Canonical advisory tables are global (not tenant-scoped); queries run under the system tenant.
    private const string SystemTenantId = "_system";

    public AdvisoryCanonicalRepository(ConcelierDataSource dataSource, ILogger<AdvisoryCanonicalRepository> logger)
        : base(dataSource, logger)
    {
    }
#region Canonical Advisory Operations
    /// <summary>
    /// Returns the canonical advisory with the given id, or null when none exists.
    /// </summary>
    public Task<AdvisoryCanonicalEntity?> GetByIdAsync(Guid id, CancellationToken ct = default)
    {
        const string sql = """
            SELECT id, cve, affects_key, version_range::text, weakness, merge_hash,
                   status, severity, epss_score, exploit_known, title, summary,
                   created_at, updated_at
            FROM vuln.advisory_canonical
            WHERE id = @id
            """;
        return QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "id", id),
            MapCanonical,
            ct);
    }
    /// <summary>
    /// Returns the canonical advisory identified by its deduplication merge hash, or null.
    /// </summary>
    public Task<AdvisoryCanonicalEntity?> GetByMergeHashAsync(string mergeHash, CancellationToken ct = default)
    {
        const string sql = """
            SELECT id, cve, affects_key, version_range::text, weakness, merge_hash,
                   status, severity, epss_score, exploit_known, title, summary,
                   created_at, updated_at
            FROM vuln.advisory_canonical
            WHERE merge_hash = @merge_hash
            """;
        return QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "merge_hash", mergeHash),
            MapCanonical,
            ct);
    }
    /// <summary>
    /// Returns all canonical advisories for a CVE, most recently updated first.
    /// </summary>
    public Task<IReadOnlyList<AdvisoryCanonicalEntity>> GetByCveAsync(string cve, CancellationToken ct = default)
    {
        const string sql = """
            SELECT id, cve, affects_key, version_range::text, weakness, merge_hash,
                   status, severity, epss_score, exploit_known, title, summary,
                   created_at, updated_at
            FROM vuln.advisory_canonical
            WHERE cve = @cve
            ORDER BY updated_at DESC
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "cve", cve),
            MapCanonical,
            ct);
    }
public Task<IReadOnlyList<AdvisoryCanonicalEntity>> GetByAffectsKeyAsync(string affectsKey, CancellationToken ct = default)
{
const string sql = """
SELECT id, cve, affects_key, version_range::text, weakness, merge_hash,
status, severity, epss_score, exploit_known, title, summary,
created_at, updated_at
FROM vuln.advisory_canonical
WHERE affects_key = @affects_key
ORDER BY updated_at DESC
""";
return QueryAsync(
SystemTenantId,
sql,
cmd => AddParameter(cmd, "affects_key", affectsKey),
MapCanonical,
ct);
}
public Task<IReadOnlyList<AdvisoryCanonicalEntity>> GetUpdatedSinceAsync(
DateTimeOffset since,
int limit = 1000,
CancellationToken ct = default)
{
const string sql = """
SELECT id, cve, affects_key, version_range::text, weakness, merge_hash,
status, severity, epss_score, exploit_known, title, summary,
created_at, updated_at
FROM vuln.advisory_canonical
WHERE updated_at > @since
ORDER BY updated_at ASC
LIMIT @limit
""";
return QueryAsync(
SystemTenantId,
sql,
cmd =>
{
AddParameter(cmd, "since", since);
AddParameter(cmd, "limit", limit);
},
MapCanonical,
ct);
}
public async Task<Guid> UpsertAsync(AdvisoryCanonicalEntity entity, CancellationToken ct = default)
{
const string sql = """
INSERT INTO vuln.advisory_canonical
(id, cve, affects_key, version_range, weakness, merge_hash,
status, severity, epss_score, exploit_known, title, summary)
VALUES
(@id, @cve, @affects_key, @version_range::jsonb, @weakness, @merge_hash,
@status, @severity, @epss_score, @exploit_known, @title, @summary)
ON CONFLICT (merge_hash) DO UPDATE SET
severity = COALESCE(EXCLUDED.severity, vuln.advisory_canonical.severity),
epss_score = COALESCE(EXCLUDED.epss_score, vuln.advisory_canonical.epss_score),
exploit_known = EXCLUDED.exploit_known OR vuln.advisory_canonical.exploit_known,
title = COALESCE(EXCLUDED.title, vuln.advisory_canonical.title),
summary = COALESCE(EXCLUDED.summary, vuln.advisory_canonical.summary),
updated_at = NOW()
RETURNING id
""";
var id = entity.Id == Guid.Empty ? Guid.NewGuid() : entity.Id;
return await ExecuteScalarAsync<Guid>(
SystemTenantId,
sql,
cmd =>
{
AddParameter(cmd, "id", id);
AddParameter(cmd, "cve", entity.Cve);
AddParameter(cmd, "affects_key", entity.AffectsKey);
AddJsonbParameter(cmd, "version_range", entity.VersionRange);
AddTextArrayParameter(cmd, "weakness", entity.Weakness);
AddParameter(cmd, "merge_hash", entity.MergeHash);
AddParameter(cmd, "status", entity.Status);
AddParameter(cmd, "severity", entity.Severity);
AddParameter(cmd, "epss_score", entity.EpssScore);
AddParameter(cmd, "exploit_known", entity.ExploitKnown);
AddParameter(cmd, "title", entity.Title);
AddParameter(cmd, "summary", entity.Summary);
},
ct).ConfigureAwait(false);
}
public async Task UpdateStatusAsync(Guid id, string status, CancellationToken ct = default)
{
const string sql = """
UPDATE vuln.advisory_canonical
SET status = @status, updated_at = NOW()
WHERE id = @id
""";
await ExecuteAsync(
SystemTenantId,
sql,
cmd =>
{
AddParameter(cmd, "id", id);
AddParameter(cmd, "status", status);
},
ct).ConfigureAwait(false);
}
public async Task DeleteAsync(Guid id, CancellationToken ct = default)
{
const string sql = "DELETE FROM vuln.advisory_canonical WHERE id = @id";
await ExecuteAsync(
SystemTenantId,
sql,
cmd => AddParameter(cmd, "id", id),
ct).ConfigureAwait(false);
}
public async Task<long> CountAsync(CancellationToken ct = default)
{
const string sql = "SELECT COUNT(*) FROM vuln.advisory_canonical WHERE status = 'active'";
return await ExecuteScalarAsync<long>(
SystemTenantId,
sql,
null,
ct).ConfigureAwait(false);
}
public async IAsyncEnumerable<AdvisoryCanonicalEntity> StreamActiveAsync(
[EnumeratorCancellation] CancellationToken ct = default)
{
const string sql = """
SELECT id, cve, affects_key, version_range::text, weakness, merge_hash,
status, severity, epss_score, exploit_known, title, summary,
created_at, updated_at
FROM vuln.advisory_canonical
WHERE status = 'active'
ORDER BY id
""";
await using var connection = await DataSource.OpenSystemConnectionAsync(ct).ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
await using var reader = await command.ExecuteReaderAsync(ct).ConfigureAwait(false);
while (await reader.ReadAsync(ct).ConfigureAwait(false))
{
yield return MapCanonical(reader);
}
}
#endregion
#region Source Edge Operations
public Task<IReadOnlyList<AdvisorySourceEdgeEntity>> GetSourceEdgesAsync(Guid canonicalId, CancellationToken ct = default)
{
const string sql = """
SELECT id, canonical_id, source_id, source_advisory_id, source_doc_hash,
vendor_status, precedence_rank, dsse_envelope::text, raw_payload::text,
fetched_at, created_at
FROM vuln.advisory_source_edge
WHERE canonical_id = @canonical_id
ORDER BY precedence_rank ASC, fetched_at DESC
""";
return QueryAsync(
SystemTenantId,
sql,
cmd => AddParameter(cmd, "canonical_id", canonicalId),
MapSourceEdge,
ct);
}
public Task<AdvisorySourceEdgeEntity?> GetSourceEdgeByIdAsync(Guid id, CancellationToken ct = default)
{
const string sql = """
SELECT id, canonical_id, source_id, source_advisory_id, source_doc_hash,
vendor_status, precedence_rank, dsse_envelope::text, raw_payload::text,
fetched_at, created_at
FROM vuln.advisory_source_edge
WHERE id = @id
""";
return QuerySingleOrDefaultAsync(
SystemTenantId,
sql,
cmd => AddParameter(cmd, "id", id),
MapSourceEdge,
ct);
}
public async Task<Guid> AddSourceEdgeAsync(AdvisorySourceEdgeEntity edge, CancellationToken ct = default)
{
const string sql = """
INSERT INTO vuln.advisory_source_edge
(id, canonical_id, source_id, source_advisory_id, source_doc_hash,
vendor_status, precedence_rank, dsse_envelope, raw_payload, fetched_at)
VALUES
(@id, @canonical_id, @source_id, @source_advisory_id, @source_doc_hash,
@vendor_status, @precedence_rank, @dsse_envelope::jsonb, @raw_payload::jsonb, @fetched_at)
ON CONFLICT (canonical_id, source_id, source_doc_hash) DO UPDATE SET
vendor_status = COALESCE(EXCLUDED.vendor_status, vuln.advisory_source_edge.vendor_status),
precedence_rank = LEAST(EXCLUDED.precedence_rank, vuln.advisory_source_edge.precedence_rank),
dsse_envelope = COALESCE(EXCLUDED.dsse_envelope, vuln.advisory_source_edge.dsse_envelope),
raw_payload = COALESCE(EXCLUDED.raw_payload, vuln.advisory_source_edge.raw_payload)
RETURNING id
""";
var id = edge.Id == Guid.Empty ? Guid.NewGuid() : edge.Id;
return await ExecuteScalarAsync<Guid>(
SystemTenantId,
sql,
cmd =>
{
AddParameter(cmd, "id", id);
AddParameter(cmd, "canonical_id", edge.CanonicalId);
AddParameter(cmd, "source_id", edge.SourceId);
AddParameter(cmd, "source_advisory_id", edge.SourceAdvisoryId);
AddParameter(cmd, "source_doc_hash", edge.SourceDocHash);
AddParameter(cmd, "vendor_status", edge.VendorStatus);
AddParameter(cmd, "precedence_rank", edge.PrecedenceRank);
AddJsonbParameter(cmd, "dsse_envelope", edge.DsseEnvelope);
AddJsonbParameter(cmd, "raw_payload", edge.RawPayload);
AddParameter(cmd, "fetched_at", edge.FetchedAt == default ? DateTimeOffset.UtcNow : edge.FetchedAt);
},
ct).ConfigureAwait(false);
}
public Task<IReadOnlyList<AdvisorySourceEdgeEntity>> GetSourceEdgesByAdvisoryIdAsync(
string sourceAdvisoryId,
CancellationToken ct = default)
{
const string sql = """
SELECT id, canonical_id, source_id, source_advisory_id, source_doc_hash,
vendor_status, precedence_rank, dsse_envelope::text, raw_payload::text,
fetched_at, created_at
FROM vuln.advisory_source_edge
WHERE source_advisory_id = @source_advisory_id
ORDER BY fetched_at DESC
""";
return QueryAsync(
SystemTenantId,
sql,
cmd => AddParameter(cmd, "source_advisory_id", sourceAdvisoryId),
MapSourceEdge,
ct);
}
public async Task<long> CountSourceEdgesAsync(CancellationToken ct = default)
{
const string sql = "SELECT COUNT(*) FROM vuln.advisory_source_edge";
return await ExecuteScalarAsync<long>(
SystemTenantId,
sql,
null,
ct).ConfigureAwait(false);
}
#endregion
#region Statistics
public async Task<CanonicalStatistics> GetStatisticsAsync(CancellationToken ct = default)
{
const string sql = """
SELECT
(SELECT COUNT(*) FROM vuln.advisory_canonical) AS total_canonicals,
(SELECT COUNT(*) FROM vuln.advisory_canonical WHERE status = 'active') AS active_canonicals,
(SELECT COUNT(*) FROM vuln.advisory_source_edge) AS total_edges,
(SELECT MAX(updated_at) FROM vuln.advisory_canonical) AS last_updated
""";
var stats = await QuerySingleOrDefaultAsync(
SystemTenantId,
sql,
_ => { },
reader => new
{
TotalCanonicals = reader.GetInt64(0),
ActiveCanonicals = reader.GetInt64(1),
TotalEdges = reader.GetInt64(2),
LastUpdated = GetNullableDateTimeOffset(reader, 3)
},
ct).ConfigureAwait(false);
if (stats is null)
{
return new CanonicalStatistics();
}
return new CanonicalStatistics
{
TotalCanonicals = stats.TotalCanonicals,
ActiveCanonicals = stats.ActiveCanonicals,
TotalSourceEdges = stats.TotalEdges,
AvgSourceEdgesPerCanonical = stats.TotalCanonicals > 0
? (double)stats.TotalEdges / stats.TotalCanonicals
: 0,
LastUpdatedAt = stats.LastUpdated
};
}
#endregion
#region Mappers
private static AdvisoryCanonicalEntity MapCanonical(NpgsqlDataReader reader) => new()
{
Id = reader.GetGuid(0),
Cve = reader.GetString(1),
AffectsKey = reader.GetString(2),
VersionRange = GetNullableString(reader, 3),
Weakness = reader.IsDBNull(4) ? [] : reader.GetFieldValue<string[]>(4),
MergeHash = reader.GetString(5),
Status = reader.GetString(6),
Severity = GetNullableString(reader, 7),
EpssScore = reader.IsDBNull(8) ? null : reader.GetDecimal(8),
ExploitKnown = reader.GetBoolean(9),
Title = GetNullableString(reader, 10),
Summary = GetNullableString(reader, 11),
CreatedAt = reader.GetFieldValue<DateTimeOffset>(12),
UpdatedAt = reader.GetFieldValue<DateTimeOffset>(13)
};
private static AdvisorySourceEdgeEntity MapSourceEdge(NpgsqlDataReader reader) => new()
{
Id = reader.GetGuid(0),
CanonicalId = reader.GetGuid(1),
SourceId = reader.GetGuid(2),
SourceAdvisoryId = reader.GetString(3),
SourceDocHash = reader.GetString(4),
VendorStatus = GetNullableString(reader, 5),
PrecedenceRank = reader.GetInt32(6),
DsseEnvelope = GetNullableString(reader, 7),
RawPayload = GetNullableString(reader, 8),
FetchedAt = reader.GetFieldValue<DateTimeOffset>(9),
CreatedAt = reader.GetFieldValue<DateTimeOffset>(10)
};
#endregion
}

View File

@@ -0,0 +1,83 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for advisory credits (<c>vuln.advisory_credits</c>).
/// </summary>
public sealed class AdvisoryCreditRepository : RepositoryBase<ConcelierDataSource>, IAdvisoryCreditRepository
{
    // Sentinel tenant key: advisory data is global, but the base helpers require a tenant id.
    private const string SystemTenantId = "_system";

    public AdvisoryCreditRepository(ConcelierDataSource dataSource, ILogger<AdvisoryCreditRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <summary>
    /// Atomically replaces all credits for an advisory: existing rows are deleted and the
    /// supplied set inserted inside one transaction (implicitly rolled back on failure).
    /// An empty sequence clears all credits for the advisory.
    /// </summary>
    /// <param name="advisoryId">Owning advisory id.</param>
    /// <param name="credits">Replacement credit rows.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    public async Task ReplaceAsync(Guid advisoryId, IEnumerable<AdvisoryCreditEntity> credits, CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(credits);

        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);

        const string deleteSql = "DELETE FROM vuln.advisory_credits WHERE advisory_id = @advisory_id";
        await using (var deleteCmd = CreateCommand(deleteSql, connection))
        {
            deleteCmd.Transaction = transaction;
            AddParameter(deleteCmd, "advisory_id", advisoryId);
            await deleteCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }

        const string insertSql = """
            INSERT INTO vuln.advisory_credits
            (id, advisory_id, name, contact, credit_type)
            VALUES
            (@id, @advisory_id, @name, @contact, @credit_type)
            """;
        foreach (var credit in credits)
        {
            await using var insertCmd = CreateCommand(insertSql, connection);
            insertCmd.Transaction = transaction;
            // Defend against Guid.Empty ids (consistent with the canonical-advisory upsert):
            // two uninitialized entities would otherwise collide on the primary key.
            AddParameter(insertCmd, "id", credit.Id == Guid.Empty ? Guid.NewGuid() : credit.Id);
            AddParameter(insertCmd, "advisory_id", advisoryId);
            AddParameter(insertCmd, "name", credit.Name);
            AddParameter(insertCmd, "contact", credit.Contact);
            AddParameter(insertCmd, "credit_type", credit.CreditType);
            await insertCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }

        await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    }

    /// <summary>Lists all credits attached to an advisory, ordered by name.</summary>
    public Task<IReadOnlyList<AdvisoryCreditEntity>> GetByAdvisoryAsync(Guid advisoryId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, advisory_id, name, contact, credit_type, created_at
            FROM vuln.advisory_credits
            WHERE advisory_id = @advisory_id
            ORDER BY name
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "advisory_id", advisoryId),
            MapCredit,
            cancellationToken);
    }

    // Ordinals mirror the SELECT column list above.
    private static AdvisoryCreditEntity MapCredit(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        AdvisoryId = reader.GetGuid(1),
        Name = reader.GetString(2),
        Contact = GetNullableString(reader, 3),
        CreditType = GetNullableString(reader, 4),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(5)
    };
}

View File

@@ -0,0 +1,96 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for advisory CVSS scores (<c>vuln.advisory_cvss</c>).
/// </summary>
public sealed class AdvisoryCvssRepository : RepositoryBase<ConcelierDataSource>, IAdvisoryCvssRepository
{
    // Sentinel tenant key: advisory data is global, but the base helpers require a tenant id.
    private const string SystemTenantId = "_system";

    public AdvisoryCvssRepository(ConcelierDataSource dataSource, ILogger<AdvisoryCvssRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <summary>
    /// Atomically replaces all CVSS scores for an advisory: existing rows are deleted and
    /// the supplied set inserted inside one transaction (implicitly rolled back on failure).
    /// An empty sequence clears all scores for the advisory.
    /// </summary>
    /// <param name="advisoryId">Owning advisory id.</param>
    /// <param name="scores">Replacement score rows.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    public async Task ReplaceAsync(Guid advisoryId, IEnumerable<AdvisoryCvssEntity> scores, CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(scores);

        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);

        const string deleteSql = "DELETE FROM vuln.advisory_cvss WHERE advisory_id = @advisory_id";
        await using (var deleteCmd = CreateCommand(deleteSql, connection))
        {
            deleteCmd.Transaction = transaction;
            AddParameter(deleteCmd, "advisory_id", advisoryId);
            await deleteCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }

        const string insertSql = """
            INSERT INTO vuln.advisory_cvss
            (id, advisory_id, cvss_version, vector_string, base_score, base_severity,
            exploitability_score, impact_score, source, is_primary)
            VALUES
            (@id, @advisory_id, @cvss_version, @vector_string, @base_score, @base_severity,
            @exploitability_score, @impact_score, @source, @is_primary)
            """;
        foreach (var score in scores)
        {
            await using var insertCmd = CreateCommand(insertSql, connection);
            insertCmd.Transaction = transaction;
            // Defend against Guid.Empty ids (consistent with the canonical-advisory upsert):
            // two uninitialized entities would otherwise collide on the primary key.
            AddParameter(insertCmd, "id", score.Id == Guid.Empty ? Guid.NewGuid() : score.Id);
            AddParameter(insertCmd, "advisory_id", advisoryId);
            AddParameter(insertCmd, "cvss_version", score.CvssVersion);
            AddParameter(insertCmd, "vector_string", score.VectorString);
            AddParameter(insertCmd, "base_score", score.BaseScore);
            AddParameter(insertCmd, "base_severity", score.BaseSeverity);
            AddParameter(insertCmd, "exploitability_score", score.ExploitabilityScore);
            AddParameter(insertCmd, "impact_score", score.ImpactScore);
            AddParameter(insertCmd, "source", score.Source);
            AddParameter(insertCmd, "is_primary", score.IsPrimary);
            await insertCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }

        await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    }

    /// <summary>Lists all CVSS scores for an advisory, primary score first, then by version.</summary>
    public Task<IReadOnlyList<AdvisoryCvssEntity>> GetByAdvisoryAsync(Guid advisoryId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, advisory_id, cvss_version, vector_string, base_score, base_severity,
            exploitability_score, impact_score, source, is_primary, created_at
            FROM vuln.advisory_cvss
            WHERE advisory_id = @advisory_id
            ORDER BY is_primary DESC, cvss_version
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "advisory_id", advisoryId),
            MapCvss,
            cancellationToken);
    }

    // Ordinals mirror the SELECT column list above.
    private static AdvisoryCvssEntity MapCvss(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        AdvisoryId = reader.GetGuid(1),
        CvssVersion = reader.GetString(2),
        VectorString = reader.GetString(3),
        BaseScore = reader.GetDecimal(4),
        BaseSeverity = GetNullableString(reader, 5),
        ExploitabilityScore = GetNullableDecimal(reader, 6),
        ImpactScore = GetNullableDecimal(reader, 7),
        Source = GetNullableString(reader, 8),
        IsPrimary = reader.GetBoolean(9),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(10)
    };
}

View File

@@ -0,0 +1,264 @@
using System.Collections.Immutable;
using System.Text.Json;
using System.Text.Json.Serialization;
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Core.Linksets;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL implementation of the Link-Not-Merge linkset cache
/// (<c>vuln.lnm_linkset_cache</c>).
/// </summary>
/// <remarks>
/// Unlike the advisory repositories, this cache IS tenant-scoped: the tenant id is
/// normalized (trimmed, lower-cased) and used both as a connection scope and a column
/// filter. Normalized/conflicts/provenance payloads round-trip through jsonb via
/// camelCase JSON.
/// </remarks>
public sealed class AdvisoryLinksetCacheRepository
: RepositoryBase<ConcelierDataSource>,
IAdvisoryLinksetStore
{
// Stable wire format for the jsonb columns: camelCase keys, nulls omitted, compact.
private static readonly JsonSerializerOptions JsonOptions = new()
{
PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull,
WriteIndented = false
};
public AdvisoryLinksetCacheRepository(
ConcelierDataSource dataSource,
ILogger<AdvisoryLinksetCacheRepository> logger)
: base(dataSource, logger)
{
}
/// <summary>
/// Inserts or replaces the cached linkset keyed by (tenant_id, advisory_id, source).
/// On conflict every cached column is overwritten with the incoming values, including
/// <c>created_at</c>; the existing row's id is retained (id is not in the update list).
/// </summary>
/// <exception cref="ArgumentNullException">When <paramref name="linkset"/> is null.</exception>
public async Task UpsertAsync(
AdvisoryLinkset linkset,
CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(linkset);
var tenant = NormalizeTenant(linkset.TenantId);
// Re-stamp the record with the normalized tenant so entity and connection scope agree.
var entity = ToEntity(linkset with { TenantId = tenant });
const string sql = """
INSERT INTO vuln.lnm_linkset_cache
(id, tenant_id, source, advisory_id, observations,
normalized, conflicts, provenance, confidence, built_by_job_id, created_at)
VALUES
(@id, @tenant_id, @source, @advisory_id, @observations,
@normalized::jsonb, @conflicts::jsonb, @provenance::jsonb,
@confidence, @built_by_job_id, @created_at)
ON CONFLICT (tenant_id, advisory_id, source) DO UPDATE SET
observations = EXCLUDED.observations,
normalized = EXCLUDED.normalized,
conflicts = EXCLUDED.conflicts,
provenance = EXCLUDED.provenance,
confidence = EXCLUDED.confidence,
built_by_job_id = EXCLUDED.built_by_job_id,
created_at = EXCLUDED.created_at
""";
await using var connection = await DataSource
.OpenConnectionAsync(tenant, "lnm-cache-upsert", cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "id", entity.Id);
AddParameter(command, "tenant_id", entity.TenantId);
AddParameter(command, "source", entity.Source);
AddParameter(command, "advisory_id", entity.AdvisoryId);
AddTextArrayParameter(command, "observations", entity.Observations);
AddJsonbParameter(command, "normalized", entity.NormalizedJson);
AddJsonbParameter(command, "conflicts", entity.ConflictsJson);
AddJsonbParameter(command, "provenance", entity.ProvenanceJson);
AddParameter(command, "confidence", entity.Confidence);
AddParameter(command, "built_by_job_id", entity.BuiltByJobId);
// Stored as UTC DateTime, not DateTimeOffset.
AddParameter(command, "created_at", entity.CreatedAt.UtcDateTime);
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Pages cached linksets for a tenant using keyset pagination over
/// (created_at DESC, advisory_id ASC). Optional advisory-id / source filters use the
/// "NULL array means no filter" pattern via <c>= ANY(@array)</c>.
/// </summary>
/// <param name="tenantId">Tenant; normalized before use.</param>
/// <param name="advisoryIds">Optional advisory-id filter; blank entries are dropped.</param>
/// <param name="sources">Optional source filter; blank entries are dropped.</param>
/// <param name="cursor">Exclusive keyset cursor from the previous page, or null for the first page.</param>
/// <param name="limit">Page size; must be positive.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <exception cref="ArgumentNullException">When <paramref name="tenantId"/> is null/blank (blank kept for caller compatibility).</exception>
/// <exception cref="ArgumentOutOfRangeException">When <paramref name="limit"/> is not positive.</exception>
public async Task<IReadOnlyList<AdvisoryLinkset>> FindByTenantAsync(
string tenantId,
IEnumerable<string>? advisoryIds,
IEnumerable<string>? sources,
AdvisoryLinksetCursor? cursor,
int limit,
CancellationToken cancellationToken)
{
if (string.IsNullOrWhiteSpace(tenantId))
{
throw new ArgumentNullException(nameof(tenantId));
}
if (limit <= 0)
{
throw new ArgumentOutOfRangeException(nameof(limit));
}
var normalizedTenant = NormalizeTenant(tenantId);
var advisoryIdArray = advisoryIds?.Select(a => a.Trim()).Where(a => !string.IsNullOrWhiteSpace(a)).ToArray();
var sourceArray = sources?.Select(s => s.Trim()).Where(s => !string.IsNullOrWhiteSpace(s)).ToArray();
const string sql = """
SELECT id, tenant_id, source, advisory_id, observations,
normalized::text, conflicts::text, provenance::text,
confidence, built_by_job_id, created_at
FROM vuln.lnm_linkset_cache
WHERE tenant_id = @tenant_id
AND (@advisory_ids IS NULL OR advisory_id = ANY(@advisory_ids))
AND (@sources IS NULL OR source = ANY(@sources))
AND (
@cursor_created_at IS NULL
OR created_at < @cursor_created_at
OR (created_at = @cursor_created_at AND advisory_id > @cursor_advisory_id)
)
ORDER BY created_at DESC, advisory_id ASC
LIMIT @limit
""";
return await QueryAsync(
normalizedTenant,
sql,
cmd =>
{
AddParameter(cmd, "tenant_id", normalizedTenant);
AddTextArrayParameter(cmd, "advisory_ids", advisoryIdArray);
AddTextArrayParameter(cmd, "sources", sourceArray);
if (cursor is null)
{
// NOTE(review): binding DBNull without an explicit NpgsqlDbType presumably
// relies on AddParameter inferring a type — confirm against the base helper.
AddParameter(cmd, "cursor_created_at", DBNull.Value);
AddParameter(cmd, "cursor_advisory_id", DBNull.Value);
}
else
{
AddParameter(cmd, "cursor_created_at", cursor.CreatedAt.UtcDateTime);
AddParameter(cmd, "cursor_advisory_id", cursor.AdvisoryId);
}
AddParameter(cmd, "limit", limit);
},
MapLinkset,
cancellationToken).ConfigureAwait(false);
}
// Tenant ids are case-insensitive; canonical form is trimmed lower-case.
private static string NormalizeTenant(string tenantId) =>
tenantId.Trim().ToLowerInvariant();
/// <summary>
/// Projects a domain linkset into the cache entity, serializing the optional
/// normalized/conflicts/provenance payloads to JSON (null payloads stay null).
/// A fresh id is generated per call; on upsert conflict the existing row keeps its id.
/// </summary>
private static AdvisoryLinksetCacheEntity ToEntity(AdvisoryLinkset linkset)
{
var normalizedJson = linkset.Normalized is null
? null
: JsonSerializer.Serialize(new
{
linkset.Normalized.Purls,
linkset.Normalized.Cpes,
linkset.Normalized.Versions,
linkset.Normalized.Ranges,
linkset.Normalized.Severities
}, JsonOptions);
var conflictsJson = linkset.Conflicts is null
? null
: JsonSerializer.Serialize(
linkset.Conflicts.Select(c => new
{
c.Field,
c.Reason,
c.Values,
c.SourceIds
}),
JsonOptions);
var provenanceJson = linkset.Provenance is null
? null
: JsonSerializer.Serialize(new
{
linkset.Provenance.ObservationHashes,
linkset.Provenance.ToolVersion,
linkset.Provenance.PolicyHash
}, JsonOptions);
return new AdvisoryLinksetCacheEntity
{
Id = Guid.NewGuid(),
TenantId = linkset.TenantId,
Source = linkset.Source,
AdvisoryId = linkset.AdvisoryId,
Observations = linkset.ObservationIds.ToArray(),
NormalizedJson = normalizedJson,
ConflictsJson = conflictsJson,
ProvenanceJson = provenanceJson,
Confidence = linkset.Confidence,
CreatedAt = linkset.CreatedAt,
BuiltByJobId = linkset.BuiltByJobId
};
}
// Ordinals mirror the SELECT column list in FindByTenantAsync:
// 0 id (unused), 1 tenant_id, 2 source, 3 advisory_id, 4 observations,
// 5 normalized, 6 conflicts, 7 provenance, 8 confidence, 9 built_by_job_id, 10 created_at.
// Named constructor arguments keep the mapping correct despite the record's parameter order.
private static AdvisoryLinkset MapLinkset(NpgsqlDataReader reader)
{
var normalized = Deserialize<NormalizedPayload>(reader, 5);
var conflicts = Deserialize<List<ConflictPayload>>(reader, 6);
var provenance = Deserialize<ProvenancePayload>(reader, 7);
return new AdvisoryLinkset(
TenantId: reader.GetString(1),
Source: reader.GetString(2),
AdvisoryId: reader.GetString(3),
ObservationIds: reader.GetFieldValue<string[]>(4).ToImmutableArray(),
Normalized: normalized is null
? null
: new AdvisoryLinksetNormalized(
normalized.Purls,
normalized.Cpes,
normalized.Versions,
normalized.Ranges,
normalized.Severities),
Provenance: provenance is null
? null
: new AdvisoryLinksetProvenance(
provenance.ObservationHashes,
provenance.ToolVersion,
provenance.PolicyHash),
Confidence: GetNullableDouble(reader, 8),
Conflicts: conflicts?.Select(c => new AdvisoryLinksetConflict(
c.Field ?? string.Empty,
c.Reason ?? string.Empty,
c.Values,
c.SourceIds)).ToList(),
CreatedAt: reader.GetFieldValue<DateTimeOffset>(10),
BuiltByJobId: GetNullableString(reader, 9));
}
private static double? GetNullableDouble(NpgsqlDataReader reader, int ordinal) =>
reader.IsDBNull(ordinal) ? null : reader.GetDouble(ordinal);
// Reads a jsonb column (selected as ::text) and deserializes it, or default when NULL.
private static TPayload? Deserialize<TPayload>(NpgsqlDataReader reader, int ordinal)
{
if (reader.IsDBNull(ordinal))
{
return default;
}
var json = reader.GetString(ordinal);
return JsonSerializer.Deserialize<TPayload>(json, JsonOptions);
}
// Loosely-typed JSON shapes used only for round-tripping the jsonb columns.
private sealed record NormalizedPayload(
IReadOnlyList<string>? Purls,
IReadOnlyList<string>? Cpes,
IReadOnlyList<string>? Versions,
IReadOnlyList<Dictionary<string, object?>>? Ranges,
IReadOnlyList<Dictionary<string, object?>>? Severities);
private sealed record ConflictPayload(
string? Field,
string? Reason,
IReadOnlyList<string>? Values,
IReadOnlyList<string>? SourceIds);
private sealed record ProvenancePayload(
IReadOnlyList<string>? ObservationHashes,
string? ToolVersion,
string? PolicyHash);
}

View File

@@ -0,0 +1,81 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for advisory references (<c>vuln.advisory_references</c>).
/// </summary>
public sealed class AdvisoryReferenceRepository : RepositoryBase<ConcelierDataSource>, IAdvisoryReferenceRepository
{
    // Sentinel tenant key: advisory data is global, but the base helpers require a tenant id.
    private const string SystemTenantId = "_system";

    public AdvisoryReferenceRepository(ConcelierDataSource dataSource, ILogger<AdvisoryReferenceRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <summary>
    /// Atomically replaces all references for an advisory: existing rows are deleted and
    /// the supplied set inserted inside one transaction (implicitly rolled back on failure).
    /// An empty sequence clears all references for the advisory.
    /// </summary>
    /// <param name="advisoryId">Owning advisory id.</param>
    /// <param name="references">Replacement reference rows.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    public async Task ReplaceAsync(Guid advisoryId, IEnumerable<AdvisoryReferenceEntity> references, CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(references);

        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);

        const string deleteSql = "DELETE FROM vuln.advisory_references WHERE advisory_id = @advisory_id";
        await using (var deleteCmd = CreateCommand(deleteSql, connection))
        {
            deleteCmd.Transaction = transaction;
            AddParameter(deleteCmd, "advisory_id", advisoryId);
            await deleteCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }

        const string insertSql = """
            INSERT INTO vuln.advisory_references
            (id, advisory_id, ref_type, url)
            VALUES
            (@id, @advisory_id, @ref_type, @url)
            """;
        foreach (var reference in references)
        {
            await using var insertCmd = CreateCommand(insertSql, connection);
            insertCmd.Transaction = transaction;
            // Defend against Guid.Empty ids (consistent with the canonical-advisory upsert):
            // two uninitialized entities would otherwise collide on the primary key.
            AddParameter(insertCmd, "id", reference.Id == Guid.Empty ? Guid.NewGuid() : reference.Id);
            AddParameter(insertCmd, "advisory_id", advisoryId);
            AddParameter(insertCmd, "ref_type", reference.RefType);
            AddParameter(insertCmd, "url", reference.Url);
            await insertCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }

        await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    }

    /// <summary>Lists all references for an advisory, ordered by type then URL.</summary>
    public Task<IReadOnlyList<AdvisoryReferenceEntity>> GetByAdvisoryAsync(Guid advisoryId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, advisory_id, ref_type, url, created_at
            FROM vuln.advisory_references
            WHERE advisory_id = @advisory_id
            ORDER BY ref_type, url
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "advisory_id", advisoryId),
            MapReference,
            cancellationToken);
    }

    // Ordinals mirror the SELECT column list above.
    private static AdvisoryReferenceEntity MapReference(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        AdvisoryId = reader.GetGuid(1),
        RefType = reader.GetString(2),
        Url = reader.GetString(3),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(4)
    };
}

View File

@@ -0,0 +1,725 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for advisory operations.
/// </summary>
/// <remarks>
/// Advisory data is global (not tenant-scoped) as vulnerability information
/// is shared across all tenants.
/// </remarks>
public sealed class AdvisoryRepository : RepositoryBase<ConcelierDataSource>, IAdvisoryRepository
{
private const string SystemTenantId = "_system";
/// <summary>
/// Creates a new advisory repository.
/// </summary>
/// <param name="dataSource">Concelier PostgreSQL data source.</param>
/// <param name="logger">Diagnostic logger.</param>
public AdvisoryRepository(ConcelierDataSource dataSource, ILogger<AdvisoryRepository> logger)
: base(dataSource, logger)
{
}
/// <inheritdoc />
public Task<AdvisoryEntity> UpsertAsync(
    AdvisoryEntity advisory,
    IEnumerable<AdvisoryAliasEntity>? aliases,
    IEnumerable<AdvisoryCvssEntity>? cvss,
    IEnumerable<AdvisoryAffectedEntity>? affected,
    IEnumerable<AdvisoryReferenceEntity>? references,
    IEnumerable<AdvisoryCreditEntity>? credits,
    IEnumerable<AdvisoryWeaknessEntity>? weaknesses,
    IEnumerable<KevFlagEntity>? kevFlags,
    CancellationToken cancellationToken = default)
    // Full-graph upsert: delegate straight to the shared internal implementation.
    => UpsertInternalAsync(advisory, aliases, cvss, affected, references, credits, weaknesses, kevFlags, cancellationToken);
/// <inheritdoc />
public Task<AdvisoryEntity> UpsertAsync(AdvisoryEntity advisory, CancellationToken cancellationToken = default)
    // Advisory-only upsert: same internal path with every child collection omitted.
    => UpsertInternalAsync(advisory, null, null, null, null, null, null, null, cancellationToken);
/// <inheritdoc />
public async Task<AdvisoryEntity?> GetByIdAsync(Guid id, CancellationToken cancellationToken = default)
{
    // Column order must match MapAdvisory's ordinals; jsonb columns are cast to text.
    // raw_Payload normalized to raw_payload for consistency (unquoted identifiers fold
    // to lower case in PostgreSQL, so behavior is unchanged).
    const string sql = """
        SELECT id, advisory_key, primary_vuln_id, source_id, title, summary, description,
        severity, published_at, modified_at, withdrawn_at, provenance::text, raw_payload::text,
        created_at, updated_at
        FROM vuln.advisories
        WHERE id = @id
        """;
    return await QuerySingleOrDefaultAsync(
        SystemTenantId,
        sql,
        cmd => AddParameter(cmd, "id", id),
        MapAdvisory,
        cancellationToken).ConfigureAwait(false);
}
/// <inheritdoc />
public async Task<AdvisoryEntity?> GetByKeyAsync(string advisoryKey, CancellationToken cancellationToken = default)
{
    // Looks up by the stable advisory key (unique business identifier).
    // raw_Payload normalized to raw_payload for consistency (identifier case folding
    // makes this behavior-neutral in PostgreSQL).
    const string sql = """
        SELECT id, advisory_key, primary_vuln_id, source_id, title, summary, description,
        severity, published_at, modified_at, withdrawn_at, provenance::text, raw_payload::text,
        created_at, updated_at
        FROM vuln.advisories
        WHERE advisory_key = @advisory_key
        """;
    return await QuerySingleOrDefaultAsync(
        SystemTenantId,
        sql,
        cmd => AddParameter(cmd, "advisory_key", advisoryKey),
        MapAdvisory,
        cancellationToken).ConfigureAwait(false);
}
/// <inheritdoc />
public async Task<AdvisoryEntity?> GetByVulnIdAsync(string vulnId, CancellationToken cancellationToken = default)
{
    // Looks up by the primary vulnerability id (e.g. a CVE id).
    // raw_Payload normalized to raw_payload for consistency (identifier case folding
    // makes this behavior-neutral in PostgreSQL).
    const string sql = """
        SELECT id, advisory_key, primary_vuln_id, source_id, title, summary, description,
        severity, published_at, modified_at, withdrawn_at, provenance::text, raw_payload::text,
        created_at, updated_at
        FROM vuln.advisories
        WHERE primary_vuln_id = @vuln_id
        """;
    return await QuerySingleOrDefaultAsync(
        SystemTenantId,
        sql,
        cmd => AddParameter(cmd, "vuln_id", vulnId),
        MapAdvisory,
        cancellationToken).ConfigureAwait(false);
}
/// <inheritdoc />
public async Task<IReadOnlyList<AdvisoryEntity>> GetByAliasAsync(string aliasValue, CancellationToken cancellationToken = default)
{
    // Joins through advisory_aliases; most recently modified advisories first,
    // id as a deterministic tie-breaker.
    // raw_Payload normalized to raw_payload for consistency (identifier case folding
    // makes this behavior-neutral in PostgreSQL).
    const string sql = """
        SELECT a.id, a.advisory_key, a.primary_vuln_id, a.source_id, a.title, a.summary, a.description,
        a.severity, a.published_at, a.modified_at, a.withdrawn_at, a.provenance::text, a.raw_payload::text,
        a.created_at, a.updated_at
        FROM vuln.advisories a
        JOIN vuln.advisory_aliases al ON al.advisory_id = a.id
        WHERE al.alias_value = @alias_value
        ORDER BY a.modified_at DESC, a.id
        """;
    return await QueryAsync(
        SystemTenantId,
        sql,
        cmd => AddParameter(cmd, "alias_value", aliasValue),
        MapAdvisory,
        cancellationToken).ConfigureAwait(false);
}
/// <inheritdoc />
public async Task<IReadOnlyList<AdvisoryEntity>> GetAffectingPackageAsync(
    string purl,
    int limit = 100,
    int offset = 0,
    CancellationToken cancellationToken = default)
{
    // Offset/limit paging over advisories joined to affected packages by exact purl match.
    // raw_Payload normalized to raw_payload for consistency (identifier case folding
    // makes this behavior-neutral in PostgreSQL).
    const string sql = """
        SELECT a.id, a.advisory_key, a.primary_vuln_id, a.source_id, a.title, a.summary, a.description,
        a.severity, a.published_at, a.modified_at, a.withdrawn_at, a.provenance::text, a.raw_payload::text,
        a.created_at, a.updated_at
        FROM vuln.advisories a
        JOIN vuln.advisory_affected af ON af.advisory_id = a.id
        WHERE af.purl = @purl
        ORDER BY a.modified_at DESC, a.id
        LIMIT @limit OFFSET @offset
        """;
    return await QueryAsync(
        SystemTenantId,
        sql,
        cmd =>
        {
            AddParameter(cmd, "purl", purl);
            AddParameter(cmd, "limit", limit);
            AddParameter(cmd, "offset", offset);
        },
        MapAdvisory,
        cancellationToken).ConfigureAwait(false);
}
/// <inheritdoc />
public async Task<IReadOnlyList<AdvisoryEntity>> GetAffectingPackageNameAsync(
    string ecosystem,
    string packageName,
    int limit = 100,
    int offset = 0,
    CancellationToken cancellationToken = default)
{
    // Same shape as GetAffectingPackageAsync but keyed by (ecosystem, package_name)
    // instead of purl. raw_Payload normalized to raw_payload for consistency
    // (identifier case folding makes this behavior-neutral in PostgreSQL).
    const string sql = """
        SELECT a.id, a.advisory_key, a.primary_vuln_id, a.source_id, a.title, a.summary, a.description,
        a.severity, a.published_at, a.modified_at, a.withdrawn_at, a.provenance::text, a.raw_payload::text,
        a.created_at, a.updated_at
        FROM vuln.advisories a
        JOIN vuln.advisory_affected af ON af.advisory_id = a.id
        WHERE af.ecosystem = @ecosystem AND af.package_name = @package_name
        ORDER BY a.modified_at DESC, a.id
        LIMIT @limit OFFSET @offset
        """;
    return await QueryAsync(
        SystemTenantId,
        sql,
        cmd =>
        {
            AddParameter(cmd, "ecosystem", ecosystem);
            AddParameter(cmd, "package_name", packageName);
            AddParameter(cmd, "limit", limit);
            AddParameter(cmd, "offset", offset);
        },
        MapAdvisory,
        cancellationToken).ConfigureAwait(false);
}
/// <inheritdoc />
public async Task<IReadOnlyList<AdvisoryEntity>> SearchAsync(
    string query,
    string? severity = null,
    int limit = 50,
    int offset = 0,
    CancellationToken cancellationToken = default)
{
    // Full-text search over the precomputed search_vector column.
    // websearch_to_tsquery parses free-form user input (quotes, OR, -) itself,
    // and the query text is always bound as a parameter — never concatenated —
    // so no escaping is needed here. Only the static severity predicate is
    // appended conditionally; its value is also parameterized.
    // Column casing normalized to raw_payload for consistency with the schema.
    var sql = """
        SELECT id, advisory_key, primary_vuln_id, source_id, title, summary, description,
        severity, published_at, modified_at, withdrawn_at, provenance::text, raw_payload::text,
        created_at, updated_at,
        ts_rank(search_vector, websearch_to_tsquery('english', @query)) as rank
        FROM vuln.advisories
        WHERE search_vector @@ websearch_to_tsquery('english', @query)
        """;
    if (!string.IsNullOrEmpty(severity))
    {
        sql += " AND severity = @severity";
    }
    sql += " ORDER BY rank DESC, modified_at DESC, id LIMIT @limit OFFSET @offset";
    return await QueryAsync(
        SystemTenantId,
        sql,
        cmd =>
        {
            AddParameter(cmd, "query", query);
            if (!string.IsNullOrEmpty(severity))
            {
                AddParameter(cmd, "severity", severity);
            }
            AddParameter(cmd, "limit", limit);
            AddParameter(cmd, "offset", offset);
        },
        MapAdvisory,
        cancellationToken).ConfigureAwait(false);
}
/// <inheritdoc />
public async Task<IReadOnlyList<AdvisoryEntity>> GetBySeverityAsync(
    string severity,
    int limit = 100,
    int offset = 0,
    CancellationToken cancellationToken = default)
{
    // Exact-match severity filter; newest modifications first with id as a
    // tie-breaker so LIMIT/OFFSET paging is stable across calls.
    // Column casing normalized to raw_payload for consistency with the schema.
    const string sql = """
        SELECT id, advisory_key, primary_vuln_id, source_id, title, summary, description,
        severity, published_at, modified_at, withdrawn_at, provenance::text, raw_payload::text,
        created_at, updated_at
        FROM vuln.advisories
        WHERE severity = @severity
        ORDER BY modified_at DESC, id
        LIMIT @limit OFFSET @offset
        """;
    return await QueryAsync(
        SystemTenantId,
        sql,
        cmd =>
        {
            AddParameter(cmd, "severity", severity);
            AddParameter(cmd, "limit", limit);
            AddParameter(cmd, "offset", offset);
        },
        MapAdvisory,
        cancellationToken).ConfigureAwait(false);
}
/// <inheritdoc />
public async Task<IReadOnlyList<AdvisoryEntity>> GetModifiedSinceAsync(
    DateTimeOffset since,
    int limit = 1000,
    CancellationToken cancellationToken = default)
{
    // Incremental-sync query: strictly-greater comparison plus ASCENDING
    // (modified_at, id) order lets callers use the last row's modified_at as
    // the cursor for the next page without skipping or duplicating rows.
    // Column casing normalized to raw_payload for consistency with the schema.
    const string sql = """
        SELECT id, advisory_key, primary_vuln_id, source_id, title, summary, description,
        severity, published_at, modified_at, withdrawn_at, provenance::text, raw_payload::text,
        created_at, updated_at
        FROM vuln.advisories
        WHERE modified_at > @since
        ORDER BY modified_at, id
        LIMIT @limit
        """;
    return await QueryAsync(
        SystemTenantId,
        sql,
        cmd =>
        {
            AddParameter(cmd, "since", since);
            AddParameter(cmd, "limit", limit);
        },
        MapAdvisory,
        cancellationToken).ConfigureAwait(false);
}
/// <inheritdoc />
public async Task<IReadOnlyList<AdvisoryEntity>> GetBySourceAsync(
    Guid sourceId,
    int limit = 100,
    int offset = 0,
    CancellationToken cancellationToken = default)
{
    // All advisories ingested from one upstream source, newest-modified first
    // with id as tie-breaker for stable LIMIT/OFFSET paging.
    // Column casing normalized to raw_payload for consistency with the schema.
    const string sql = """
        SELECT id, advisory_key, primary_vuln_id, source_id, title, summary, description,
        severity, published_at, modified_at, withdrawn_at, provenance::text, raw_payload::text,
        created_at, updated_at
        FROM vuln.advisories
        WHERE source_id = @source_id
        ORDER BY modified_at DESC, id
        LIMIT @limit OFFSET @offset
        """;
    return await QueryAsync(
        SystemTenantId,
        sql,
        cmd =>
        {
            AddParameter(cmd, "source_id", sourceId);
            AddParameter(cmd, "limit", limit);
            AddParameter(cmd, "offset", offset);
        },
        MapAdvisory,
        cancellationToken).ConfigureAwait(false);
}
/// <inheritdoc />
public async Task<long> CountAsync(CancellationToken cancellationToken = default)
{
    // Total number of advisory rows, regardless of severity or source.
    const string countSql = "SELECT COUNT(*) FROM vuln.advisories";
    return await ExecuteScalarAsync<long>(
        SystemTenantId,
        countSql,
        null,
        cancellationToken).ConfigureAwait(false);
}
/// <inheritdoc />
public async Task<IDictionary<string, long>> CountBySeverityAsync(CancellationToken cancellationToken = default)
{
    // Buckets advisories per severity; NULL severities are folded into 'unknown'.
    const string sql = """
        SELECT COALESCE(severity, 'unknown') as severity, COUNT(*) as count
        FROM vuln.advisories
        GROUP BY severity
        ORDER BY severity
        """;
    var rows = await QueryAsync(
        SystemTenantId,
        sql,
        null,
        reader => KeyValuePair.Create(reader.GetString(0), reader.GetInt64(1)),
        cancellationToken).ConfigureAwait(false);
    return rows.ToDictionary(static pair => pair.Key, static pair => pair.Value);
}
/// <summary>
/// Inserts or updates the advisory row and, for each child collection that is
/// non-null, replaces the corresponding child table contents — all inside one
/// transaction. In the UPDATE branch, COALESCE keeps existing values when the
/// incoming advisory omits a field, provenance documents are merged (||), and
/// raw_payload/withdrawn_at are overwritten unconditionally.
/// </summary>
private async Task<AdvisoryEntity> UpsertInternalAsync(
    AdvisoryEntity advisory,
    IEnumerable<AdvisoryAliasEntity>? aliases,
    IEnumerable<AdvisoryCvssEntity>? cvss,
    IEnumerable<AdvisoryAffectedEntity>? affected,
    IEnumerable<AdvisoryReferenceEntity>? references,
    IEnumerable<AdvisoryCreditEntity>? credits,
    IEnumerable<AdvisoryWeaknessEntity>? weaknesses,
    IEnumerable<KevFlagEntity>? kevFlags,
    CancellationToken cancellationToken)
{
    // Placeholder and column names normalized to lower_snake_case. The previous
    // @raw_Payload spelling only worked because PostgreSQL folds unquoted
    // identifiers to lowercase and Npgsql falls back to case-insensitive
    // parameter-name matching; spelling it raw_payload matches the bound
    // parameter (AddJsonbParameter "raw_payload") exactly.
    const string sql = """
        INSERT INTO vuln.advisories (
        id, advisory_key, primary_vuln_id, source_id, title, summary, description,
        severity, published_at, modified_at, withdrawn_at, provenance, raw_payload
        )
        VALUES (
        @id, @advisory_key, @primary_vuln_id, @source_id, @title, @summary, @description,
        @severity, @published_at, @modified_at, @withdrawn_at, @provenance::jsonb, @raw_payload::jsonb
        )
        ON CONFLICT (advisory_key) DO UPDATE SET
        primary_vuln_id = EXCLUDED.primary_vuln_id,
        source_id = COALESCE(EXCLUDED.source_id, vuln.advisories.source_id),
        title = COALESCE(EXCLUDED.title, vuln.advisories.title),
        summary = COALESCE(EXCLUDED.summary, vuln.advisories.summary),
        description = COALESCE(EXCLUDED.description, vuln.advisories.description),
        severity = COALESCE(EXCLUDED.severity, vuln.advisories.severity),
        published_at = COALESCE(EXCLUDED.published_at, vuln.advisories.published_at),
        modified_at = COALESCE(EXCLUDED.modified_at, vuln.advisories.modified_at),
        withdrawn_at = EXCLUDED.withdrawn_at,
        provenance = vuln.advisories.provenance || EXCLUDED.provenance,
        raw_payload = EXCLUDED.raw_payload,
        updated_at = NOW()
        RETURNING id, advisory_key, primary_vuln_id, source_id, title, summary, description,
        severity, published_at, modified_at, withdrawn_at, provenance::text, raw_payload::text,
        created_at, updated_at
        """;
    await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
    await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
    AdvisoryEntity result;
    await using (var command = CreateCommand(sql, connection))
    {
        command.Transaction = transaction;
        AddParameter(command, "id", advisory.Id);
        AddParameter(command, "advisory_key", advisory.AdvisoryKey);
        AddParameter(command, "primary_vuln_id", advisory.PrimaryVulnId);
        AddParameter(command, "source_id", advisory.SourceId);
        AddParameter(command, "title", advisory.Title);
        AddParameter(command, "summary", advisory.Summary);
        AddParameter(command, "description", advisory.Description);
        AddParameter(command, "severity", advisory.Severity);
        AddParameter(command, "published_at", advisory.PublishedAt);
        AddParameter(command, "modified_at", advisory.ModifiedAt);
        AddParameter(command, "withdrawn_at", advisory.WithdrawnAt);
        AddJsonbParameter(command, "provenance", advisory.Provenance);
        AddJsonbParameter(command, "raw_payload", advisory.RawPayload);
        // ON CONFLICT ... DO UPDATE with RETURNING always yields exactly one row.
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        await reader.ReadAsync(cancellationToken).ConfigureAwait(false);
        result = MapAdvisory(reader);
    }
    // Replace child tables only when collections are provided (null = leave existing).
    if (aliases is not null)
    {
        await ReplaceAliasesAsync(result.Id, aliases, connection, transaction, cancellationToken).ConfigureAwait(false);
    }
    if (cvss is not null)
    {
        await ReplaceCvssAsync(result.Id, cvss, connection, transaction, cancellationToken).ConfigureAwait(false);
    }
    if (affected is not null)
    {
        await ReplaceAffectedAsync(result.Id, affected, connection, transaction, cancellationToken).ConfigureAwait(false);
    }
    if (references is not null)
    {
        await ReplaceReferencesAsync(result.Id, references, connection, transaction, cancellationToken).ConfigureAwait(false);
    }
    if (credits is not null)
    {
        await ReplaceCreditsAsync(result.Id, credits, connection, transaction, cancellationToken).ConfigureAwait(false);
    }
    if (weaknesses is not null)
    {
        await ReplaceWeaknessesAsync(result.Id, weaknesses, connection, transaction, cancellationToken).ConfigureAwait(false);
    }
    if (kevFlags is not null)
    {
        await ReplaceKevFlagsAsync(result.Id, kevFlags, connection, transaction, cancellationToken).ConfigureAwait(false);
    }
    await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    return result;
}
/// <summary>
/// Replaces the alias rows for an advisory inside the caller's transaction:
/// existing rows are deleted first, then the supplied set is inserted.
/// </summary>
private static async Task ReplaceAliasesAsync(
    Guid advisoryId,
    IEnumerable<AdvisoryAliasEntity> aliases,
    NpgsqlConnection connection,
    NpgsqlTransaction transaction,
    CancellationToken cancellationToken)
{
    const string deleteSql = "DELETE FROM vuln.advisory_aliases WHERE advisory_id = @advisory_id";
    await using var deleteCommand = new NpgsqlCommand(deleteSql, connection, transaction);
    deleteCommand.Parameters.AddWithValue("advisory_id", advisoryId);
    await deleteCommand.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);

    const string insertSql = """
        INSERT INTO vuln.advisory_aliases (id, advisory_id, alias_type, alias_value, is_primary)
        VALUES (@id, @advisory_id, @alias_type, @alias_value, @is_primary)
        """;
    foreach (var entry in aliases)
    {
        await using var insertCommand = new NpgsqlCommand(insertSql, connection, transaction);
        insertCommand.Parameters.AddWithValue("id", entry.Id);
        insertCommand.Parameters.AddWithValue("advisory_id", advisoryId);
        insertCommand.Parameters.AddWithValue("alias_type", entry.AliasType);
        insertCommand.Parameters.AddWithValue("alias_value", entry.AliasValue);
        insertCommand.Parameters.AddWithValue("is_primary", entry.IsPrimary);
        await insertCommand.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }
}
/// <summary>
/// Replaces all CVSS score rows for an advisory inside the caller's transaction.
/// Delete-then-insert keeps the child table an exact mirror of the supplied set.
/// </summary>
private static async Task ReplaceCvssAsync(
    Guid advisoryId,
    IEnumerable<AdvisoryCvssEntity> scores,
    NpgsqlConnection connection,
    NpgsqlTransaction transaction,
    CancellationToken cancellationToken)
{
    const string deleteSql = "DELETE FROM vuln.advisory_cvss WHERE advisory_id = @advisory_id";
    await using (var deleteCmd = new NpgsqlCommand(deleteSql, connection, transaction))
    {
        deleteCmd.Parameters.AddWithValue("advisory_id", advisoryId);
        await deleteCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }
    const string insertSql = """
        INSERT INTO vuln.advisory_cvss
        (id, advisory_id, cvss_version, vector_string, base_score, base_severity,
        exploitability_score, impact_score, source, is_primary)
        VALUES
        (@id, @advisory_id, @cvss_version, @vector_string, @base_score, @base_severity,
        @exploitability_score, @impact_score, @source, @is_primary)
        """;
    foreach (var score in scores)
    {
        await using var insertCmd = new NpgsqlCommand(insertSql, connection, transaction);
        insertCmd.Parameters.AddWithValue("id", score.Id);
        insertCmd.Parameters.AddWithValue("advisory_id", advisoryId);
        insertCmd.Parameters.AddWithValue("cvss_version", score.CvssVersion);
        insertCmd.Parameters.AddWithValue("vector_string", score.VectorString);
        insertCmd.Parameters.AddWithValue("base_score", score.BaseScore);
        // Optional fields are coalesced to DBNull so AddWithValue binds SQL NULL
        // rather than throwing on a null object value.
        insertCmd.Parameters.AddWithValue("base_severity", (object?)score.BaseSeverity ?? DBNull.Value);
        insertCmd.Parameters.AddWithValue("exploitability_score", (object?)score.ExploitabilityScore ?? DBNull.Value);
        insertCmd.Parameters.AddWithValue("impact_score", (object?)score.ImpactScore ?? DBNull.Value);
        insertCmd.Parameters.AddWithValue("source", (object?)score.Source ?? DBNull.Value);
        insertCmd.Parameters.AddWithValue("is_primary", score.IsPrimary);
        await insertCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }
}
/// <summary>
/// Replaces all affected-package rows for an advisory inside the caller's
/// transaction. Delete-then-insert keeps the child table an exact mirror of
/// the supplied set.
/// </summary>
private static async Task ReplaceAffectedAsync(
    Guid advisoryId,
    IEnumerable<AdvisoryAffectedEntity> affected,
    NpgsqlConnection connection,
    NpgsqlTransaction transaction,
    CancellationToken cancellationToken)
{
    const string deleteSql = "DELETE FROM vuln.advisory_affected WHERE advisory_id = @advisory_id";
    await using (var deleteCmd = new NpgsqlCommand(deleteSql, connection, transaction))
    {
        deleteCmd.Parameters.AddWithValue("advisory_id", advisoryId);
        await deleteCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }
    const string insertSql = """
        INSERT INTO vuln.advisory_affected
        (id, advisory_id, ecosystem, package_name, purl, version_range, versions_affected,
        versions_fixed, database_specific)
        VALUES
        (@id, @advisory_id, @ecosystem, @package_name, @purl, @version_range::jsonb,
        @versions_affected, @versions_fixed, @database_specific::jsonb)
        """;
    foreach (var entry in affected)
    {
        await using var insertCmd = new NpgsqlCommand(insertSql, connection, transaction);
        insertCmd.Parameters.AddWithValue("id", entry.Id);
        insertCmd.Parameters.AddWithValue("advisory_id", advisoryId);
        insertCmd.Parameters.AddWithValue("ecosystem", entry.Ecosystem);
        insertCmd.Parameters.AddWithValue("package_name", entry.PackageName);
        insertCmd.Parameters.AddWithValue("purl", (object?)entry.Purl ?? DBNull.Value);
        // jsonb and text[] columns use explicitly typed NpgsqlParameter<T> so a
        // null TypedValue binds SQL NULL with the correct declared type (plain
        // AddWithValue cannot infer the type from a null value).
        insertCmd.Parameters.Add(new NpgsqlParameter<string?>("version_range", NpgsqlTypes.NpgsqlDbType.Jsonb)
        {
            TypedValue = entry.VersionRange
        });
        insertCmd.Parameters.Add(new NpgsqlParameter<string[]?>("versions_affected", NpgsqlTypes.NpgsqlDbType.Array | NpgsqlTypes.NpgsqlDbType.Text)
        {
            TypedValue = entry.VersionsAffected
        });
        insertCmd.Parameters.Add(new NpgsqlParameter<string[]?>("versions_fixed", NpgsqlTypes.NpgsqlDbType.Array | NpgsqlTypes.NpgsqlDbType.Text)
        {
            TypedValue = entry.VersionsFixed
        });
        insertCmd.Parameters.Add(new NpgsqlParameter<string?>("database_specific", NpgsqlTypes.NpgsqlDbType.Jsonb)
        {
            TypedValue = entry.DatabaseSpecific
        });
        await insertCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }
}
/// <summary>
/// Replaces the reference URLs recorded for an advisory inside the caller's
/// transaction: existing rows are deleted, then the supplied set is inserted.
/// </summary>
private static async Task ReplaceReferencesAsync(
    Guid advisoryId,
    IEnumerable<AdvisoryReferenceEntity> references,
    NpgsqlConnection connection,
    NpgsqlTransaction transaction,
    CancellationToken cancellationToken)
{
    const string deleteSql = "DELETE FROM vuln.advisory_references WHERE advisory_id = @advisory_id";
    await using var deleteCommand = new NpgsqlCommand(deleteSql, connection, transaction);
    deleteCommand.Parameters.AddWithValue("advisory_id", advisoryId);
    await deleteCommand.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);

    const string insertSql = """
        INSERT INTO vuln.advisory_references (id, advisory_id, ref_type, url)
        VALUES (@id, @advisory_id, @ref_type, @url)
        """;
    foreach (var entry in references)
    {
        await using var insertCommand = new NpgsqlCommand(insertSql, connection, transaction);
        insertCommand.Parameters.AddWithValue("id", entry.Id);
        insertCommand.Parameters.AddWithValue("advisory_id", advisoryId);
        insertCommand.Parameters.AddWithValue("ref_type", entry.RefType);
        insertCommand.Parameters.AddWithValue("url", entry.Url);
        await insertCommand.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }
}
/// <summary>
/// Replaces the researcher/credit rows for an advisory inside the caller's
/// transaction: existing rows are deleted, then the supplied set is inserted.
/// </summary>
private static async Task ReplaceCreditsAsync(
    Guid advisoryId,
    IEnumerable<AdvisoryCreditEntity> credits,
    NpgsqlConnection connection,
    NpgsqlTransaction transaction,
    CancellationToken cancellationToken)
{
    const string deleteSql = "DELETE FROM vuln.advisory_credits WHERE advisory_id = @advisory_id";
    await using var deleteCommand = new NpgsqlCommand(deleteSql, connection, transaction);
    deleteCommand.Parameters.AddWithValue("advisory_id", advisoryId);
    await deleteCommand.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);

    const string insertSql = """
        INSERT INTO vuln.advisory_credits (id, advisory_id, name, contact, credit_type)
        VALUES (@id, @advisory_id, @name, @contact, @credit_type)
        """;
    foreach (var entry in credits)
    {
        await using var insertCommand = new NpgsqlCommand(insertSql, connection, transaction);
        insertCommand.Parameters.AddWithValue("id", entry.Id);
        insertCommand.Parameters.AddWithValue("advisory_id", advisoryId);
        insertCommand.Parameters.AddWithValue("name", entry.Name);
        // Optional fields bind SQL NULL via DBNull when absent.
        insertCommand.Parameters.AddWithValue("contact", (object?)entry.Contact ?? DBNull.Value);
        insertCommand.Parameters.AddWithValue("credit_type", (object?)entry.CreditType ?? DBNull.Value);
        await insertCommand.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }
}
/// <summary>
/// Replaces the CWE weakness rows for an advisory inside the caller's
/// transaction. Delete-then-insert keeps the child table an exact mirror of
/// the supplied set.
/// NOTE(review): AdvisoryWeaknessRepository.ReplaceAsync duplicates this logic
/// in its own transaction — keep the two in sync if the schema changes.
/// </summary>
private static async Task ReplaceWeaknessesAsync(
    Guid advisoryId,
    IEnumerable<AdvisoryWeaknessEntity> weaknesses,
    NpgsqlConnection connection,
    NpgsqlTransaction transaction,
    CancellationToken cancellationToken)
{
    const string deleteSql = "DELETE FROM vuln.advisory_weaknesses WHERE advisory_id = @advisory_id";
    await using (var deleteCmd = new NpgsqlCommand(deleteSql, connection, transaction))
    {
        deleteCmd.Parameters.AddWithValue("advisory_id", advisoryId);
        await deleteCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }
    const string insertSql = """
        INSERT INTO vuln.advisory_weaknesses (id, advisory_id, cwe_id, description, source)
        VALUES (@id, @advisory_id, @cwe_id, @description, @source)
        """;
    foreach (var weakness in weaknesses)
    {
        await using var insertCmd = new NpgsqlCommand(insertSql, connection, transaction);
        insertCmd.Parameters.AddWithValue("id", weakness.Id);
        insertCmd.Parameters.AddWithValue("advisory_id", advisoryId);
        insertCmd.Parameters.AddWithValue("cwe_id", weakness.CweId);
        // Optional fields bind SQL NULL via DBNull when absent.
        insertCmd.Parameters.AddWithValue("description", (object?)weakness.Description ?? DBNull.Value);
        insertCmd.Parameters.AddWithValue("source", (object?)weakness.Source ?? DBNull.Value);
        await insertCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }
}
/// <summary>
/// Replaces the CISA KEV (Known Exploited Vulnerabilities) flag rows for an
/// advisory inside the caller's transaction. Delete-then-insert keeps the
/// child table an exact mirror of the supplied set.
/// </summary>
private static async Task ReplaceKevFlagsAsync(
    Guid advisoryId,
    IEnumerable<KevFlagEntity> kevFlags,
    NpgsqlConnection connection,
    NpgsqlTransaction transaction,
    CancellationToken cancellationToken)
{
    const string deleteSql = "DELETE FROM vuln.kev_flags WHERE advisory_id = @advisory_id";
    await using (var deleteCmd = new NpgsqlCommand(deleteSql, connection, transaction))
    {
        deleteCmd.Parameters.AddWithValue("advisory_id", advisoryId);
        await deleteCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }
    const string insertSql = """
        INSERT INTO vuln.kev_flags
        (id, advisory_id, cve_id, vendor_project, product, vulnerability_name,
        date_added, due_date, known_ransomware_use, notes)
        VALUES
        (@id, @advisory_id, @cve_id, @vendor_project, @product, @vulnerability_name,
        @date_added, @due_date, @known_ransomware_use, @notes)
        """;
    foreach (var flag in kevFlags)
    {
        await using var insertCmd = new NpgsqlCommand(insertSql, connection, transaction);
        insertCmd.Parameters.AddWithValue("id", flag.Id);
        insertCmd.Parameters.AddWithValue("advisory_id", advisoryId);
        insertCmd.Parameters.AddWithValue("cve_id", flag.CveId);
        // Optional descriptive fields bind SQL NULL via DBNull when absent.
        insertCmd.Parameters.AddWithValue("vendor_project", (object?)flag.VendorProject ?? DBNull.Value);
        insertCmd.Parameters.AddWithValue("product", (object?)flag.Product ?? DBNull.Value);
        insertCmd.Parameters.AddWithValue("vulnerability_name", (object?)flag.VulnerabilityName ?? DBNull.Value);
        insertCmd.Parameters.AddWithValue("date_added", flag.DateAdded);
        insertCmd.Parameters.AddWithValue("due_date", (object?)flag.DueDate ?? DBNull.Value);
        insertCmd.Parameters.AddWithValue("known_ransomware_use", flag.KnownRansomwareUse);
        insertCmd.Parameters.AddWithValue("notes", (object?)flag.Notes ?? DBNull.Value);
        await insertCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }
}
/// <summary>
/// Maps one row of the canonical advisory SELECT list to an entity.
/// Ordinals are positional and must match the shared projection used by every
/// advisory query in this repository:
/// 0=id, 1=advisory_key, 2=primary_vuln_id, 3=source_id, 4=title, 5=summary,
/// 6=description, 7=severity, 8=published_at, 9=modified_at, 10=withdrawn_at,
/// 11=provenance::text, 12=raw_payload::text, 13=created_at, 14=updated_at.
/// </summary>
private static AdvisoryEntity MapAdvisory(NpgsqlDataReader reader) => new()
{
    Id = reader.GetGuid(0),
    AdvisoryKey = reader.GetString(1),
    PrimaryVulnId = reader.GetString(2),
    SourceId = GetNullableGuid(reader, 3),
    Title = GetNullableString(reader, 4),
    Summary = GetNullableString(reader, 5),
    Description = GetNullableString(reader, 6),
    Severity = GetNullableString(reader, 7),
    PublishedAt = GetNullableDateTimeOffset(reader, 8),
    ModifiedAt = GetNullableDateTimeOffset(reader, 9),
    WithdrawnAt = GetNullableDateTimeOffset(reader, 10),
    // provenance is NOT NULL in the projection (cast to text); raw_payload may be NULL.
    Provenance = reader.GetString(11),
    RawPayload = GetNullableString(reader, 12),
    CreatedAt = reader.GetFieldValue<DateTimeOffset>(13),
    UpdatedAt = reader.GetFieldValue<DateTimeOffset>(14)
};
}

View File

@@ -0,0 +1,72 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for advisory snapshots: per-advisory content hashes
/// captured as part of a feed snapshot, used to detect advisory changes
/// between feed imports.
/// </summary>
public sealed class AdvisorySnapshotRepository : RepositoryBase<ConcelierDataSource>, IAdvisorySnapshotRepository
{
    // Snapshot data is not tenant-scoped; all queries run as the system tenant.
    private const string SystemTenantId = "_system";

    public AdvisorySnapshotRepository(ConcelierDataSource dataSource, ILogger<AdvisorySnapshotRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <summary>
    /// Inserts (or refreshes) the content hash recorded for an advisory within
    /// a feed snapshot. Upserts on (feed_snapshot_id, advisory_key), so
    /// re-importing the same snapshot simply overwrites the hash.
    /// </summary>
    public async Task<AdvisorySnapshotEntity> InsertAsync(AdvisorySnapshotEntity snapshot, CancellationToken cancellationToken = default)
    {
        const string sql = """
            INSERT INTO vuln.advisory_snapshots
            (id, feed_snapshot_id, advisory_key, content_hash)
            VALUES
            (@id, @feed_snapshot_id, @advisory_key, @content_hash)
            ON CONFLICT (feed_snapshot_id, advisory_key) DO UPDATE SET
            content_hash = EXCLUDED.content_hash
            RETURNING id, feed_snapshot_id, advisory_key, content_hash, created_at
            """;
        // ON CONFLICT ... DO UPDATE always produces a RETURNING row, so a null
        // result can only indicate a driver-level failure — surfaced as an error.
        return await QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "id", snapshot.Id);
                AddParameter(cmd, "feed_snapshot_id", snapshot.FeedSnapshotId);
                AddParameter(cmd, "advisory_key", snapshot.AdvisoryKey);
                AddParameter(cmd, "content_hash", snapshot.ContentHash);
            },
            MapSnapshot!,
            cancellationToken).ConfigureAwait(false) ?? throw new InvalidOperationException("Insert returned null");
    }

    /// <summary>
    /// Lists every advisory snapshot row captured for a feed snapshot,
    /// ordered by advisory key for deterministic output.
    /// </summary>
    public Task<IReadOnlyList<AdvisorySnapshotEntity>> GetByFeedSnapshotAsync(Guid feedSnapshotId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, feed_snapshot_id, advisory_key, content_hash, created_at
            FROM vuln.advisory_snapshots
            WHERE feed_snapshot_id = @feed_snapshot_id
            ORDER BY advisory_key
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "feed_snapshot_id", feedSnapshotId),
            MapSnapshot,
            cancellationToken);
    }

    // Positional mapping: ordinals follow the shared SELECT list above
    // (0=id, 1=feed_snapshot_id, 2=advisory_key, 3=content_hash, 4=created_at).
    private static AdvisorySnapshotEntity MapSnapshot(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        FeedSnapshotId = reader.GetGuid(1),
        AdvisoryKey = reader.GetString(2),
        ContentHash = reader.GetString(3),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(4)
    };
}

View File

@@ -0,0 +1,83 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for advisory weaknesses (CWE).
/// NOTE(review): AdvisoryRepository.ReplaceWeaknessesAsync duplicates the
/// replace logic inside its own upsert transaction — keep both in sync if the
/// vuln.advisory_weaknesses schema changes.
/// </summary>
public sealed class AdvisoryWeaknessRepository : RepositoryBase<ConcelierDataSource>, IAdvisoryWeaknessRepository
{
    // Weakness data is not tenant-scoped; all queries run as the system tenant.
    private const string SystemTenantId = "_system";

    public AdvisoryWeaknessRepository(ConcelierDataSource dataSource, ILogger<AdvisoryWeaknessRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <summary>
    /// Replaces all weakness rows for an advisory: deletes the existing set and
    /// inserts the supplied one, atomically within a single transaction.
    /// </summary>
    public async Task ReplaceAsync(Guid advisoryId, IEnumerable<AdvisoryWeaknessEntity> weaknesses, CancellationToken cancellationToken = default)
    {
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
        const string deleteSql = "DELETE FROM vuln.advisory_weaknesses WHERE advisory_id = @advisory_id";
        await using (var deleteCmd = CreateCommand(deleteSql, connection))
        {
            deleteCmd.Transaction = transaction;
            AddParameter(deleteCmd, "advisory_id", advisoryId);
            await deleteCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }
        const string insertSql = """
            INSERT INTO vuln.advisory_weaknesses
            (id, advisory_id, cwe_id, description, source)
            VALUES
            (@id, @advisory_id, @cwe_id, @description, @source)
            """;
        foreach (var weakness in weaknesses)
        {
            await using var insertCmd = CreateCommand(insertSql, connection);
            insertCmd.Transaction = transaction;
            AddParameter(insertCmd, "id", weakness.Id);
            AddParameter(insertCmd, "advisory_id", advisoryId);
            AddParameter(insertCmd, "cwe_id", weakness.CweId);
            AddParameter(insertCmd, "description", weakness.Description);
            AddParameter(insertCmd, "source", weakness.Source);
            await insertCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }
        await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    }

    /// <summary>
    /// Lists the weakness rows recorded for an advisory, ordered by CWE id
    /// for deterministic output.
    /// </summary>
    public Task<IReadOnlyList<AdvisoryWeaknessEntity>> GetByAdvisoryAsync(Guid advisoryId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, advisory_id, cwe_id, description, source, created_at
            FROM vuln.advisory_weaknesses
            WHERE advisory_id = @advisory_id
            ORDER BY cwe_id
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "advisory_id", advisoryId),
            MapWeakness,
            cancellationToken);
    }

    // Positional mapping: ordinals follow the SELECT list above
    // (0=id, 1=advisory_id, 2=cwe_id, 3=description, 4=source, 5=created_at).
    private static AdvisoryWeaknessEntity MapWeakness(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        AdvisoryId = reader.GetGuid(1),
        CweId = reader.GetString(2),
        Description = GetNullableString(reader, 3),
        Source = GetNullableString(reader, 4),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(5)
    };
}

View File

@@ -0,0 +1,127 @@
using System.Text.Json;
using Dapper;
using Microsoft.Extensions.Logging;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres;
using StellaOps.Infrastructure.Postgres.Connections;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository over <c>concelier.source_documents</c>: raw documents fetched
/// from upstream advisory sources, keyed by (source_name, uri).
/// </summary>
public interface IDocumentRepository
{
    /// <summary>Finds a document by primary key, or null when absent.</summary>
    Task<DocumentRecordEntity?> FindAsync(Guid id, CancellationToken cancellationToken);

    /// <summary>Finds a document by its natural key (source name + URI), or null when absent.</summary>
    Task<DocumentRecordEntity?> FindBySourceAndUriAsync(string sourceName, string uri, CancellationToken cancellationToken);

    /// <summary>Inserts or updates a document (conflict key: source_name + uri) and returns the stored row.</summary>
    Task<DocumentRecordEntity> UpsertAsync(DocumentRecordEntity record, CancellationToken cancellationToken);

    /// <summary>Updates only the processing status of a document and bumps updated_at.</summary>
    Task UpdateStatusAsync(Guid id, string status, CancellationToken cancellationToken);
}
/// <summary>
/// Dapper-backed repository for <c>concelier.source_documents</c>: raw documents
/// fetched from upstream advisory sources, keyed by (source_name, uri).
/// </summary>
public sealed class DocumentRepository : RepositoryBase<ConcelierDataSource>, IDocumentRepository
{
    public DocumentRepository(ConcelierDataSource dataSource, ILogger<DocumentRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <inheritdoc />
    public async Task<DocumentRecordEntity?> FindAsync(Guid id, CancellationToken cancellationToken)
    {
        const string sql = """
            SELECT * FROM concelier.source_documents
            WHERE id = @Id
            LIMIT 1;
            """;
        await using var conn = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        // CommandDefinition flows the cancellation token into the Dapper call;
        // the plain (sql, param) overload would silently ignore it.
        var row = await conn.QuerySingleOrDefaultAsync(
            new CommandDefinition(sql, new { Id = id }, cancellationToken: cancellationToken)).ConfigureAwait(false);
        return row is null ? null : Map(row);
    }

    /// <inheritdoc />
    public async Task<DocumentRecordEntity?> FindBySourceAndUriAsync(string sourceName, string uri, CancellationToken cancellationToken)
    {
        const string sql = """
            SELECT * FROM concelier.source_documents
            WHERE source_name = @SourceName AND uri = @Uri
            LIMIT 1;
            """;
        await using var conn = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        var row = await conn.QuerySingleOrDefaultAsync(
            new CommandDefinition(sql, new { SourceName = sourceName, Uri = uri }, cancellationToken: cancellationToken)).ConfigureAwait(false);
        return row is null ? null : Map(row);
    }

    /// <inheritdoc />
    public async Task<DocumentRecordEntity> UpsertAsync(DocumentRecordEntity record, CancellationToken cancellationToken)
    {
        const string sql = """
            INSERT INTO concelier.source_documents (
            id, source_id, source_name, uri, sha256, status, content_type,
            headers_json, metadata_json, etag, last_modified, payload, created_at, updated_at, expires_at)
            VALUES (
            @Id, @SourceId, @SourceName, @Uri, @Sha256, @Status, @ContentType,
            @HeadersJson, @MetadataJson, @Etag, @LastModified, @Payload, @CreatedAt, @UpdatedAt, @ExpiresAt)
            ON CONFLICT (source_name, uri) DO UPDATE SET
            sha256 = EXCLUDED.sha256,
            status = EXCLUDED.status,
            content_type = EXCLUDED.content_type,
            headers_json = EXCLUDED.headers_json,
            metadata_json = EXCLUDED.metadata_json,
            etag = EXCLUDED.etag,
            last_modified = EXCLUDED.last_modified,
            payload = EXCLUDED.payload,
            updated_at = EXCLUDED.updated_at,
            expires_at = EXCLUDED.expires_at
            RETURNING *;
            """;
        await using var conn = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        var parameters = new
        {
            record.Id,
            record.SourceId,
            record.SourceName,
            record.Uri,
            record.Sha256,
            record.Status,
            record.ContentType,
            record.HeadersJson,
            record.MetadataJson,
            record.Etag,
            record.LastModified,
            record.Payload,
            record.CreatedAt,
            record.UpdatedAt,
            record.ExpiresAt
        };
        // ON CONFLICT ... DO UPDATE with RETURNING always yields exactly one row.
        var row = await conn.QuerySingleAsync(
            new CommandDefinition(sql, parameters, cancellationToken: cancellationToken)).ConfigureAwait(false);
        return Map(row);
    }

    /// <inheritdoc />
    public async Task UpdateStatusAsync(Guid id, string status, CancellationToken cancellationToken)
    {
        const string sql = """
            UPDATE concelier.source_documents
            SET status = @Status, updated_at = NOW()
            WHERE id = @Id;
            """;
        await using var conn = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await conn.ExecuteAsync(
            new CommandDefinition(sql, new { Id = id, Status = status }, cancellationToken: cancellationToken)).ConfigureAwait(false);
    }

    // Maps a Dapper dynamic row (snake_case columns) to the entity.
    // NOTE(review): created_at/updated_at/expires_at are re-stamped as UTC
    // DateTimes here — assumes the columns are stored as UTC; confirm the
    // column types (timestamp vs timestamptz) before changing.
    private static DocumentRecordEntity Map(dynamic row)
    {
        return new DocumentRecordEntity(
            row.id,
            row.source_id,
            row.source_name,
            row.uri,
            row.sha256,
            row.status,
            (string?)row.content_type,
            (string?)row.headers_json,
            (string?)row.metadata_json,
            (string?)row.etag,
            (DateTimeOffset?)row.last_modified,
            (byte[])row.payload,
            DateTime.SpecifyKind(row.created_at, DateTimeKind.Utc),
            DateTime.SpecifyKind(row.updated_at, DateTimeKind.Utc),
            row.expires_at is null ? null : DateTime.SpecifyKind(row.expires_at, DateTimeKind.Utc));
    }
}

View File

@@ -0,0 +1,78 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for feed snapshots (one row per ingested upstream
/// feed export, keyed by source + snapshot id).
/// </summary>
public sealed class FeedSnapshotRepository : RepositoryBase<ConcelierDataSource>, IFeedSnapshotRepository
{
    // Snapshot data is not tenant-scoped; all queries run as the system tenant.
    private const string SystemTenantId = "_system";

    public FeedSnapshotRepository(ConcelierDataSource dataSource, ILogger<FeedSnapshotRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <summary>
    /// Inserts a feed snapshot. Idempotent on (source_id, snapshot_id): when the
    /// snapshot already exists, the stored row is returned so callers always see
    /// the persisted identity rather than the transient entity they passed in.
    /// </summary>
    public async Task<FeedSnapshotEntity> InsertAsync(FeedSnapshotEntity snapshot, CancellationToken cancellationToken = default)
    {
        const string sql = """
            INSERT INTO vuln.feed_snapshots
            (id, source_id, snapshot_id, advisory_count, checksum, metadata)
            VALUES
            (@id, @source_id, @snapshot_id, @advisory_count, @checksum, @metadata::jsonb)
            ON CONFLICT (source_id, snapshot_id) DO NOTHING
            RETURNING id, source_id, snapshot_id, advisory_count, checksum, metadata::text, created_at
            """;
        var inserted = await QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "id", snapshot.Id);
                AddParameter(cmd, "source_id", snapshot.SourceId);
                AddParameter(cmd, "snapshot_id", snapshot.SnapshotId);
                AddParameter(cmd, "advisory_count", snapshot.AdvisoryCount);
                AddParameter(cmd, "checksum", snapshot.Checksum);
                AddJsonbParameter(cmd, "metadata", snapshot.Metadata);
            },
            MapSnapshot!,
            cancellationToken).ConfigureAwait(false);
        if (inserted is not null)
        {
            return inserted;
        }

        // ON CONFLICT DO NOTHING produces no RETURNING row on a duplicate;
        // fetch the existing snapshot so the returned entity carries the stored
        // id/created_at. Fall back to the caller's entity only if the row
        // disappeared concurrently.
        return await GetBySourceAndIdAsync(snapshot.SourceId, snapshot.SnapshotId, cancellationToken).ConfigureAwait(false)
            ?? snapshot;
    }

    /// <summary>Looks up a snapshot by its natural key, or null when absent.</summary>
    public Task<FeedSnapshotEntity?> GetBySourceAndIdAsync(Guid sourceId, string snapshotId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, source_id, snapshot_id, advisory_count, checksum, metadata::text, created_at
            FROM vuln.feed_snapshots
            WHERE source_id = @source_id AND snapshot_id = @snapshot_id
            """;
        return QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "source_id", sourceId);
                AddParameter(cmd, "snapshot_id", snapshotId);
            },
            MapSnapshot,
            cancellationToken);
    }

    // Positional mapping: ordinals follow the shared SELECT list above.
    // NOTE(review): metadata is read with GetString and is therefore assumed
    // NOT NULL in the schema — a NULL metadata column would throw here; confirm.
    private static FeedSnapshotEntity MapSnapshot(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        SourceId = reader.GetGuid(1),
        SnapshotId = reader.GetString(2),
        AdvisoryCount = reader.GetInt32(3),
        Checksum = GetNullableString(reader, 4),
        Metadata = reader.GetString(5),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(6)
    };
}

View File

@@ -0,0 +1,15 @@
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository for advisory affected package rows.
/// </summary>
public interface IAdvisoryAffectedRepository
{
    /// <summary>
    /// Replaces all affected-package rows for an advisory with the supplied set.
    /// Pass an empty sequence to remove every row for the advisory.
    /// </summary>
    Task ReplaceAsync(Guid advisoryId, IEnumerable<AdvisoryAffectedEntity> affected, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets all affected-package rows recorded for an advisory.
    /// </summary>
    Task<IReadOnlyList<AdvisoryAffectedEntity>> GetByAdvisoryAsync(Guid advisoryId, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets affected rows matching a full package URL (PURL), paged via limit/offset.
    /// </summary>
    Task<IReadOnlyList<AdvisoryAffectedEntity>> GetByPurlAsync(string purl, int limit = 100, int offset = 0, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets affected rows by ecosystem and package name (for rows without a PURL), paged via limit/offset.
    /// </summary>
    Task<IReadOnlyList<AdvisoryAffectedEntity>> GetByPackageNameAsync(string ecosystem, string packageName, int limit = 100, int offset = 0, CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,14 @@
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository for advisory aliases.
/// </summary>
public interface IAdvisoryAliasRepository
{
    /// <summary>
    /// Replaces all alias rows for an advisory with the supplied set.
    /// Pass an empty sequence to remove every alias for the advisory.
    /// </summary>
    Task ReplaceAsync(Guid advisoryId, IEnumerable<AdvisoryAliasEntity> aliases, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets all alias rows recorded for an advisory.
    /// </summary>
    Task<IReadOnlyList<AdvisoryAliasEntity>> GetByAdvisoryAsync(Guid advisoryId, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets alias rows matching an alias value (e.g. a CVE or GHSA identifier).
    /// </summary>
    Task<IReadOnlyList<AdvisoryAliasEntity>> GetByAliasAsync(string aliasValue, CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,144 @@
// -----------------------------------------------------------------------------
// IAdvisoryCanonicalRepository.cs
// Sprint: SPRINT_8200_0012_0002_DB_canonical_source_edge_schema
// Task: SCHEMA-8200-009
// Description: Repository interface for canonical advisory operations
// -----------------------------------------------------------------------------
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository interface for canonical advisory and source edge operations.
/// </summary>
public interface IAdvisoryCanonicalRepository
{
    #region Canonical Advisory Operations

    /// <summary>
    /// Gets a canonical advisory by ID.
    /// </summary>
    Task<AdvisoryCanonicalEntity?> GetByIdAsync(Guid id, CancellationToken ct = default);

    /// <summary>
    /// Gets a canonical advisory by merge hash.
    /// </summary>
    Task<AdvisoryCanonicalEntity?> GetByMergeHashAsync(string mergeHash, CancellationToken ct = default);

    /// <summary>
    /// Gets all canonical advisories for a CVE.
    /// </summary>
    Task<IReadOnlyList<AdvisoryCanonicalEntity>> GetByCveAsync(string cve, CancellationToken ct = default);

    /// <summary>
    /// Gets all canonical advisories for an affects key (PURL or CPE).
    /// </summary>
    Task<IReadOnlyList<AdvisoryCanonicalEntity>> GetByAffectsKeyAsync(string affectsKey, CancellationToken ct = default);

    /// <summary>
    /// Gets canonical advisories updated since a given time.
    /// </summary>
    Task<IReadOnlyList<AdvisoryCanonicalEntity>> GetUpdatedSinceAsync(
        DateTimeOffset since,
        int limit = 1000,
        CancellationToken ct = default);

    /// <summary>
    /// Upserts a canonical advisory (insert or update by merge_hash).
    /// </summary>
    /// <returns>The ID of the inserted or updated canonical advisory.</returns>
    Task<Guid> UpsertAsync(AdvisoryCanonicalEntity entity, CancellationToken ct = default);

    /// <summary>
    /// Updates the status of a canonical advisory.
    /// </summary>
    Task UpdateStatusAsync(Guid id, string status, CancellationToken ct = default);

    /// <summary>
    /// Deletes a canonical advisory and all its source edges (cascade).
    /// </summary>
    Task DeleteAsync(Guid id, CancellationToken ct = default);

    /// <summary>
    /// Counts total active canonical advisories.
    /// </summary>
    Task<long> CountAsync(CancellationToken ct = default);

    /// <summary>
    /// Streams all active canonical advisories for batch processing,
    /// avoiding a full in-memory materialization of the result set.
    /// </summary>
    IAsyncEnumerable<AdvisoryCanonicalEntity> StreamActiveAsync(CancellationToken ct = default);

    #endregion

    #region Source Edge Operations

    /// <summary>
    /// Gets all source edges for a canonical advisory.
    /// </summary>
    Task<IReadOnlyList<AdvisorySourceEdgeEntity>> GetSourceEdgesAsync(Guid canonicalId, CancellationToken ct = default);

    /// <summary>
    /// Gets a source edge by ID.
    /// </summary>
    Task<AdvisorySourceEdgeEntity?> GetSourceEdgeByIdAsync(Guid id, CancellationToken ct = default);

    /// <summary>
    /// Adds a source edge to a canonical advisory.
    /// </summary>
    /// <returns>The ID of the newly added source edge.</returns>
    Task<Guid> AddSourceEdgeAsync(AdvisorySourceEdgeEntity edge, CancellationToken ct = default);

    /// <summary>
    /// Gets source edges by source advisory ID (vendor ID).
    /// </summary>
    Task<IReadOnlyList<AdvisorySourceEdgeEntity>> GetSourceEdgesByAdvisoryIdAsync(
        string sourceAdvisoryId,
        CancellationToken ct = default);

    /// <summary>
    /// Counts total source edges.
    /// </summary>
    Task<long> CountSourceEdgesAsync(CancellationToken ct = default);

    #endregion

    #region Statistics

    /// <summary>
    /// Gets statistics about canonical advisories.
    /// </summary>
    Task<CanonicalStatistics> GetStatisticsAsync(CancellationToken ct = default);

    #endregion
}
/// <summary>
/// Statistics about canonical advisory records.
/// </summary>
/// <remarks>
/// Produced by <c>IAdvisoryCanonicalRepository.GetStatisticsAsync</c>.
/// </remarks>
public sealed record CanonicalStatistics
{
    /// <summary>
    /// Total canonical advisory count.
    /// </summary>
    public long TotalCanonicals { get; init; }

    /// <summary>
    /// Active canonical advisory count.
    /// </summary>
    public long ActiveCanonicals { get; init; }

    /// <summary>
    /// Total source edge count.
    /// </summary>
    public long TotalSourceEdges { get; init; }

    /// <summary>
    /// Average source edges per canonical.
    /// </summary>
    public double AvgSourceEdgesPerCanonical { get; init; }

    /// <summary>
    /// Most recent canonical update time, or null when no canonicals exist.
    /// </summary>
    public DateTimeOffset? LastUpdatedAt { get; init; }
}

View File

@@ -0,0 +1,13 @@
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository for advisory credits.
/// </summary>
public interface IAdvisoryCreditRepository
{
    /// <summary>
    /// Replaces all credit rows for an advisory with the supplied set.
    /// Pass an empty sequence to remove every credit for the advisory.
    /// </summary>
    Task ReplaceAsync(Guid advisoryId, IEnumerable<AdvisoryCreditEntity> credits, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets all credit rows recorded for an advisory.
    /// </summary>
    Task<IReadOnlyList<AdvisoryCreditEntity>> GetByAdvisoryAsync(Guid advisoryId, CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,13 @@
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository for advisory CVSS scores.
/// </summary>
public interface IAdvisoryCvssRepository
{
    /// <summary>
    /// Replaces all CVSS score rows for an advisory with the supplied set.
    /// Pass an empty sequence to remove every score for the advisory.
    /// </summary>
    Task ReplaceAsync(Guid advisoryId, IEnumerable<AdvisoryCvssEntity> scores, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets all CVSS score rows recorded for an advisory.
    /// </summary>
    Task<IReadOnlyList<AdvisoryCvssEntity>> GetByAdvisoryAsync(Guid advisoryId, CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,13 @@
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository for advisory references.
/// </summary>
public interface IAdvisoryReferenceRepository
{
    /// <summary>
    /// Replaces all reference rows for an advisory with the supplied set.
    /// Pass an empty sequence to remove every reference for the advisory.
    /// </summary>
    Task ReplaceAsync(Guid advisoryId, IEnumerable<AdvisoryReferenceEntity> references, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets all reference rows recorded for an advisory.
    /// </summary>
    Task<IReadOnlyList<AdvisoryReferenceEntity>> GetByAdvisoryAsync(Guid advisoryId, CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,114 @@
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository interface for advisory operations.
/// </summary>
public interface IAdvisoryRepository
{
    /// <summary>
    /// Creates or updates an advisory (upsert by advisory_key).
    /// </summary>
    Task<AdvisoryEntity> UpsertAsync(AdvisoryEntity advisory, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets an advisory by ID.
    /// </summary>
    Task<AdvisoryEntity?> GetByIdAsync(Guid id, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets an advisory by key.
    /// </summary>
    Task<AdvisoryEntity?> GetByKeyAsync(string advisoryKey, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets an advisory by primary vulnerability ID (CVE, GHSA, etc.).
    /// </summary>
    Task<AdvisoryEntity?> GetByVulnIdAsync(string vulnId, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets advisories that include the provided alias (e.g., CVE, GHSA).
    /// </summary>
    Task<IReadOnlyList<AdvisoryEntity>> GetByAliasAsync(string aliasValue, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets advisories affecting a package identified by full PURL.
    /// </summary>
    Task<IReadOnlyList<AdvisoryEntity>> GetAffectingPackageAsync(
        string purl,
        int limit = 100,
        int offset = 0,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets advisories affecting a package by ecosystem/name (when PURL missing).
    /// </summary>
    Task<IReadOnlyList<AdvisoryEntity>> GetAffectingPackageNameAsync(
        string ecosystem,
        string packageName,
        int limit = 100,
        int offset = 0,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Searches advisories by full-text search.
    /// </summary>
    /// <param name="query">Free-text search query.</param>
    /// <param name="severity">Optional severity filter; null matches all severities.</param>
    Task<IReadOnlyList<AdvisoryEntity>> SearchAsync(
        string query,
        string? severity = null,
        int limit = 50,
        int offset = 0,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets advisories by severity.
    /// </summary>
    Task<IReadOnlyList<AdvisoryEntity>> GetBySeverityAsync(
        string severity,
        int limit = 100,
        int offset = 0,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets advisories modified since a given time.
    /// </summary>
    Task<IReadOnlyList<AdvisoryEntity>> GetModifiedSinceAsync(
        DateTimeOffset since,
        int limit = 1000,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets advisories by source.
    /// </summary>
    Task<IReadOnlyList<AdvisoryEntity>> GetBySourceAsync(
        Guid sourceId,
        int limit = 100,
        int offset = 0,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Counts total advisories.
    /// </summary>
    Task<long> CountAsync(CancellationToken cancellationToken = default);

    /// <summary>
    /// Counts advisories by severity.
    /// </summary>
    /// <returns>Map of severity label to advisory count.</returns>
    Task<IDictionary<string, long>> CountBySeverityAsync(CancellationToken cancellationToken = default);

    /// <summary>
    /// Upserts an advisory and optional child records in a single transaction.
    /// Passing null collections leaves existing child rows untouched; pass empty collections to replace with none.
    /// </summary>
    Task<AdvisoryEntity> UpsertAsync(
        AdvisoryEntity advisory,
        IEnumerable<AdvisoryAliasEntity>? aliases,
        IEnumerable<AdvisoryCvssEntity>? cvss,
        IEnumerable<AdvisoryAffectedEntity>? affected,
        IEnumerable<AdvisoryReferenceEntity>? references,
        IEnumerable<AdvisoryCreditEntity>? credits,
        IEnumerable<AdvisoryWeaknessEntity>? weaknesses,
        IEnumerable<KevFlagEntity>? kevFlags,
        CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,13 @@
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository for advisory snapshots.
/// </summary>
public interface IAdvisorySnapshotRepository
{
    /// <summary>
    /// Inserts an advisory snapshot row.
    /// </summary>
    Task<AdvisorySnapshotEntity> InsertAsync(AdvisorySnapshotEntity snapshot, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets all advisory snapshots belonging to a feed snapshot.
    /// </summary>
    Task<IReadOnlyList<AdvisorySnapshotEntity>> GetByFeedSnapshotAsync(Guid feedSnapshotId, CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,13 @@
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository for advisory weaknesses (CWE).
/// </summary>
public interface IAdvisoryWeaknessRepository
{
    /// <summary>
    /// Replaces all weakness rows for an advisory with the supplied set.
    /// Pass an empty sequence to remove every weakness for the advisory.
    /// </summary>
    Task ReplaceAsync(Guid advisoryId, IEnumerable<AdvisoryWeaknessEntity> weaknesses, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets all weakness rows recorded for an advisory.
    /// </summary>
    Task<IReadOnlyList<AdvisoryWeaknessEntity>> GetByAdvisoryAsync(Guid advisoryId, CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,13 @@
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository for feed snapshots.
/// </summary>
public interface IFeedSnapshotRepository
{
    /// <summary>
    /// Inserts a feed snapshot; implementations treat (source, snapshot id) as the
    /// uniqueness key and return the stored or caller-supplied entity.
    /// </summary>
    Task<FeedSnapshotEntity> InsertAsync(FeedSnapshotEntity snapshot, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets a feed snapshot by its owning source and source-scoped snapshot identifier,
    /// or null when no such row exists.
    /// </summary>
    Task<FeedSnapshotEntity?> GetBySourceAndIdAsync(Guid sourceId, string snapshotId, CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,14 @@
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository for KEV flag records.
/// </summary>
public interface IKevFlagRepository
{
    /// <summary>
    /// Replaces all KEV flag rows for an advisory with the supplied set.
    /// Pass an empty sequence to remove every flag for the advisory.
    /// </summary>
    Task ReplaceAsync(Guid advisoryId, IEnumerable<KevFlagEntity> flags, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets all KEV flag rows recorded for an advisory.
    /// </summary>
    Task<IReadOnlyList<KevFlagEntity>> GetByAdvisoryAsync(Guid advisoryId, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets KEV flag rows matching a CVE identifier.
    /// </summary>
    Task<IReadOnlyList<KevFlagEntity>> GetByCveAsync(string cveId, CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,13 @@
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository for merge event audit records.
/// </summary>
public interface IMergeEventRepository
{
    /// <summary>
    /// Inserts a merge event audit row and returns the stored entity.
    /// </summary>
    Task<MergeEventEntity> InsertAsync(MergeEventEntity evt, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets merge events recorded for an advisory, paged via limit/offset.
    /// </summary>
    Task<IReadOnlyList<MergeEventEntity>> GetByAdvisoryAsync(Guid advisoryId, int limit = 100, int offset = 0, CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,169 @@
// -----------------------------------------------------------------------------
// IProvenanceScopeRepository.cs
// Sprint: SPRINT_8200_0015_0001_CONCEL_backport_integration
// Task: BACKPORT-8200-002
// Description: Repository interface for provenance scope operations
// -----------------------------------------------------------------------------
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository interface for distro-specific provenance scope operations.
/// </summary>
public interface IProvenanceScopeRepository
{
    #region CRUD Operations

    /// <summary>
    /// Gets a provenance scope by ID.
    /// </summary>
    Task<ProvenanceScopeEntity?> GetByIdAsync(Guid id, CancellationToken ct = default);

    /// <summary>
    /// Gets a provenance scope by canonical ID and distro release.
    /// </summary>
    Task<ProvenanceScopeEntity?> GetByCanonicalAndDistroAsync(
        Guid canonicalId,
        string distroRelease,
        CancellationToken ct = default);

    /// <summary>
    /// Gets all provenance scopes for a canonical advisory.
    /// </summary>
    Task<IReadOnlyList<ProvenanceScopeEntity>> GetByCanonicalIdAsync(
        Guid canonicalId,
        CancellationToken ct = default);

    /// <summary>
    /// Gets all provenance scopes for a distro release.
    /// </summary>
    Task<IReadOnlyList<ProvenanceScopeEntity>> GetByDistroReleaseAsync(
        string distroRelease,
        CancellationToken ct = default);

    /// <summary>
    /// Gets provenance scopes by patch ID (for lineage tracking).
    /// </summary>
    Task<IReadOnlyList<ProvenanceScopeEntity>> GetByPatchIdAsync(
        string patchId,
        CancellationToken ct = default);

    /// <summary>
    /// Upserts a provenance scope (insert or update by canonical_id + distro_release).
    /// </summary>
    /// <returns>The ID of the inserted or updated provenance scope.</returns>
    Task<Guid> UpsertAsync(ProvenanceScopeEntity entity, CancellationToken ct = default);

    /// <summary>
    /// Updates an existing provenance scope.
    /// </summary>
    Task UpdateAsync(ProvenanceScopeEntity entity, CancellationToken ct = default);

    /// <summary>
    /// Deletes a provenance scope.
    /// </summary>
    Task DeleteAsync(Guid id, CancellationToken ct = default);

    /// <summary>
    /// Deletes all provenance scopes for a canonical advisory.
    /// </summary>
    Task DeleteByCanonicalIdAsync(Guid canonicalId, CancellationToken ct = default);

    #endregion

    #region Query Operations

    /// <summary>
    /// Gets provenance scopes with high confidence (>= threshold).
    /// </summary>
    Task<IReadOnlyList<ProvenanceScopeEntity>> GetHighConfidenceAsync(
        decimal threshold = 0.7m,
        int limit = 1000,
        CancellationToken ct = default);

    /// <summary>
    /// Gets provenance scopes updated since a given time.
    /// </summary>
    Task<IReadOnlyList<ProvenanceScopeEntity>> GetUpdatedSinceAsync(
        DateTimeOffset since,
        int limit = 1000,
        CancellationToken ct = default);

    /// <summary>
    /// Gets provenance scopes by patch origin (upstream, distro, vendor).
    /// </summary>
    Task<IReadOnlyList<ProvenanceScopeEntity>> GetByPatchOriginAsync(
        string patchOrigin,
        int limit = 1000,
        CancellationToken ct = default);

    /// <summary>
    /// Gets provenance scopes with linked evidence.
    /// </summary>
    Task<IReadOnlyList<ProvenanceScopeEntity>> GetWithEvidenceAsync(
        int limit = 1000,
        CancellationToken ct = default);

    /// <summary>
    /// Streams all provenance scopes for batch processing, avoiding a full
    /// in-memory materialization of the result set.
    /// </summary>
    IAsyncEnumerable<ProvenanceScopeEntity> StreamAllAsync(CancellationToken ct = default);

    #endregion

    #region Statistics

    /// <summary>
    /// Gets provenance scope statistics.
    /// </summary>
    Task<ProvenanceScopeStatistics> GetStatisticsAsync(CancellationToken ct = default);

    /// <summary>
    /// Counts provenance scopes by distro release.
    /// </summary>
    /// <returns>Map of distro release to scope count.</returns>
    Task<IReadOnlyDictionary<string, long>> CountByDistroAsync(CancellationToken ct = default);

    #endregion
}
/// <summary>
/// Statistics about provenance scope records.
/// </summary>
/// <remarks>
/// Produced by <c>IProvenanceScopeRepository.GetStatisticsAsync</c>.
/// </remarks>
public sealed record ProvenanceScopeStatistics
{
    /// <summary>
    /// Total provenance scope count.
    /// </summary>
    public long TotalScopes { get; init; }

    /// <summary>
    /// Count of scopes with high confidence (>= 0.7).
    /// </summary>
    public long HighConfidenceScopes { get; init; }

    /// <summary>
    /// Count of scopes with linked evidence.
    /// </summary>
    public long ScopesWithEvidence { get; init; }

    /// <summary>
    /// Average confidence score.
    /// </summary>
    public decimal AvgConfidence { get; init; }

    /// <summary>
    /// Count of unique canonical advisories with provenance.
    /// </summary>
    public long UniqueCanonicals { get; init; }

    /// <summary>
    /// Count of unique distro releases tracked.
    /// </summary>
    public long UniqueDistros { get; init; }

    /// <summary>
    /// Most recent provenance scope update time, or null when no scopes exist.
    /// </summary>
    public DateTimeOffset? LastUpdatedAt { get; init; }
}

View File

@@ -0,0 +1,15 @@
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository for vulnerability feed sources.
/// </summary>
public interface ISourceRepository
{
Task<SourceEntity> UpsertAsync(SourceEntity source, CancellationToken cancellationToken = default);
Task<SourceEntity?> GetByIdAsync(Guid id, CancellationToken cancellationToken = default);
Task<SourceEntity?> GetByKeyAsync(string key, CancellationToken cancellationToken = default);
Task<IReadOnlyList<SourceEntity>> ListAsync(bool? enabled = null, CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,13 @@
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository for source ingestion state.
/// </summary>
public interface ISourceStateRepository
{
Task<SourceStateEntity> UpsertAsync(SourceStateEntity state, CancellationToken cancellationToken = default);
Task<SourceStateEntity?> GetBySourceIdAsync(Guid sourceId, CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,130 @@
// -----------------------------------------------------------------------------
// ISyncLedgerRepository.cs
// Sprint: SPRINT_8200_0014_0001_DB_sync_ledger_schema
// Task: SYNC-8200-006
// Description: Repository interface for federation sync ledger operations
// -----------------------------------------------------------------------------
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Repository for federation sync ledger and site policy operations.
/// </summary>
public interface ISyncLedgerRepository
{
    #region Ledger Operations

    /// <summary>
    /// Gets the latest sync ledger entry for a site.
    /// </summary>
    Task<SyncLedgerEntity?> GetLatestAsync(string siteId, CancellationToken ct = default);

    /// <summary>
    /// Gets sync history for a site.
    /// </summary>
    Task<IReadOnlyList<SyncLedgerEntity>> GetHistoryAsync(string siteId, int limit = 10, CancellationToken ct = default);

    /// <summary>
    /// Gets a ledger entry by bundle hash (for deduplication).
    /// </summary>
    Task<SyncLedgerEntity?> GetByBundleHashAsync(string bundleHash, CancellationToken ct = default);

    /// <summary>
    /// Inserts a new ledger entry.
    /// </summary>
    /// <returns>The ID of the inserted ledger entry.</returns>
    Task<Guid> InsertAsync(SyncLedgerEntity entry, CancellationToken ct = default);

    #endregion

    #region Cursor Operations

    /// <summary>
    /// Gets the current cursor position for a site, or null when the site has no ledger entries.
    /// </summary>
    Task<string?> GetCursorAsync(string siteId, CancellationToken ct = default);

    /// <summary>
    /// Advances the cursor to a new position (inserts a new ledger entry).
    /// </summary>
    Task AdvanceCursorAsync(
        string siteId,
        string newCursor,
        string bundleHash,
        int itemsCount,
        DateTimeOffset signedAt,
        CancellationToken ct = default);

    /// <summary>
    /// Checks if importing a bundle would conflict with existing cursor.
    /// Returns true if the cursor is older than the current position.
    /// </summary>
    Task<bool> IsCursorConflictAsync(string siteId, string cursor, CancellationToken ct = default);

    #endregion

    #region Site Policy Operations

    /// <summary>
    /// Gets the policy for a specific site.
    /// </summary>
    Task<SitePolicyEntity?> GetPolicyAsync(string siteId, CancellationToken ct = default);

    /// <summary>
    /// Creates or updates a site policy.
    /// </summary>
    Task UpsertPolicyAsync(SitePolicyEntity policy, CancellationToken ct = default);

    /// <summary>
    /// Gets all site policies.
    /// </summary>
    /// <param name="enabledOnly">When true (default), only enabled sites are returned.</param>
    Task<IReadOnlyList<SitePolicyEntity>> GetAllPoliciesAsync(bool enabledOnly = true, CancellationToken ct = default);

    /// <summary>
    /// Deletes a site policy.
    /// </summary>
    /// <returns>True when a policy was deleted; false when no policy existed for the site.</returns>
    Task<bool> DeletePolicyAsync(string siteId, CancellationToken ct = default);

    #endregion

    #region Statistics

    /// <summary>
    /// Gets sync statistics across all sites.
    /// </summary>
    Task<SyncStatistics> GetStatisticsAsync(CancellationToken ct = default);

    #endregion
}
/// <summary>
/// Aggregated sync statistics across all sites.
/// </summary>
/// <remarks>
/// Produced by <c>ISyncLedgerRepository.GetStatisticsAsync</c>.
/// </remarks>
public sealed record SyncStatistics
{
    /// <summary>
    /// Total number of registered sites.
    /// </summary>
    public int TotalSites { get; init; }

    /// <summary>
    /// Number of enabled sites.
    /// </summary>
    public int EnabledSites { get; init; }

    /// <summary>
    /// Total bundles imported across all sites.
    /// </summary>
    public long TotalBundlesImported { get; init; }

    /// <summary>
    /// Total items imported across all sites.
    /// </summary>
    public long TotalItemsImported { get; init; }

    /// <summary>
    /// Timestamp of the most recent import, or null when nothing has been imported yet.
    /// </summary>
    public DateTimeOffset? LastImportAt { get; init; }
}

View File

@@ -0,0 +1,364 @@
// -----------------------------------------------------------------------------
// InterestScoreRepository.cs
// Sprint: SPRINT_8200_0013_0002_CONCEL_interest_scoring
// Task: ISCORE-8200-003
// Description: PostgreSQL repository for interest score persistence
// -----------------------------------------------------------------------------
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Interest;
using StellaOps.Concelier.Interest.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for interest score persistence.
/// Interest scores rank canonical advisories (one row per canonical ID) and
/// are stored in the vuln.interest_score table.
/// </summary>
public sealed class InterestScoreRepository : RepositoryBase<ConcelierDataSource>, IInterestScoreRepository
{
    // Interest scores are global rows, stored under the shared system tenant
    // rather than any per-customer tenant scope.
    private const string SystemTenantId = "_system";

    /// <summary>
    /// Creates the repository over the Concelier PostgreSQL data source.
    /// </summary>
    public InterestScoreRepository(ConcelierDataSource dataSource, ILogger<InterestScoreRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <inheritdoc />
    public Task<InterestScore?> GetByCanonicalIdAsync(Guid canonicalId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT canonical_id, score, reasons, last_seen_in_build, computed_at
            FROM vuln.interest_score
            WHERE canonical_id = @canonical_id
            """;
        return QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "canonical_id", canonicalId),
            MapScore,
            cancellationToken);
    }

    /// <inheritdoc />
    public async Task<IReadOnlyDictionary<Guid, InterestScore>> GetByCanonicalIdsAsync(
        IEnumerable<Guid> canonicalIds,
        CancellationToken cancellationToken = default)
    {
        var ids = canonicalIds.ToArray();
        if (ids.Length == 0)
        {
            // Avoid a round trip for an empty key set.
            return new Dictionary<Guid, InterestScore>();
        }
        // Single query using a uuid[] parameter instead of one query per ID.
        const string sql = """
            SELECT canonical_id, score, reasons, last_seen_in_build, computed_at
            FROM vuln.interest_score
            WHERE canonical_id = ANY(@canonical_ids)
            """;
        var scores = await QueryAsync(
            SystemTenantId,
            sql,
            cmd => AddUuidArrayParameter(cmd, "canonical_ids", ids),
            MapScore,
            cancellationToken).ConfigureAwait(false);
        // canonical_id is the primary key, so keys are unique here.
        return scores.ToDictionary(s => s.CanonicalId);
    }

    /// <inheritdoc />
    public async Task SaveAsync(InterestScore score, CancellationToken cancellationToken = default)
    {
        // Upsert keyed on canonical_id; an existing row is fully overwritten.
        const string sql = """
            INSERT INTO vuln.interest_score
            (canonical_id, score, reasons, last_seen_in_build, computed_at)
            VALUES
            (@canonical_id, @score, @reasons, @last_seen_in_build, @computed_at)
            ON CONFLICT (canonical_id)
            DO UPDATE SET
                score = EXCLUDED.score,
                reasons = EXCLUDED.reasons,
                last_seen_in_build = EXCLUDED.last_seen_in_build,
                computed_at = EXCLUDED.computed_at
            """;
        // Reasons are persisted as a JSON array in the jsonb column.
        var reasonsJson = JsonSerializer.Serialize(score.Reasons);
        await ExecuteAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "canonical_id", score.CanonicalId);
                AddParameter(cmd, "score", score.Score);
                AddJsonbParameter(cmd, "reasons", reasonsJson);
                AddParameter(cmd, "last_seen_in_build", score.LastSeenInBuild ?? (object)DBNull.Value);
                AddParameter(cmd, "computed_at", score.ComputedAt);
            },
            cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task SaveManyAsync(IEnumerable<InterestScore> scores, CancellationToken cancellationToken = default)
    {
        var scoreList = scores.ToList();
        if (scoreList.Count == 0)
        {
            return;
        }
        // Row-by-row upsert inside a single transaction: one round trip per
        // score, but all rows commit (or roll back) atomically.
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
        const string sql = """
            INSERT INTO vuln.interest_score
            (canonical_id, score, reasons, last_seen_in_build, computed_at)
            VALUES
            (@canonical_id, @score, @reasons, @last_seen_in_build, @computed_at)
            ON CONFLICT (canonical_id)
            DO UPDATE SET
                score = EXCLUDED.score,
                reasons = EXCLUDED.reasons,
                last_seen_in_build = EXCLUDED.last_seen_in_build,
                computed_at = EXCLUDED.computed_at
            """;
        foreach (var score in scoreList)
        {
            await using var cmd = CreateCommand(sql, connection);
            cmd.Transaction = transaction;
            var reasonsJson = JsonSerializer.Serialize(score.Reasons);
            AddParameter(cmd, "canonical_id", score.CanonicalId);
            AddParameter(cmd, "score", score.Score);
            AddJsonbParameter(cmd, "reasons", reasonsJson);
            AddParameter(cmd, "last_seen_in_build", score.LastSeenInBuild ?? (object)DBNull.Value);
            AddParameter(cmd, "computed_at", score.ComputedAt);
            await cmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }
        await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
        Logger.LogDebug("Saved {Count} interest scores", scoreList.Count);
    }

    /// <inheritdoc />
    public Task DeleteAsync(Guid canonicalId, CancellationToken cancellationToken = default)
    {
        const string sql = "DELETE FROM vuln.interest_score WHERE canonical_id = @canonical_id";
        return ExecuteAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "canonical_id", canonicalId),
            cancellationToken);
    }

    /// <inheritdoc />
    public Task<IReadOnlyList<Guid>> GetLowScoreCanonicalIdsAsync(
        double threshold,
        TimeSpan minAge,
        int limit,
        CancellationToken cancellationToken = default)
    {
        // Lowest, oldest scores first: candidates for demotion/cleanup.
        const string sql = """
            SELECT canonical_id
            FROM vuln.interest_score
            WHERE score < @threshold
              AND computed_at < @min_computed_at
            ORDER BY score ASC, computed_at ASC
            LIMIT @limit
            """;
        // Only rows computed at least minAge ago qualify.
        var minComputedAt = DateTimeOffset.UtcNow - minAge;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "threshold", threshold);
                AddParameter(cmd, "min_computed_at", minComputedAt);
                AddParameter(cmd, "limit", limit);
            },
            reader => reader.GetGuid(0),
            cancellationToken);
    }

    /// <inheritdoc />
    public Task<IReadOnlyList<Guid>> GetHighScoreCanonicalIdsAsync(
        double threshold,
        int limit,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT canonical_id
            FROM vuln.interest_score
            WHERE score >= @threshold
            ORDER BY score DESC
            LIMIT @limit
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "threshold", threshold);
                AddParameter(cmd, "limit", limit);
            },
            reader => reader.GetGuid(0),
            cancellationToken);
    }

    /// <inheritdoc />
    public Task<IReadOnlyList<InterestScore>> GetTopScoresAsync(
        int limit,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT canonical_id, score, reasons, last_seen_in_build, computed_at
            FROM vuln.interest_score
            ORDER BY score DESC, computed_at DESC
            LIMIT @limit
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "limit", limit),
            MapScore,
            cancellationToken);
    }

    /// <inheritdoc />
    public Task<IReadOnlyList<Guid>> GetStaleCanonicalIdsAsync(
        DateTimeOffset staleAfter,
        int limit,
        CancellationToken cancellationToken = default)
    {
        // Oldest computations first, so recomputation prioritizes the stalest rows.
        const string sql = """
            SELECT canonical_id
            FROM vuln.interest_score
            WHERE computed_at < @stale_after
            ORDER BY computed_at ASC
            LIMIT @limit
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "stale_after", staleAfter);
                AddParameter(cmd, "limit", limit);
            },
            reader => reader.GetGuid(0),
            cancellationToken);
    }

    /// <inheritdoc />
    public Task<IReadOnlyList<InterestScore>> GetAllAsync(
        int offset,
        int limit,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT canonical_id, score, reasons, last_seen_in_build, computed_at
            FROM vuln.interest_score
            ORDER BY score DESC, computed_at DESC
            OFFSET @offset
            LIMIT @limit
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "offset", offset);
                AddParameter(cmd, "limit", limit);
            },
            MapScore,
            cancellationToken);
    }

    /// <inheritdoc />
    public Task<ScoreDistribution> GetScoreDistributionAsync(
        CancellationToken cancellationToken = default)
    {
        // Alias kept for interface compatibility; delegates to GetDistributionAsync.
        return GetDistributionAsync(cancellationToken);
    }

    /// <inheritdoc />
    public async Task<long> CountAsync(CancellationToken cancellationToken = default)
    {
        const string sql = "SELECT COUNT(*) FROM vuln.interest_score";
        var count = await ExecuteScalarAsync<long>(
            SystemTenantId,
            sql,
            null,
            cancellationToken).ConfigureAwait(false);
        return count;
    }

    /// <inheritdoc />
    public async Task<ScoreDistribution> GetDistributionAsync(CancellationToken cancellationToken = default)
    {
        // Single aggregate query bucketing scores into high (>= 0.7),
        // medium ([0.4, 0.7)), low ([0.2, 0.4)) and none (< 0.2) bands.
        const string sql = """
            SELECT
                COUNT(*) AS total_count,
                COUNT(*) FILTER (WHERE score >= 0.7) AS high_count,
                COUNT(*) FILTER (WHERE score >= 0.4 AND score < 0.7) AS medium_count,
                COUNT(*) FILTER (WHERE score >= 0.2 AND score < 0.4) AS low_count,
                COUNT(*) FILTER (WHERE score < 0.2) AS none_count,
                COALESCE(AVG(score), 0) AS average_score,
                COALESCE(PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY score), 0) AS median_score
            FROM vuln.interest_score
            """;
        var result = await QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            null,
            reader => new ScoreDistribution
            {
                TotalCount = reader.GetInt64(0),
                HighCount = reader.GetInt64(1),
                MediumCount = reader.GetInt64(2),
                LowCount = reader.GetInt64(3),
                NoneCount = reader.GetInt64(4),
                AverageScore = reader.GetDouble(5),
                MedianScore = reader.GetDouble(6)
            },
            cancellationToken).ConfigureAwait(false);
        // Aggregates always return one row; the fallback is purely defensive.
        return result ?? new ScoreDistribution();
    }

    // Maps a reader row to an InterestScore. Column ordinals follow the SELECT
    // list used throughout this class:
    // canonical_id, score, reasons, last_seen_in_build, computed_at.
    private static InterestScore MapScore(NpgsqlDataReader reader)
    {
        var reasonsJson = GetNullableString(reader, 2);
        // A NULL/empty reasons column deserializes to an empty reason list.
        var reasons = string.IsNullOrEmpty(reasonsJson)
            ? Array.Empty<string>()
            : JsonSerializer.Deserialize<string[]>(reasonsJson) ?? [];
        return new InterestScore
        {
            CanonicalId = reader.GetGuid(0),
            Score = reader.GetDouble(1),
            Reasons = reasons,
            LastSeenInBuild = GetNullableGuid(reader, 3),
            ComputedAt = reader.GetFieldValue<DateTimeOffset>(4)
        };
    }
}

View File

@@ -0,0 +1,114 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for KEV (Known Exploited Vulnerabilities) flags
/// stored in <c>vuln.kev_flags</c>.
/// </summary>
public sealed class KevFlagRepository : RepositoryBase<ConcelierDataSource>, IKevFlagRepository
{
    // KEV data is global; all access goes through the system tenant.
    private const string SystemTenantId = "_system";
    public KevFlagRepository(ConcelierDataSource dataSource, ILogger<KevFlagRepository> logger)
        : base(dataSource, logger)
    {
    }
    /// <summary>
    /// Replaces the full KEV flag set for an advisory: deletes existing rows
    /// and inserts <paramref name="flags"/> inside one transaction so readers
    /// never observe a partially replaced set. If any insert throws, disposing
    /// the uncommitted transaction rolls the delete back.
    /// </summary>
    public async Task ReplaceAsync(Guid advisoryId, IEnumerable<KevFlagEntity> flags, CancellationToken cancellationToken = default)
    {
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
        const string deleteSql = "DELETE FROM vuln.kev_flags WHERE advisory_id = @advisory_id";
        await using (var deleteCmd = CreateCommand(deleteSql, connection))
        {
            deleteCmd.Transaction = transaction;
            AddParameter(deleteCmd, "advisory_id", advisoryId);
            await deleteCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }
        const string insertSql = """
            INSERT INTO vuln.kev_flags
                (id, advisory_id, cve_id, vendor_project, product, vulnerability_name,
                 date_added, due_date, known_ransomware_use, notes)
            VALUES
                (@id, @advisory_id, @cve_id, @vendor_project, @product, @vulnerability_name,
                 @date_added, @due_date, @known_ransomware_use, @notes)
            """;
        // One command per flag; the advisory id parameter is taken from the
        // method argument, not from the entity.
        foreach (var flag in flags)
        {
            await using var insertCmd = CreateCommand(insertSql, connection);
            insertCmd.Transaction = transaction;
            AddParameter(insertCmd, "id", flag.Id);
            AddParameter(insertCmd, "advisory_id", advisoryId);
            AddParameter(insertCmd, "cve_id", flag.CveId);
            AddParameter(insertCmd, "vendor_project", flag.VendorProject);
            AddParameter(insertCmd, "product", flag.Product);
            AddParameter(insertCmd, "vulnerability_name", flag.VulnerabilityName);
            AddParameter(insertCmd, "date_added", flag.DateAdded);
            AddParameter(insertCmd, "due_date", flag.DueDate);
            AddParameter(insertCmd, "known_ransomware_use", flag.KnownRansomwareUse);
            AddParameter(insertCmd, "notes", flag.Notes);
            await insertCmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }
        await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
    }
    /// <summary>Returns the KEV flags attached to an advisory, newest first.</summary>
    public Task<IReadOnlyList<KevFlagEntity>> GetByAdvisoryAsync(Guid advisoryId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, advisory_id, cve_id, vendor_project, product, vulnerability_name,
                   date_added, due_date, known_ransomware_use, notes, created_at
            FROM vuln.kev_flags
            WHERE advisory_id = @advisory_id
            ORDER BY date_added DESC, cve_id
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "advisory_id", advisoryId),
            MapKev,
            cancellationToken);
    }
    /// <summary>Returns the KEV flags recorded for a CVE id, newest first.</summary>
    public Task<IReadOnlyList<KevFlagEntity>> GetByCveAsync(string cveId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, advisory_id, cve_id, vendor_project, product, vulnerability_name,
                   date_added, due_date, known_ransomware_use, notes, created_at
            FROM vuln.kev_flags
            WHERE cve_id = @cve_id
            ORDER BY date_added DESC, advisory_id
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "cve_id", cveId),
            MapKev,
            cancellationToken);
    }
    // Ordinals match the SELECT column order in both queries above; date
    // columns come back as DateTime and are narrowed to DateOnly.
    private static KevFlagEntity MapKev(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        AdvisoryId = reader.GetGuid(1),
        CveId = reader.GetString(2),
        VendorProject = GetNullableString(reader, 3),
        Product = GetNullableString(reader, 4),
        VulnerabilityName = GetNullableString(reader, 5),
        DateAdded = DateOnly.FromDateTime(reader.GetDateTime(6)),
        DueDate = reader.IsDBNull(7) ? null : DateOnly.FromDateTime(reader.GetDateTime(7)),
        KnownRansomwareUse = reader.GetBoolean(8),
        Notes = GetNullableString(reader, 9),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(10)
    };
}

View File

@@ -0,0 +1,79 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for merge event audit records in <c>vuln.merge_events</c>.
/// </summary>
public sealed class MergeEventRepository : RepositoryBase<ConcelierDataSource>, IMergeEventRepository
{
    // Merge events are global audit data, not tenant-scoped.
    private const string SystemTenantId = "_system";
    public MergeEventRepository(ConcelierDataSource dataSource, ILogger<MergeEventRepository> logger)
        : base(dataSource, logger)
    {
    }
    /// <summary>
    /// Inserts a merge event and returns the stored row (including the
    /// generated id and created_at). Old/new values are cast to jsonb on
    /// write and read back as text.
    /// </summary>
    public async Task<MergeEventEntity> InsertAsync(MergeEventEntity evt, CancellationToken cancellationToken = default)
    {
        const string sql = """
            INSERT INTO vuln.merge_events
                (advisory_id, source_id, event_type, old_value, new_value)
            VALUES
                (@advisory_id, @source_id, @event_type, @old_value::jsonb, @new_value::jsonb)
            RETURNING id, advisory_id, source_id, event_type, old_value::text, new_value::text, created_at
            """;
        return await QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "advisory_id", evt.AdvisoryId);
                AddParameter(cmd, "source_id", evt.SourceId);
                AddParameter(cmd, "event_type", evt.EventType);
                AddJsonbParameter(cmd, "old_value", evt.OldValue);
                AddJsonbParameter(cmd, "new_value", evt.NewValue);
            },
            // INSERT ... RETURNING always yields a row; the null-forgiving
            // mapper plus the trailing throw guard the impossible-null case.
            MapEvent!,
            cancellationToken).ConfigureAwait(false) ?? throw new InvalidOperationException("Insert returned null");
    }
    /// <summary>
    /// Pages merge events for an advisory, newest first; id breaks created_at
    /// ties so the ordering is stable across pages.
    /// </summary>
    public Task<IReadOnlyList<MergeEventEntity>> GetByAdvisoryAsync(Guid advisoryId, int limit = 100, int offset = 0, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, advisory_id, source_id, event_type, old_value::text, new_value::text, created_at
            FROM vuln.merge_events
            WHERE advisory_id = @advisory_id
            ORDER BY created_at DESC, id DESC
            LIMIT @limit OFFSET @offset
            """;
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "advisory_id", advisoryId);
                AddParameter(cmd, "limit", limit);
                AddParameter(cmd, "offset", offset);
            },
            MapEvent,
            cancellationToken);
    }
    // Ordinals match the SELECT column order used by both queries above.
    private static MergeEventEntity MapEvent(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetInt64(0),
        AdvisoryId = reader.GetGuid(1),
        SourceId = GetNullableGuid(reader, 2),
        EventType = reader.GetString(3),
        OldValue = GetNullableString(reader, 4),
        NewValue = GetNullableString(reader, 5),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(6)
    };
}

View File

@@ -0,0 +1,96 @@
using System.Text.Json;
using Dapper;
using StellaOps.Concelier.Storage.ChangeHistory;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Dapper-backed store for advisory change-history records in
/// <c>concelier.change_history</c>.
/// </summary>
internal sealed class PostgresChangeHistoryStore : IChangeHistoryStore
{
    private readonly ConcelierDataSource _dataSource;

    // camelCase matches the JSON shape persisted in the "changes" column.
    private readonly JsonSerializerOptions _jsonOptions = new(JsonSerializerDefaults.General)
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase
    };

    public PostgresChangeHistoryStore(ConcelierDataSource dataSource)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
    }

    /// <summary>
    /// Inserts a change-history record. Re-inserting the same id is a no-op
    /// (ON CONFLICT DO NOTHING), keeping replayed ingestion batches idempotent.
    /// </summary>
    /// <exception cref="ArgumentNullException">When <paramref name="record"/> is null.</exception>
    public async Task AddAsync(ChangeHistoryRecord record, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(record);

        const string sql = """
            INSERT INTO concelier.change_history
                (id, source_name, advisory_key, document_id, document_hash, snapshot_hash, previous_snapshot_hash, snapshot, previous_snapshot, changes, created_at)
            VALUES (@Id, @SourceName, @AdvisoryKey, @DocumentId, @DocumentHash, @SnapshotHash, @PreviousSnapshotHash, @Snapshot, @PreviousSnapshot, @Changes, @CreatedAt)
            ON CONFLICT (id) DO NOTHING;
            """;

        // ConfigureAwait(false): library code with no sync-context dependency,
        // consistent with the other repositories in this module.
        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await connection.ExecuteAsync(new CommandDefinition(sql, new
        {
            record.Id,
            record.SourceName,
            record.AdvisoryKey,
            record.DocumentId,
            record.DocumentHash,
            record.SnapshotHash,
            record.PreviousSnapshotHash,
            Snapshot = record.Snapshot,
            PreviousSnapshot = record.PreviousSnapshot,
            Changes = JsonSerializer.Serialize(record.Changes, _jsonOptions),
            record.CreatedAt
        }, cancellationToken: cancellationToken)).ConfigureAwait(false);
    }

    /// <summary>
    /// Returns the most recent change-history records for a source/advisory
    /// pair, newest first, capped at <paramref name="limit"/>.
    /// </summary>
    public async Task<IReadOnlyList<ChangeHistoryRecord>> GetRecentAsync(string sourceName, string advisoryKey, int limit, CancellationToken cancellationToken)
    {
        const string sql = """
            SELECT id, source_name, advisory_key, document_id, document_hash, snapshot_hash, previous_snapshot_hash, snapshot, previous_snapshot, changes, created_at
            FROM concelier.change_history
            WHERE source_name = @SourceName AND advisory_key = @AdvisoryKey
            ORDER BY created_at DESC
            LIMIT @Limit;
            """;

        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        var rows = await connection.QueryAsync<ChangeHistoryRow>(new CommandDefinition(sql, new
        {
            SourceName = sourceName,
            AdvisoryKey = advisoryKey,
            Limit = limit
        }, cancellationToken: cancellationToken)).ConfigureAwait(false);
        return rows.Select(ToRecord).ToArray();
    }

    // Nullable "previous" columns are surfaced as empty strings to keep the
    // record contract non-null for the first snapshot of an advisory.
    private ChangeHistoryRecord ToRecord(ChangeHistoryRow row)
    {
        var changes = JsonSerializer.Deserialize<IReadOnlyList<ChangeHistoryFieldChange>>(row.Changes, _jsonOptions) ?? Array.Empty<ChangeHistoryFieldChange>();
        return new ChangeHistoryRecord(
            row.Id,
            row.SourceName,
            row.AdvisoryKey,
            row.DocumentId,
            row.DocumentHash,
            row.SnapshotHash,
            row.PreviousSnapshotHash ?? string.Empty,
            row.Snapshot,
            row.PreviousSnapshot ?? string.Empty,
            changes,
            row.CreatedAt);
    }

    /// <summary>Row shape matching the SELECT column order.</summary>
    private sealed record ChangeHistoryRow(
        Guid Id,
        string SourceName,
        string AdvisoryKey,
        Guid DocumentId,
        string DocumentHash,
        string SnapshotHash,
        string? PreviousSnapshotHash,
        string Snapshot,
        string? PreviousSnapshot,
        string Changes,
        DateTimeOffset CreatedAt);
}

View File

@@ -0,0 +1,118 @@
using System.Linq;
using System.Text.Json;
using Dapper;
using StellaOps.Concelier.Storage;
using Contracts = StellaOps.Concelier.Storage.Contracts;
using StellaOps.Concelier.Persistence.Postgres;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Dapper-backed DTO store over <c>concelier.dtos</c>, also exposed through
/// the <see cref="Contracts.IStorageDtoStore"/> contract via adapter mappings.
/// </summary>
internal sealed class PostgresDtoStore : IDtoStore, Contracts.IStorageDtoStore
{
    private readonly ConcelierDataSource _dataSource;

    // NOTE: the original declared a JsonSerializerOptions field here, but the
    // payload round-trips through DocumentObject.ToJson()/Parse(), so the
    // unused field has been removed.

    public PostgresDtoStore(ConcelierDataSource dataSource)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
    }

    /// <summary>
    /// Inserts or updates the DTO for a document (one DTO per document_id;
    /// ON CONFLICT refreshes payload and metadata) and returns the stored row.
    /// </summary>
    public async Task<DtoRecord> UpsertAsync(DtoRecord record, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(record);

        const string sql = """
            INSERT INTO concelier.dtos (id, document_id, source_name, format, payload_json, schema_version, created_at, validated_at)
            VALUES (@Id, @DocumentId, @SourceName, @Format, @PayloadJson, @SchemaVersion, @CreatedAt, @ValidatedAt)
            ON CONFLICT (document_id) DO UPDATE
            SET payload_json = EXCLUDED.payload_json,
                schema_version = EXCLUDED.schema_version,
                source_name = EXCLUDED.source_name,
                format = EXCLUDED.format,
                validated_at = EXCLUDED.validated_at
            RETURNING id, document_id, source_name, format, payload_json, schema_version, created_at, validated_at;
            """;

        var payloadJson = record.Payload.ToJson();
        // ConfigureAwait(false): library code, consistent with sibling stores.
        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        var row = await connection.QuerySingleAsync<DtoRow>(new CommandDefinition(sql, new
        {
            record.Id,
            record.DocumentId,
            record.SourceName,
            record.Format,
            PayloadJson = payloadJson,
            record.SchemaVersion,
            record.CreatedAt,
            record.ValidatedAt
        }, cancellationToken: cancellationToken)).ConfigureAwait(false);
        return ToRecord(row);
    }

    /// <summary>Finds the DTO for a document id, or null when absent.</summary>
    public async Task<DtoRecord?> FindByDocumentIdAsync(Guid documentId, CancellationToken cancellationToken)
    {
        const string sql = """
            SELECT id, document_id, source_name, format, payload_json, schema_version, created_at, validated_at
            FROM concelier.dtos
            WHERE document_id = @DocumentId
            LIMIT 1;
            """;

        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        var row = await connection.QuerySingleOrDefaultAsync<DtoRow>(new CommandDefinition(sql, new { DocumentId = documentId }, cancellationToken: cancellationToken)).ConfigureAwait(false);
        return row is null ? null : ToRecord(row);
    }

    /// <summary>Returns up to <paramref name="limit"/> DTOs for a source, newest first.</summary>
    public async Task<IReadOnlyList<DtoRecord>> GetBySourceAsync(string sourceName, int limit, CancellationToken cancellationToken)
    {
        const string sql = """
            SELECT id, document_id, source_name, format, payload_json, schema_version, created_at, validated_at
            FROM concelier.dtos
            WHERE source_name = @SourceName
            ORDER BY created_at DESC
            LIMIT @Limit;
            """;

        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        var rows = await connection.QueryAsync<DtoRow>(new CommandDefinition(sql, new { SourceName = sourceName, Limit = limit }, cancellationToken: cancellationToken)).ConfigureAwait(false);
        return rows.Select(ToRecord).ToArray();
    }

    // Re-hydrates the stored JSON payload into the document model.
    private DtoRecord ToRecord(DtoRow row)
    {
        var payload = StellaOps.Concelier.Documents.DocumentObject.Parse(row.PayloadJson);
        return new DtoRecord(
            row.Id,
            row.DocumentId,
            row.SourceName,
            row.Format,
            payload,
            row.CreatedAt,
            row.SchemaVersion,
            row.ValidatedAt);
    }

    // Explicit interface adapters: translate between the legacy DtoRecord and
    // the storage-contract StorageDto shapes without duplicating SQL.
    async Task<Contracts.StorageDto> Contracts.IStorageDtoStore.UpsertAsync(Contracts.StorageDto record, CancellationToken cancellationToken)
        => (await UpsertAsync(record.ToLegacyDtoRecord(), cancellationToken).ConfigureAwait(false)).ToStorageDto();

    async Task<Contracts.StorageDto?> Contracts.IStorageDtoStore.FindByDocumentIdAsync(Guid documentId, CancellationToken cancellationToken)
        => (await FindByDocumentIdAsync(documentId, cancellationToken).ConfigureAwait(false))?.ToStorageDto();

    async Task<IReadOnlyList<Contracts.StorageDto>> Contracts.IStorageDtoStore.GetBySourceAsync(string sourceName, int limit, CancellationToken cancellationToken)
        => (await GetBySourceAsync(sourceName, limit, cancellationToken).ConfigureAwait(false))
            .Select(dto => dto.ToStorageDto())
            .ToArray();

    /// <summary>Row shape matching the SELECT column order.</summary>
    private sealed record DtoRow(
        Guid Id,
        Guid DocumentId,
        string SourceName,
        string Format,
        string PayloadJson,
        string SchemaVersion,
        DateTimeOffset CreatedAt,
        DateTimeOffset ValidatedAt);
}

View File

@@ -0,0 +1,119 @@
using System.Text.Json;
using Dapper;
using StellaOps.Concelier.Storage.Exporting;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Dapper-backed store for exporter checkpoint state in
/// <c>concelier.export_states</c>; the file list is persisted as JSON text.
/// </summary>
internal sealed class PostgresExportStateStore : IExportStateStore
{
    private readonly ConcelierDataSource _dataSource;
    // camelCase matches the JSON shape of the serialized "files" column.
    private readonly JsonSerializerOptions _jsonOptions = new(JsonSerializerDefaults.General)
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase
    };
    public PostgresExportStateStore(ConcelierDataSource dataSource)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
    }
    /// <summary>Loads the export state with the given id, or null when absent.</summary>
    public async Task<ExportStateRecord?> FindAsync(string id, CancellationToken cancellationToken)
    {
        const string sql = """
            SELECT id,
                   export_cursor,
                   last_full_digest,
                   last_delta_digest,
                   base_export_id,
                   base_digest,
                   target_repository,
                   files,
                   exporter_version,
                   updated_at
            FROM concelier.export_states
            WHERE id = @Id
            LIMIT 1;
            """;
        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        var row = await connection.QuerySingleOrDefaultAsync<ExportStateRow>(new CommandDefinition(sql, new { Id = id }, cancellationToken: cancellationToken));
        return row is null ? null : ToRecord(row);
    }
    /// <summary>
    /// Inserts or fully overwrites the export state for <paramref name="record"/>'s
    /// id and returns the stored row as persisted.
    /// </summary>
    public async Task<ExportStateRecord> UpsertAsync(ExportStateRecord record, CancellationToken cancellationToken)
    {
        const string sql = """
            INSERT INTO concelier.export_states
                (id, export_cursor, last_full_digest, last_delta_digest, base_export_id, base_digest, target_repository, files, exporter_version, updated_at)
            VALUES (@Id, @ExportCursor, @LastFullDigest, @LastDeltaDigest, @BaseExportId, @BaseDigest, @TargetRepository, @Files, @ExporterVersion, @UpdatedAt)
            ON CONFLICT (id) DO UPDATE
            SET export_cursor = EXCLUDED.export_cursor,
                last_full_digest = EXCLUDED.last_full_digest,
                last_delta_digest = EXCLUDED.last_delta_digest,
                base_export_id = EXCLUDED.base_export_id,
                base_digest = EXCLUDED.base_digest,
                target_repository = EXCLUDED.target_repository,
                files = EXCLUDED.files,
                exporter_version = EXCLUDED.exporter_version,
                updated_at = EXCLUDED.updated_at
            RETURNING id,
                      export_cursor,
                      last_full_digest,
                      last_delta_digest,
                      base_export_id,
                      base_digest,
                      target_repository,
                      files,
                      exporter_version,
                      updated_at;
            """;
        // The file manifest is stored denormalized as a single JSON document.
        var filesJson = JsonSerializer.Serialize(record.Files, _jsonOptions);
        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        var row = await connection.QuerySingleAsync<ExportStateRow>(new CommandDefinition(sql, new
        {
            record.Id,
            record.ExportCursor,
            record.LastFullDigest,
            record.LastDeltaDigest,
            record.BaseExportId,
            record.BaseDigest,
            record.TargetRepository,
            Files = filesJson,
            record.ExporterVersion,
            record.UpdatedAt
        }, cancellationToken: cancellationToken));
        return ToRecord(row);
    }
    // Deserializes the JSON file manifest; a "null" payload degrades to empty.
    private ExportStateRecord ToRecord(ExportStateRow row)
    {
        var files = JsonSerializer.Deserialize<IReadOnlyList<ExportFileRecord>>(row.Files, _jsonOptions) ?? Array.Empty<ExportFileRecord>();
        return new ExportStateRecord(
            row.Id,
            row.ExportCursor,
            row.LastFullDigest,
            row.LastDeltaDigest,
            row.BaseExportId,
            row.BaseDigest,
            row.TargetRepository,
            files,
            row.ExporterVersion,
            row.UpdatedAt);
    }
    /// <summary>Row shape matching the SELECT column order.</summary>
    private sealed record ExportStateRow(
        string Id,
        string ExportCursor,
        string? LastFullDigest,
        string? LastDeltaDigest,
        string? BaseExportId,
        string? BaseDigest,
        string? TargetRepository,
        string Files,
        string ExporterVersion,
        DateTimeOffset UpdatedAt);
}

View File

@@ -0,0 +1,58 @@
using Dapper;
using StellaOps.Concelier.Storage.JpFlags;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Dapper-backed store for JP flags in <c>concelier.jp_flags</c>, keyed by
/// advisory key.
/// </summary>
internal sealed class PostgresJpFlagStore : IJpFlagStore
{
    private readonly ConcelierDataSource _dataSource;

    public PostgresJpFlagStore(ConcelierDataSource dataSource)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
    }

    /// <summary>Inserts or refreshes the JP flag for an advisory key.</summary>
    public async Task UpsertAsync(JpFlagRecord record, CancellationToken cancellationToken)
    {
        const string sql = """
            INSERT INTO concelier.jp_flags (advisory_key, source_name, category, vendor_status, created_at)
            VALUES (@AdvisoryKey, @SourceName, @Category, @VendorStatus, @CreatedAt)
            ON CONFLICT (advisory_key) DO UPDATE
            SET source_name = EXCLUDED.source_name,
                category = EXCLUDED.category,
                vendor_status = EXCLUDED.vendor_status,
                created_at = EXCLUDED.created_at;
            """;

        var args = new
        {
            record.AdvisoryKey,
            record.SourceName,
            record.Category,
            record.VendorStatus,
            record.CreatedAt
        };

        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        await connection.ExecuteAsync(new CommandDefinition(sql, args, cancellationToken: cancellationToken));
    }

    /// <summary>Looks up the JP flag for an advisory key, or null when absent.</summary>
    public async Task<JpFlagRecord?> FindAsync(string advisoryKey, CancellationToken cancellationToken)
    {
        const string sql = """
            SELECT advisory_key, source_name, category, vendor_status, created_at
            FROM concelier.jp_flags
            WHERE advisory_key = @AdvisoryKey
            LIMIT 1;
            """;

        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        var row = await connection.QuerySingleOrDefaultAsync<JpFlagRow>(
            new CommandDefinition(sql, new { AdvisoryKey = advisoryKey }, cancellationToken: cancellationToken));

        return row is null
            ? null
            : new JpFlagRecord(row.AdvisoryKey, row.SourceName, row.Category, row.VendorStatus, row.CreatedAt);
    }

    /// <summary>Row shape matching the SELECT column order.</summary>
    private sealed record JpFlagRow(
        string AdvisoryKey,
        string SourceName,
        string Category,
        string? VendorStatus,
        DateTimeOffset CreatedAt);
}

View File

@@ -0,0 +1,155 @@
// -----------------------------------------------------------------------------
// PostgresProvenanceScopeStore.cs
// Sprint: SPRINT_8200_0015_0001_CONCEL_backport_integration
// Tasks: BACKPORT-8200-014, BACKPORT-8200-015, BACKPORT-8200-016
// Description: PostgreSQL store implementation for provenance scope
// -----------------------------------------------------------------------------
using StellaOps.Concelier.Merge.Backport;
using StellaOps.Concelier.Persistence.Postgres.Models;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL implementation of IProvenanceScopeStore.
/// Bridges the domain ProvenanceScope model to the persistence layer.
/// </summary>
public sealed class PostgresProvenanceScopeStore : IProvenanceScopeStore
{
    private readonly IProvenanceScopeRepository _repository;
    public PostgresProvenanceScopeStore(IProvenanceScopeRepository repository)
    {
        _repository = repository ?? throw new ArgumentNullException(nameof(repository));
    }
    /// <inheritdoc />
    public async Task<ProvenanceScope?> GetByCanonicalAndDistroAsync(
        Guid canonicalId,
        string distroRelease,
        CancellationToken ct = default)
    {
        var entity = await _repository.GetByCanonicalAndDistroAsync(canonicalId, distroRelease, ct)
            .ConfigureAwait(false);
        return entity is null ? null : MapToDomain(entity);
    }
    /// <inheritdoc />
    public async Task<IReadOnlyList<ProvenanceScope>> GetByCanonicalIdAsync(
        Guid canonicalId,
        CancellationToken ct = default)
    {
        var entities = await _repository.GetByCanonicalIdAsync(canonicalId, ct)
            .ConfigureAwait(false);
        return entities.Select(MapToDomain).ToList();
    }
    /// <inheritdoc />
    public async Task<Guid> UpsertAsync(ProvenanceScope scope, CancellationToken ct = default)
    {
        ArgumentNullException.ThrowIfNull(scope);
        var entity = MapToEntity(scope);
        return await _repository.UpsertAsync(entity, ct).ConfigureAwait(false);
    }
    /// <inheritdoc />
    /// <remarks>
    /// Silently no-ops when the scope row does not exist; callers that need a
    /// hard failure must check existence themselves.
    /// </remarks>
    public async Task LinkEvidenceRefAsync(
        Guid provenanceScopeId,
        Guid evidenceRef,
        CancellationToken ct = default)
    {
        var existing = await _repository.GetByIdAsync(provenanceScopeId, ct).ConfigureAwait(false);
        if (existing is null)
        {
            return;
        }
        // Create updated entity with evidence ref; all other columns are
        // copied verbatim and updated_at is stamped client-side.
        var updated = new ProvenanceScopeEntity
        {
            Id = existing.Id,
            CanonicalId = existing.CanonicalId,
            DistroRelease = existing.DistroRelease,
            BackportSemver = existing.BackportSemver,
            PatchId = existing.PatchId,
            PatchOrigin = existing.PatchOrigin,
            EvidenceRef = evidenceRef,
            Confidence = existing.Confidence,
            CreatedAt = existing.CreatedAt,
            UpdatedAt = DateTimeOffset.UtcNow
        };
        await _repository.UpdateAsync(updated, ct).ConfigureAwait(false);
    }
    /// <inheritdoc />
    public Task DeleteByCanonicalIdAsync(Guid canonicalId, CancellationToken ct = default)
    {
        return _repository.DeleteByCanonicalIdAsync(canonicalId, ct);
    }
    #region Mapping
    // Entity (persistence) -> domain model. Confidence is stored as decimal
    // and narrowed to double for the domain.
    private static ProvenanceScope MapToDomain(ProvenanceScopeEntity entity)
    {
        return new ProvenanceScope
        {
            Id = entity.Id,
            CanonicalId = entity.CanonicalId,
            DistroRelease = entity.DistroRelease,
            BackportSemver = entity.BackportSemver,
            PatchId = entity.PatchId,
            PatchOrigin = ParsePatchOrigin(entity.PatchOrigin),
            EvidenceRef = entity.EvidenceRef,
            Confidence = (double)entity.Confidence,
            CreatedAt = entity.CreatedAt,
            UpdatedAt = entity.UpdatedAt
        };
    }
    // Domain model -> entity (persistence).
    private static ProvenanceScopeEntity MapToEntity(ProvenanceScope scope)
    {
        return new ProvenanceScopeEntity
        {
            Id = scope.Id,
            CanonicalId = scope.CanonicalId,
            DistroRelease = scope.DistroRelease,
            BackportSemver = scope.BackportSemver,
            PatchId = scope.PatchId,
            PatchOrigin = MapPatchOriginToString(scope.PatchOrigin),
            EvidenceRef = scope.EvidenceRef,
            Confidence = (decimal)scope.Confidence,
            CreatedAt = scope.CreatedAt,
            UpdatedAt = scope.UpdatedAt
        };
    }
    // Unrecognized origin strings map to null rather than throwing.
    private static Merge.Backport.PatchOrigin? ParsePatchOrigin(string? origin)
    {
        return origin?.ToLowerInvariant() switch
        {
            "upstream" => Merge.Backport.PatchOrigin.Upstream,
            "distro" => Merge.Backport.PatchOrigin.Distro,
            "vendor" => Merge.Backport.PatchOrigin.Vendor,
            _ => null
        };
    }
    // NOTE(review): PatchOrigin.Unknown is persisted as null, so it round-trips
    // back as null (not Unknown) via ParsePatchOrigin — confirm this is intended.
    private static string? MapPatchOriginToString(Merge.Backport.PatchOrigin? origin)
    {
        return origin switch
        {
            Merge.Backport.PatchOrigin.Upstream => "upstream",
            Merge.Backport.PatchOrigin.Distro => "distro",
            Merge.Backport.PatchOrigin.Vendor => "vendor",
            Merge.Backport.PatchOrigin.Unknown => null,
            null => null,
            _ => null
        };
    }
    #endregion
}

View File

@@ -0,0 +1,76 @@
using Dapper;
using StellaOps.Concelier.Storage.PsirtFlags;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// Dapper-backed store for PSIRT flags in <c>concelier.psirt_flags</c>,
/// keyed by (advisory_id, vendor).
/// </summary>
internal sealed class PostgresPsirtFlagStore : IPsirtFlagStore
{
    private readonly ConcelierDataSource _dataSource;

    public PostgresPsirtFlagStore(ConcelierDataSource dataSource)
    {
        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
    }

    /// <summary>Inserts or refreshes the flag for an (advisory, vendor) pair.</summary>
    public async Task UpsertAsync(PsirtFlagRecord flag, CancellationToken cancellationToken)
    {
        const string sql = """
            INSERT INTO concelier.psirt_flags (advisory_id, vendor, source_name, external_id, recorded_at)
            VALUES (@AdvisoryId, @Vendor, @SourceName, @ExternalId, @RecordedAt)
            ON CONFLICT (advisory_id, vendor) DO UPDATE
            SET source_name = EXCLUDED.source_name,
                external_id = EXCLUDED.external_id,
                recorded_at = EXCLUDED.recorded_at;
            """;

        var args = new
        {
            flag.AdvisoryId,
            flag.Vendor,
            flag.SourceName,
            flag.ExternalId,
            flag.RecordedAt
        };

        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        await connection.ExecuteAsync(new CommandDefinition(sql, args, cancellationToken: cancellationToken));
    }

    /// <summary>Returns up to <paramref name="limit"/> flags for an advisory, newest first.</summary>
    public async Task<IReadOnlyList<PsirtFlagRecord>> GetRecentAsync(string advisoryKey, int limit, CancellationToken cancellationToken)
    {
        const string sql = """
            SELECT advisory_id, vendor, source_name, external_id, recorded_at
            FROM concelier.psirt_flags
            WHERE advisory_id = @AdvisoryId
            ORDER BY recorded_at DESC
            LIMIT @Limit;
            """;

        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        var rows = await connection.QueryAsync<PsirtFlagRow>(
            new CommandDefinition(sql, new { AdvisoryId = advisoryKey, Limit = limit }, cancellationToken: cancellationToken));
        return rows.Select(ToRecord).ToArray();
    }

    /// <summary>Returns the most recently recorded flag for an advisory, or null.</summary>
    public async Task<PsirtFlagRecord?> FindAsync(string advisoryKey, CancellationToken cancellationToken)
    {
        const string sql = """
            SELECT advisory_id, vendor, source_name, external_id, recorded_at
            FROM concelier.psirt_flags
            WHERE advisory_id = @AdvisoryId
            ORDER BY recorded_at DESC
            LIMIT 1;
            """;

        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
        var row = await connection.QuerySingleOrDefaultAsync<PsirtFlagRow>(
            new CommandDefinition(sql, new { AdvisoryId = advisoryKey }, cancellationToken: cancellationToken));

        return row is { } found ? ToRecord(found) : null;
    }

    private static PsirtFlagRecord ToRecord(PsirtFlagRow row) =>
        new(row.AdvisoryId, row.Vendor, row.SourceName, row.ExternalId, row.RecordedAt);

    /// <summary>Row shape matching the SELECT column order.</summary>
    private sealed record PsirtFlagRow(
        string AdvisoryId,
        string Vendor,
        string SourceName,
        string? ExternalId,
        DateTimeOffset RecordedAt);
}

View File

@@ -0,0 +1,427 @@
// -----------------------------------------------------------------------------
// ProvenanceScopeRepository.cs
// Sprint: SPRINT_8200_0015_0001_CONCEL_backport_integration
// Task: BACKPORT-8200-003
// Description: PostgreSQL repository for provenance scope operations
// -----------------------------------------------------------------------------
using System.Runtime.CompilerServices;
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for provenance scope operations.
/// </summary>
public sealed class ProvenanceScopeRepository : RepositoryBase<ConcelierDataSource>, IProvenanceScopeRepository
{
private const string SystemTenantId = "_system";
/// <summary>Wires the repository to the Concelier data source and logger.</summary>
public ProvenanceScopeRepository(ConcelierDataSource dataSource, ILogger<ProvenanceScopeRepository> logger)
    : base(dataSource, logger)
{
}
#region CRUD Operations
/// <summary>Fetches a provenance scope row by primary key, or null when absent.</summary>
public Task<ProvenanceScopeEntity?> GetByIdAsync(Guid id, CancellationToken ct = default)
{
    const string sql = """
        SELECT id, canonical_id, distro_release, backport_semver, patch_id,
               patch_origin, evidence_ref, confidence, created_at, updated_at
        FROM vuln.provenance_scope
        WHERE id = @id
        """;
    return QuerySingleOrDefaultAsync(
        SystemTenantId,
        sql,
        cmd => AddParameter(cmd, "id", id),
        MapProvenanceScope,
        ct);
}
/// <summary>
/// Fetches the scope for a (canonical advisory, distro release) pair — the
/// same pair used as the upsert conflict target, so at most one row matches.
/// </summary>
public Task<ProvenanceScopeEntity?> GetByCanonicalAndDistroAsync(
    Guid canonicalId,
    string distroRelease,
    CancellationToken ct = default)
{
    const string sql = """
        SELECT id, canonical_id, distro_release, backport_semver, patch_id,
               patch_origin, evidence_ref, confidence, created_at, updated_at
        FROM vuln.provenance_scope
        WHERE canonical_id = @canonical_id AND distro_release = @distro_release
        """;
    return QuerySingleOrDefaultAsync(
        SystemTenantId,
        sql,
        cmd =>
        {
            AddParameter(cmd, "canonical_id", canonicalId);
            AddParameter(cmd, "distro_release", distroRelease);
        },
        MapProvenanceScope,
        ct);
}
/// <summary>All scopes for a canonical advisory, highest confidence first.</summary>
public Task<IReadOnlyList<ProvenanceScopeEntity>> GetByCanonicalIdAsync(
    Guid canonicalId,
    CancellationToken ct = default)
{
    const string sql = """
        SELECT id, canonical_id, distro_release, backport_semver, patch_id,
               patch_origin, evidence_ref, confidence, created_at, updated_at
        FROM vuln.provenance_scope
        WHERE canonical_id = @canonical_id
        ORDER BY confidence DESC, distro_release
        """;
    return QueryAsync(
        SystemTenantId,
        sql,
        cmd => AddParameter(cmd, "canonical_id", canonicalId),
        MapProvenanceScope,
        ct);
}
/// <summary>All scopes recorded for a distro release, highest confidence then newest first.</summary>
public Task<IReadOnlyList<ProvenanceScopeEntity>> GetByDistroReleaseAsync(
    string distroRelease,
    CancellationToken ct = default)
{
    const string sql = """
        SELECT id, canonical_id, distro_release, backport_semver, patch_id,
               patch_origin, evidence_ref, confidence, created_at, updated_at
        FROM vuln.provenance_scope
        WHERE distro_release = @distro_release
        ORDER BY confidence DESC, updated_at DESC
        """;
    return QueryAsync(
        SystemTenantId,
        sql,
        cmd => AddParameter(cmd, "distro_release", distroRelease),
        MapProvenanceScope,
        ct);
}
/// <summary>All scopes referencing a given patch id, highest confidence then newest first.</summary>
public Task<IReadOnlyList<ProvenanceScopeEntity>> GetByPatchIdAsync(
    string patchId,
    CancellationToken ct = default)
{
    const string sql = """
        SELECT id, canonical_id, distro_release, backport_semver, patch_id,
               patch_origin, evidence_ref, confidence, created_at, updated_at
        FROM vuln.provenance_scope
        WHERE patch_id = @patch_id
        ORDER BY confidence DESC, updated_at DESC
        """;
    return QueryAsync(
        SystemTenantId,
        sql,
        cmd => AddParameter(cmd, "patch_id", patchId),
        MapProvenanceScope,
        ct);
}
/// <summary>
/// Inserts a scope or, on a (canonical_id, distro_release) conflict, updates
/// the existing row's mutable columns. Returns the row's id — on the update
/// path this is the EXISTING row's id, not the freshly generated one.
/// Timestamps are stamped server-side with NOW().
/// </summary>
public async Task<Guid> UpsertAsync(ProvenanceScopeEntity entity, CancellationToken ct = default)
{
    const string sql = """
        INSERT INTO vuln.provenance_scope (
            id, canonical_id, distro_release, backport_semver, patch_id,
            patch_origin, evidence_ref, confidence, created_at, updated_at
        )
        VALUES (
            @id, @canonical_id, @distro_release, @backport_semver, @patch_id,
            @patch_origin, @evidence_ref, @confidence, NOW(), NOW()
        )
        ON CONFLICT (canonical_id, distro_release)
        DO UPDATE SET
            backport_semver = EXCLUDED.backport_semver,
            patch_id = EXCLUDED.patch_id,
            patch_origin = EXCLUDED.patch_origin,
            evidence_ref = EXCLUDED.evidence_ref,
            confidence = EXCLUDED.confidence,
            updated_at = NOW()
        RETURNING id
        """;
    // Callers may pass Guid.Empty to request server-independent id generation.
    var id = entity.Id == Guid.Empty ? Guid.NewGuid() : entity.Id;
    var result = await ExecuteScalarAsync<Guid>(
        SystemTenantId,
        sql,
        cmd =>
        {
            AddParameter(cmd, "id", id);
            AddParameter(cmd, "canonical_id", entity.CanonicalId);
            AddParameter(cmd, "distro_release", entity.DistroRelease);
            AddParameter(cmd, "backport_semver", entity.BackportSemver);
            AddParameter(cmd, "patch_id", entity.PatchId);
            AddParameter(cmd, "patch_origin", entity.PatchOrigin);
            AddParameter(cmd, "evidence_ref", entity.EvidenceRef);
            AddParameter(cmd, "confidence", entity.Confidence);
        },
        ct);
    return result;
}
/// <summary>
/// Updates the mutable columns of an existing scope row by id; updated_at is
/// refreshed server-side. A missing id is a silent no-op (0 rows affected).
/// </summary>
public Task UpdateAsync(ProvenanceScopeEntity entity, CancellationToken ct = default)
{
    const string sql = """
        UPDATE vuln.provenance_scope
        SET backport_semver = @backport_semver,
            patch_id = @patch_id,
            patch_origin = @patch_origin,
            evidence_ref = @evidence_ref,
            confidence = @confidence,
            updated_at = NOW()
        WHERE id = @id
        """;
    return ExecuteAsync(
        SystemTenantId,
        sql,
        cmd =>
        {
            AddParameter(cmd, "id", entity.Id);
            AddParameter(cmd, "backport_semver", entity.BackportSemver);
            AddParameter(cmd, "patch_id", entity.PatchId);
            AddParameter(cmd, "patch_origin", entity.PatchOrigin);
            AddParameter(cmd, "evidence_ref", entity.EvidenceRef);
            AddParameter(cmd, "confidence", entity.Confidence);
        },
        ct);
}
/// <summary>Deletes the provenance scope row with the given id, if it exists.</summary>
public Task DeleteAsync(Guid id, CancellationToken ct = default)
{
    const string query = "DELETE FROM vuln.provenance_scope WHERE id = @id";

    return ExecuteAsync(
        SystemTenantId,
        query,
        command => AddParameter(command, "id", id),
        ct);
}
/// <summary>Deletes every provenance scope row linked to the given canonical advisory.</summary>
public Task DeleteByCanonicalIdAsync(Guid canonicalId, CancellationToken ct = default)
{
    const string query = "DELETE FROM vuln.provenance_scope WHERE canonical_id = @canonical_id";

    return ExecuteAsync(
        SystemTenantId,
        query,
        command => AddParameter(command, "canonical_id", canonicalId),
        ct);
}
#endregion
#region Query Operations
/// <summary>
/// Lists scopes whose confidence meets <paramref name="threshold"/>, highest confidence first.
/// </summary>
/// <param name="threshold">Minimum (inclusive) confidence to include.</param>
/// <param name="limit">Maximum number of rows returned.</param>
/// <param name="ct">Cancellation token.</param>
public Task<IReadOnlyList<ProvenanceScopeEntity>> GetHighConfidenceAsync(
    decimal threshold = 0.7m,
    int limit = 1000,
    CancellationToken ct = default)
{
    const string query = """
        SELECT id, canonical_id, distro_release, backport_semver, patch_id,
               patch_origin, evidence_ref, confidence, created_at, updated_at
        FROM vuln.provenance_scope
        WHERE confidence >= @threshold
        ORDER BY confidence DESC, updated_at DESC
        LIMIT @limit
        """;

    return QueryAsync(
        SystemTenantId,
        query,
        command =>
        {
            AddParameter(command, "threshold", threshold);
            AddParameter(command, "limit", limit);
        },
        MapProvenanceScope,
        ct);
}
/// <summary>
/// Lists scopes updated strictly after <paramref name="since"/>, oldest change first,
/// so callers can page forward through modifications.
/// </summary>
public Task<IReadOnlyList<ProvenanceScopeEntity>> GetUpdatedSinceAsync(
    DateTimeOffset since,
    int limit = 1000,
    CancellationToken ct = default)
{
    const string query = """
        SELECT id, canonical_id, distro_release, backport_semver, patch_id,
               patch_origin, evidence_ref, confidence, created_at, updated_at
        FROM vuln.provenance_scope
        WHERE updated_at > @since
        ORDER BY updated_at ASC
        LIMIT @limit
        """;

    return QueryAsync(
        SystemTenantId,
        query,
        command =>
        {
            AddParameter(command, "since", since);
            AddParameter(command, "limit", limit);
        },
        MapProvenanceScope,
        ct);
}
/// <summary>
/// Lists scopes whose patch_origin exactly matches <paramref name="patchOrigin"/>,
/// highest confidence first.
/// </summary>
public Task<IReadOnlyList<ProvenanceScopeEntity>> GetByPatchOriginAsync(
    string patchOrigin,
    int limit = 1000,
    CancellationToken ct = default)
{
    const string query = """
        SELECT id, canonical_id, distro_release, backport_semver, patch_id,
               patch_origin, evidence_ref, confidence, created_at, updated_at
        FROM vuln.provenance_scope
        WHERE patch_origin = @patch_origin
        ORDER BY confidence DESC, updated_at DESC
        LIMIT @limit
        """;

    return QueryAsync(
        SystemTenantId,
        query,
        command =>
        {
            AddParameter(command, "patch_origin", patchOrigin);
            AddParameter(command, "limit", limit);
        },
        MapProvenanceScope,
        ct);
}
/// <summary>
/// Lists scopes that carry an evidence reference (evidence_ref IS NOT NULL),
/// highest confidence first.
/// </summary>
public Task<IReadOnlyList<ProvenanceScopeEntity>> GetWithEvidenceAsync(
    int limit = 1000,
    CancellationToken ct = default)
{
    const string query = """
        SELECT id, canonical_id, distro_release, backport_semver, patch_id,
               patch_origin, evidence_ref, confidence, created_at, updated_at
        FROM vuln.provenance_scope
        WHERE evidence_ref IS NOT NULL
        ORDER BY confidence DESC, updated_at DESC
        LIMIT @limit
        """;

    return QueryAsync(
        SystemTenantId,
        query,
        command => AddParameter(command, "limit", limit),
        MapProvenanceScope,
        ct);
}
/// <summary>
/// Streams every provenance scope row ordered by (canonical_id, distro_release)
/// without materializing the full result set.
/// </summary>
/// <remarks>
/// Opens a dedicated system connection for the lifetime of the enumeration; the
/// connection, command, and reader are disposed when the enumerator completes or
/// the token is cancelled (await using).
/// </remarks>
public async IAsyncEnumerable<ProvenanceScopeEntity> StreamAllAsync(
    [EnumeratorCancellation] CancellationToken ct = default)
{
    const string sql = """
        SELECT id, canonical_id, distro_release, backport_semver, patch_id,
        patch_origin, evidence_ref, confidence, created_at, updated_at
        FROM vuln.provenance_scope
        ORDER BY canonical_id, distro_release
        """;
    await using var connection = await DataSource.OpenSystemConnectionAsync(ct).ConfigureAwait(false);
    await using var command = CreateCommand(sql, connection);
    await using var reader = await command.ExecuteReaderAsync(ct).ConfigureAwait(false);
    while (await reader.ReadAsync(ct).ConfigureAwait(false))
    {
        // Each row is yielded as soon as it is read; column order matches MapProvenanceScope.
        yield return MapProvenanceScope(reader);
    }
}
#endregion
#region Statistics
/// <summary>
/// Computes aggregate statistics over the whole provenance_scope table in a single scan.
/// </summary>
/// <remarks>
/// NOTE(review): the 0.7 "high confidence" cut-off is hard-coded here and appears to mirror
/// the default threshold of GetHighConfidenceAsync — keep the two in sync if either changes.
/// </remarks>
public async Task<ProvenanceScopeStatistics> GetStatisticsAsync(CancellationToken ct = default)
{
    const string sql = """
        SELECT
        COUNT(*) AS total_scopes,
        COUNT(*) FILTER (WHERE confidence >= 0.7) AS high_confidence_scopes,
        COUNT(*) FILTER (WHERE evidence_ref IS NOT NULL) AS scopes_with_evidence,
        COALESCE(AVG(confidence), 0) AS avg_confidence,
        COUNT(DISTINCT canonical_id) AS unique_canonicals,
        COUNT(DISTINCT distro_release) AS unique_distros,
        MAX(updated_at) AS last_updated_at
        FROM vuln.provenance_scope
        """;
    var result = await QuerySingleOrDefaultAsync(
        SystemTenantId,
        sql,
        _ => { },
        reader => new ProvenanceScopeStatistics
        {
            TotalScopes = reader.GetInt64(0),
            HighConfidenceScopes = reader.GetInt64(1),
            ScopesWithEvidence = reader.GetInt64(2),
            AvgConfidence = reader.GetDecimal(3),
            UniqueCanonicals = reader.GetInt64(4),
            UniqueDistros = reader.GetInt64(5),
            // MAX over zero rows is SQL NULL; surface that as "never updated".
            LastUpdatedAt = reader.IsDBNull(6) ? null : reader.GetFieldValue<DateTimeOffset>(6)
        },
        ct);
    // An aggregate query always yields one row; the fallback is purely defensive.
    return result ?? new ProvenanceScopeStatistics();
}
/// <summary>
/// Returns the number of provenance scope rows per distro release.
/// </summary>
public async Task<IReadOnlyDictionary<string, long>> CountByDistroAsync(CancellationToken ct = default)
{
    const string query = """
        SELECT distro_release, COUNT(*) AS count
        FROM vuln.provenance_scope
        GROUP BY distro_release
        ORDER BY count DESC
        """;

    var rows = await QueryAsync(
        SystemTenantId,
        query,
        _ => { },
        reader => new KeyValuePair<string, long>(
            reader.GetString(0),
            reader.GetInt64(1)),
        ct);

    // GROUP BY guarantees unique keys, so a plain Add never throws.
    var counts = new Dictionary<string, long>(rows.Count);
    foreach (var (distro, count) in rows)
    {
        counts.Add(distro, count);
    }

    return counts;
}
#endregion
#region Mapping
/// <summary>
/// Maps one result row to a <see cref="ProvenanceScopeEntity"/>.
/// Ordinal positions must match the SELECT lists used throughout this repository:
/// id, canonical_id, distro_release, backport_semver, patch_id,
/// patch_origin, evidence_ref, confidence, created_at, updated_at.
/// </summary>
private static ProvenanceScopeEntity MapProvenanceScope(NpgsqlDataReader reader)
{
    return new ProvenanceScopeEntity
    {
        Id = reader.GetGuid(0),
        CanonicalId = reader.GetGuid(1),
        DistroRelease = reader.GetString(2),
        // Columns 3-6 are nullable in the schema per the IsDBNull guards below.
        BackportSemver = reader.IsDBNull(3) ? null : reader.GetString(3),
        PatchId = reader.IsDBNull(4) ? null : reader.GetString(4),
        PatchOrigin = reader.IsDBNull(5) ? null : reader.GetString(5),
        EvidenceRef = reader.IsDBNull(6) ? null : reader.GetGuid(6),
        Confidence = reader.GetDecimal(7),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(8),
        UpdatedAt = reader.GetFieldValue<DateTimeOffset>(9)
    };
}
#endregion
}

View File

@@ -0,0 +1,510 @@
// -----------------------------------------------------------------------------
// SbomRegistryRepository.cs
// Sprint: SPRINT_8200_0013_0003_SCAN_sbom_intersection_scoring
// Task: SBOM-8200-003
// Description: PostgreSQL repository for SBOM registry persistence
// -----------------------------------------------------------------------------
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.SbomIntegration;
using StellaOps.Concelier.SbomIntegration.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for SBOM registry persistence.
/// Registrations live in <c>vuln.sbom_registry</c>; advisory matches live in
/// <c>vuln.sbom_canonical_match</c>.
/// </summary>
public sealed class SbomRegistryRepository : RepositoryBase<ConcelierDataSource>, ISbomRegistryRepository
{
    // All statements run under the system tenant; rows are not tenant-scoped at the SQL level.
    private const string SystemTenantId = "_system";

    public SbomRegistryRepository(ConcelierDataSource dataSource, ILogger<SbomRegistryRepository> logger)
        : base(dataSource, logger)
    {
    }

    #region Registration CRUD

    /// <inheritdoc />
    /// <remarks>
    /// Upserts by digest. RETURNING id yields the id of the row that actually exists after
    /// the statement: on a digest conflict the pre-existing row keeps its original id, which
    /// may differ from <paramref name="registration"/>.Id. The returned id is used for the
    /// follow-up PURL update — previously the caller-supplied id was used, so the update
    /// silently matched zero rows whenever the digest already existed.
    /// </remarks>
    public async Task SaveAsync(SbomRegistration registration, CancellationToken cancellationToken = default)
    {
        const string sql = """
            INSERT INTO vuln.sbom_registry
                (id, digest, format, spec_version, primary_name, primary_version,
                 component_count, affected_count, source, tenant_id, registered_at, last_matched_at)
            VALUES
                (@id, @digest, @format, @spec_version, @primary_name, @primary_version,
                 @component_count, @affected_count, @source, @tenant_id, @registered_at, @last_matched_at)
            ON CONFLICT (digest)
            DO UPDATE SET
                primary_name = EXCLUDED.primary_name,
                primary_version = EXCLUDED.primary_version,
                component_count = EXCLUDED.component_count,
                source = EXCLUDED.source
            RETURNING id
            """;
        var storedId = await ExecuteScalarAsync<Guid>(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "id", registration.Id);
                AddParameter(cmd, "digest", registration.Digest);
                AddParameter(cmd, "format", registration.Format.ToString().ToLowerInvariant());
                AddParameter(cmd, "spec_version", registration.SpecVersion);
                AddParameter(cmd, "primary_name", registration.PrimaryName ?? (object)DBNull.Value);
                AddParameter(cmd, "primary_version", registration.PrimaryVersion ?? (object)DBNull.Value);
                AddParameter(cmd, "component_count", registration.ComponentCount);
                AddParameter(cmd, "affected_count", registration.AffectedCount);
                AddParameter(cmd, "source", registration.Source);
                AddParameter(cmd, "tenant_id", registration.TenantId ?? (object)DBNull.Value);
                AddParameter(cmd, "registered_at", registration.RegisteredAt);
                AddParameter(cmd, "last_matched_at", registration.LastMatchedAt ?? (object)DBNull.Value);
            },
            cancellationToken).ConfigureAwait(false);

        // Save PURLs in a separate table or as JSONB - using JSONB for simplicity
        await SavePurlsAsync(storedId, registration.Purls, cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public Task<SbomRegistration?> GetByDigestAsync(string digest, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, digest, format, spec_version, primary_name, primary_version,
                   component_count, affected_count, source, tenant_id, registered_at, last_matched_at
            FROM vuln.sbom_registry
            WHERE digest = @digest
            """;
        return QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "digest", digest),
            MapRegistration,
            cancellationToken);
    }

    /// <inheritdoc />
    public Task<SbomRegistration?> GetByIdAsync(Guid id, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, digest, format, spec_version, primary_name, primary_version,
                   component_count, affected_count, source, tenant_id, registered_at, last_matched_at
            FROM vuln.sbom_registry
            WHERE id = @id
            """;
        return QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "id", id),
            MapRegistration,
            cancellationToken);
    }

    /// <inheritdoc />
    /// <remarks>Pages newest-first; the optional tenant filter is parameterized.</remarks>
    public Task<IReadOnlyList<SbomRegistration>> ListAsync(
        int offset,
        int limit,
        string? tenantId = null,
        CancellationToken cancellationToken = default)
    {
        var sql = """
            SELECT id, digest, format, spec_version, primary_name, primary_version,
                   component_count, affected_count, source, tenant_id, registered_at, last_matched_at
            FROM vuln.sbom_registry
            """;
        if (tenantId is not null)
        {
            sql += " WHERE tenant_id = @tenant_id";
        }
        sql += " ORDER BY registered_at DESC OFFSET @offset LIMIT @limit";
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                if (tenantId is not null)
                {
                    AddParameter(cmd, "tenant_id", tenantId);
                }
                AddParameter(cmd, "offset", offset);
                AddParameter(cmd, "limit", limit);
            },
            MapRegistration,
            cancellationToken);
    }

    /// <inheritdoc />
    public Task DeleteAsync(string digest, CancellationToken cancellationToken = default)
    {
        const string sql = "DELETE FROM vuln.sbom_registry WHERE digest = @digest";
        return ExecuteAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "digest", digest),
            cancellationToken);
    }

    /// <inheritdoc />
    public async Task<long> CountAsync(string? tenantId = null, CancellationToken cancellationToken = default)
    {
        var sql = "SELECT COUNT(*) FROM vuln.sbom_registry";
        if (tenantId is not null)
        {
            sql += " WHERE tenant_id = @tenant_id";
        }
        var count = await ExecuteScalarAsync<long>(
            SystemTenantId,
            sql,
            tenantId is not null ? cmd => AddParameter(cmd, "tenant_id", tenantId) : null,
            cancellationToken).ConfigureAwait(false);
        return count;
    }

    #endregion

    #region Match CRUD

    /// <inheritdoc />
    /// <remarks>
    /// Upserts all matches inside one transaction so a partial batch is never visible;
    /// the transaction is rolled back on dispose if an exception escapes before Commit.
    /// </remarks>
    public async Task SaveMatchesAsync(
        Guid sbomId,
        IEnumerable<SbomAdvisoryMatch> matches,
        CancellationToken cancellationToken = default)
    {
        var matchList = matches.ToList();
        if (matchList.Count == 0)
        {
            return;
        }
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
        await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);
        const string sql = """
            INSERT INTO vuln.sbom_canonical_match
                (id, sbom_id, canonical_id, purl, match_method, confidence, is_reachable, is_deployed, matched_at)
            VALUES
                (@id, @sbom_id, @canonical_id, @purl, @match_method, @confidence, @is_reachable, @is_deployed, @matched_at)
            ON CONFLICT (sbom_id, canonical_id, purl)
            DO UPDATE SET
                is_reachable = EXCLUDED.is_reachable,
                is_deployed = EXCLUDED.is_deployed,
                matched_at = EXCLUDED.matched_at
            """;
        foreach (var match in matchList)
        {
            await using var cmd = CreateCommand(sql, connection);
            cmd.Transaction = transaction;
            AddParameter(cmd, "id", match.Id == Guid.Empty ? Guid.NewGuid() : match.Id);
            AddParameter(cmd, "sbom_id", sbomId);
            AddParameter(cmd, "canonical_id", match.CanonicalId);
            AddParameter(cmd, "purl", match.Purl);
            AddParameter(cmd, "match_method", MapMethodToString(match.Method));
            AddParameter(cmd, "confidence", match.Confidence);
            AddParameter(cmd, "is_reachable", match.IsReachable);
            AddParameter(cmd, "is_deployed", match.IsDeployed);
            // Default(DateTimeOffset) means "not set" — stamp with the current time.
            AddParameter(cmd, "matched_at", match.MatchedAt == default ? DateTimeOffset.UtcNow : match.MatchedAt);
            await cmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }
        await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
        Logger.LogDebug("Saved {Count} SBOM matches for SBOM {SbomId}", matchList.Count, sbomId);
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<SbomAdvisoryMatch>> GetMatchesAsync(
        string digest,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT m.id, m.sbom_id, r.digest, m.canonical_id, m.purl,
                   m.is_reachable, m.is_deployed, m.confidence, m.match_method, m.matched_at
            FROM vuln.sbom_canonical_match m
            JOIN vuln.sbom_registry r ON r.id = m.sbom_id
            WHERE r.digest = @digest
            ORDER BY m.matched_at DESC
            """;
        return await QueryAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "digest", digest),
            MapMatch,
            cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<SbomAdvisoryMatch>> GetMatchesByCanonicalAsync(
        Guid canonicalId,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT m.id, m.sbom_id, r.digest, m.canonical_id, m.purl,
                   m.is_reachable, m.is_deployed, m.confidence, m.match_method, m.matched_at
            FROM vuln.sbom_canonical_match m
            JOIN vuln.sbom_registry r ON r.id = m.sbom_id
            WHERE m.canonical_id = @canonical_id
            ORDER BY m.matched_at DESC
            """;
        return await QueryAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "canonical_id", canonicalId),
            MapMatch,
            cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public Task DeleteMatchesAsync(Guid sbomId, CancellationToken cancellationToken = default)
    {
        const string sql = "DELETE FROM vuln.sbom_canonical_match WHERE sbom_id = @sbom_id";
        return ExecuteAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "sbom_id", sbomId),
            cancellationToken);
    }

    #endregion

    #region Statistics

    /// <inheritdoc />
    public async Task<SbomRegistryStats> GetStatsAsync(
        string? tenantId = null,
        CancellationToken cancellationToken = default)
    {
        var sql = """
            SELECT
                COUNT(DISTINCT r.id) AS total_sboms,
                COALESCE(SUM(r.component_count), 0) AS total_purls,
                COUNT(DISTINCT m.id) AS total_matches,
                COUNT(DISTINCT CASE WHEN r.affected_count > 0 THEN r.id END) AS affected_sboms
            FROM vuln.sbom_registry r
            LEFT JOIN vuln.sbom_canonical_match m ON m.sbom_id = r.id
            """;
        if (tenantId is not null)
        {
            sql += " WHERE r.tenant_id = @tenant_id";
        }
        var result = await QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            tenantId is not null ? cmd => AddParameter(cmd, "tenant_id", tenantId) : null,
            reader =>
            {
                var totalSboms = reader.GetInt64(0);
                var totalMatches = reader.GetInt64(2);
                return new SbomRegistryStats
                {
                    TotalSboms = totalSboms,
                    TotalPurls = reader.GetInt64(1),
                    TotalMatches = totalMatches,
                    AffectedSboms = reader.GetInt64(3),
                    AverageMatchesPerSbom = totalSboms > 0 ? (double)totalMatches / totalSboms : 0
                };
            },
            cancellationToken).ConfigureAwait(false);
        return result ?? new SbomRegistryStats();
    }

    /// <inheritdoc />
    public Task UpdateAffectedCountAsync(
        string digest,
        int affectedCount,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            UPDATE vuln.sbom_registry
            SET affected_count = @affected_count
            WHERE digest = @digest
            """;
        return ExecuteAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "digest", digest);
                AddParameter(cmd, "affected_count", affectedCount);
            },
            cancellationToken);
    }

    /// <inheritdoc />
    public Task UpdateLastMatchedAsync(
        string digest,
        DateTimeOffset lastMatched,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            UPDATE vuln.sbom_registry
            SET last_matched_at = @last_matched_at
            WHERE digest = @digest
            """;
        return ExecuteAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "digest", digest);
                AddParameter(cmd, "last_matched_at", lastMatched);
            },
            cancellationToken);
    }

    /// <inheritdoc />
    /// <remarks>
    /// Only updates component_count from the PURL list; the PURLs themselves are not
    /// persisted (see <see cref="SavePurlsAsync"/>). No-op when the digest is unknown.
    /// </remarks>
    public async Task UpdatePurlsAsync(
        string digest,
        IReadOnlyList<string> purls,
        CancellationToken cancellationToken = default)
    {
        // First get the SBOM registration to get the ID
        var registration = await GetByDigestAsync(digest, cancellationToken).ConfigureAwait(false);
        if (registration is null)
        {
            return;
        }
        // Update component count based on purls count
        const string sql = """
            UPDATE vuln.sbom_registry
            SET component_count = @component_count
            WHERE digest = @digest
            """;
        await ExecuteAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "digest", digest);
                AddParameter(cmd, "component_count", purls.Count);
            },
            cancellationToken).ConfigureAwait(false);
    }

    #endregion

    #region Private Helpers

    /// <summary>
    /// Records the PURL count on the registry row. The PURLs themselves are not yet
    /// persisted — a dedicated sbom_purls table or JSONB column would be needed for that.
    /// </summary>
    private async Task SavePurlsAsync(Guid sbomId, IReadOnlyList<string> purls, CancellationToken cancellationToken)
    {
        const string sql = """
            UPDATE vuln.sbom_registry
            SET component_count = @component_count
            WHERE id = @id
            """;
        await ExecuteAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "id", sbomId);
                AddParameter(cmd, "component_count", purls.Count);
            },
            cancellationToken).ConfigureAwait(false);
    }

    /// <summary>
    /// Placeholder for loading PURLs from storage. PURLs are currently supplied by callers
    /// and not persisted, so this always yields an empty list.
    /// (Non-async: the original was an async method with no await, which triggers CS1998.)
    /// </summary>
    private static Task<IReadOnlyList<string>> LoadPurlsAsync(Guid sbomId, CancellationToken cancellationToken)
        => Task.FromResult<IReadOnlyList<string>>([]);

    /// <summary>
    /// Maps one registry row; ordinals match the shared SELECT list. Unknown format
    /// strings fall back to SPDX.
    /// </summary>
    private static SbomRegistration MapRegistration(NpgsqlDataReader reader)
    {
        var formatStr = reader.GetString(2);
        var format = formatStr.Equals("cyclonedx", StringComparison.OrdinalIgnoreCase)
            ? SbomFormat.CycloneDX
            : SbomFormat.SPDX;
        return new SbomRegistration
        {
            Id = reader.GetGuid(0),
            Digest = reader.GetString(1),
            Format = format,
            SpecVersion = reader.GetString(3),
            PrimaryName = GetNullableString(reader, 4),
            PrimaryVersion = GetNullableString(reader, 5),
            ComponentCount = reader.GetInt32(6),
            AffectedCount = reader.GetInt32(7),
            Source = reader.GetString(8),
            TenantId = GetNullableString(reader, 9),
            RegisteredAt = reader.GetFieldValue<DateTimeOffset>(10),
            LastMatchedAt = reader.IsDBNull(11) ? null : reader.GetFieldValue<DateTimeOffset>(11),
            Purls = [] // PURLs would be loaded separately if needed
        };
    }

    /// <summary>
    /// Maps one match row; unknown match_method strings fall back to ExactPurl
    /// (inverse of <see cref="MapMethodToString"/>).
    /// </summary>
    private static SbomAdvisoryMatch MapMatch(NpgsqlDataReader reader)
    {
        var methodStr = reader.GetString(8);
        var method = methodStr switch
        {
            "exact_purl" => MatchMethod.ExactPurl,
            "purl_version_range" => MatchMethod.PurlVersionRange,
            "cpe" => MatchMethod.Cpe,
            "name_version" => MatchMethod.NameVersion,
            _ => MatchMethod.ExactPurl
        };
        return new SbomAdvisoryMatch
        {
            Id = reader.GetGuid(0),
            SbomId = reader.GetGuid(1),
            SbomDigest = reader.GetString(2),
            CanonicalId = reader.GetGuid(3),
            Purl = reader.GetString(4),
            IsReachable = reader.GetBoolean(5),
            IsDeployed = reader.GetBoolean(6),
            Confidence = reader.GetDouble(7),
            Method = method,
            MatchedAt = reader.GetFieldValue<DateTimeOffset>(9)
        };
    }

    /// <summary>Serializes a <see cref="MatchMethod"/> to its DB string form.</summary>
    private static string MapMethodToString(MatchMethod method)
    {
        return method switch
        {
            MatchMethod.ExactPurl => "exact_purl",
            MatchMethod.PurlVersionRange => "purl_version_range",
            MatchMethod.Cpe => "cpe",
            MatchMethod.NameVersion => "name_version",
            _ => "exact_purl"
        };
    }

    #endregion
}

View File

@@ -0,0 +1,136 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for feed sources (<c>vuln.sources</c>).
/// </summary>
public sealed class SourceRepository : RepositoryBase<ConcelierDataSource>, ISourceRepository
{
    // All statements run under the system tenant; sources are global, not tenant-scoped.
    private const string SystemTenantId = "_system";

    public SourceRepository(ConcelierDataSource dataSource, ILogger<SourceRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <summary>
    /// Inserts a source or, when the unique key already exists, updates every mutable
    /// column and bumps updated_at. Returns the row as stored.
    /// </summary>
    /// <exception cref="InvalidOperationException">If the RETURNING row is missing (should not happen).</exception>
    public async Task<SourceEntity> UpsertAsync(SourceEntity source, CancellationToken cancellationToken = default)
    {
        const string sql = """
            INSERT INTO vuln.sources
                (id, key, name, source_type, url, priority, enabled, config, metadata)
            VALUES
                (@id, @key, @name, @source_type, @url, @priority, @enabled, @config::jsonb, @metadata::jsonb)
            ON CONFLICT (key) DO UPDATE SET
                name = EXCLUDED.name,
                source_type = EXCLUDED.source_type,
                url = EXCLUDED.url,
                priority = EXCLUDED.priority,
                enabled = EXCLUDED.enabled,
                config = EXCLUDED.config,
                metadata = EXCLUDED.metadata,
                updated_at = NOW()
            RETURNING id, key, name, source_type, url, priority, enabled,
                      config::text, metadata::text, created_at, updated_at
            """;
        return await QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "id", source.Id);
                AddParameter(cmd, "key", source.Key);
                AddParameter(cmd, "name", source.Name);
                AddParameter(cmd, "source_type", source.SourceType);
                AddParameter(cmd, "url", source.Url);
                AddParameter(cmd, "priority", source.Priority);
                AddParameter(cmd, "enabled", source.Enabled);
                AddJsonbParameter(cmd, "config", source.Config);
                AddJsonbParameter(cmd, "metadata", source.Metadata);
            },
            MapSource!,
            cancellationToken).ConfigureAwait(false) ?? throw new InvalidOperationException("Upsert returned null");
    }

    /// <summary>Fetches a source by primary key, or null when absent.</summary>
    public Task<SourceEntity?> GetByIdAsync(Guid id, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, key, name, source_type, url, priority, enabled,
                   config::text, metadata::text, created_at, updated_at
            FROM vuln.sources
            WHERE id = @id
            """;
        return QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "id", id),
            MapSource,
            cancellationToken);
    }

    /// <summary>Fetches a source by its unique key, or null when absent.</summary>
    public Task<SourceEntity?> GetByKeyAsync(string key, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, key, name, source_type, url, priority, enabled,
                   config::text, metadata::text, created_at, updated_at
            FROM vuln.sources
            WHERE key = @key
            """;
        return QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "key", key),
            MapSource,
            cancellationToken);
    }

    /// <summary>
    /// Lists sources ordered by priority (descending) then key; optionally filtered
    /// by enabled flag.
    /// </summary>
    public Task<IReadOnlyList<SourceEntity>> ListAsync(bool? enabled = null, CancellationToken cancellationToken = default)
    {
        var sql = """
            SELECT id, key, name, source_type, url, priority, enabled,
                   config::text, metadata::text, created_at, updated_at
            FROM vuln.sources
            """;
        if (enabled.HasValue)
        {
            sql += " WHERE enabled = @enabled";
        }
        sql += " ORDER BY priority DESC, key";
        return QueryAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                if (enabled.HasValue)
                {
                    AddParameter(cmd, "enabled", enabled.Value);
                }
            },
            MapSource,
            cancellationToken);
    }

    /// <summary>Maps one row; ordinals match the shared SELECT/RETURNING list above.</summary>
    private static SourceEntity MapSource(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        Key = reader.GetString(1),
        Name = reader.GetString(2),
        SourceType = reader.GetString(3),
        Url = GetNullableString(reader, 4),
        Priority = reader.GetInt32(5),
        Enabled = reader.GetBoolean(6),
        Config = reader.GetString(7),
        Metadata = reader.GetString(8),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(9),
        UpdatedAt = reader.GetFieldValue<DateTimeOffset>(10)
    };
}

View File

@@ -0,0 +1,92 @@
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for source ingestion state (<c>vuln.source_states</c>),
/// one row per source tracking cursor position and sync/error bookkeeping.
/// </summary>
public sealed class SourceStateRepository : RepositoryBase<ConcelierDataSource>, ISourceStateRepository
{
    // All statements run under the system tenant; state rows are keyed by source, not tenant.
    private const string SystemTenantId = "_system";
    public SourceStateRepository(ConcelierDataSource dataSource, ILogger<SourceStateRepository> logger)
        : base(dataSource, logger)
    {
    }
    /// <summary>
    /// Inserts the state row for a source or, when one already exists, replaces all
    /// tracked fields and bumps updated_at. Returns the row as stored.
    /// </summary>
    /// <exception cref="InvalidOperationException">If the RETURNING row is missing (should not happen).</exception>
    public async Task<SourceStateEntity> UpsertAsync(SourceStateEntity state, CancellationToken cancellationToken = default)
    {
        const string sql = """
            INSERT INTO vuln.source_states
            (id, source_id, cursor, last_sync_at, last_success_at, last_error,
            sync_count, error_count, metadata)
            VALUES
            (@id, @source_id, @cursor, @last_sync_at, @last_success_at, @last_error,
            @sync_count, @error_count, @metadata::jsonb)
            ON CONFLICT (source_id) DO UPDATE SET
            cursor = EXCLUDED.cursor,
            last_sync_at = EXCLUDED.last_sync_at,
            last_success_at = EXCLUDED.last_success_at,
            last_error = EXCLUDED.last_error,
            sync_count = EXCLUDED.sync_count,
            error_count = EXCLUDED.error_count,
            metadata = EXCLUDED.metadata,
            updated_at = NOW()
            RETURNING id, source_id, cursor, last_sync_at, last_success_at, last_error,
            sync_count, error_count, metadata::text, updated_at
            """;
        return await QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "id", state.Id);
                AddParameter(cmd, "source_id", state.SourceId);
                AddParameter(cmd, "cursor", state.Cursor);
                AddParameter(cmd, "last_sync_at", state.LastSyncAt);
                AddParameter(cmd, "last_success_at", state.LastSuccessAt);
                AddParameter(cmd, "last_error", state.LastError);
                AddParameter(cmd, "sync_count", state.SyncCount);
                AddParameter(cmd, "error_count", state.ErrorCount);
                AddJsonbParameter(cmd, "metadata", state.Metadata);
            },
            MapState!,
            cancellationToken).ConfigureAwait(false) ?? throw new InvalidOperationException("Upsert returned null");
    }
    /// <summary>Fetches the state row for a source, or null when the source has never synced.</summary>
    public Task<SourceStateEntity?> GetBySourceIdAsync(Guid sourceId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT id, source_id, cursor, last_sync_at, last_success_at, last_error,
            sync_count, error_count, metadata::text, updated_at
            FROM vuln.source_states
            WHERE source_id = @source_id
            """;
        return QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            cmd => AddParameter(cmd, "source_id", sourceId),
            MapState,
            cancellationToken);
    }
    /// <summary>Maps one row; ordinals match the shared SELECT/RETURNING list above.</summary>
    private static SourceStateEntity MapState(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        SourceId = reader.GetGuid(1),
        Cursor = GetNullableString(reader, 2),
        LastSyncAt = GetNullableDateTimeOffset(reader, 3),
        LastSuccessAt = GetNullableDateTimeOffset(reader, 4),
        LastError = GetNullableString(reader, 5),
        SyncCount = reader.GetInt64(6),
        ErrorCount = reader.GetInt32(7),
        Metadata = reader.GetString(8),
        UpdatedAt = reader.GetFieldValue<DateTimeOffset>(9)
    };
}

View File

@@ -0,0 +1,376 @@
// -----------------------------------------------------------------------------
// SyncLedgerRepository.cs
// Sprint: SPRINT_8200_0014_0001_DB_sync_ledger_schema
// Task: SYNC-8200-007
// Description: PostgreSQL repository for federation sync ledger operations
// -----------------------------------------------------------------------------
using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Infrastructure.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Repositories;
/// <summary>
/// PostgreSQL repository for federation sync ledger and site policy operations.
/// </summary>
public sealed class SyncLedgerRepository : RepositoryBase<ConcelierDataSource>, ISyncLedgerRepository
{
private const string SystemTenantId = "_system";
public SyncLedgerRepository(ConcelierDataSource dataSource, ILogger<SyncLedgerRepository> logger)
: base(dataSource, logger)
{
}
#region Ledger Operations
/// <summary>
/// Returns the most recently signed ledger entry for a site, or null when the site
/// has no entries.
/// </summary>
public Task<SyncLedgerEntity?> GetLatestAsync(string siteId, CancellationToken ct = default)
{
    const string query = """
        SELECT id, site_id, cursor, bundle_hash, items_count, signed_at, imported_at
        FROM vuln.sync_ledger
        WHERE site_id = @site_id
        ORDER BY signed_at DESC
        LIMIT 1
        """;

    return QuerySingleOrDefaultAsync(
        SystemTenantId,
        query,
        command => AddParameter(command, "site_id", siteId),
        MapLedgerEntry,
        ct);
}
/// <summary>
/// Returns up to <paramref name="limit"/> ledger entries for a site, newest signature first.
/// </summary>
public Task<IReadOnlyList<SyncLedgerEntity>> GetHistoryAsync(string siteId, int limit = 10, CancellationToken ct = default)
{
    const string query = """
        SELECT id, site_id, cursor, bundle_hash, items_count, signed_at, imported_at
        FROM vuln.sync_ledger
        WHERE site_id = @site_id
        ORDER BY signed_at DESC
        LIMIT @limit
        """;

    return QueryAsync(
        SystemTenantId,
        query,
        command =>
        {
            AddParameter(command, "site_id", siteId);
            AddParameter(command, "limit", limit);
        },
        MapLedgerEntry,
        ct);
}
/// <summary>
/// Looks up a ledger entry by its bundle hash, or null when no bundle with that hash
/// has been imported.
/// </summary>
public Task<SyncLedgerEntity?> GetByBundleHashAsync(string bundleHash, CancellationToken ct = default)
{
    const string query = """
        SELECT id, site_id, cursor, bundle_hash, items_count, signed_at, imported_at
        FROM vuln.sync_ledger
        WHERE bundle_hash = @bundle_hash
        """;

    return QuerySingleOrDefaultAsync(
        SystemTenantId,
        query,
        command => AddParameter(command, "bundle_hash", bundleHash),
        MapLedgerEntry,
        ct);
}
/// <summary>
/// Appends a new ledger entry. Generates an id when the entry carries none and stamps
/// imported_at with the current time when unset.
/// </summary>
/// <returns>The id of the inserted row.</returns>
public async Task<Guid> InsertAsync(SyncLedgerEntity entry, CancellationToken ct = default)
{
    const string query = """
        INSERT INTO vuln.sync_ledger
        (id, site_id, cursor, bundle_hash, items_count, signed_at, imported_at)
        VALUES
        (@id, @site_id, @cursor, @bundle_hash, @items_count, @signed_at, @imported_at)
        RETURNING id
        """;

    var entryId = entry.Id == Guid.Empty ? Guid.NewGuid() : entry.Id;
    var importedAt = entry.ImportedAt == default ? DateTimeOffset.UtcNow : entry.ImportedAt;

    await ExecuteAsync(
        SystemTenantId,
        query,
        command =>
        {
            AddParameter(command, "id", entryId);
            AddParameter(command, "site_id", entry.SiteId);
            AddParameter(command, "cursor", entry.Cursor);
            AddParameter(command, "bundle_hash", entry.BundleHash);
            AddParameter(command, "items_count", entry.ItemsCount);
            AddParameter(command, "signed_at", entry.SignedAt);
            AddParameter(command, "imported_at", importedAt);
        },
        ct).ConfigureAwait(false);

    return entryId;
}
#endregion
#region Cursor Operations
/// <summary>
/// Returns the cursor of the most recently signed ledger entry for a site,
/// or null when the site has never synced.
/// </summary>
public async Task<string?> GetCursorAsync(string siteId, CancellationToken ct = default)
{
    const string query = """
        SELECT cursor
        FROM vuln.sync_ledger
        WHERE site_id = @site_id
        ORDER BY signed_at DESC
        LIMIT 1
        """;

    var cursor = await ExecuteScalarAsync<string>(
        SystemTenantId,
        query,
        command => AddParameter(command, "site_id", siteId),
        ct).ConfigureAwait(false);

    return cursor;
}
/// <summary>
/// Advances a site's cursor by appending a fresh ledger entry recording the imported bundle.
/// </summary>
public async Task AdvanceCursorAsync(
    string siteId,
    string newCursor,
    string bundleHash,
    int itemsCount,
    DateTimeOffset signedAt,
    CancellationToken ct = default)
{
    await InsertAsync(
        new SyncLedgerEntity
        {
            Id = Guid.NewGuid(),
            SiteId = siteId,
            Cursor = newCursor,
            BundleHash = bundleHash,
            ItemsCount = itemsCount,
            SignedAt = signedAt,
            ImportedAt = DateTimeOffset.UtcNow
        },
        ct).ConfigureAwait(false);
}
/// <summary>
/// Determines whether importing a bundle at <paramref name="cursor"/> would conflict
/// with the site's current cursor position.
/// </summary>
/// <remarks>
/// NOTE(review): relies on <c>CursorFormat.IsAfter</c> (defined elsewhere) for ordering;
/// as written, a cursor equal to the current one also counts as a conflict — confirm
/// that is the intended semantics for idempotent re-imports.
/// </remarks>
public async Task<bool> IsCursorConflictAsync(string siteId, string cursor, CancellationToken ct = default)
{
    var currentCursor = await GetCursorAsync(siteId, ct).ConfigureAwait(false);
    if (currentCursor is null)
    {
        // No existing cursor, no conflict
        return false;
    }
    // Compare cursors - the new cursor should be newer than the current
    return !CursorFormat.IsAfter(cursor, currentCursor);
}
#endregion
#region Site Policy Operations
/// <summary>
/// Fetches the federation policy for a site, or null when none has been configured.
/// </summary>
public Task<SitePolicyEntity?> GetPolicyAsync(string siteId, CancellationToken ct = default)
{
    const string query = """
        SELECT id, site_id, display_name, allowed_sources, denied_sources,
        max_bundle_size_mb, max_items_per_bundle, require_signature,
        allowed_signers, enabled, created_at, updated_at
        FROM vuln.site_policy
        WHERE site_id = @site_id
        """;

    return QuerySingleOrDefaultAsync(
        SystemTenantId,
        query,
        command => AddParameter(command, "site_id", siteId),
        MapPolicy,
        ct);
}
/// <summary>
/// Creates the policy for a site or replaces every configurable field of an existing
/// one (keyed by site_id), bumping updated_at.
/// </summary>
public async Task UpsertPolicyAsync(SitePolicyEntity policy, CancellationToken ct = default)
{
    const string query = """
        INSERT INTO vuln.site_policy
        (id, site_id, display_name, allowed_sources, denied_sources,
        max_bundle_size_mb, max_items_per_bundle, require_signature,
        allowed_signers, enabled)
        VALUES
        (@id, @site_id, @display_name, @allowed_sources, @denied_sources,
        @max_bundle_size_mb, @max_items_per_bundle, @require_signature,
        @allowed_signers, @enabled)
        ON CONFLICT (site_id) DO UPDATE SET
        display_name = EXCLUDED.display_name,
        allowed_sources = EXCLUDED.allowed_sources,
        denied_sources = EXCLUDED.denied_sources,
        max_bundle_size_mb = EXCLUDED.max_bundle_size_mb,
        max_items_per_bundle = EXCLUDED.max_items_per_bundle,
        require_signature = EXCLUDED.require_signature,
        allowed_signers = EXCLUDED.allowed_signers,
        enabled = EXCLUDED.enabled,
        updated_at = NOW()
        """;

    // Mint an id only for brand-new policies; conflicts keep the stored row's id anyway.
    var policyId = policy.Id == Guid.Empty ? Guid.NewGuid() : policy.Id;

    await ExecuteAsync(
        SystemTenantId,
        query,
        command =>
        {
            AddParameter(command, "id", policyId);
            AddParameter(command, "site_id", policy.SiteId);
            AddParameter(command, "display_name", policy.DisplayName);
            AddTextArrayParameter(command, "allowed_sources", policy.AllowedSources);
            AddTextArrayParameter(command, "denied_sources", policy.DeniedSources);
            AddParameter(command, "max_bundle_size_mb", policy.MaxBundleSizeMb);
            AddParameter(command, "max_items_per_bundle", policy.MaxItemsPerBundle);
            AddParameter(command, "require_signature", policy.RequireSignature);
            AddTextArrayParameter(command, "allowed_signers", policy.AllowedSigners);
            AddParameter(command, "enabled", policy.Enabled);
        },
        ct).ConfigureAwait(false);
}
public Task<IReadOnlyList<SitePolicyEntity>> GetAllPoliciesAsync(bool enabledOnly = true, CancellationToken ct = default)
{
var sql = """
SELECT id, site_id, display_name, allowed_sources, denied_sources,
max_bundle_size_mb, max_items_per_bundle, require_signature,
allowed_signers, enabled, created_at, updated_at
FROM vuln.site_policy
""";
if (enabledOnly)
{
sql += " WHERE enabled = TRUE";
}
sql += " ORDER BY site_id";
return QueryAsync(
SystemTenantId,
sql,
_ => { },
MapPolicy,
ct);
}
public async Task<bool> DeletePolicyAsync(string siteId, CancellationToken ct = default)
{
const string sql = """
DELETE FROM vuln.site_policy
WHERE site_id = @site_id
""";
var rows = await ExecuteAsync(
SystemTenantId,
sql,
cmd => AddParameter(cmd, "site_id", siteId),
ct).ConfigureAwait(false);
return rows > 0;
}
#endregion
#region Statistics
    /// <summary>
    /// Computes aggregate federation sync statistics (site counts, bundle/item totals,
    /// last import time) in a single round trip of scalar subqueries.
    /// </summary>
    /// <param name="ct">Cancellation token.</param>
    public async Task<SyncStatistics> GetStatisticsAsync(CancellationToken ct = default)
    {
        // Single-row result; the reader ordinals below must match this column order.
        const string sql = """
            SELECT
                (SELECT COUNT(DISTINCT site_id) FROM vuln.site_policy) AS total_sites,
                (SELECT COUNT(DISTINCT site_id) FROM vuln.site_policy WHERE enabled = TRUE) AS enabled_sites,
                (SELECT COUNT(*) FROM vuln.sync_ledger) AS total_bundles,
                (SELECT COALESCE(SUM(items_count), 0) FROM vuln.sync_ledger) AS total_items,
                (SELECT MAX(imported_at) FROM vuln.sync_ledger) AS last_import
            """;
        return await QuerySingleOrDefaultAsync(
            SystemTenantId,
            sql,
            _ => { },
            reader => new SyncStatistics
            {
                TotalSites = reader.GetInt32(0),
                EnabledSites = reader.GetInt32(1),
                TotalBundlesImported = reader.GetInt64(2),
                TotalItemsImported = reader.GetInt64(3),
                // MAX(imported_at) is NULL when the ledger is empty.
                LastImportAt = GetNullableDateTimeOffset(reader, 4)
            },
            // A scalar-subquery SELECT always yields one row; the fallback is defensive.
            ct).ConfigureAwait(false) ?? new SyncStatistics();
    }
#endregion
#region Mappers
    /// <summary>
    /// Materializes a sync ledger row. Ordinals are positional and must match the
    /// column order of the SELECT statements that use this mapper.
    /// </summary>
    private static SyncLedgerEntity MapLedgerEntry(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        SiteId = reader.GetString(1),
        Cursor = reader.GetString(2),
        BundleHash = reader.GetString(3),
        ItemsCount = reader.GetInt32(4),
        SignedAt = reader.GetFieldValue<DateTimeOffset>(5),
        ImportedAt = reader.GetFieldValue<DateTimeOffset>(6)
    };
    /// <summary>
    /// Materializes a site policy row. Ordinals are positional and must match the
    /// column order of the SELECT statements that use this mapper.
    /// </summary>
    private static SitePolicyEntity MapPolicy(NpgsqlDataReader reader) => new()
    {
        Id = reader.GetGuid(0),
        SiteId = reader.GetString(1),
        // display_name is nullable in the schema.
        DisplayName = GetNullableString(reader, 2),
        // Source/signer lists are PostgreSQL text[] columns.
        AllowedSources = reader.GetFieldValue<string[]>(3),
        DeniedSources = reader.GetFieldValue<string[]>(4),
        MaxBundleSizeMb = reader.GetInt32(5),
        MaxItemsPerBundle = reader.GetInt32(6),
        RequireSignature = reader.GetBoolean(7),
        AllowedSigners = reader.GetFieldValue<string[]>(8),
        Enabled = reader.GetBoolean(9),
        CreatedAt = reader.GetFieldValue<DateTimeOffset>(10),
        UpdatedAt = reader.GetFieldValue<DateTimeOffset>(11)
    };
#endregion
}
/// <summary>
/// Cursor format utilities for federation sync.
/// A cursor is "&lt;ISO-8601 round-trip timestamp&gt;#&lt;4-digit sequence&gt;",
/// e.g. "2025-01-15T10:30:00.0000000+00:00#0042", ordered by timestamp then sequence.
/// </summary>
public static class CursorFormat
{
    /// <summary>
    /// Creates a cursor from timestamp and sequence.
    /// Format: "2025-01-15T10:30:00.000Z#0042"
    /// </summary>
    /// <param name="timestamp">Point in time the cursor represents.</param>
    /// <param name="sequence">Tie-breaker for multiple cursors sharing a timestamp.</param>
    public static string Create(DateTimeOffset timestamp, int sequence = 0)
    {
        // "O" (round-trip) formatting is culture-invariant by definition; the sequence is
        // zero-padded to four digits so cursors also sort correctly as plain strings.
        return $"{timestamp:O}#{sequence:D4}";
    }

    /// <summary>
    /// Parses a cursor into timestamp and sequence.
    /// A cursor without a '#' separator yields sequence 0.
    /// </summary>
    /// <param name="cursor">Cursor string produced by <see cref="Create"/>.</param>
    /// <exception cref="FormatException">Thrown when either component is malformed.</exception>
    public static (DateTimeOffset Timestamp, int Sequence) Parse(string cursor)
    {
        var parts = cursor.Split('#');
        // Parse with the invariant culture: cursors are machine-readable round-trip ("O")
        // values and must not depend on the host locale's date/number formats.
        var timestamp = DateTimeOffset.Parse(parts[0], System.Globalization.CultureInfo.InvariantCulture);
        var sequence = parts.Length > 1
            ? int.Parse(parts[1], System.Globalization.CultureInfo.InvariantCulture)
            : 0;
        return (timestamp, sequence);
    }

    /// <summary>
    /// Compares two cursors. Returns true if cursor1 is strictly after cursor2
    /// (later timestamp, or equal timestamp with a higher sequence).
    /// </summary>
    public static bool IsAfter(string cursor1, string cursor2)
    {
        var (ts1, seq1) = Parse(cursor1);
        var (ts2, seq2) = Parse(cursor2);
        if (ts1 != ts2) return ts1 > ts2;
        return seq1 > seq2;
    }
}

View File

@@ -0,0 +1,118 @@
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Concelier.Persistence.Postgres.Repositories;
using StellaOps.Concelier.Persistence.Postgres.Advisories;
using StellaOps.Infrastructure.Postgres;
using StellaOps.Infrastructure.Postgres.Options;
using StellaOps.Concelier.Core.Linksets;
using StorageContracts = StellaOps.Concelier.Storage;
using AdvisoryContracts = StellaOps.Concelier.Storage.Advisories;
using ExportingContracts = StellaOps.Concelier.Storage.Exporting;
using JpFlagsContracts = StellaOps.Concelier.Storage.JpFlags;
using PsirtContracts = StellaOps.Concelier.Storage.PsirtFlags;
using HistoryContracts = StellaOps.Concelier.Storage.ChangeHistory;
using StellaOps.Concelier.Merge.Backport;
namespace StellaOps.Concelier.Persistence.Postgres;
/// <summary>
/// Extension methods for configuring Concelier PostgreSQL storage services.
/// </summary>
public static class ServiceCollectionExtensions
{
    /// <summary>
    /// Adds Concelier PostgreSQL storage services, binding <see cref="PostgresOptions"/>
    /// from the given configuration section.
    /// </summary>
    /// <param name="services">Service collection.</param>
    /// <param name="configuration">Configuration root.</param>
    /// <param name="sectionName">Configuration section name for PostgreSQL options.</param>
    /// <returns>Service collection for chaining.</returns>
    public static IServiceCollection AddConcelierPostgresStorage(
        this IServiceCollection services,
        IConfiguration configuration,
        string sectionName = "Postgres:Concelier")
    {
        // Named options: the section name doubles as the options instance name.
        services.Configure<PostgresOptions>(sectionName, configuration.GetSection(sectionName));
        return AddConcelierPostgresCore(services);
    }

    /// <summary>
    /// Adds Concelier PostgreSQL storage services with explicit options.
    /// </summary>
    /// <param name="services">Service collection.</param>
    /// <param name="configureOptions">Options configuration action.</param>
    /// <returns>Service collection for chaining.</returns>
    public static IServiceCollection AddConcelierPostgresStorage(
        this IServiceCollection services,
        Action<PostgresOptions> configureOptions)
    {
        services.Configure(configureOptions);
        return AddConcelierPostgresCore(services);
    }

    /// <summary>
    /// Registers the data source, repositories, and storage-contract adapters shared by
    /// both overloads. Kept private so option binding always happens first.
    /// </summary>
    private static IServiceCollection AddConcelierPostgresCore(IServiceCollection services)
    {
        services.AddSingleton<ConcelierDataSource>();

        // Register repositories
        services.AddScoped<IAdvisoryRepository, AdvisoryRepository>();
        services.AddScoped<IPostgresAdvisoryStore, PostgresAdvisoryStore>();
        services.AddScoped<ISourceRepository, SourceRepository>();
        services.AddScoped<IAdvisoryAliasRepository, AdvisoryAliasRepository>();
        services.AddScoped<IAdvisoryCvssRepository, AdvisoryCvssRepository>();
        services.AddScoped<IAdvisoryAffectedRepository, AdvisoryAffectedRepository>();
        services.AddScoped<IAdvisoryReferenceRepository, AdvisoryReferenceRepository>();
        services.AddScoped<IAdvisoryCreditRepository, AdvisoryCreditRepository>();
        services.AddScoped<IAdvisoryWeaknessRepository, AdvisoryWeaknessRepository>();
        services.AddScoped<IKevFlagRepository, KevFlagRepository>();
        services.AddScoped<StellaOps.Concelier.Persistence.Postgres.Repositories.ISourceStateRepository, SourceStateRepository>();
        services.AddScoped<AdvisoryContracts.IAdvisoryStore, PostgresAdvisoryStore>();
        services.AddScoped<IDocumentRepository, DocumentRepository>();
        services.AddScoped<StorageContracts.ISourceStateRepository, PostgresSourceStateAdapter>();
        services.AddScoped<IFeedSnapshotRepository, FeedSnapshotRepository>();
        services.AddScoped<IAdvisorySnapshotRepository, AdvisorySnapshotRepository>();
        services.AddScoped<IMergeEventRepository, MergeEventRepository>();
        services.AddScoped<IAdvisoryLinksetStore, AdvisoryLinksetCacheRepository>();
        services.AddScoped<IAdvisoryLinksetLookup>(sp => sp.GetRequiredService<IAdvisoryLinksetStore>());
        services.AddScoped<StorageContracts.IDocumentStore, PostgresDocumentStore>();
        services.AddScoped<StorageContracts.IDtoStore, PostgresDtoStore>();
        services.AddScoped<ExportingContracts.IExportStateStore, PostgresExportStateStore>();
        services.AddScoped<PsirtContracts.IPsirtFlagStore, PostgresPsirtFlagStore>();
        services.AddScoped<JpFlagsContracts.IJpFlagStore, PostgresJpFlagStore>();
        services.AddScoped<HistoryContracts.IChangeHistoryStore, PostgresChangeHistoryStore>();

        // Provenance scope services (backport integration)
        services.AddScoped<Repositories.IProvenanceScopeRepository, ProvenanceScopeRepository>();
        services.AddScoped<IProvenanceScopeStore, PostgresProvenanceScopeStore>();

        return services;
    }
}

View File

@@ -0,0 +1,215 @@
using System;
using System.Text.Json;
using System.Collections.Generic;
using StellaOps.Concelier.Documents;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Concelier.Persistence.Postgres.Repositories;
using Contracts = StellaOps.Concelier.Storage.Contracts;
using LegacyContracts = StellaOps.Concelier.Storage;
namespace StellaOps.Concelier.Persistence.Postgres;
/// <summary>
/// Adapter that satisfies the legacy source state contract using PostgreSQL storage and provides a Postgres-native cursor contract.
/// Bridges <see cref="LegacyContracts.ISourceStateRepository"/> (DocumentObject cursors) and
/// <see cref="Contracts.ISourceStateStore"/> (JsonDocument cursors) onto the source and
/// source-state repositories.
/// </summary>
public sealed class PostgresSourceStateAdapter : LegacyContracts.ISourceStateRepository, Contracts.ISourceStateStore
{
    private readonly ISourceRepository _sourceRepository;
    private readonly Repositories.ISourceStateRepository _stateRepository;
    // Injectable clock; only used when auto-registering a source (see EnsureSourceAsync).
    private readonly TimeProvider _timeProvider;

    /// <summary>
    /// Creates the adapter over the source and source-state repositories.
    /// </summary>
    /// <param name="sourceRepository">Repository for source rows (keyed by source name).</param>
    /// <param name="stateRepository">Repository for per-source sync state rows.</param>
    /// <param name="timeProvider">Clock override for tests; defaults to the system clock.</param>
    public PostgresSourceStateAdapter(
        ISourceRepository sourceRepository,
        Repositories.ISourceStateRepository stateRepository,
        TimeProvider? timeProvider = null)
    {
        _sourceRepository = sourceRepository ?? throw new ArgumentNullException(nameof(sourceRepository));
        _stateRepository = stateRepository ?? throw new ArgumentNullException(nameof(stateRepository));
        _timeProvider = timeProvider ?? TimeProvider.System;
    }

    /// <summary>
    /// Returns the legacy state record for <paramref name="sourceName"/>, or null when
    /// the source or its state row does not exist.
    /// </summary>
    public async Task<LegacyContracts.SourceStateRecord?> TryGetAsync(string sourceName, CancellationToken cancellationToken)
    {
        ArgumentException.ThrowIfNullOrEmpty(sourceName);
        var source = await _sourceRepository.GetByKeyAsync(sourceName, cancellationToken).ConfigureAwait(false);
        if (source is null)
        {
            return null;
        }
        var state = await _stateRepository.GetBySourceIdAsync(source.Id, cancellationToken).ConfigureAwait(false);
        if (state is null)
        {
            return null;
        }
        var cursor = string.IsNullOrWhiteSpace(state.Cursor) ? null : DocumentObject.Parse(state.Cursor);
        // The backoff deadline lives inside the JSON metadata column, not a dedicated column.
        var backoffUntil = TryParseBackoffUntil(state.Metadata);
        return new LegacyContracts.SourceStateRecord(
            sourceName,
            // The state row does not persist enable/pause flags, so the legacy record
            // always reports an enabled, non-paused source.
            Enabled: true,
            Paused: false,
            Cursor: cursor,
            LastSuccess: state.LastSuccessAt,
            // Failure time is approximated by the last sync time whenever an error is stored.
            LastFailure: state.LastError is null ? null : state.LastSyncAt,
            FailCount: state.ErrorCount,
            BackoffUntil: backoffUntil,
            UpdatedAt: state.UpdatedAt,
            LastFailureReason: state.LastError);
    }

    /// <summary>
    /// Records a successful sync: stores the serialized cursor, clears the last error,
    /// and increments the sync counter. The error counter is preserved, not reset.
    /// </summary>
    public async Task UpdateCursorAsync(string sourceName, DocumentObject cursor, DateTimeOffset completedAt, CancellationToken cancellationToken)
    {
        ArgumentException.ThrowIfNullOrEmpty(sourceName);
        ArgumentNullException.ThrowIfNull(cursor);
        var source = await EnsureSourceAsync(sourceName, cancellationToken).ConfigureAwait(false);
        var existing = await _stateRepository.GetBySourceIdAsync(source.Id, cancellationToken).ConfigureAwait(false);
        var entity = new SourceStateEntity
        {
            Id = existing?.Id ?? Guid.NewGuid(),
            SourceId = source.Id,
            Cursor = cursor.ToJson(),
            LastSyncAt = completedAt,
            LastSuccessAt = completedAt,
            LastError = null,
            SyncCount = (existing?.SyncCount ?? 0) + 1,
            ErrorCount = existing?.ErrorCount ?? 0,
            // Keep previously stored metadata (e.g. a stale backoff entry) rather than resetting it.
            Metadata = existing?.Metadata ?? "{}",
            UpdatedAt = completedAt
        };
        _ = await _stateRepository.UpsertAsync(entity, cancellationToken).ConfigureAwait(false);
    }

    /// <summary>
    /// Records a failed sync: stores the failure reason, increments the error counter,
    /// and writes the backoff deadline plus reason into the metadata JSON
    /// (replacing whatever metadata was there before).
    /// </summary>
    public async Task MarkFailureAsync(string sourceName, DateTimeOffset now, TimeSpan backoff, string reason, CancellationToken cancellationToken)
    {
        ArgumentException.ThrowIfNullOrEmpty(sourceName);
        var source = await EnsureSourceAsync(sourceName, cancellationToken).ConfigureAwait(false);
        var existing = await _stateRepository.GetBySourceIdAsync(source.Id, cancellationToken).ConfigureAwait(false);
        var backoffUntil = SafeAdd(now, backoff);
        var metadata = new Dictionary<string, object?>(StringComparer.Ordinal)
        {
            // Round-trip ("O") format so TryParseBackoffUntil can parse it back.
            ["backoffUntil"] = backoffUntil.ToString("O"),
            ["reason"] = reason
        };
        var entity = new SourceStateEntity
        {
            Id = existing?.Id ?? Guid.NewGuid(),
            SourceId = source.Id,
            // Cursor is left at its previous value; a failure does not advance it.
            Cursor = existing?.Cursor,
            LastSyncAt = now,
            LastSuccessAt = existing?.LastSuccessAt,
            LastError = reason,
            SyncCount = existing?.SyncCount ?? 0,
            ErrorCount = (existing?.ErrorCount ?? 0) + 1,
            Metadata = JsonSerializer.Serialize(metadata, new JsonSerializerOptions(JsonSerializerDefaults.Web)),
            UpdatedAt = now
        };
        _ = await _stateRepository.UpsertAsync(entity, cancellationToken).ConfigureAwait(false);
    }

    /// <summary>
    /// Persists a full legacy record as a state row.
    /// NOTE(review): a fresh Id is always generated, so this relies on the repository's
    /// upsert conflicting on SourceId rather than Id — confirm. Also SyncCount is populated
    /// from FailCount, which looks like it may be unintended — confirm the mapping.
    /// </summary>
    public async Task UpsertAsync(LegacyContracts.SourceStateRecord record, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(record);
        var source = await EnsureSourceAsync(record.SourceName, cancellationToken).ConfigureAwait(false);
        var entity = new SourceStateEntity
        {
            Id = Guid.NewGuid(),
            SourceId = source.Id,
            Cursor = record.Cursor?.ToJson(),
            LastSyncAt = record.UpdatedAt,
            LastSuccessAt = record.LastSuccess,
            LastError = record.LastFailureReason,
            SyncCount = record.FailCount,
            ErrorCount = record.FailCount,
            Metadata = "{}",
            UpdatedAt = record.UpdatedAt
        };
        _ = await _stateRepository.UpsertAsync(entity, cancellationToken).ConfigureAwait(false);
    }

    // Contracts.ISourceStateStore: thin explicit implementations that translate between
    // the JsonDocument-based cursor contract and the legacy DocumentObject-based methods.
    async Task<Contracts.SourceCursorState?> Contracts.ISourceStateStore.TryGetAsync(string sourceName, CancellationToken cancellationToken)
        => (await TryGetAsync(sourceName, cancellationToken).ConfigureAwait(false))?.ToStorageCursorState();

    Task Contracts.ISourceStateStore.UpdateCursorAsync(string sourceName, JsonDocument cursor, DateTimeOffset completedAt, CancellationToken cancellationToken)
        => UpdateCursorAsync(sourceName, cursor.ToDocumentObject(), completedAt, cancellationToken);

    Task Contracts.ISourceStateStore.MarkFailureAsync(string sourceName, DateTimeOffset now, TimeSpan backoff, string reason, CancellationToken cancellationToken)
        => MarkFailureAsync(sourceName, now, backoff, reason, cancellationToken);

    Task Contracts.ISourceStateStore.UpsertAsync(Contracts.SourceCursorState record, CancellationToken cancellationToken)
        => UpsertAsync(record.ToLegacySourceStateRecord(), cancellationToken);

    /// <summary>
    /// Fetches the source row by key, auto-registering a minimal placeholder row
    /// (key doubles as name and type) when the source has never been seen.
    /// </summary>
    private async Task<SourceEntity> EnsureSourceAsync(string sourceName, CancellationToken cancellationToken)
    {
        var existing = await _sourceRepository.GetByKeyAsync(sourceName, cancellationToken).ConfigureAwait(false);
        if (existing is not null)
        {
            return existing;
        }
        var now = _timeProvider.GetUtcNow();
        return await _sourceRepository.UpsertAsync(new SourceEntity
        {
            Id = Guid.NewGuid(),
            Key = sourceName,
            Name = sourceName,
            SourceType = sourceName,
            Url = null,
            Priority = 0,
            Enabled = true,
            Config = "{}",
            Metadata = "{}",
            CreatedAt = now,
            UpdatedAt = now
        }, cancellationToken).ConfigureAwait(false);
    }

    /// <summary>
    /// Adds <paramref name="delta"/> to <paramref name="value"/>, clamping at
    /// DateTimeOffset.MinValue/MaxValue instead of throwing on overflow.
    /// </summary>
    private static DateTimeOffset SafeAdd(DateTimeOffset value, TimeSpan delta)
    {
        try
        {
            return value.Add(delta);
        }
        catch (ArgumentOutOfRangeException)
        {
            return delta < TimeSpan.Zero ? DateTimeOffset.MinValue : DateTimeOffset.MaxValue;
        }
    }

    /// <summary>
    /// Best-effort extraction of the "backoffUntil" timestamp from the metadata JSON.
    /// Missing, non-string, or unparseable values yield null.
    /// </summary>
    private static DateTimeOffset? TryParseBackoffUntil(string? metadata)
    {
        if (string.IsNullOrWhiteSpace(metadata))
        {
            return null;
        }
        try
        {
            using var document = JsonDocument.Parse(metadata);
            if (!document.RootElement.TryGetProperty("backoffUntil", out var backoffProperty))
            {
                return null;
            }
            if (backoffProperty.ValueKind == JsonValueKind.String
                && DateTimeOffset.TryParse(backoffProperty.GetString(), out var parsed))
            {
                return parsed;
            }
        }
        catch
        {
            // Malformed metadata is treated as "no backoff" — deliberate best-effort swallow.
        }
        return null;
    }
}

View File

@@ -0,0 +1,407 @@
// -----------------------------------------------------------------------------
// SitePolicyEnforcementService.cs
// Sprint: SPRINT_8200_0014_0001_DB_sync_ledger_schema
// Task: SYNC-8200-014
// Description: Enforces site federation policies including source allow/deny lists
// -----------------------------------------------------------------------------
using Microsoft.Extensions.Logging;
using StellaOps.Concelier.Persistence.Postgres.Models;
using StellaOps.Concelier.Persistence.Postgres.Repositories;
namespace StellaOps.Concelier.Persistence.Postgres.Sync;
/// <summary>
/// Enforces site federation policies for bundle imports: source allow/deny lists,
/// bundle size/item limits, and import-budget reporting.
/// </summary>
public sealed class SitePolicyEnforcementService
{
    private readonly ISyncLedgerRepository _repository;
    private readonly ILogger<SitePolicyEnforcementService> _logger;

    public SitePolicyEnforcementService(
        ISyncLedgerRepository repository,
        ILogger<SitePolicyEnforcementService> logger)
    {
        _repository = repository ?? throw new ArgumentNullException(nameof(repository));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <summary>
    /// Validates whether a source is allowed for a given site.
    /// Sites without a configured policy allow every source; disabled policies deny every source.
    /// </summary>
    /// <param name="siteId">The site identifier.</param>
    /// <param name="sourceKey">The source key to validate.</param>
    /// <param name="ct">Cancellation token.</param>
    /// <returns>Validation result indicating if the source is allowed.</returns>
    public async Task<SourceValidationResult> ValidateSourceAsync(
        string siteId,
        string sourceKey,
        CancellationToken ct = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(siteId);
        ArgumentException.ThrowIfNullOrWhiteSpace(sourceKey);
        var policy = await _repository.GetPolicyAsync(siteId, ct).ConfigureAwait(false);
        if (policy is null)
        {
            _logger.LogDebug("No policy found for site {SiteId}, allowing source {SourceKey} by default", siteId, sourceKey);
            return SourceValidationResult.Allowed("No policy configured");
        }
        if (!policy.Enabled)
        {
            _logger.LogWarning("Site {SiteId} policy is disabled, rejecting source {SourceKey}", siteId, sourceKey);
            return SourceValidationResult.Denied("Site policy is disabled");
        }
        return ValidateSourceAgainstPolicy(policy, sourceKey);
    }

    /// <summary>
    /// Validates a source against a specific policy without fetching from repository.
    /// Deny list wins over allow list; an empty allow list permits every non-denied source.
    /// </summary>
    public SourceValidationResult ValidateSourceAgainstPolicy(SitePolicyEntity policy, string sourceKey)
    {
        ArgumentNullException.ThrowIfNull(policy);
        ArgumentException.ThrowIfNullOrWhiteSpace(sourceKey);
        // Denied list takes precedence
        if (IsSourceInList(policy.DeniedSources, sourceKey))
        {
            _logger.LogInformation(
                "Source {SourceKey} is explicitly denied for site {SiteId}",
                sourceKey, policy.SiteId);
            return SourceValidationResult.Denied($"Source '{sourceKey}' is in deny list");
        }
        // If allowed list is empty, all non-denied sources are allowed
        if (policy.AllowedSources.Length == 0)
        {
            _logger.LogDebug(
                "Source {SourceKey} allowed for site {SiteId} (no allow list restrictions)",
                sourceKey, policy.SiteId);
            return SourceValidationResult.Allowed("No allow list restrictions");
        }
        // Check if source is in allowed list
        if (IsSourceInList(policy.AllowedSources, sourceKey))
        {
            _logger.LogDebug(
                "Source {SourceKey} is explicitly allowed for site {SiteId}",
                sourceKey, policy.SiteId);
            return SourceValidationResult.Allowed("Source is in allow list");
        }
        // Source not in allowed list
        _logger.LogInformation(
            "Source {SourceKey} not in allow list for site {SiteId}",
            sourceKey, policy.SiteId);
        return SourceValidationResult.Denied($"Source '{sourceKey}' is not in allow list");
    }

    /// <summary>
    /// Validates multiple sources against the site's policy (fetched once) and returns
    /// a result per source key. Null/whitespace keys are skipped.
    /// </summary>
    public async Task<IReadOnlyDictionary<string, SourceValidationResult>> ValidateSourcesAsync(
        string siteId,
        IEnumerable<string> sourceKeys,
        CancellationToken ct = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(siteId);
        ArgumentNullException.ThrowIfNull(sourceKeys);
        var policy = await _repository.GetPolicyAsync(siteId, ct).ConfigureAwait(false);
        var results = new Dictionary<string, SourceValidationResult>();
        foreach (var sourceKey in sourceKeys)
        {
            if (string.IsNullOrWhiteSpace(sourceKey))
            {
                continue;
            }
            if (policy is null)
            {
                results[sourceKey] = SourceValidationResult.Allowed("No policy configured");
            }
            else if (!policy.Enabled)
            {
                results[sourceKey] = SourceValidationResult.Denied("Site policy is disabled");
            }
            else
            {
                results[sourceKey] = ValidateSourceAgainstPolicy(policy, sourceKey);
            }
        }
        return results;
    }

    /// <summary>
    /// Filters a collection of source keys to only those allowed by the site policy.
    /// </summary>
    public async Task<IReadOnlyList<string>> FilterAllowedSourcesAsync(
        string siteId,
        IEnumerable<string> sourceKeys,
        CancellationToken ct = default)
    {
        var results = await ValidateSourcesAsync(siteId, sourceKeys, ct).ConfigureAwait(false);
        return results
            .Where(kvp => kvp.Value.IsAllowed)
            .Select(kvp => kvp.Key)
            .ToList();
    }

    /// <summary>
    /// Case-insensitive membership test supporting a trailing-'*' wildcard
    /// (e.g. "nvd-*" matches "nvd-cve" and "nvd-cpe").
    /// </summary>
    private static bool IsSourceInList(string[] sourceList, string sourceKey)
    {
        if (sourceList.Length == 0)
        {
            return false;
        }
        foreach (var source in sourceList)
        {
            // Exact match (case-insensitive)
            if (string.Equals(source, sourceKey, StringComparison.OrdinalIgnoreCase))
            {
                return true;
            }
            // Wildcard pattern match (e.g., "nvd-*" matches "nvd-cve", "nvd-cpe")
            if (source.EndsWith('*') && sourceKey.StartsWith(
                source[..^1], StringComparison.OrdinalIgnoreCase))
            {
                return true;
            }
        }
        return false;
    }

    #region Size Budget Tracking (SYNC-8200-015)

    /// <summary>
    /// Validates bundle size against site policy limits.
    /// </summary>
    /// <param name="siteId">The site identifier.</param>
    /// <param name="bundleSizeMb">Bundle size in megabytes.</param>
    /// <param name="itemsCount">Number of items in the bundle.</param>
    /// <param name="ct">Cancellation token.</param>
    /// <returns>Validation result indicating if the bundle is within limits.</returns>
    public async Task<BundleSizeValidationResult> ValidateBundleSizeAsync(
        string siteId,
        decimal bundleSizeMb,
        int itemsCount,
        CancellationToken ct = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(siteId);
        var policy = await _repository.GetPolicyAsync(siteId, ct).ConfigureAwait(false);
        if (policy is null)
        {
            _logger.LogDebug(
                "No policy found for site {SiteId}, allowing bundle (size={SizeMb}MB, items={Items})",
                siteId, bundleSizeMb, itemsCount);
            return BundleSizeValidationResult.Allowed("No policy configured", bundleSizeMb, itemsCount);
        }
        if (!policy.Enabled)
        {
            _logger.LogWarning("Site {SiteId} policy is disabled, rejecting bundle", siteId);
            return BundleSizeValidationResult.Denied(
                "Site policy is disabled",
                bundleSizeMb,
                itemsCount,
                policy.MaxBundleSizeMb,
                policy.MaxItemsPerBundle);
        }
        return ValidateBundleSizeAgainstPolicy(policy, bundleSizeMb, itemsCount);
    }

    /// <summary>
    /// Validates bundle size against a specific policy without fetching from repository.
    /// Both the size limit and the item-count limit are checked; all violations are reported.
    /// </summary>
    public BundleSizeValidationResult ValidateBundleSizeAgainstPolicy(
        SitePolicyEntity policy,
        decimal bundleSizeMb,
        int itemsCount)
    {
        ArgumentNullException.ThrowIfNull(policy);
        var violations = new List<string>();
        // Check size limit
        if (bundleSizeMb > policy.MaxBundleSizeMb)
        {
            violations.Add($"Bundle size ({bundleSizeMb:F2}MB) exceeds limit ({policy.MaxBundleSizeMb}MB)");
        }
        // Check items limit
        if (itemsCount > policy.MaxItemsPerBundle)
        {
            violations.Add($"Item count ({itemsCount}) exceeds limit ({policy.MaxItemsPerBundle})");
        }
        if (violations.Count > 0)
        {
            var reason = string.Join("; ", violations);
            _logger.LogWarning(
                "Bundle rejected for site {SiteId}: {Reason}",
                policy.SiteId, reason);
            return BundleSizeValidationResult.Denied(
                reason,
                bundleSizeMb,
                itemsCount,
                policy.MaxBundleSizeMb,
                policy.MaxItemsPerBundle);
        }
        _logger.LogDebug(
            "Bundle accepted for site {SiteId}: size={SizeMb}MB (limit={MaxSize}MB), items={Items} (limit={MaxItems})",
            policy.SiteId, bundleSizeMb, policy.MaxBundleSizeMb, itemsCount, policy.MaxItemsPerBundle);
        return BundleSizeValidationResult.Allowed(
            "Within size limits",
            bundleSizeMb,
            itemsCount,
            policy.MaxBundleSizeMb,
            policy.MaxItemsPerBundle);
    }

    /// <summary>
    /// Gets the remaining budget for a site based on recent imports.
    /// </summary>
    /// <param name="siteId">The site identifier.</param>
    /// <param name="windowHours">Time window in hours to consider for recent imports.</param>
    /// <param name="ct">Cancellation token.</param>
    /// <returns>Remaining budget information.</returns>
    public async Task<SiteBudgetInfo> GetRemainingBudgetAsync(
        string siteId,
        int windowHours = 24,
        CancellationToken ct = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(siteId);
        var policy = await _repository.GetPolicyAsync(siteId, ct).ConfigureAwait(false);
        var history = await _repository.GetHistoryAsync(siteId, limit: 100, ct).ConfigureAwait(false);

        // Apply the time window in BOTH branches. Previously the no-policy branch
        // counted all fetched history while still reporting WindowHours, which
        // overstated recent usage for unconfigured sites.
        var windowStart = DateTimeOffset.UtcNow.AddHours(-windowHours);
        var recentHistory = history.Where(h => h.ImportedAt >= windowStart).ToList();

        if (policy is null)
        {
            return new SiteBudgetInfo(
                SiteId: siteId,
                HasPolicy: false,
                MaxBundleSizeMb: int.MaxValue,
                MaxItemsPerBundle: int.MaxValue,
                RecentImportsCount: recentHistory.Count,
                RecentItemsImported: recentHistory.Sum(h => h.ItemsCount),
                WindowHours: windowHours);
        }

        return new SiteBudgetInfo(
            SiteId: siteId,
            HasPolicy: true,
            MaxBundleSizeMb: policy.MaxBundleSizeMb,
            MaxItemsPerBundle: policy.MaxItemsPerBundle,
            RecentImportsCount: recentHistory.Count,
            RecentItemsImported: recentHistory.Sum(h => h.ItemsCount),
            WindowHours: windowHours);
    }

    #endregion
}
/// <summary>
/// Outcome of checking a single source key against a site's federation policy.
/// Construct via the <see cref="Allowed"/> and <see cref="Denied"/> factories.
/// </summary>
public sealed record SourceValidationResult
{
    /// <summary>
    /// Whether the source passed validation.
    /// </summary>
    public bool IsAllowed { get; }

    /// <summary>
    /// Human-readable explanation of the decision.
    /// </summary>
    public string Reason { get; }

    private SourceValidationResult(bool isAllowed, string reason)
        => (IsAllowed, Reason) = (isAllowed, reason);

    /// <summary>
    /// Builds a result that permits the source.
    /// </summary>
    public static SourceValidationResult Allowed(string reason) => new(true, reason);

    /// <summary>
    /// Builds a result that rejects the source.
    /// </summary>
    public static SourceValidationResult Denied(string reason) => new(false, reason);
}
/// <summary>
/// Outcome of validating a bundle's size and item count against a site policy.
/// Carries the observed values plus the policy limits (when a policy applied).
/// Construct via the <see cref="Allowed"/> and <see cref="Denied"/> factories.
/// </summary>
public sealed record BundleSizeValidationResult
{
    /// <summary>Whether the bundle is within the policy limits.</summary>
    public bool IsAllowed { get; }

    /// <summary>Human-readable explanation of the decision.</summary>
    public string Reason { get; }

    /// <summary>Observed bundle size in megabytes.</summary>
    public decimal ActualSizeMb { get; }

    /// <summary>Observed number of items in the bundle.</summary>
    public int ActualItemCount { get; }

    /// <summary>Policy size limit in megabytes; null when no limit applied.</summary>
    public int? MaxSizeMb { get; }

    /// <summary>Policy item-count limit; null when no limit applied.</summary>
    public int? MaxItems { get; }

    private BundleSizeValidationResult(
        bool isAllowed,
        string reason,
        decimal actualSizeMb,
        int actualItemCount,
        int? maxSizeMb,
        int? maxItems)
    {
        IsAllowed = isAllowed;
        Reason = reason;
        ActualSizeMb = actualSizeMb;
        ActualItemCount = actualItemCount;
        MaxSizeMb = maxSizeMb;
        MaxItems = maxItems;
    }

    /// <summary>Builds a result that accepts the bundle.</summary>
    public static BundleSizeValidationResult Allowed(
        string reason,
        decimal actualSizeMb,
        int actualItemCount,
        int? maxSizeMb = null,
        int? maxItems = null)
        => new(true, reason, actualSizeMb, actualItemCount, maxSizeMb, maxItems);

    /// <summary>Builds a result that rejects the bundle.</summary>
    public static BundleSizeValidationResult Denied(
        string reason,
        decimal actualSizeMb,
        int actualItemCount,
        int? maxSizeMb = null,
        int? maxItems = null)
        => new(false, reason, actualSizeMb, actualItemCount, maxSizeMb, maxItems);
}
/// <summary>
/// Information about a site's remaining import budget.
/// </summary>
/// <param name="SiteId">Site the budget applies to.</param>
/// <param name="HasPolicy">Whether a policy is configured; when false the limits are int.MaxValue.</param>
/// <param name="MaxBundleSizeMb">Per-bundle size limit in megabytes.</param>
/// <param name="MaxItemsPerBundle">Per-bundle item-count limit.</param>
/// <param name="RecentImportsCount">Number of imports observed within the window.</param>
/// <param name="RecentItemsImported">Total items imported within the window.</param>
/// <param name="WindowHours">Length in hours of the observation window.</param>
public sealed record SiteBudgetInfo(
    string SiteId,
    bool HasPolicy,
    int MaxBundleSizeMb,
    int MaxItemsPerBundle,
    int RecentImportsCount,
    int RecentItemsImported,
    int WindowHours);

View File

@@ -0,0 +1,42 @@
<?xml version="1.0" ?>
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<LangVersion>preview</LangVersion>
<TreatWarningsAsErrors>false</TreatWarningsAsErrors>
<RootNamespace>StellaOps.Concelier.Persistence</RootNamespace>
<AssemblyName>StellaOps.Concelier.Persistence</AssemblyName>
<Description>Consolidated persistence layer for StellaOps Concelier module (EF Core + Raw SQL)</Description>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Dapper" />
<PackageReference Include="Microsoft.EntityFrameworkCore" />
<PackageReference Include="Microsoft.EntityFrameworkCore.Design" PrivateAssets="all" />
<PackageReference Include="Npgsql" />
<PackageReference Include="Npgsql.EntityFrameworkCore.PostgreSQL" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" />
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Configuration.Abstractions" />
</ItemGroup>
<ItemGroup>
<!-- Only include active migrations, exclude archived -->
<EmbeddedResource Include="Migrations\*.sql" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.EfCore\StellaOps.Infrastructure.EfCore.csproj" />
<ProjectReference Include="..\StellaOps.Concelier.Core\StellaOps.Concelier.Core.csproj" />
<ProjectReference Include="..\StellaOps.Concelier.Interest\StellaOps.Concelier.Interest.csproj" />
<ProjectReference Include="..\StellaOps.Concelier.Models\StellaOps.Concelier.Models.csproj" />
<ProjectReference Include="..\StellaOps.Concelier.SbomIntegration\StellaOps.Concelier.SbomIntegration.csproj" />
<ProjectReference Include="..\StellaOps.Concelier.Merge\StellaOps.Concelier.Merge.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.DependencyInjection\StellaOps.DependencyInjection.csproj" />
</ItemGroup>
</Project>