Consolidate devops folders

This commit is contained in:
master
2026-01-25 23:27:41 +02:00
parent 6e687b523a
commit a50bbb38ef
334 changed files with 35079 additions and 5569 deletions

View File

@@ -0,0 +1,69 @@
-- -----------------------------------------------------------------------------
-- 005_timestamp_evidence.sql
-- Sprint: SPRINT_20260119_009 Evidence Storage for Timestamps
-- Task: EVT-002 - PostgreSQL Schema Extension
-- Description: Schema for storing timestamp and revocation evidence.
-- -----------------------------------------------------------------------------
-- Ensure the evidence schema exists
CREATE SCHEMA IF NOT EXISTS evidence;
-- Timestamp evidence storage
-- One row per captured RFC 3161 TimeStampToken for an artifact; the unique
-- constraint below rejects a second token for the same artifact digest at the
-- same generation instant.
CREATE TABLE IF NOT EXISTS evidence.timestamp_tokens (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
artifact_digest TEXT NOT NULL,
digest_algorithm TEXT NOT NULL, -- free-form algorithm name; allowed values not constrained here — TODO confirm writer validates
tst_blob BYTEA NOT NULL,
generation_time TIMESTAMPTZ NOT NULL,
tsa_name TEXT NOT NULL,
tsa_policy_oid TEXT NOT NULL,
serial_number TEXT NOT NULL, -- TEXT, presumably because TSA serials can exceed BIGINT — confirm
tsa_chain_pem TEXT NOT NULL,
ocsp_response BYTEA, -- nullable: OCSP stapling is optional
crl_snapshot BYTEA, -- nullable: CRL fallback is optional
captured_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), -- when the evidence was captured by the provider
provider_name TEXT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), -- when the row was written
CONSTRAINT uq_timestamp_artifact_time UNIQUE (artifact_digest, generation_time)
);
-- Indexes for timestamp queries
CREATE INDEX IF NOT EXISTS idx_timestamp_artifact ON evidence.timestamp_tokens(artifact_digest);
CREATE INDEX IF NOT EXISTS idx_timestamp_generation ON evidence.timestamp_tokens(generation_time);
CREATE INDEX IF NOT EXISTS idx_timestamp_provider ON evidence.timestamp_tokens(provider_name);
CREATE INDEX IF NOT EXISTS idx_timestamp_created ON evidence.timestamp_tokens(created_at);
-- Revocation evidence storage
-- One row per captured OCSP response or CRL snapshot for a certificate.
CREATE TABLE IF NOT EXISTS evidence.revocation_snapshots (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
certificate_fingerprint TEXT NOT NULL,
source TEXT NOT NULL CHECK (source IN ('Ocsp', 'Crl', 'None')),
raw_response BYTEA NOT NULL,
response_time TIMESTAMPTZ NOT NULL,
valid_until TIMESTAMPTZ NOT NULL,
status TEXT NOT NULL CHECK (status IN ('Good', 'Revoked', 'Unknown')),
revocation_time TIMESTAMPTZ, -- presumably set only when status = 'Revoked'; not enforced by a constraint — confirm
reason TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes for revocation queries
CREATE INDEX IF NOT EXISTS idx_revocation_cert ON evidence.revocation_snapshots(certificate_fingerprint);
CREATE INDEX IF NOT EXISTS idx_revocation_valid ON evidence.revocation_snapshots(valid_until);
CREATE INDEX IF NOT EXISTS idx_revocation_status ON evidence.revocation_snapshots(status);
CREATE INDEX IF NOT EXISTS idx_revocation_created ON evidence.revocation_snapshots(created_at);
-- Comments
COMMENT ON TABLE evidence.timestamp_tokens IS 'RFC-3161 TimeStampToken evidence for long-term validation';
COMMENT ON TABLE evidence.revocation_snapshots IS 'OCSP/CRL certificate revocation evidence snapshots';
COMMENT ON COLUMN evidence.timestamp_tokens.artifact_digest IS 'SHA-256 digest of the timestamped artifact';
COMMENT ON COLUMN evidence.timestamp_tokens.tst_blob IS 'Raw DER-encoded RFC 3161 TimeStampToken';
COMMENT ON COLUMN evidence.timestamp_tokens.tsa_chain_pem IS 'PEM-encoded TSA certificate chain for LTV';
COMMENT ON COLUMN evidence.timestamp_tokens.ocsp_response IS 'Stapled OCSP response at signing time';
COMMENT ON COLUMN evidence.timestamp_tokens.crl_snapshot IS 'CRL snapshot at signing time (fallback for OCSP)';
COMMENT ON COLUMN evidence.revocation_snapshots.certificate_fingerprint IS 'SHA-256 fingerprint of the certificate';
COMMENT ON COLUMN evidence.revocation_snapshots.raw_response IS 'Raw OCSP response or CRL bytes';
COMMENT ON COLUMN evidence.revocation_snapshots.response_time IS 'thisUpdate from the response';
COMMENT ON COLUMN evidence.revocation_snapshots.valid_until IS 'nextUpdate from the response';

View File

@@ -0,0 +1,21 @@
-- -----------------------------------------------------------------------------
-- 005_timestamp_evidence_rollback.sql
-- Sprint: SPRINT_20260119_009 Evidence Storage for Timestamps
-- Task: EVT-002 - PostgreSQL Schema Extension
-- Description: Rollback migration for timestamp and revocation evidence.
-- -----------------------------------------------------------------------------
-- Drop indexes first
-- (DROP TABLE below also removes its indexes, so these are technically
-- redundant; kept explicit so the rollback mirrors the forward migration.)
DROP INDEX IF EXISTS evidence.idx_timestamp_artifact;
DROP INDEX IF EXISTS evidence.idx_timestamp_generation;
DROP INDEX IF EXISTS evidence.idx_timestamp_provider;
DROP INDEX IF EXISTS evidence.idx_timestamp_created;
DROP INDEX IF EXISTS evidence.idx_revocation_cert;
DROP INDEX IF EXISTS evidence.idx_revocation_valid;
DROP INDEX IF EXISTS evidence.idx_revocation_status;
DROP INDEX IF EXISTS evidence.idx_revocation_created;
-- Drop tables
-- NOTE: the evidence schema itself is intentionally left in place; other
-- migrations may own objects in it — confirm before adding DROP SCHEMA here.
DROP TABLE IF EXISTS evidence.revocation_snapshots;
DROP TABLE IF EXISTS evidence.timestamp_tokens;

View File

@@ -0,0 +1,120 @@
-- Validation harness schema for tracking validation runs and match results
-- Migration: 005_validation_harness.sql
-- Ensure the target schema exists so this migration is self-contained.
-- (Without this, the CREATE TABLE statements below fail on a fresh database;
-- sibling migrations create their schemas defensively in the same way.)
CREATE SCHEMA IF NOT EXISTS groundtruth;
-- Validation runs table: one row per harness execution; aggregate metric
-- columns are NULL until the run completes.
CREATE TABLE IF NOT EXISTS groundtruth.validation_runs (
run_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
name TEXT NOT NULL,
description TEXT,
status TEXT NOT NULL DEFAULT 'pending',
-- Configuration (stored as JSONB)
config JSONB NOT NULL,
-- Timestamps
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
started_at TIMESTAMPTZ,
completed_at TIMESTAMPTZ,
-- Metrics (populated after completion)
total_pairs INT,
total_functions INT,
true_positives INT,
false_positives INT,
true_negatives INT,
false_negatives INT,
match_rate DOUBLE PRECISION,
precision_score DOUBLE PRECISION,
recall_score DOUBLE PRECISION,
f1_score DOUBLE PRECISION,
average_match_score DOUBLE PRECISION,
-- Mismatch counts by bucket (JSONB map)
mismatch_counts JSONB,
-- Metadata
corpus_snapshot_id TEXT,
matcher_version TEXT,
error_message TEXT,
tags TEXT[] DEFAULT '{}',
-- Constraints
CONSTRAINT valid_status CHECK (status IN ('pending', 'running', 'completed', 'failed', 'cancelled'))
);
-- Indexes for validation runs
CREATE INDEX IF NOT EXISTS idx_validation_runs_status ON groundtruth.validation_runs(status);
CREATE INDEX IF NOT EXISTS idx_validation_runs_created_at ON groundtruth.validation_runs(created_at DESC);
CREATE INDEX IF NOT EXISTS idx_validation_runs_tags ON groundtruth.validation_runs USING GIN (tags);
-- Match results table: one row per source function evaluated in a run;
-- rows are removed automatically when the parent run is deleted (CASCADE).
CREATE TABLE IF NOT EXISTS groundtruth.match_results (
result_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
run_id UUID NOT NULL REFERENCES groundtruth.validation_runs(run_id) ON DELETE CASCADE,
security_pair_id UUID NOT NULL,
-- Source function
source_name TEXT NOT NULL,
source_demangled_name TEXT,
source_address BIGINT NOT NULL,
source_size BIGINT,
source_build_id TEXT NOT NULL,
source_binary_name TEXT NOT NULL,
-- Expected target
expected_name TEXT NOT NULL,
expected_demangled_name TEXT,
expected_address BIGINT NOT NULL,
expected_size BIGINT,
expected_build_id TEXT NOT NULL,
expected_binary_name TEXT NOT NULL,
-- Actual matched target (nullable if no match found)
actual_name TEXT,
actual_demangled_name TEXT,
actual_address BIGINT,
actual_size BIGINT,
actual_build_id TEXT,
actual_binary_name TEXT,
-- Outcome
outcome TEXT NOT NULL,
match_score DOUBLE PRECISION,
confidence TEXT,
-- Mismatch analysis
inferred_cause TEXT,
mismatch_detail JSONB,
-- Performance
match_duration_ms DOUBLE PRECISION,
-- Constraints
CONSTRAINT valid_outcome CHECK (outcome IN ('true_positive', 'false_positive', 'true_negative', 'false_negative'))
);
-- Indexes for match results
CREATE INDEX IF NOT EXISTS idx_match_results_run_id ON groundtruth.match_results(run_id);
CREATE INDEX IF NOT EXISTS idx_match_results_security_pair_id ON groundtruth.match_results(security_pair_id);
CREATE INDEX IF NOT EXISTS idx_match_results_outcome ON groundtruth.match_results(outcome);
CREATE INDEX IF NOT EXISTS idx_match_results_inferred_cause ON groundtruth.match_results(inferred_cause) WHERE inferred_cause IS NOT NULL;
-- View for run summaries (narrow projection for list endpoints)
CREATE OR REPLACE VIEW groundtruth.validation_run_summaries AS
SELECT
run_id AS id,
name,
status,
created_at,
completed_at,
match_rate,
f1_score,
total_pairs AS pair_count,
total_functions AS function_count,
tags
FROM groundtruth.validation_runs;
-- Comments
COMMENT ON TABLE groundtruth.validation_runs IS 'Validation harness runs with aggregate metrics';
COMMENT ON TABLE groundtruth.match_results IS 'Per-function match results from validation runs';
COMMENT ON VIEW groundtruth.validation_run_summaries IS 'Summary view for listing validation runs';

View File

@@ -0,0 +1,27 @@
-- -----------------------------------------------------------------------------
-- 006_timestamp_supersession.sql
-- Sprint: SPRINT_20260119_009 Evidence Storage for Timestamps
-- Task: EVT-005 - Re-Timestamping Support
-- Description: Schema extension for timestamp supersession chain.
-- -----------------------------------------------------------------------------
-- Add supersession column for re-timestamping chain
-- (self-referencing FK: a newer token points at the token it replaces)
ALTER TABLE evidence.timestamp_tokens
ADD COLUMN IF NOT EXISTS supersedes_id UUID REFERENCES evidence.timestamp_tokens(id);
-- Index for finding superseding timestamps
CREATE INDEX IF NOT EXISTS idx_timestamp_supersedes ON evidence.timestamp_tokens(supersedes_id);
-- Index for finding timestamps by expiry (for re-timestamp scheduling)
-- Note: We need to track TSA certificate expiry separately - for now use generation_time + typical cert lifetime
CREATE INDEX IF NOT EXISTS idx_timestamp_for_retimestamp
ON evidence.timestamp_tokens(generation_time)
WHERE supersedes_id IS NULL; -- NOTE(review): this selects chain-ROOT tokens (rows that do not supersede another); it does NOT exclude tokens that have themselves been superseded — that requires an anti-join on supersedes_id. Confirm intended semantics.
-- Comments
COMMENT ON COLUMN evidence.timestamp_tokens.supersedes_id IS 'ID of the timestamp this supersedes (for re-timestamping chain)';
-- Rollback script (execute separately if needed):
-- (dropping the column also drops the indexes; the explicit DROP INDEX lines are belt-and-braces)
-- ALTER TABLE evidence.timestamp_tokens DROP COLUMN IF EXISTS supersedes_id;
-- DROP INDEX IF EXISTS evidence.idx_timestamp_supersedes;
-- DROP INDEX IF EXISTS evidence.idx_timestamp_for_retimestamp;

View File

@@ -0,0 +1,108 @@
-- OpsMemory and AdvisoryAI PostgreSQL Schema Migration
-- Version: 20260108
-- Author: StellaOps Agent
-- Sprint: SPRINT_20260107_006_004 (OpsMemory), SPRINT_20260107_006_003 (AdvisoryAI)
-- ============================================================================
-- OpsMemory Schema
-- ============================================================================
CREATE SCHEMA IF NOT EXISTS opsmemory;
-- Decision records table
-- One row per recorded security decision; outcome columns stay NULL until the
-- outcome is later recorded.
CREATE TABLE IF NOT EXISTS opsmemory.decisions (
memory_id TEXT PRIMARY KEY,
tenant_id TEXT NOT NULL,
recorded_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Situation context
cve_id TEXT,
component_purl TEXT,
severity TEXT,
reachability TEXT,
epss_score DECIMAL(5, 4), -- EPSS is a probability in [0, 1]; 4 decimal places
cvss_score DECIMAL(3, 1), -- CVSS range 0.0-10.0
context_tags TEXT[],
similarity_vector DOUBLE PRECISION[], -- embedding for similarity lookups; dimensionality not enforced here — TODO confirm expected length
-- Decision details
action TEXT NOT NULL,
rationale TEXT,
decided_by TEXT NOT NULL,
policy_reference TEXT,
mitigation_type TEXT,
mitigation_details TEXT,
-- Outcome (nullable until recorded)
outcome_status TEXT,
resolution_time INTERVAL,
actual_impact TEXT,
lessons_learned TEXT,
outcome_recorded_by TEXT,
outcome_recorded_at TIMESTAMPTZ
);
-- Indexes for querying
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_tenant ON opsmemory.decisions(tenant_id);
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_cve ON opsmemory.decisions(cve_id);
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_component ON opsmemory.decisions(component_purl);
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_recorded ON opsmemory.decisions(recorded_at);
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_action ON opsmemory.decisions(action);
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_outcome ON opsmemory.decisions(outcome_status);
-- ============================================================================
-- AdvisoryAI Schema
-- ============================================================================
CREATE SCHEMA IF NOT EXISTS advisoryai;
-- Conversations table
CREATE TABLE IF NOT EXISTS advisoryai.conversations (
conversation_id TEXT PRIMARY KEY,
tenant_id TEXT NOT NULL,
user_id TEXT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), -- no trigger maintains this; presumably updated by the application — confirm
context JSONB,
metadata JSONB
);
-- Conversation turns table
-- Turns are deleted with their conversation (ON DELETE CASCADE).
CREATE TABLE IF NOT EXISTS advisoryai.turns (
turn_id TEXT PRIMARY KEY,
conversation_id TEXT NOT NULL REFERENCES advisoryai.conversations(conversation_id) ON DELETE CASCADE,
role TEXT NOT NULL,
content TEXT NOT NULL,
timestamp TIMESTAMPTZ NOT NULL DEFAULT NOW(), -- NOTE(review): column named after the TIMESTAMP type keyword; legal in PostgreSQL but may need quoting in some tools
evidence_links JSONB,
proposed_actions JSONB,
metadata JSONB
);
-- Indexes for querying
CREATE INDEX IF NOT EXISTS idx_advisoryai_conv_tenant ON advisoryai.conversations(tenant_id);
CREATE INDEX IF NOT EXISTS idx_advisoryai_conv_user ON advisoryai.conversations(user_id);
CREATE INDEX IF NOT EXISTS idx_advisoryai_conv_updated ON advisoryai.conversations(updated_at);
CREATE INDEX IF NOT EXISTS idx_advisoryai_turns_conv ON advisoryai.turns(conversation_id);
CREATE INDEX IF NOT EXISTS idx_advisoryai_turns_timestamp ON advisoryai.turns(timestamp);
-- ============================================================================
-- Comments for documentation
-- ============================================================================
COMMENT ON SCHEMA opsmemory IS 'OpsMemory: Decision ledger for security playbook learning';
COMMENT ON SCHEMA advisoryai IS 'AdvisoryAI: Chat conversation storage';
COMMENT ON TABLE opsmemory.decisions IS 'Stores security decisions and their outcomes for playbook suggestions';
COMMENT ON TABLE advisoryai.conversations IS 'Stores AI chat conversations with context';
COMMENT ON TABLE advisoryai.turns IS 'Individual messages in conversations';
-- ============================================================================
-- Grants (adjust as needed for your environment)
-- ============================================================================
-- GRANT USAGE ON SCHEMA opsmemory TO stellaops_app;
-- GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA opsmemory TO stellaops_app;
-- GRANT USAGE ON SCHEMA advisoryai TO stellaops_app;
-- GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA advisoryai TO stellaops_app;

View File

@@ -0,0 +1,220 @@
-- CVE-Symbol Mapping PostgreSQL Schema Migration
-- Version: 20260110
-- Author: StellaOps Agent
-- Sprint: SPRINT_20260109_009_003_BE_cve_symbol_mapping
-- ============================================================================
-- Reachability Schema
-- ============================================================================
CREATE SCHEMA IF NOT EXISTS reachability;
-- ============================================================================
-- CVE-Symbol Mapping Tables
-- ============================================================================
-- Mapping source enumeration type.
-- CREATE TYPE has no IF NOT EXISTS clause in PostgreSQL, so each type is
-- wrapped in a DO block that swallows ONLY duplicate_object. This keeps the
-- migration re-runnable, matching the IF NOT EXISTS style used everywhere
-- else in this file (the bare CREATE TYPE previously failed on re-run).
DO $$
BEGIN
CREATE TYPE reachability.mapping_source AS ENUM (
'patch_analysis',
'osv_advisory',
'nvd_cpe',
'manual_curation',
'fuzzing_corpus',
'exploit_database',
'unknown'
);
EXCEPTION
WHEN duplicate_object THEN NULL; -- type already exists: nothing to do
END $$;
-- Vulnerability type enumeration (for taint analysis)
DO $$
BEGIN
CREATE TYPE reachability.vulnerability_type AS ENUM (
'source',
'sink',
'gadget',
'both_source_and_sink',
'unknown'
);
EXCEPTION
WHEN duplicate_object THEN NULL; -- type already exists: nothing to do
END $$;
-- Main CVE-symbol mapping table
-- One row per (CVE, package, symbol) mapping; no uniqueness is enforced on
-- that triple — duplicates are possible by design or oversight — TODO confirm.
CREATE TABLE IF NOT EXISTS reachability.cve_symbol_mappings (
mapping_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
-- CVE identification
cve_id TEXT NOT NULL,
cve_id_normalized TEXT NOT NULL GENERATED ALWAYS AS (UPPER(cve_id)) STORED,
-- Affected package (PURL format)
purl TEXT NOT NULL,
affected_versions TEXT[], -- Version ranges like [">=1.0.0,<2.0.0"]
fixed_versions TEXT[], -- Versions where fix is applied
-- Vulnerable symbol details
symbol_name TEXT NOT NULL,
canonical_id TEXT, -- Normalized symbol ID from canonicalization service
file_path TEXT,
start_line INTEGER,
end_line INTEGER,
-- Metadata
source reachability.mapping_source NOT NULL DEFAULT 'unknown',
vulnerability_type reachability.vulnerability_type NOT NULL DEFAULT 'unknown',
confidence DECIMAL(3, 2) NOT NULL DEFAULT 0.5 CHECK (confidence >= 0 AND confidence <= 1),
-- Provenance
evidence_uri TEXT, -- stella:// URI to evidence
source_commit_url TEXT,
patch_url TEXT,
-- Timestamps
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), -- maintained by the update_modified_column trigger defined below
verified_at TIMESTAMPTZ,
verified_by TEXT,
-- Tenant support
tenant_id TEXT NOT NULL DEFAULT 'default'
);
-- Vulnerable symbol detail records (for additional symbol metadata)
-- Child rows of cve_symbol_mappings; removed with their parent (CASCADE).
CREATE TABLE IF NOT EXISTS reachability.vulnerable_symbols (
symbol_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
mapping_id UUID NOT NULL REFERENCES reachability.cve_symbol_mappings(mapping_id) ON DELETE CASCADE,
-- Symbol identification
symbol_name TEXT NOT NULL,
canonical_id TEXT,
symbol_type TEXT, -- 'function', 'method', 'class', 'module'
-- Location
file_path TEXT,
start_line INTEGER,
end_line INTEGER,
-- Code context
signature TEXT, -- Function signature
containing_class TEXT,
namespace TEXT,
-- Vulnerability context
vulnerability_type reachability.vulnerability_type NOT NULL DEFAULT 'unknown',
is_entry_point BOOLEAN DEFAULT FALSE,
requires_control_flow BOOLEAN DEFAULT FALSE,
-- Metadata
confidence DECIMAL(3, 2) NOT NULL DEFAULT 0.5, -- NOTE(review): unlike the parent table, no CHECK constrains this to [0, 1] — confirm intentional
notes TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Patch analysis results (cached)
-- commit_url is UNIQUE: one cached analysis per commit URL.
CREATE TABLE IF NOT EXISTS reachability.patch_analysis (
analysis_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
-- Source identification
commit_url TEXT NOT NULL UNIQUE,
repository_url TEXT,
commit_sha TEXT,
-- Analysis results (stored as JSONB for flexibility)
diff_content TEXT,
extracted_symbols JSONB NOT NULL DEFAULT '[]',
language_detected TEXT,
-- Metadata
analyzed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
analyzer_version TEXT,
-- Error tracking
analysis_status TEXT NOT NULL DEFAULT 'pending', -- allowed values not constrained here — TODO confirm against application enum
error_message TEXT
);
-- ============================================================================
-- Indexes
-- ============================================================================
-- CVE lookup indexes
CREATE INDEX IF NOT EXISTS idx_cve_mapping_cve_normalized ON reachability.cve_symbol_mappings(cve_id_normalized);
CREATE INDEX IF NOT EXISTS idx_cve_mapping_purl ON reachability.cve_symbol_mappings(purl);
CREATE INDEX IF NOT EXISTS idx_cve_mapping_symbol ON reachability.cve_symbol_mappings(symbol_name);
CREATE INDEX IF NOT EXISTS idx_cve_mapping_canonical ON reachability.cve_symbol_mappings(canonical_id) WHERE canonical_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_cve_mapping_tenant ON reachability.cve_symbol_mappings(tenant_id);
CREATE INDEX IF NOT EXISTS idx_cve_mapping_source ON reachability.cve_symbol_mappings(source);
CREATE INDEX IF NOT EXISTS idx_cve_mapping_confidence ON reachability.cve_symbol_mappings(confidence);
CREATE INDEX IF NOT EXISTS idx_cve_mapping_created ON reachability.cve_symbol_mappings(created_at);
-- Composite index for common queries
CREATE INDEX IF NOT EXISTS idx_cve_mapping_cve_purl ON reachability.cve_symbol_mappings(cve_id_normalized, purl);
-- Symbol indexes
CREATE INDEX IF NOT EXISTS idx_vuln_symbol_mapping ON reachability.vulnerable_symbols(mapping_id);
CREATE INDEX IF NOT EXISTS idx_vuln_symbol_name ON reachability.vulnerable_symbols(symbol_name);
CREATE INDEX IF NOT EXISTS idx_vuln_symbol_canonical ON reachability.vulnerable_symbols(canonical_id) WHERE canonical_id IS NOT NULL;
-- Patch analysis indexes
CREATE INDEX IF NOT EXISTS idx_patch_analysis_commit ON reachability.patch_analysis(commit_sha);
CREATE INDEX IF NOT EXISTS idx_patch_analysis_repo ON reachability.patch_analysis(repository_url);
-- ============================================================================
-- Full-text search
-- ============================================================================
-- Add tsvector column for symbol search
-- ('simple' config: no stemming/stop words, appropriate for identifiers;
-- stored generated columns require PostgreSQL 12+)
ALTER TABLE reachability.cve_symbol_mappings
ADD COLUMN IF NOT EXISTS symbol_search_vector tsvector
GENERATED ALWAYS AS (to_tsvector('simple', coalesce(symbol_name, '') || ' ' || coalesce(file_path, ''))) STORED;
CREATE INDEX IF NOT EXISTS idx_cve_mapping_fts ON reachability.cve_symbol_mappings USING GIN(symbol_search_vector);
-- ============================================================================
-- Trigger for updated_at
-- ============================================================================
-- Row-level trigger function: stamps updated_at on every UPDATE so writers
-- never have to set it themselves.
CREATE OR REPLACE FUNCTION reachability.update_modified_column()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- CREATE TRIGGER has no IF NOT EXISTS clause, so drop first: the bare
-- CREATE TRIGGER previously made this migration fail on re-run with
-- "trigger already exists". DROP ... IF EXISTS is a no-op on first run.
DROP TRIGGER IF EXISTS update_cve_mapping_modtime ON reachability.cve_symbol_mappings;
CREATE TRIGGER update_cve_mapping_modtime
BEFORE UPDATE ON reachability.cve_symbol_mappings
FOR EACH ROW
EXECUTE FUNCTION reachability.update_modified_column();
-- ============================================================================
-- Comments for documentation
-- ============================================================================
COMMENT ON SCHEMA reachability IS 'Hybrid reachability analysis: CVE-symbol mappings, static/runtime evidence';
COMMENT ON TABLE reachability.cve_symbol_mappings IS 'Maps CVE IDs to vulnerable symbols with confidence scores';
COMMENT ON COLUMN reachability.cve_symbol_mappings.cve_id_normalized IS 'Uppercase normalized CVE ID for case-insensitive lookup';
COMMENT ON COLUMN reachability.cve_symbol_mappings.canonical_id IS 'Symbol canonical ID from canonicalization service';
COMMENT ON COLUMN reachability.cve_symbol_mappings.evidence_uri IS 'stella:// URI pointing to evidence bundle';
COMMENT ON TABLE reachability.vulnerable_symbols IS 'Additional symbol details for a CVE mapping';
COMMENT ON TABLE reachability.patch_analysis IS 'Cached patch analysis results for commit URLs';
-- ============================================================================
-- Initial data / seed (optional well-known CVEs for testing)
-- ============================================================================
-- NOTE: the table has no UNIQUE constraint covering (cve_id, purl, symbol_name),
-- so the previous INSERT ... ON CONFLICT DO NOTHING could never match a conflict
-- target (the UUID primary key never collides) and re-running the migration
-- inserted duplicate seed rows. Guard each insert with WHERE NOT EXISTS instead.
-- Example: Log4Shell (CVE-2021-44228)
INSERT INTO reachability.cve_symbol_mappings (cve_id, purl, symbol_name, file_path, source, confidence, vulnerability_type)
SELECT 'CVE-2021-44228', 'pkg:maven/org.apache.logging.log4j/log4j-core@2.14.1', 'JndiLookup.lookup', 'log4j-core/src/main/java/org/apache/logging/log4j/core/lookup/JndiLookup.java', 'manual_curation', 0.99, 'sink'
WHERE NOT EXISTS (
SELECT 1 FROM reachability.cve_symbol_mappings
WHERE cve_id_normalized = 'CVE-2021-44228'
AND purl = 'pkg:maven/org.apache.logging.log4j/log4j-core@2.14.1'
AND symbol_name = 'JndiLookup.lookup'
);
INSERT INTO reachability.cve_symbol_mappings (cve_id, purl, symbol_name, file_path, source, confidence, vulnerability_type)
SELECT 'CVE-2021-44228', 'pkg:maven/org.apache.logging.log4j/log4j-core@2.14.1', 'JndiManager.lookup', 'log4j-core/src/main/java/org/apache/logging/log4j/core/net/JndiManager.java', 'manual_curation', 0.95, 'sink'
WHERE NOT EXISTS (
SELECT 1 FROM reachability.cve_symbol_mappings
WHERE cve_id_normalized = 'CVE-2021-44228'
AND purl = 'pkg:maven/org.apache.logging.log4j/log4j-core@2.14.1'
AND symbol_name = 'JndiManager.lookup'
);
-- Example: Spring4Shell (CVE-2022-22965)
INSERT INTO reachability.cve_symbol_mappings (cve_id, purl, symbol_name, file_path, source, confidence, vulnerability_type)
SELECT 'CVE-2022-22965', 'pkg:maven/org.springframework/spring-beans@5.3.17', 'CachedIntrospectionResults.getBeanInfo', 'spring-beans/src/main/java/org/springframework/beans/CachedIntrospectionResults.java', 'patch_analysis', 0.90, 'source'
WHERE NOT EXISTS (
SELECT 1 FROM reachability.cve_symbol_mappings
WHERE cve_id_normalized = 'CVE-2022-22965'
AND purl = 'pkg:maven/org.springframework/spring-beans@5.3.17'
AND symbol_name = 'CachedIntrospectionResults.getBeanInfo'
);
-- Example: polyfill.io supply chain (CVE-2024-38526)
INSERT INTO reachability.cve_symbol_mappings (cve_id, purl, symbol_name, source, confidence, vulnerability_type)
SELECT 'CVE-2024-38526', 'pkg:npm/polyfill.io', 'window.polyfill', 'manual_curation', 0.85, 'source'
WHERE NOT EXISTS (
SELECT 1 FROM reachability.cve_symbol_mappings
WHERE cve_id_normalized = 'CVE-2024-38526'
AND purl = 'pkg:npm/polyfill.io'
AND symbol_name = 'window.polyfill'
);

View File

@@ -0,0 +1,38 @@
-- -----------------------------------------------------------------------------
-- V20260117__create_doctor_reports_table.sql
-- Sprint: SPRINT_20260117_025_Doctor_coverage_expansion
-- Task: DOC-EXP-005 - Persistent Report Storage
-- Description: Migration to create doctor_reports table for persistent storage
-- -----------------------------------------------------------------------------
-- Doctor reports table for persistent storage
-- One row per diagnostic run; created in the default search_path schema
-- (no schema qualifier), unlike most sibling migrations — presumably
-- intentional for this service's database — confirm.
CREATE TABLE IF NOT EXISTS doctor_reports (
run_id VARCHAR(64) PRIMARY KEY,
started_at TIMESTAMPTZ NOT NULL,
completed_at TIMESTAMPTZ, -- NULL while a run is still in progress
overall_severity VARCHAR(16) NOT NULL,
passed_count INTEGER NOT NULL DEFAULT 0,
warning_count INTEGER NOT NULL DEFAULT 0,
failed_count INTEGER NOT NULL DEFAULT 0,
skipped_count INTEGER NOT NULL DEFAULT 0,
info_count INTEGER NOT NULL DEFAULT 0,
total_count INTEGER NOT NULL DEFAULT 0,
report_json_compressed BYTEA NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Index for listing reports by date
CREATE INDEX IF NOT EXISTS idx_doctor_reports_started_at
ON doctor_reports (started_at DESC);
-- Index for retention cleanup
CREATE INDEX IF NOT EXISTS idx_doctor_reports_created_at
ON doctor_reports (created_at);
-- Index for filtering by severity
CREATE INDEX IF NOT EXISTS idx_doctor_reports_severity
ON doctor_reports (overall_severity);
-- Comment on table
COMMENT ON TABLE doctor_reports IS 'Stores Doctor diagnostic reports with compression for audit trail';
COMMENT ON COLUMN doctor_reports.report_json_compressed IS 'GZip compressed JSON report data';

View File

@@ -0,0 +1,153 @@
-- Migration: V20260117__vex_rekor_linkage.sql
-- Sprint: SPRINT_20260117_002_EXCITITOR_vex_rekor_linkage
-- Task: VRL-004, VRL-005 - Create Excititor and VexHub database migrations
-- Description: Add Rekor transparency log linkage columns to VEX tables
-- Author: StellaOps
-- Date: 2026-01-17
-- ============================================================================
-- EXCITITOR SCHEMA: vex_observations table
-- ============================================================================
-- Add Rekor linkage columns to vex_observations
-- (ALTER TABLE IF EXISTS: each section is a no-op when its schema/table is
-- absent, so this one file can be applied against any of the three services)
ALTER TABLE IF EXISTS excititor.vex_observations
ADD COLUMN IF NOT EXISTS rekor_uuid TEXT,
ADD COLUMN IF NOT EXISTS rekor_log_index BIGINT,
ADD COLUMN IF NOT EXISTS rekor_integrated_time TIMESTAMPTZ,
ADD COLUMN IF NOT EXISTS rekor_log_url TEXT,
ADD COLUMN IF NOT EXISTS rekor_tree_root TEXT,
ADD COLUMN IF NOT EXISTS rekor_tree_size BIGINT,
ADD COLUMN IF NOT EXISTS rekor_inclusion_proof JSONB,
ADD COLUMN IF NOT EXISTS rekor_entry_body_hash TEXT,
ADD COLUMN IF NOT EXISTS rekor_entry_kind TEXT,
ADD COLUMN IF NOT EXISTS rekor_linked_at TIMESTAMPTZ;
-- Index for Rekor queries by UUID
CREATE INDEX IF NOT EXISTS idx_vex_observations_rekor_uuid
ON excititor.vex_observations(rekor_uuid)
WHERE rekor_uuid IS NOT NULL;
-- Index for Rekor queries by log index (for ordered traversal)
CREATE INDEX IF NOT EXISTS idx_vex_observations_rekor_log_index
ON excititor.vex_observations(rekor_log_index DESC)
WHERE rekor_log_index IS NOT NULL;
-- Index for finding unlinked observations (for retry/backfill)
-- NOTE(review): assumes vex_observations already has a created_at column;
-- also, unlike the ALTER above, CREATE INDEX has no IF EXISTS guard for the
-- table itself — this fails if the excititor schema is absent. Confirm.
CREATE INDEX IF NOT EXISTS idx_vex_observations_pending_rekor
ON excititor.vex_observations(created_at)
WHERE rekor_uuid IS NULL;
-- Comment on columns
COMMENT ON COLUMN excititor.vex_observations.rekor_uuid IS 'Rekor entry UUID (64-char hex)';
COMMENT ON COLUMN excititor.vex_observations.rekor_log_index IS 'Monotonically increasing log position';
COMMENT ON COLUMN excititor.vex_observations.rekor_integrated_time IS 'Time entry was integrated into Rekor log';
COMMENT ON COLUMN excititor.vex_observations.rekor_log_url IS 'Rekor server URL where entry was submitted';
COMMENT ON COLUMN excititor.vex_observations.rekor_tree_root IS 'Merkle tree root hash at submission time (base64)';
COMMENT ON COLUMN excititor.vex_observations.rekor_tree_size IS 'Tree size at submission time';
COMMENT ON COLUMN excititor.vex_observations.rekor_inclusion_proof IS 'RFC 6962 inclusion proof for offline verification';
COMMENT ON COLUMN excititor.vex_observations.rekor_entry_body_hash IS 'SHA-256 hash of entry body';
COMMENT ON COLUMN excititor.vex_observations.rekor_entry_kind IS 'Entry kind (dsse, intoto, hashedrekord)';
COMMENT ON COLUMN excititor.vex_observations.rekor_linked_at IS 'When linkage was recorded locally';
-- ============================================================================
-- EXCITITOR SCHEMA: vex_statement_change_events table
-- ============================================================================
-- Add Rekor linkage to change events
ALTER TABLE IF EXISTS excititor.vex_statement_change_events
ADD COLUMN IF NOT EXISTS rekor_entry_id TEXT,
ADD COLUMN IF NOT EXISTS rekor_log_index BIGINT;
-- Index for Rekor queries on change events
CREATE INDEX IF NOT EXISTS idx_vex_change_events_rekor
ON excititor.vex_statement_change_events(rekor_entry_id)
WHERE rekor_entry_id IS NOT NULL;
COMMENT ON COLUMN excititor.vex_statement_change_events.rekor_entry_id IS 'Rekor entry UUID for change attestation';
COMMENT ON COLUMN excititor.vex_statement_change_events.rekor_log_index IS 'Rekor log index for change attestation';
-- ============================================================================
-- VEXHUB SCHEMA: vex_statements table
-- ============================================================================
-- Add Rekor linkage columns to vex_statements
ALTER TABLE IF EXISTS vexhub.vex_statements
ADD COLUMN IF NOT EXISTS rekor_uuid TEXT,
ADD COLUMN IF NOT EXISTS rekor_log_index BIGINT,
ADD COLUMN IF NOT EXISTS rekor_integrated_time TIMESTAMPTZ,
ADD COLUMN IF NOT EXISTS rekor_inclusion_proof JSONB;
-- Index for Rekor queries
CREATE INDEX IF NOT EXISTS idx_vexhub_statements_rekor_uuid
ON vexhub.vex_statements(rekor_uuid)
WHERE rekor_uuid IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_vexhub_statements_rekor_log_index
ON vexhub.vex_statements(rekor_log_index DESC)
WHERE rekor_log_index IS NOT NULL;
COMMENT ON COLUMN vexhub.vex_statements.rekor_uuid IS 'Rekor entry UUID for statement attestation';
COMMENT ON COLUMN vexhub.vex_statements.rekor_log_index IS 'Rekor log index for statement attestation';
COMMENT ON COLUMN vexhub.vex_statements.rekor_integrated_time IS 'Time statement was integrated into Rekor log';
COMMENT ON COLUMN vexhub.vex_statements.rekor_inclusion_proof IS 'RFC 6962 inclusion proof for offline verification';
-- ============================================================================
-- ATTESTOR SCHEMA: rekor_entries verification tracking
-- Sprint: SPRINT_20260117_001_ATTESTOR_periodic_rekor_verification (PRV-003)
-- ============================================================================
-- Add verification tracking columns to existing rekor_entries table
ALTER TABLE IF EXISTS attestor.rekor_entries
ADD COLUMN IF NOT EXISTS last_verified_at TIMESTAMPTZ,
ADD COLUMN IF NOT EXISTS verification_count INT NOT NULL DEFAULT 0,
ADD COLUMN IF NOT EXISTS last_verification_result TEXT;
-- Index for verification queries (find entries needing verification)
-- (IS DISTINCT FROM keeps NULL results — never-verified entries — in the index)
CREATE INDEX IF NOT EXISTS idx_rekor_entries_verification
ON attestor.rekor_entries(created_at DESC, last_verified_at NULLS FIRST)
WHERE last_verification_result IS DISTINCT FROM 'invalid';
-- Index for finding never-verified entries
CREATE INDEX IF NOT EXISTS idx_rekor_entries_unverified
ON attestor.rekor_entries(created_at DESC)
WHERE last_verified_at IS NULL;
COMMENT ON COLUMN attestor.rekor_entries.last_verified_at IS 'Timestamp of last successful verification';
COMMENT ON COLUMN attestor.rekor_entries.verification_count IS 'Number of times entry has been verified';
COMMENT ON COLUMN attestor.rekor_entries.last_verification_result IS 'Result of last verification: valid, invalid, skipped';
-- ============================================================================
-- ATTESTOR SCHEMA: rekor_root_checkpoints table
-- Stores tree root checkpoints for consistency verification
-- ============================================================================
CREATE TABLE IF NOT EXISTS attestor.rekor_root_checkpoints (
id BIGSERIAL PRIMARY KEY,
tree_root TEXT NOT NULL,
tree_size BIGINT NOT NULL,
log_id TEXT NOT NULL,
log_url TEXT,
checkpoint_envelope TEXT,
captured_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
verified_at TIMESTAMPTZ, -- NULL until consistency verification runs
is_consistent BOOLEAN, -- NULL = not yet verified
inconsistency_reason TEXT,
CONSTRAINT uq_root_checkpoint UNIQUE (log_id, tree_root, tree_size)
);
-- Index for finding latest checkpoints per log
CREATE INDEX IF NOT EXISTS idx_rekor_root_checkpoints_latest
ON attestor.rekor_root_checkpoints(log_id, captured_at DESC);
-- Index for consistency verification
CREATE INDEX IF NOT EXISTS idx_rekor_root_checkpoints_unverified
ON attestor.rekor_root_checkpoints(captured_at DESC)
WHERE verified_at IS NULL;
COMMENT ON TABLE attestor.rekor_root_checkpoints IS 'Stores Rekor tree root checkpoints for consistency verification';
COMMENT ON COLUMN attestor.rekor_root_checkpoints.tree_root IS 'Merkle tree root hash (base64)';
COMMENT ON COLUMN attestor.rekor_root_checkpoints.tree_size IS 'Tree size at checkpoint';
COMMENT ON COLUMN attestor.rekor_root_checkpoints.log_id IS 'Rekor log identifier (hash of public key)';
COMMENT ON COLUMN attestor.rekor_root_checkpoints.checkpoint_envelope IS 'Signed checkpoint in note format';
COMMENT ON COLUMN attestor.rekor_root_checkpoints.is_consistent IS 'Whether checkpoint was consistent with previous';
COMMENT ON COLUMN attestor.rekor_root_checkpoints.inconsistency_reason IS 'Reason for inconsistency if detected';

View File

@@ -0,0 +1,139 @@
-- -----------------------------------------------------------------------------
-- V20260119_001__Add_UnderReview_Escalated_Rejected_States.sql
-- Sprint: SPRINT_20260118_018_Unknowns_queue_enhancement
-- Task: UQ-005 - Migration for existing entries (map to new states)
-- Description: Adds new state machine states and required columns
-- -----------------------------------------------------------------------------
-- Columns backing the UnderReview and Escalated states. One additive,
-- idempotent ALTER per column; end state is identical to a single combined
-- ALTER, but each addition can be read (and re-run) independently.
ALTER TABLE grey_queue_entries
    ADD COLUMN IF NOT EXISTS assignee VARCHAR(255) NULL;
ALTER TABLE grey_queue_entries
    ADD COLUMN IF NOT EXISTS assigned_at TIMESTAMPTZ NULL;
ALTER TABLE grey_queue_entries
    ADD COLUMN IF NOT EXISTS escalated_at TIMESTAMPTZ NULL;
ALTER TABLE grey_queue_entries
    ADD COLUMN IF NOT EXISTS escalation_reason TEXT NULL;
-- Add new enum values to grey_queue_status
-- ALTER TYPE ... ADD VALUE IF NOT EXISTS (PostgreSQL 9.6+) is idempotent on
-- its own, so the previous pg_enum existence probe inside a DO block is not
-- needed. More importantly, the old EXCEPTION WHEN others THEN NULL handler
-- swallowed EVERY failure (type missing, insufficient privileges, and the
-- pre-PG12 "unsafe use of ALTER TYPE in a transaction block" error), letting
-- the migration report success with no values added. Plain statements surface
-- real errors.
-- NOTE: on PostgreSQL < 12 ADD VALUE cannot run inside an explicit
-- transaction block; the DO-block form had the same limitation (DO always
-- runs transactionally), so this is strictly no worse there.
ALTER TYPE grey_queue_status ADD VALUE IF NOT EXISTS 'under_review' AFTER 'retrying';
ALTER TYPE grey_queue_status ADD VALUE IF NOT EXISTS 'escalated' AFTER 'under_review';
ALTER TYPE grey_queue_status ADD VALUE IF NOT EXISTS 'rejected' AFTER 'resolved';
-- Add indexes for new query patterns
-- Partial index: lookups by assignee; excludes the (presumably common) unassigned rows.
CREATE INDEX IF NOT EXISTS idx_grey_queue_assignee
ON grey_queue_entries(assignee)
WHERE assignee IS NOT NULL;
-- Serves "my in-review / escalated work" queries filtered by status + assignee.
-- NOTE(review): the predicate uses enum labels added earlier in this file; on
-- PostgreSQL < 12 new enum values cannot be used in the same transaction that
-- added them — confirm the migration runner does not wrap this file in one
-- transaction block.
CREATE INDEX IF NOT EXISTS idx_grey_queue_status_assignee
ON grey_queue_entries(status, assignee)
WHERE status IN ('under_review', 'escalated');
-- Newest-escalation-first listings; partial to skip never-escalated rows.
CREATE INDEX IF NOT EXISTS idx_grey_queue_escalated_at
ON grey_queue_entries(escalated_at DESC)
WHERE escalated_at IS NOT NULL;
-- Add audit trigger for state transitions
-- Append-only audit log of status changes; rows are written by the
-- record_grey_queue_transition() trigger function defined later in this file.
CREATE TABLE IF NOT EXISTS grey_queue_state_transitions (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
-- NOTE(review): no ON DELETE action on this FK — deleting a grey_queue_entries
-- row will be rejected while audit rows reference it; confirm that is intended
-- (audit tables often want CASCADE or no FK at all).
entry_id UUID NOT NULL REFERENCES grey_queue_entries(id),
tenant_id VARCHAR(128) NOT NULL,
from_state VARCHAR(32) NOT NULL,        -- previous status (enum label rendered as text by the trigger)
to_state VARCHAR(32) NOT NULL,          -- new status (enum label rendered as text by the trigger)
transitioned_by VARCHAR(255),           -- assignee when set, else the DB role (see trigger function)
reason TEXT,                            -- not populated by the trigger; free-form, for manual writers
transitioned_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
metadata JSONB                          -- not populated by the trigger; reserved for extra context
);
-- Full history for a single entry.
CREATE INDEX IF NOT EXISTS idx_grey_queue_transitions_entry
ON grey_queue_state_transitions(entry_id);
-- Recent-activity listings per tenant.
CREATE INDEX IF NOT EXISTS idx_grey_queue_transitions_tenant_time
ON grey_queue_state_transitions(tenant_id, transitioned_at DESC);
-- Function to record state transitions
-- Trigger function: appends one audit row to grey_queue_state_transitions for
-- every UPDATE that actually changes an entry's status; same-status updates
-- are a no-op. IS DISTINCT FROM treats NULLs as comparable values.
CREATE OR REPLACE FUNCTION record_grey_queue_transition()
RETURNS TRIGGER AS $$
BEGIN
    -- Nothing to audit when the status did not change.
    IF NEW.status IS NOT DISTINCT FROM OLD.status THEN
        RETURN NEW;
    END IF;

    INSERT INTO grey_queue_state_transitions (
        entry_id, tenant_id, from_state, to_state,
        transitioned_by, transitioned_at
    )
    VALUES (
        NEW.id,
        NEW.tenant_id,
        OLD.status::text,
        NEW.status::text,
        -- Attribute the change to the assignee when one is set; otherwise
        -- fall back to the database role performing the update.
        COALESCE(NEW.assignee, current_user),
        NOW()
    );

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Create trigger if not exists
-- PostgreSQL has no CREATE TRIGGER IF NOT EXISTS, so drop-then-create keeps
-- this migration re-runnable.
DROP TRIGGER IF EXISTS trg_grey_queue_state_transition ON grey_queue_entries;
CREATE TRIGGER trg_grey_queue_state_transition
AFTER UPDATE ON grey_queue_entries
FOR EACH ROW
EXECUTE FUNCTION record_grey_queue_transition();
-- Update summary view to include new states
-- Per-tenant entry counts broken out by every known status, plus a grand total.
-- NOTE(review): the FILTER predicates reference the enum labels added earlier
-- in this file; on PostgreSQL < 12 new enum values cannot be used in the same
-- transaction that added them — confirm the migration runner's transaction
-- behavior.
CREATE OR REPLACE VIEW grey_queue_summary AS
SELECT
tenant_id,
COUNT(*) FILTER (WHERE status = 'pending') as pending_count,
COUNT(*) FILTER (WHERE status = 'processing') as processing_count,
COUNT(*) FILTER (WHERE status = 'retrying') as retrying_count,
COUNT(*) FILTER (WHERE status = 'under_review') as under_review_count,
COUNT(*) FILTER (WHERE status = 'escalated') as escalated_count,
COUNT(*) FILTER (WHERE status = 'resolved') as resolved_count,
COUNT(*) FILTER (WHERE status = 'rejected') as rejected_count,
COUNT(*) FILTER (WHERE status = 'failed') as failed_count,
COUNT(*) FILTER (WHERE status = 'expired') as expired_count,
COUNT(*) FILTER (WHERE status = 'dismissed') as dismissed_count,
COUNT(*) as total_count
FROM grey_queue_entries
GROUP BY tenant_id;
-- Comment for documentation
-- Catalog-level (COMMENT ON) documentation for the columns added above.
COMMENT ON COLUMN grey_queue_entries.assignee IS
'Assignee for entries in UnderReview state (Sprint UQ-005)';
COMMENT ON COLUMN grey_queue_entries.assigned_at IS
'When the entry was assigned for review (Sprint UQ-005)';
COMMENT ON COLUMN grey_queue_entries.escalated_at IS
'When the entry was escalated to security team (Sprint UQ-005)';
COMMENT ON COLUMN grey_queue_entries.escalation_reason IS
'Reason for escalation (Sprint UQ-005)';

View File

@@ -0,0 +1,130 @@
-- Migration: Add diff_id column to scanner layers table
-- Sprint: SPRINT_025_Scanner_layer_manifest_infrastructure
-- Task: TASK-025-03
-- Add diff_id column to layers table (sha256:64hex = 71 chars)
ALTER TABLE scanner.layers
ADD COLUMN IF NOT EXISTS diff_id VARCHAR(71);
-- Add timestamp for when diffID was computed
-- NOTE(review): naive TIMESTAMP while most tables in this file use TIMESTAMPTZ;
-- the _utc suffix suggests UTC-by-convention — confirm writers always store
-- UTC, or prefer TIMESTAMPTZ.
ALTER TABLE scanner.layers
ADD COLUMN IF NOT EXISTS diff_id_computed_at_utc TIMESTAMP;
-- Create index on diff_id for fast lookups
-- Partial: skips rows whose diffID has not been computed yet.
CREATE INDEX IF NOT EXISTS idx_layers_diff_id
ON scanner.layers (diff_id)
WHERE diff_id IS NOT NULL;
-- Create image_layers junction table if it doesn't exist
-- This tracks which layers belong to which images (one row per image/layer
-- pair; layer_index preserves the layer's position within the image).
CREATE TABLE IF NOT EXISTS scanner.image_layers (
    image_reference VARCHAR(512) NOT NULL,
    layer_digest VARCHAR(71) NOT NULL,   -- registry layer digest (sha256:<64 hex> = 71 chars)
    layer_index INT NOT NULL,            -- 0-based position of the layer in the image; presumably — confirm with writer
    -- Fix: the previous DEFAULT NOW() stored server-LOCAL time, because
    -- casting now() (timestamptz) to a naive TIMESTAMP uses the session
    -- TimeZone. A column named *_utc must hold UTC regardless of server
    -- configuration, hence the explicit AT TIME ZONE 'utc' conversion.
    created_at_utc TIMESTAMP NOT NULL DEFAULT (NOW() AT TIME ZONE 'utc'),
    PRIMARY KEY (image_reference, layer_digest)
);
-- Reverse lookup: find all images that contain a given layer digest.
CREATE INDEX IF NOT EXISTS idx_image_layers_digest
ON scanner.image_layers (layer_digest);
-- DiffID cache table for resolved diffIDs
-- Maps a registry (compressed) layer digest to its uncompressed diffID; both
-- are immutable, so entries never need invalidation (see table COMMENT below).
CREATE TABLE IF NOT EXISTS scanner.scanner_diffid_cache (
layer_digest VARCHAR(71) PRIMARY KEY,
diff_id VARCHAR(71) NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Base image fingerprint tables for layer reuse detection
-- Parent row per known base image; per-layer diffIDs live in the child table.
CREATE TABLE IF NOT EXISTS scanner.scanner_base_image_fingerprints (
image_reference VARCHAR(512) PRIMARY KEY,
layer_count INT NOT NULL,               -- number of layers in the fingerprint
registered_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
detection_count BIGINT NOT NULL DEFAULT 0  -- how often this base was detected; incremented by app code, presumably
);
-- Ordered layer diffIDs of each base image; CASCADE keeps children in sync
-- when a fingerprint is removed.
CREATE TABLE IF NOT EXISTS scanner.scanner_base_image_layers (
image_reference VARCHAR(512) NOT NULL REFERENCES scanner.scanner_base_image_fingerprints(image_reference) ON DELETE CASCADE,
layer_index INT NOT NULL,
diff_id VARCHAR(71) NOT NULL,
PRIMARY KEY (image_reference, layer_index)
);
-- Match candidate layers against known base-image layers by diffID.
CREATE INDEX IF NOT EXISTS idx_base_image_layers_diff_id
ON scanner.scanner_base_image_layers (diff_id);
-- Manifest snapshots table for IOciManifestSnapshotService
-- Point-in-time capture of a resolved OCI manifest; one row per manifest
-- digest (see table COMMENT further below: used for delta scanning).
CREATE TABLE IF NOT EXISTS scanner.manifest_snapshots (
    -- Server-side default added for consistency with the other UUID-keyed
    -- tables in this migration set; callers that supply an id still work.
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    image_reference VARCHAR(512) NOT NULL,
    registry VARCHAR(256) NOT NULL,
    repository VARCHAR(256) NOT NULL,
    tag VARCHAR(128),                    -- nullable: digest-only references have no tag, presumably — confirm
    manifest_digest VARCHAR(71) NOT NULL,  -- sha256:<64 hex> = 71 chars
    config_digest VARCHAR(71) NOT NULL,
    media_type VARCHAR(128) NOT NULL,
    layers JSONB NOT NULL,               -- layer descriptors; exact shape defined by the snapshot service — confirm
    diff_ids JSONB NOT NULL,             -- uncompressed layer digests; ordering assumed to match layers — confirm
    platform JSONB,                      -- nullable: os/arch selector, presumably — confirm
    total_size BIGINT NOT NULL,
    captured_at TIMESTAMPTZ NOT NULL,
    snapshot_version VARCHAR(32),
    -- Named so constraint-violation and migration messages are greppable.
    CONSTRAINT uq_manifest_snapshots_digest UNIQUE (manifest_digest)
);
-- All snapshots for one image reference.
CREATE INDEX IF NOT EXISTS idx_manifest_snapshots_image_ref
ON scanner.manifest_snapshots (image_reference);
-- Browse snapshots by (registry, repository).
CREATE INDEX IF NOT EXISTS idx_manifest_snapshots_repository
ON scanner.manifest_snapshots (registry, repository);
-- Newest-first listings.
CREATE INDEX IF NOT EXISTS idx_manifest_snapshots_captured_at
ON scanner.manifest_snapshots (captured_at DESC);
-- Layer scan history for reuse detection (TASK-025-04)
-- One row per scanned diffID; presence of a row means the layer was already
-- scanned (see table COMMENT below: used for deduplication).
CREATE TABLE IF NOT EXISTS scanner.layer_scans (
diff_id VARCHAR(71) PRIMARY KEY,
scanned_at TIMESTAMPTZ NOT NULL,
finding_count INT,                      -- nullable; NULL semantics defined by the writer — confirm (no-findings vs unknown)
scanned_by VARCHAR(128) NOT NULL,
scanner_version VARCHAR(64)
);
-- Layer reuse counts for statistics
CREATE TABLE IF NOT EXISTS scanner.layer_reuse_counts (
diff_id VARCHAR(71) PRIMARY KEY,
reuse_count INT NOT NULL DEFAULT 1,     -- starts at 1 on first sighting
first_seen_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Most-reused layers first (statistics/reporting).
CREATE INDEX IF NOT EXISTS idx_layer_reuse_counts_count
ON scanner.layer_reuse_counts (reuse_count DESC);
COMMENT ON COLUMN scanner.layers.diff_id IS 'Uncompressed layer content hash (sha256:hex64). Immutable once computed.';
COMMENT ON TABLE scanner.scanner_diffid_cache IS 'Cache of layer digest to diffID mappings. Layer digests are immutable so cache entries never expire.';
COMMENT ON TABLE scanner.scanner_base_image_fingerprints IS 'Known base image fingerprints for layer reuse detection.';
COMMENT ON TABLE scanner.manifest_snapshots IS 'Point-in-time captures of OCI image manifests for delta scanning.';
COMMENT ON TABLE scanner.layer_scans IS 'History of layer scans for deduplication. One entry per diffID.';
COMMENT ON TABLE scanner.layer_reuse_counts IS 'Counts of how many times each layer appears across images.';
-- Layer SBOM CAS for per-layer SBOM storage (TASK-026-02)
-- Content-addressable store: one SBOM blob per (diffID, format) pair.
CREATE TABLE IF NOT EXISTS scanner.layer_sbom_cas (
diff_id VARCHAR(71) NOT NULL,
format VARCHAR(20) NOT NULL,            -- SBOM format label; value set defined by app code — confirm
content BYTEA NOT NULL,                 -- gzip-compressed SBOM (per COMMENT below)
size_bytes BIGINT NOT NULL,             -- NOTE(review): compressed or uncompressed size? confirm with writer
compressed BOOLEAN NOT NULL DEFAULT TRUE,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Updated on reads by app code, presumably — drives the eviction index below.
last_accessed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
PRIMARY KEY (diff_id, format)
);
-- Coldest-entries-first scans for TTL eviction (per COMMENT below).
CREATE INDEX IF NOT EXISTS idx_layer_sbom_cas_last_accessed
ON scanner.layer_sbom_cas (last_accessed_at);
-- Per-format lookups/statistics.
CREATE INDEX IF NOT EXISTS idx_layer_sbom_cas_format
ON scanner.layer_sbom_cas (format);
COMMENT ON TABLE scanner.layer_sbom_cas IS 'Content-addressable storage for per-layer SBOMs. Keyed by diffID (immutable).';
COMMENT ON COLUMN scanner.layer_sbom_cas.content IS 'Compressed (gzip) SBOM content.';
COMMENT ON COLUMN scanner.layer_sbom_cas.last_accessed_at IS 'For TTL-based eviction of cold entries.';