refactor: DB schema fixes + container renames + compose include + audit sprint
- FindingsLedger: change schema from public to findings (V3-01) - Add 9 migration module plugins: RiskEngine, Replay, ExportCenter, Integrations, Signer, IssuerDirectory, Workflow, PacksRegistry, OpsMemory (V4-01 to V4-09) - Remove 16 redundant inline CREATE SCHEMA patterns (V4-10) - Rename export→export-web, excititor→excititor-web for consistency - Compose stella-ops.yml: thin wrapper using include: directive - Fix dead /api/v1/jobengine/* gateway routes → release-orchestrator/packsregistry - Scheduler plugin architecture: ISchedulerJobPlugin + ScanJobPlugin + DoctorJobPlugin - Create unified audit sink sprint plan - VulnExplorer integration tests + gap analysis Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -43,9 +43,7 @@ internal sealed class PostgresKnowledgeSearchStore : IKnowledgeSearchStore, IKno
|
||||
|
||||
await AcquireSchemaLockAsync(connection, transaction, cancellationToken).ConfigureAwait(false);
|
||||
|
||||
const string createSchemaSql = "CREATE SCHEMA IF NOT EXISTS advisoryai;";
|
||||
await ExecuteNonQueryAsync(connection, transaction, createSchemaSql, cancellationToken).ConfigureAwait(false);
|
||||
|
||||
// Schema creation handled by central migration runner (AdvisoryAiMigrationModulePlugin)
|
||||
const string createHistorySql = """
|
||||
CREATE TABLE IF NOT EXISTS advisoryai.__migration_history
|
||||
(
|
||||
|
||||
@@ -0,0 +1,56 @@
|
||||
-- OpsMemory Schema Migration 001: Initial Schema
-- Migrated from devops/database/migrations/V20260108__opsmemory_advisoryai_schema.sql
-- Creates the opsmemory schema for decision ledger and playbook learning.
-- NOTE(review): schema creation is intentional here — OpsMemory is one of the
-- central migration modules (V4-09), not an inline CREATE SCHEMA pattern.

CREATE SCHEMA IF NOT EXISTS opsmemory;

-- Decision records table: one row per recorded security decision plus its
-- (optionally back-filled) outcome, keyed by a caller-supplied memory_id.
CREATE TABLE IF NOT EXISTS opsmemory.decisions (
    memory_id TEXT PRIMARY KEY,
    tenant_id TEXT NOT NULL,
    recorded_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Situation context
    cve_id TEXT,
    component TEXT,
    component_name TEXT,
    component_version TEXT,
    severity TEXT,
    reachability TEXT NOT NULL DEFAULT 'Unknown',
    epss_score DOUBLE PRECISION,
    cvss_score DOUBLE PRECISION,
    is_kev BOOLEAN NOT NULL DEFAULT FALSE,
    context_tags TEXT[],
    additional_context JSONB,
    similarity_vector REAL[],

    -- Decision details
    action TEXT NOT NULL,
    rationale TEXT,
    decided_by TEXT NOT NULL,
    decided_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    policy_reference TEXT,
    vex_statement_id TEXT,
    mitigation JSONB,

    -- Outcome (nullable until recorded)
    outcome_status TEXT,
    outcome_resolution_time DOUBLE PRECISION,
    outcome_actual_impact TEXT,
    outcome_lessons_learned TEXT,
    outcome_recorded_by TEXT,
    outcome_recorded_at TIMESTAMPTZ,
    outcome_would_repeat BOOLEAN,
    outcome_alternative_actions TEXT
);

-- Indexes for querying
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_tenant ON opsmemory.decisions(tenant_id);
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_cve ON opsmemory.decisions(cve_id);
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_component ON opsmemory.decisions(component);
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_recorded ON opsmemory.decisions(recorded_at);
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_action ON opsmemory.decisions(action);
CREATE INDEX IF NOT EXISTS idx_opsmemory_decisions_outcome ON opsmemory.decisions(outcome_status);

COMMENT ON SCHEMA opsmemory IS 'OpsMemory: Decision ledger for security playbook learning';
COMMENT ON TABLE opsmemory.decisions IS 'Stores security decisions and their outcomes for playbook suggestions';
|
||||
@@ -8,6 +8,10 @@
|
||||
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
|
||||
<Description>OpsMemory - Decision ledger for security playbook learning</Description>
|
||||
</PropertyGroup>
|
||||
<ItemGroup>
|
||||
<EmbeddedResource Include="Migrations\**\*.sql" LogicalName="%(RecursiveDir)%(Filename)%(Extension)" />
|
||||
</ItemGroup>
|
||||
|
||||
<ItemGroup>
|
||||
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" />
|
||||
<PackageReference Include="Microsoft.Extensions.Options" />
|
||||
|
||||
@@ -253,9 +253,8 @@ public sealed class PostgresRekorCheckpointStore : IRekorCheckpointStore, IAsync
|
||||
/// </summary>
|
||||
public async Task InitializeSchemaAsync(CancellationToken cancellationToken = default)
|
||||
{
|
||||
// Schema creation handled by central migration runner (AttestorMigrationModulePlugin)
|
||||
const string sql = @"
|
||||
CREATE SCHEMA IF NOT EXISTS attestor;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS attestor.rekor_checkpoints (
|
||||
checkpoint_id UUID PRIMARY KEY,
|
||||
origin TEXT NOT NULL,
|
||||
|
||||
@@ -94,8 +94,8 @@ public sealed class BinaryIndexMigrationRunner
|
||||
|
||||
private static async Task EnsureHistoryTableAsync(NpgsqlConnection connection, CancellationToken ct)
|
||||
{
|
||||
// Schema creation handled by central migration runner (BinaryIndexMigrationModulePlugin)
|
||||
const string sql = """
|
||||
CREATE SCHEMA IF NOT EXISTS binaries;
|
||||
CREATE TABLE IF NOT EXISTS binaries.schema_migrations (
|
||||
name TEXT PRIMARY KEY,
|
||||
applied_at TIMESTAMPTZ NOT NULL DEFAULT now()
|
||||
|
||||
@@ -9,6 +9,6 @@ public class MigrationCommandHandlersTests
|
||||
[Fact]
public void Registry_Has_All_Modules()
{
    // Defect in the rendered diff: both the pre-change (28) and post-change (36)
    // assertions survived side by side, which can never both hold. The commit adds
    // 9 migration module plugins (V4-01..V4-09), so the registry must now expose 36.
    Assert.Equal(36, MigrationModuleRegistry.Modules.Count);
}
|
||||
}
|
||||
|
||||
@@ -39,7 +39,17 @@ public class MigrationModuleRegistryTests
|
||||
Assert.Contains(modules, m => m.Name == "SbomLineage" && m.SchemaName == "sbom");
|
||||
Assert.Contains(modules, m => m.Name == "ReachGraph" && m.SchemaName == "reachgraph");
|
||||
Assert.Contains(modules, m => m.Name == "Verdict" && m.SchemaName == "stellaops");
|
||||
Assert.True(MigrationModuleRegistry.ModuleNames.Count() >= 20);
|
||||
Assert.Contains(modules, m => m.Name == "FindingsLedger" && m.SchemaName == "findings");
|
||||
Assert.Contains(modules, m => m.Name == "Signer" && m.SchemaName == "signer");
|
||||
Assert.Contains(modules, m => m.Name == "IssuerDirectory" && m.SchemaName == "issuer");
|
||||
Assert.Contains(modules, m => m.Name == "Workflow" && m.SchemaName == "workflow");
|
||||
Assert.Contains(modules, m => m.Name == "PacksRegistry" && m.SchemaName == "packs");
|
||||
Assert.Contains(modules, m => m.Name == "OpsMemory" && m.SchemaName == "opsmemory");
|
||||
Assert.Contains(modules, m => m.Name == "ExportCenter" && m.SchemaName == "export_center");
|
||||
Assert.Contains(modules, m => m.Name == "Integrations" && m.SchemaName == "integrations");
|
||||
Assert.Contains(modules, m => m.Name == "Replay" && m.SchemaName == "replay");
|
||||
Assert.Contains(modules, m => m.Name == "RiskEngine" && m.SchemaName == "riskengine");
|
||||
Assert.True(MigrationModuleRegistry.ModuleNames.Count() >= 36);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
@@ -78,6 +88,6 @@ public class MigrationModuleRegistryTests
|
||||
public void GetModules_All_Returns_All()
{
    // Defect in the rendered diff: both the old (>= 20) and new (>= 36) lower-bound
    // assertions were retained. Keep only the post-change bound: passing null selects
    // every registered module, and the registry now contains at least 36.
    var result = MigrationModuleRegistry.GetModules(null);
    Assert.True(result.Count() >= 36);
}
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ public partial class FindingsLedgerDbContext : DbContext
|
||||
: base(options)
|
||||
{
|
||||
_schemaName = string.IsNullOrWhiteSpace(schemaName)
|
||||
? "public"
|
||||
? "findings"
|
||||
: schemaName.Trim();
|
||||
}
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ namespace StellaOps.Findings.Ledger.Infrastructure.Postgres;
|
||||
|
||||
internal static class FindingsLedgerDbContextFactory
|
||||
{
|
||||
public const string DefaultSchemaName = "public";
|
||||
public const string DefaultSchemaName = "findings";
|
||||
|
||||
public static FindingsLedgerDbContext Create(NpgsqlConnection connection, int commandTimeoutSeconds, string schemaName)
|
||||
{
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
-- 001_initial.sql
|
||||
-- Findings Ledger bootstrap schema (LEDGER-29-001)
|
||||
|
||||
CREATE SCHEMA IF NOT EXISTS findings;
|
||||
SET search_path TO findings, public;
|
||||
|
||||
BEGIN;
|
||||
|
||||
CREATE TYPE ledger_event_type AS ENUM (
|
||||
CREATE TYPE findings.ledger_event_type AS ENUM (
|
||||
'finding.created',
|
||||
'finding.status_changed',
|
||||
'finding.severity_changed',
|
||||
@@ -16,7 +19,7 @@ CREATE TYPE ledger_event_type AS ENUM (
|
||||
'finding.closed'
|
||||
);
|
||||
|
||||
CREATE TYPE ledger_action_type AS ENUM (
|
||||
CREATE TYPE findings.ledger_action_type AS ENUM (
|
||||
'assign',
|
||||
'comment',
|
||||
'attach_evidence',
|
||||
@@ -28,12 +31,12 @@ CREATE TYPE ledger_action_type AS ENUM (
|
||||
'close'
|
||||
);
|
||||
|
||||
CREATE TABLE ledger_events (
|
||||
CREATE TABLE findings.ledger_events (
|
||||
tenant_id TEXT NOT NULL,
|
||||
chain_id UUID NOT NULL,
|
||||
sequence_no BIGINT NOT NULL,
|
||||
event_id UUID NOT NULL,
|
||||
event_type ledger_event_type NOT NULL,
|
||||
event_type findings.ledger_event_type NOT NULL,
|
||||
policy_version TEXT NOT NULL,
|
||||
finding_id TEXT NOT NULL,
|
||||
artifact_id TEXT NOT NULL,
|
||||
@@ -55,13 +58,13 @@ CREATE TABLE ledger_events (
|
||||
CONSTRAINT ck_ledger_events_actor_type CHECK (actor_type IN ('system', 'operator', 'integration'))
|
||||
) PARTITION BY LIST (tenant_id);
|
||||
|
||||
CREATE TABLE ledger_events_default PARTITION OF ledger_events DEFAULT;
|
||||
CREATE TABLE findings.ledger_events_default PARTITION OF findings.ledger_events DEFAULT;
|
||||
|
||||
CREATE INDEX ix_ledger_events_finding ON ledger_events (tenant_id, finding_id, policy_version);
|
||||
CREATE INDEX ix_ledger_events_type ON ledger_events (tenant_id, event_type, recorded_at DESC);
|
||||
CREATE INDEX ix_ledger_events_recorded_at ON ledger_events (tenant_id, recorded_at DESC);
|
||||
CREATE INDEX ix_ledger_events_finding ON findings.ledger_events (tenant_id, finding_id, policy_version);
|
||||
CREATE INDEX ix_ledger_events_type ON findings.ledger_events (tenant_id, event_type, recorded_at DESC);
|
||||
CREATE INDEX ix_ledger_events_recorded_at ON findings.ledger_events (tenant_id, recorded_at DESC);
|
||||
|
||||
CREATE TABLE ledger_merkle_roots (
|
||||
CREATE TABLE findings.ledger_merkle_roots (
|
||||
tenant_id TEXT NOT NULL,
|
||||
anchor_id UUID NOT NULL,
|
||||
window_start TIMESTAMPTZ NOT NULL,
|
||||
@@ -77,11 +80,11 @@ CREATE TABLE ledger_merkle_roots (
|
||||
CONSTRAINT ck_ledger_merkle_root_hash_hex CHECK (root_hash ~ '^[0-9a-f]{64}$')
|
||||
) PARTITION BY LIST (tenant_id);
|
||||
|
||||
CREATE TABLE ledger_merkle_roots_default PARTITION OF ledger_merkle_roots DEFAULT;
|
||||
CREATE TABLE findings.ledger_merkle_roots_default PARTITION OF findings.ledger_merkle_roots DEFAULT;
|
||||
|
||||
CREATE INDEX ix_merkle_sequences ON ledger_merkle_roots (tenant_id, sequence_end DESC);
|
||||
CREATE INDEX ix_merkle_sequences ON findings.ledger_merkle_roots (tenant_id, sequence_end DESC);
|
||||
|
||||
CREATE TABLE findings_projection (
|
||||
CREATE TABLE findings.findings_projection (
|
||||
tenant_id TEXT NOT NULL,
|
||||
finding_id TEXT NOT NULL,
|
||||
policy_version TEXT NOT NULL,
|
||||
@@ -96,12 +99,12 @@ CREATE TABLE findings_projection (
|
||||
CONSTRAINT ck_findings_projection_cycle_hash_hex CHECK (cycle_hash ~ '^[0-9a-f]{64}$')
|
||||
) PARTITION BY LIST (tenant_id);
|
||||
|
||||
CREATE TABLE findings_projection_default PARTITION OF findings_projection DEFAULT;
|
||||
CREATE TABLE findings.findings_projection_default PARTITION OF findings.findings_projection DEFAULT;
|
||||
|
||||
CREATE INDEX ix_projection_status ON findings_projection (tenant_id, status, severity DESC);
|
||||
CREATE INDEX ix_projection_labels_gin ON findings_projection USING GIN (labels JSONB_PATH_OPS);
|
||||
CREATE INDEX ix_projection_status ON findings.findings_projection (tenant_id, status, severity DESC);
|
||||
CREATE INDEX ix_projection_labels_gin ON findings.findings_projection USING GIN (labels JSONB_PATH_OPS);
|
||||
|
||||
CREATE TABLE finding_history (
|
||||
CREATE TABLE findings.finding_history (
|
||||
tenant_id TEXT NOT NULL,
|
||||
finding_id TEXT NOT NULL,
|
||||
policy_version TEXT NOT NULL,
|
||||
@@ -114,25 +117,25 @@ CREATE TABLE finding_history (
|
||||
CONSTRAINT pk_finding_history PRIMARY KEY (tenant_id, finding_id, event_id)
|
||||
) PARTITION BY LIST (tenant_id);
|
||||
|
||||
CREATE TABLE finding_history_default PARTITION OF finding_history DEFAULT;
|
||||
CREATE TABLE findings.finding_history_default PARTITION OF findings.finding_history DEFAULT;
|
||||
|
||||
CREATE INDEX ix_finding_history_timeline ON finding_history (tenant_id, finding_id, occurred_at DESC);
|
||||
CREATE INDEX ix_finding_history_timeline ON findings.finding_history (tenant_id, finding_id, occurred_at DESC);
|
||||
|
||||
CREATE TABLE triage_actions (
|
||||
CREATE TABLE findings.triage_actions (
|
||||
tenant_id TEXT NOT NULL,
|
||||
action_id UUID NOT NULL,
|
||||
event_id UUID NOT NULL,
|
||||
finding_id TEXT NOT NULL,
|
||||
action_type ledger_action_type NOT NULL,
|
||||
action_type findings.ledger_action_type NOT NULL,
|
||||
payload JSONB NOT NULL DEFAULT '{}'::JSONB,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
created_by TEXT NOT NULL,
|
||||
CONSTRAINT pk_triage_actions PRIMARY KEY (tenant_id, action_id)
|
||||
) PARTITION BY LIST (tenant_id);
|
||||
|
||||
CREATE TABLE triage_actions_default PARTITION OF triage_actions DEFAULT;
|
||||
CREATE TABLE findings.triage_actions_default PARTITION OF findings.triage_actions DEFAULT;
|
||||
|
||||
CREATE INDEX ix_triage_actions_event ON triage_actions (tenant_id, event_id);
|
||||
CREATE INDEX ix_triage_actions_created_at ON triage_actions (tenant_id, created_at DESC);
|
||||
CREATE INDEX ix_triage_actions_event ON findings.triage_actions (tenant_id, event_id);
|
||||
CREATE INDEX ix_triage_actions_created_at ON findings.triage_actions (tenant_id, created_at DESC);
|
||||
|
||||
COMMIT;
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
-- LEDGER-OBS-53-001: persist evidence bundle references alongside ledger entries.
|
||||
|
||||
ALTER TABLE ledger_events
|
||||
SET search_path TO findings, public;
|
||||
|
||||
ALTER TABLE findings.ledger_events
|
||||
ADD COLUMN evidence_bundle_ref text NULL;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_ledger_events_finding_evidence_ref
|
||||
ON ledger_events (tenant_id, finding_id, recorded_at DESC)
|
||||
ON findings.ledger_events (tenant_id, finding_id, recorded_at DESC)
|
||||
WHERE evidence_bundle_ref IS NOT NULL;
|
||||
|
||||
@@ -1,16 +1,18 @@
|
||||
-- 002_projection_offsets.sql
|
||||
-- Projection worker checkpoint storage (LEDGER-29-003)
|
||||
|
||||
SET search_path TO findings, public;
|
||||
|
||||
BEGIN;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ledger_projection_offsets (
|
||||
CREATE TABLE IF NOT EXISTS findings.ledger_projection_offsets (
|
||||
worker_id TEXT NOT NULL PRIMARY KEY,
|
||||
last_recorded_at TIMESTAMPTZ NOT NULL,
|
||||
last_event_id UUID NOT NULL,
|
||||
updated_at TIMESTAMPTZ NOT NULL
|
||||
);
|
||||
|
||||
INSERT INTO ledger_projection_offsets (worker_id, last_recorded_at, last_event_id, updated_at)
|
||||
INSERT INTO findings.ledger_projection_offsets (worker_id, last_recorded_at, last_event_id, updated_at)
|
||||
VALUES (
|
||||
'default',
|
||||
'1970-01-01T00:00:00Z',
|
||||
|
||||
@@ -1,15 +1,17 @@
|
||||
-- 003_policy_rationale.sql
|
||||
-- Add policy rationale column to findings_projection (LEDGER-29-004)
|
||||
|
||||
SET search_path TO findings, public;
|
||||
|
||||
BEGIN;
|
||||
|
||||
ALTER TABLE findings_projection
|
||||
ALTER TABLE findings.findings_projection
|
||||
ADD COLUMN IF NOT EXISTS policy_rationale JSONB NOT NULL DEFAULT '[]'::JSONB;
|
||||
|
||||
ALTER TABLE findings_projection
|
||||
ALTER TABLE findings.findings_projection
|
||||
ALTER COLUMN policy_rationale SET DEFAULT '[]'::JSONB;
|
||||
|
||||
UPDATE findings_projection
|
||||
UPDATE findings.findings_projection
|
||||
SET policy_rationale = '[]'::JSONB
|
||||
WHERE policy_rationale IS NULL;
|
||||
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
-- 004_ledger_attestations.sql
|
||||
-- LEDGER-OBS-54-001: storage for attestation verification exports
|
||||
|
||||
SET search_path TO findings, public;
|
||||
|
||||
BEGIN;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ledger_attestations (
|
||||
CREATE TABLE IF NOT EXISTS findings.ledger_attestations (
|
||||
tenant_id text NOT NULL,
|
||||
attestation_id uuid NOT NULL,
|
||||
artifact_id text NOT NULL,
|
||||
@@ -21,20 +23,20 @@ CREATE TABLE IF NOT EXISTS ledger_attestations (
|
||||
projection_version text NOT NULL
|
||||
);
|
||||
|
||||
ALTER TABLE ledger_attestations
|
||||
ALTER TABLE findings.ledger_attestations
|
||||
ADD CONSTRAINT pk_ledger_attestations PRIMARY KEY (tenant_id, attestation_id);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_ledger_attestations_recorded
|
||||
ON ledger_attestations (tenant_id, recorded_at, attestation_id);
|
||||
ON findings.ledger_attestations (tenant_id, recorded_at, attestation_id);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_ledger_attestations_artifact
|
||||
ON ledger_attestations (tenant_id, artifact_id, recorded_at DESC);
|
||||
ON findings.ledger_attestations (tenant_id, artifact_id, recorded_at DESC);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_ledger_attestations_finding
|
||||
ON ledger_attestations (tenant_id, finding_id, recorded_at DESC)
|
||||
ON findings.ledger_attestations (tenant_id, finding_id, recorded_at DESC)
|
||||
WHERE finding_id IS NOT NULL;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_ledger_attestations_status
|
||||
ON ledger_attestations (tenant_id, verification_status, recorded_at DESC);
|
||||
ON findings.ledger_attestations (tenant_id, verification_status, recorded_at DESC);
|
||||
|
||||
COMMIT;
|
||||
|
||||
@@ -1,15 +1,17 @@
|
||||
-- 004_risk_fields.sql
|
||||
-- Add risk scoring fields to findings_projection (LEDGER-RISK-66-001/002)
|
||||
|
||||
SET search_path TO findings, public;
|
||||
|
||||
BEGIN;
|
||||
|
||||
ALTER TABLE findings_projection
|
||||
ALTER TABLE findings.findings_projection
|
||||
ADD COLUMN IF NOT EXISTS risk_score NUMERIC(6,3),
|
||||
ADD COLUMN IF NOT EXISTS risk_severity TEXT,
|
||||
ADD COLUMN IF NOT EXISTS risk_profile_version TEXT,
|
||||
ADD COLUMN IF NOT EXISTS risk_explanation_id UUID,
|
||||
ADD COLUMN IF NOT EXISTS risk_event_sequence BIGINT;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_projection_risk ON findings_projection (tenant_id, risk_severity, risk_score DESC);
|
||||
CREATE INDEX IF NOT EXISTS ix_projection_risk ON findings.findings_projection (tenant_id, risk_severity, risk_score DESC);
|
||||
|
||||
COMMIT;
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
-- 005_risk_fields.sql
|
||||
-- LEDGER-RISK-66-001: add risk scoring fields to findings projection
|
||||
|
||||
SET search_path TO findings, public;
|
||||
|
||||
BEGIN;
|
||||
|
||||
ALTER TABLE findings_projection
|
||||
ALTER TABLE findings.findings_projection
|
||||
ADD COLUMN IF NOT EXISTS risk_score numeric(6,2) NULL,
|
||||
ADD COLUMN IF NOT EXISTS risk_severity text NULL,
|
||||
ADD COLUMN IF NOT EXISTS risk_profile_version text NULL,
|
||||
@@ -11,6 +13,6 @@ ALTER TABLE findings_projection
|
||||
ADD COLUMN IF NOT EXISTS risk_event_sequence bigint NULL;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_findings_projection_risk
|
||||
ON findings_projection (tenant_id, risk_severity, risk_score DESC, recorded_at DESC);
|
||||
ON findings.findings_projection (tenant_id, risk_severity, risk_score DESC, recorded_at DESC);
|
||||
|
||||
COMMIT;
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
-- 006_orchestrator_airgap.sql
|
||||
-- Add orchestrator export provenance and air-gap import provenance tables (LEDGER-34-101, LEDGER-AIRGAP-56-001)
|
||||
|
||||
SET search_path TO findings, public;
|
||||
|
||||
BEGIN;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS orchestrator_exports
|
||||
CREATE TABLE IF NOT EXISTS findings.orchestrator_exports
|
||||
(
|
||||
tenant_id TEXT NOT NULL,
|
||||
run_id UUID NOT NULL,
|
||||
@@ -21,12 +23,12 @@ CREATE TABLE IF NOT EXISTS orchestrator_exports
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS ix_orchestrator_exports_artifact_run
|
||||
ON orchestrator_exports (tenant_id, artifact_hash, run_id);
|
||||
ON findings.orchestrator_exports (tenant_id, artifact_hash, run_id);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_orchestrator_exports_artifact
|
||||
ON orchestrator_exports (tenant_id, artifact_hash);
|
||||
ON findings.orchestrator_exports (tenant_id, artifact_hash);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS airgap_imports
|
||||
CREATE TABLE IF NOT EXISTS findings.airgap_imports
|
||||
(
|
||||
tenant_id TEXT NOT NULL,
|
||||
bundle_id TEXT NOT NULL,
|
||||
@@ -43,9 +45,9 @@ CREATE TABLE IF NOT EXISTS airgap_imports
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_airgap_imports_bundle
|
||||
ON airgap_imports (tenant_id, bundle_id);
|
||||
ON findings.airgap_imports (tenant_id, bundle_id);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_airgap_imports_event
|
||||
ON airgap_imports (tenant_id, ledger_event_id);
|
||||
ON findings.airgap_imports (tenant_id, ledger_event_id);
|
||||
|
||||
COMMIT;
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
-- Enable Row-Level Security for Findings Ledger tenant isolation (LEDGER-TEN-48-001-DEV)
|
||||
-- Based on Evidence Locker pattern per CONTRACT-FINDINGS-LEDGER-RLS-011
|
||||
|
||||
SET search_path TO findings, public;
|
||||
|
||||
BEGIN;
|
||||
|
||||
-- ============================================
|
||||
@@ -34,12 +36,12 @@ COMMENT ON FUNCTION findings_ledger_app.require_current_tenant() IS
|
||||
-- 2. Enable RLS on ledger_events
|
||||
-- ============================================
|
||||
|
||||
ALTER TABLE ledger_events ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE ledger_events FORCE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.ledger_events ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.ledger_events FORCE ROW LEVEL SECURITY;
|
||||
|
||||
DROP POLICY IF EXISTS ledger_events_tenant_isolation ON ledger_events;
|
||||
DROP POLICY IF EXISTS ledger_events_tenant_isolation ON findings.ledger_events;
|
||||
CREATE POLICY ledger_events_tenant_isolation
|
||||
ON ledger_events
|
||||
ON findings.ledger_events
|
||||
FOR ALL
|
||||
USING (tenant_id = findings_ledger_app.require_current_tenant())
|
||||
WITH CHECK (tenant_id = findings_ledger_app.require_current_tenant());
|
||||
@@ -48,12 +50,12 @@ CREATE POLICY ledger_events_tenant_isolation
|
||||
-- 3. Enable RLS on ledger_merkle_roots
|
||||
-- ============================================
|
||||
|
||||
ALTER TABLE ledger_merkle_roots ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE ledger_merkle_roots FORCE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.ledger_merkle_roots ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.ledger_merkle_roots FORCE ROW LEVEL SECURITY;
|
||||
|
||||
DROP POLICY IF EXISTS ledger_merkle_roots_tenant_isolation ON ledger_merkle_roots;
|
||||
DROP POLICY IF EXISTS ledger_merkle_roots_tenant_isolation ON findings.ledger_merkle_roots;
|
||||
CREATE POLICY ledger_merkle_roots_tenant_isolation
|
||||
ON ledger_merkle_roots
|
||||
ON findings.ledger_merkle_roots
|
||||
FOR ALL
|
||||
USING (tenant_id = findings_ledger_app.require_current_tenant())
|
||||
WITH CHECK (tenant_id = findings_ledger_app.require_current_tenant());
|
||||
@@ -62,12 +64,12 @@ CREATE POLICY ledger_merkle_roots_tenant_isolation
|
||||
-- 4. Enable RLS on findings_projection
|
||||
-- ============================================
|
||||
|
||||
ALTER TABLE findings_projection ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings_projection FORCE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.findings_projection ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.findings_projection FORCE ROW LEVEL SECURITY;
|
||||
|
||||
DROP POLICY IF EXISTS findings_projection_tenant_isolation ON findings_projection;
|
||||
DROP POLICY IF EXISTS findings_projection_tenant_isolation ON findings.findings_projection;
|
||||
CREATE POLICY findings_projection_tenant_isolation
|
||||
ON findings_projection
|
||||
ON findings.findings_projection
|
||||
FOR ALL
|
||||
USING (tenant_id = findings_ledger_app.require_current_tenant())
|
||||
WITH CHECK (tenant_id = findings_ledger_app.require_current_tenant());
|
||||
@@ -76,12 +78,12 @@ CREATE POLICY findings_projection_tenant_isolation
|
||||
-- 5. Enable RLS on finding_history
|
||||
-- ============================================
|
||||
|
||||
ALTER TABLE finding_history ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE finding_history FORCE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.finding_history ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.finding_history FORCE ROW LEVEL SECURITY;
|
||||
|
||||
DROP POLICY IF EXISTS finding_history_tenant_isolation ON finding_history;
|
||||
DROP POLICY IF EXISTS finding_history_tenant_isolation ON findings.finding_history;
|
||||
CREATE POLICY finding_history_tenant_isolation
|
||||
ON finding_history
|
||||
ON findings.finding_history
|
||||
FOR ALL
|
||||
USING (tenant_id = findings_ledger_app.require_current_tenant())
|
||||
WITH CHECK (tenant_id = findings_ledger_app.require_current_tenant());
|
||||
@@ -90,12 +92,12 @@ CREATE POLICY finding_history_tenant_isolation
|
||||
-- 6. Enable RLS on triage_actions
|
||||
-- ============================================
|
||||
|
||||
ALTER TABLE triage_actions ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE triage_actions FORCE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.triage_actions ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.triage_actions FORCE ROW LEVEL SECURITY;
|
||||
|
||||
DROP POLICY IF EXISTS triage_actions_tenant_isolation ON triage_actions;
|
||||
DROP POLICY IF EXISTS triage_actions_tenant_isolation ON findings.triage_actions;
|
||||
CREATE POLICY triage_actions_tenant_isolation
|
||||
ON triage_actions
|
||||
ON findings.triage_actions
|
||||
FOR ALL
|
||||
USING (tenant_id = findings_ledger_app.require_current_tenant())
|
||||
WITH CHECK (tenant_id = findings_ledger_app.require_current_tenant());
|
||||
@@ -104,12 +106,12 @@ CREATE POLICY triage_actions_tenant_isolation
|
||||
-- 7. Enable RLS on ledger_attestations
|
||||
-- ============================================
|
||||
|
||||
ALTER TABLE ledger_attestations ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE ledger_attestations FORCE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.ledger_attestations ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.ledger_attestations FORCE ROW LEVEL SECURITY;
|
||||
|
||||
DROP POLICY IF EXISTS ledger_attestations_tenant_isolation ON ledger_attestations;
|
||||
DROP POLICY IF EXISTS ledger_attestations_tenant_isolation ON findings.ledger_attestations;
|
||||
CREATE POLICY ledger_attestations_tenant_isolation
|
||||
ON ledger_attestations
|
||||
ON findings.ledger_attestations
|
||||
FOR ALL
|
||||
USING (tenant_id = findings_ledger_app.require_current_tenant())
|
||||
WITH CHECK (tenant_id = findings_ledger_app.require_current_tenant());
|
||||
@@ -118,12 +120,12 @@ CREATE POLICY ledger_attestations_tenant_isolation
|
||||
-- 8. Enable RLS on orchestrator_exports
|
||||
-- ============================================
|
||||
|
||||
ALTER TABLE orchestrator_exports ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE orchestrator_exports FORCE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.orchestrator_exports ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.orchestrator_exports FORCE ROW LEVEL SECURITY;
|
||||
|
||||
DROP POLICY IF EXISTS orchestrator_exports_tenant_isolation ON orchestrator_exports;
|
||||
DROP POLICY IF EXISTS orchestrator_exports_tenant_isolation ON findings.orchestrator_exports;
|
||||
CREATE POLICY orchestrator_exports_tenant_isolation
|
||||
ON orchestrator_exports
|
||||
ON findings.orchestrator_exports
|
||||
FOR ALL
|
||||
USING (tenant_id = findings_ledger_app.require_current_tenant())
|
||||
WITH CHECK (tenant_id = findings_ledger_app.require_current_tenant());
|
||||
@@ -132,12 +134,12 @@ CREATE POLICY orchestrator_exports_tenant_isolation
|
||||
-- 9. Enable RLS on airgap_imports
|
||||
-- ============================================
|
||||
|
||||
ALTER TABLE airgap_imports ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE airgap_imports FORCE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.airgap_imports ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.airgap_imports FORCE ROW LEVEL SECURITY;
|
||||
|
||||
DROP POLICY IF EXISTS airgap_imports_tenant_isolation ON airgap_imports;
|
||||
DROP POLICY IF EXISTS airgap_imports_tenant_isolation ON findings.airgap_imports;
|
||||
CREATE POLICY airgap_imports_tenant_isolation
|
||||
ON airgap_imports
|
||||
ON findings.airgap_imports
|
||||
FOR ALL
|
||||
USING (tenant_id = findings_ledger_app.require_current_tenant())
|
||||
WITH CHECK (tenant_id = findings_ledger_app.require_current_tenant());
|
||||
|
||||
@@ -1,33 +1,35 @@
|
||||
-- 007_enable_rls_rollback.sql
|
||||
-- Rollback: Disable Row-Level Security for Findings Ledger (LEDGER-TEN-48-001-DEV)
|
||||
|
||||
SET search_path TO findings, public;
|
||||
|
||||
BEGIN;
|
||||
|
||||
-- ============================================
|
||||
-- 1. Disable RLS on all tables
|
||||
-- ============================================
|
||||
|
||||
ALTER TABLE ledger_events DISABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE ledger_merkle_roots DISABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings_projection DISABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE finding_history DISABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE triage_actions DISABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE ledger_attestations DISABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE orchestrator_exports DISABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE airgap_imports DISABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.ledger_events DISABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.ledger_merkle_roots DISABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.findings_projection DISABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.finding_history DISABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.triage_actions DISABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.ledger_attestations DISABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.orchestrator_exports DISABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.airgap_imports DISABLE ROW LEVEL SECURITY;
|
||||
|
||||
-- ============================================
|
||||
-- 2. Drop all tenant isolation policies
|
||||
-- ============================================
|
||||
|
||||
DROP POLICY IF EXISTS ledger_events_tenant_isolation ON ledger_events;
|
||||
DROP POLICY IF EXISTS ledger_merkle_roots_tenant_isolation ON ledger_merkle_roots;
|
||||
DROP POLICY IF EXISTS findings_projection_tenant_isolation ON findings_projection;
|
||||
DROP POLICY IF EXISTS finding_history_tenant_isolation ON finding_history;
|
||||
DROP POLICY IF EXISTS triage_actions_tenant_isolation ON triage_actions;
|
||||
DROP POLICY IF EXISTS ledger_attestations_tenant_isolation ON ledger_attestations;
|
||||
DROP POLICY IF EXISTS orchestrator_exports_tenant_isolation ON orchestrator_exports;
|
||||
DROP POLICY IF EXISTS airgap_imports_tenant_isolation ON airgap_imports;
|
||||
DROP POLICY IF EXISTS ledger_events_tenant_isolation ON findings.ledger_events;
|
||||
DROP POLICY IF EXISTS ledger_merkle_roots_tenant_isolation ON findings.ledger_merkle_roots;
|
||||
DROP POLICY IF EXISTS findings_projection_tenant_isolation ON findings.findings_projection;
|
||||
DROP POLICY IF EXISTS finding_history_tenant_isolation ON findings.finding_history;
|
||||
DROP POLICY IF EXISTS triage_actions_tenant_isolation ON findings.triage_actions;
|
||||
DROP POLICY IF EXISTS ledger_attestations_tenant_isolation ON findings.ledger_attestations;
|
||||
DROP POLICY IF EXISTS orchestrator_exports_tenant_isolation ON findings.orchestrator_exports;
|
||||
DROP POLICY IF EXISTS airgap_imports_tenant_isolation ON findings.airgap_imports;
|
||||
|
||||
-- ============================================
|
||||
-- 3. Drop tenant validation function and schema
|
||||
|
||||
@@ -1,13 +1,15 @@
|
||||
-- 008_attestation_pointers.sql
|
||||
-- LEDGER-ATTEST-73-001: Persist pointers from findings to verification reports and attestation envelopes
|
||||
|
||||
SET search_path TO findings, public;
|
||||
|
||||
BEGIN;
|
||||
|
||||
-- ============================================
|
||||
-- 1. Create attestation pointers table
|
||||
-- ============================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ledger_attestation_pointers (
|
||||
CREATE TABLE IF NOT EXISTS findings.ledger_attestation_pointers (
|
||||
tenant_id text NOT NULL,
|
||||
pointer_id uuid NOT NULL,
|
||||
finding_id text NOT NULL,
|
||||
@@ -21,7 +23,7 @@ CREATE TABLE IF NOT EXISTS ledger_attestation_pointers (
|
||||
ledger_event_id uuid NULL
|
||||
);
|
||||
|
||||
ALTER TABLE ledger_attestation_pointers
|
||||
ALTER TABLE findings.ledger_attestation_pointers
|
||||
ADD CONSTRAINT pk_ledger_attestation_pointers PRIMARY KEY (tenant_id, pointer_id);
|
||||
|
||||
-- ============================================
|
||||
@@ -30,41 +32,41 @@ ALTER TABLE ledger_attestation_pointers
|
||||
|
||||
-- Index for finding lookups (most common query pattern)
|
||||
CREATE INDEX IF NOT EXISTS ix_ledger_attestation_pointers_finding
|
||||
ON ledger_attestation_pointers (tenant_id, finding_id, created_at DESC);
|
||||
ON findings.ledger_attestation_pointers (tenant_id, finding_id, created_at DESC);
|
||||
|
||||
-- Index for digest-based lookups (idempotency checks)
|
||||
CREATE INDEX IF NOT EXISTS ix_ledger_attestation_pointers_digest
|
||||
ON ledger_attestation_pointers (tenant_id, (attestation_ref->>'digest'));
|
||||
ON findings.ledger_attestation_pointers (tenant_id, (attestation_ref->>'digest'));
|
||||
|
||||
-- Index for attestation type filtering
|
||||
CREATE INDEX IF NOT EXISTS ix_ledger_attestation_pointers_type
|
||||
ON ledger_attestation_pointers (tenant_id, attestation_type, created_at DESC);
|
||||
ON findings.ledger_attestation_pointers (tenant_id, attestation_type, created_at DESC);
|
||||
|
||||
-- Index for verification status filtering (verified/unverified/failed)
|
||||
CREATE INDEX IF NOT EXISTS ix_ledger_attestation_pointers_verified
|
||||
ON ledger_attestation_pointers (tenant_id, ((verification_result->>'verified')::boolean))
|
||||
ON findings.ledger_attestation_pointers (tenant_id, ((verification_result->>'verified')::boolean))
|
||||
WHERE verification_result IS NOT NULL;
|
||||
|
||||
-- Index for signer identity searches
|
||||
CREATE INDEX IF NOT EXISTS ix_ledger_attestation_pointers_signer
|
||||
ON ledger_attestation_pointers (tenant_id, (attestation_ref->'signer_info'->>'subject'))
|
||||
ON findings.ledger_attestation_pointers (tenant_id, (attestation_ref->'signer_info'->>'subject'))
|
||||
WHERE attestation_ref->'signer_info' IS NOT NULL;
|
||||
|
||||
-- Index for predicate type searches
|
||||
CREATE INDEX IF NOT EXISTS ix_ledger_attestation_pointers_predicate
|
||||
ON ledger_attestation_pointers (tenant_id, (attestation_ref->>'predicate_type'))
|
||||
ON findings.ledger_attestation_pointers (tenant_id, (attestation_ref->>'predicate_type'))
|
||||
WHERE attestation_ref->>'predicate_type' IS NOT NULL;
|
||||
|
||||
-- ============================================
|
||||
-- 3. Enable Row-Level Security
|
||||
-- ============================================
|
||||
|
||||
ALTER TABLE ledger_attestation_pointers ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE ledger_attestation_pointers FORCE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.ledger_attestation_pointers ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.ledger_attestation_pointers FORCE ROW LEVEL SECURITY;
|
||||
|
||||
DROP POLICY IF EXISTS ledger_attestation_pointers_tenant_isolation ON ledger_attestation_pointers;
|
||||
DROP POLICY IF EXISTS ledger_attestation_pointers_tenant_isolation ON findings.ledger_attestation_pointers;
|
||||
CREATE POLICY ledger_attestation_pointers_tenant_isolation
|
||||
ON ledger_attestation_pointers
|
||||
ON findings.ledger_attestation_pointers
|
||||
FOR ALL
|
||||
USING (tenant_id = findings_ledger_app.require_current_tenant())
|
||||
WITH CHECK (tenant_id = findings_ledger_app.require_current_tenant());
|
||||
@@ -73,28 +75,28 @@ CREATE POLICY ledger_attestation_pointers_tenant_isolation
|
||||
-- 4. Add comments for documentation
|
||||
-- ============================================
|
||||
|
||||
COMMENT ON TABLE ledger_attestation_pointers IS
|
||||
COMMENT ON TABLE findings.ledger_attestation_pointers IS
|
||||
'Links findings to verification reports and attestation envelopes for explainability (LEDGER-ATTEST-73-001)';
|
||||
|
||||
COMMENT ON COLUMN ledger_attestation_pointers.pointer_id IS
|
||||
COMMENT ON COLUMN findings.ledger_attestation_pointers.pointer_id IS
|
||||
'Unique identifier for this attestation pointer';
|
||||
|
||||
COMMENT ON COLUMN ledger_attestation_pointers.finding_id IS
|
||||
COMMENT ON COLUMN findings.ledger_attestation_pointers.finding_id IS
|
||||
'Finding that this pointer references';
|
||||
|
||||
COMMENT ON COLUMN ledger_attestation_pointers.attestation_type IS
|
||||
COMMENT ON COLUMN findings.ledger_attestation_pointers.attestation_type IS
|
||||
'Type of attestation: verification_report, dsse_envelope, slsa_provenance, vex_attestation, sbom_attestation, scan_attestation, policy_attestation, approval_attestation';
|
||||
|
||||
COMMENT ON COLUMN ledger_attestation_pointers.relationship IS
|
||||
COMMENT ON COLUMN findings.ledger_attestation_pointers.relationship IS
|
||||
'Semantic relationship: verified_by, attested_by, signed_by, approved_by, derived_from';
|
||||
|
||||
COMMENT ON COLUMN ledger_attestation_pointers.attestation_ref IS
|
||||
COMMENT ON COLUMN findings.ledger_attestation_pointers.attestation_ref IS
|
||||
'JSON object containing digest, storage_uri, payload_type, predicate_type, subject_digests, signer_info, rekor_entry';
|
||||
|
||||
COMMENT ON COLUMN ledger_attestation_pointers.verification_result IS
|
||||
COMMENT ON COLUMN findings.ledger_attestation_pointers.verification_result IS
|
||||
'JSON object containing verified (bool), verified_at, verifier, verifier_version, policy_ref, checks, warnings, errors';
|
||||
|
||||
COMMENT ON COLUMN ledger_attestation_pointers.ledger_event_id IS
|
||||
COMMENT ON COLUMN findings.ledger_attestation_pointers.ledger_event_id IS
|
||||
'Reference to the ledger event that recorded this pointer creation';
|
||||
|
||||
COMMIT;
|
||||
|
||||
@@ -2,8 +2,10 @@
|
||||
-- Description: Creates ledger_snapshots table for time-travel/snapshot functionality
|
||||
-- Date: 2025-12-07
|
||||
|
||||
SET search_path TO findings, public;
|
||||
|
||||
-- Create ledger_snapshots table
|
||||
CREATE TABLE IF NOT EXISTS ledger_snapshots (
|
||||
CREATE TABLE IF NOT EXISTS findings.ledger_snapshots (
|
||||
tenant_id TEXT NOT NULL,
|
||||
snapshot_id UUID NOT NULL,
|
||||
label TEXT,
|
||||
@@ -30,24 +32,24 @@ CREATE TABLE IF NOT EXISTS ledger_snapshots (
|
||||
|
||||
-- Index for listing snapshots by status
|
||||
CREATE INDEX IF NOT EXISTS idx_ledger_snapshots_status
|
||||
ON ledger_snapshots (tenant_id, status, created_at DESC);
|
||||
ON findings.ledger_snapshots (tenant_id, status, created_at DESC);
|
||||
|
||||
-- Index for finding expired snapshots
|
||||
CREATE INDEX IF NOT EXISTS idx_ledger_snapshots_expires
|
||||
ON ledger_snapshots (expires_at)
|
||||
ON findings.ledger_snapshots (expires_at)
|
||||
WHERE expires_at IS NOT NULL AND status = 'Available';
|
||||
|
||||
-- Index for sequence lookups
|
||||
CREATE INDEX IF NOT EXISTS idx_ledger_snapshots_sequence
|
||||
ON ledger_snapshots (tenant_id, sequence_number);
|
||||
ON findings.ledger_snapshots (tenant_id, sequence_number);
|
||||
|
||||
-- Index for label search
|
||||
CREATE INDEX IF NOT EXISTS idx_ledger_snapshots_label
|
||||
ON ledger_snapshots (tenant_id, label)
|
||||
ON findings.ledger_snapshots (tenant_id, label)
|
||||
WHERE label IS NOT NULL;
|
||||
|
||||
-- Enable RLS
|
||||
ALTER TABLE ledger_snapshots ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE findings.ledger_snapshots ENABLE ROW LEVEL SECURITY;
|
||||
|
||||
-- RLS policy for tenant isolation
|
||||
DO $$
|
||||
@@ -57,15 +59,15 @@ BEGIN
|
||||
WHERE tablename = 'ledger_snapshots'
|
||||
AND policyname = 'ledger_snapshots_tenant_isolation'
|
||||
) THEN
|
||||
CREATE POLICY ledger_snapshots_tenant_isolation ON ledger_snapshots
|
||||
CREATE POLICY ledger_snapshots_tenant_isolation ON findings.ledger_snapshots
|
||||
USING (tenant_id = current_setting('app.tenant_id', true))
|
||||
WITH CHECK (tenant_id = current_setting('app.tenant_id', true));
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- Add comment
|
||||
COMMENT ON TABLE ledger_snapshots IS 'Point-in-time snapshots of ledger state for time-travel queries';
|
||||
COMMENT ON COLUMN ledger_snapshots.sequence_number IS 'Ledger sequence number at snapshot time';
|
||||
COMMENT ON COLUMN ledger_snapshots.snapshot_timestamp IS 'Timestamp of ledger state captured';
|
||||
COMMENT ON COLUMN ledger_snapshots.merkle_root IS 'Merkle root hash of all events up to sequence_number';
|
||||
COMMENT ON COLUMN ledger_snapshots.dsse_digest IS 'DSSE envelope digest if signed';
|
||||
COMMENT ON TABLE findings.ledger_snapshots IS 'Point-in-time snapshots of ledger state for time-travel queries';
|
||||
COMMENT ON COLUMN findings.ledger_snapshots.sequence_number IS 'Ledger sequence number at snapshot time';
|
||||
COMMENT ON COLUMN findings.ledger_snapshots.snapshot_timestamp IS 'Timestamp of ledger state captured';
|
||||
COMMENT ON COLUMN findings.ledger_snapshots.merkle_root IS 'Merkle root hash of all events up to sequence_number';
|
||||
COMMENT ON COLUMN findings.ledger_snapshots.dsse_digest IS 'DSSE envelope digest if signed';
|
||||
|
||||
@@ -0,0 +1,18 @@
|
||||
-- 001_initial_schema.sql
|
||||
-- RiskEngine: schema and risk_score_results table.
|
||||
|
||||
CREATE SCHEMA IF NOT EXISTS riskengine;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS riskengine.risk_score_results (
|
||||
job_id UUID PRIMARY KEY,
|
||||
provider TEXT NOT NULL,
|
||||
subject TEXT NOT NULL,
|
||||
score DOUBLE PRECISION NOT NULL,
|
||||
success BOOLEAN NOT NULL,
|
||||
error TEXT NULL,
|
||||
signals JSONB NOT NULL,
|
||||
completed_at TIMESTAMPTZ NOT NULL
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_risk_score_results_completed_at
|
||||
ON riskengine.risk_score_results (completed_at DESC);
|
||||
@@ -14,6 +14,11 @@
|
||||
<ItemGroup>
|
||||
<PackageReference Include="Npgsql" />
|
||||
</ItemGroup>
|
||||
|
||||
<ItemGroup>
|
||||
<!-- Embed SQL migrations as resources -->
|
||||
<EmbeddedResource Include="Migrations\**\*.sql" />
|
||||
</ItemGroup>
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -14,8 +14,6 @@ public sealed class PostgresRiskScoreResultStore : IRiskScoreResultStore, IAsync
|
||||
private static readonly JsonSerializerOptions JsonOptions = new(JsonSerializerDefaults.Web);
|
||||
|
||||
private readonly NpgsqlDataSource _dataSource;
|
||||
private readonly object _initGate = new();
|
||||
private bool _tableInitialized;
|
||||
|
||||
public PostgresRiskScoreResultStore(string connectionString)
|
||||
{
|
||||
@@ -32,7 +30,6 @@ public sealed class PostgresRiskScoreResultStore : IRiskScoreResultStore, IAsync
|
||||
public async Task SaveAsync(RiskScoreResult result, CancellationToken cancellationToken)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
const string sql = """
|
||||
INSERT INTO riskengine.risk_score_results (
|
||||
@@ -79,8 +76,6 @@ public sealed class PostgresRiskScoreResultStore : IRiskScoreResultStore, IAsync
|
||||
|
||||
public bool TryGet(Guid jobId, out RiskScoreResult result)
|
||||
{
|
||||
EnsureTable();
|
||||
|
||||
const string sql = """
|
||||
SELECT provider, subject, score, success, error, signals, completed_at
|
||||
FROM riskengine.risk_score_results
|
||||
@@ -127,75 +122,4 @@ public sealed class PostgresRiskScoreResultStore : IRiskScoreResultStore, IAsync
|
||||
return _dataSource.DisposeAsync();
|
||||
}
|
||||
|
||||
private async Task EnsureTableAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
lock (_initGate)
|
||||
{
|
||||
if (_tableInitialized)
|
||||
{
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const string ddl = """
|
||||
CREATE SCHEMA IF NOT EXISTS riskengine;
|
||||
CREATE TABLE IF NOT EXISTS riskengine.risk_score_results (
|
||||
job_id UUID PRIMARY KEY,
|
||||
provider TEXT NOT NULL,
|
||||
subject TEXT NOT NULL,
|
||||
score DOUBLE PRECISION NOT NULL,
|
||||
success BOOLEAN NOT NULL,
|
||||
error TEXT NULL,
|
||||
signals JSONB NOT NULL,
|
||||
completed_at TIMESTAMPTZ NOT NULL
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_risk_score_results_completed_at
|
||||
ON riskengine.risk_score_results (completed_at DESC);
|
||||
""";
|
||||
|
||||
await using var connection = await _dataSource.OpenConnectionAsync(cancellationToken).ConfigureAwait(false);
|
||||
await using var command = new NpgsqlCommand(ddl, connection);
|
||||
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
lock (_initGate)
|
||||
{
|
||||
_tableInitialized = true;
|
||||
}
|
||||
}
|
||||
|
||||
private void EnsureTable()
|
||||
{
|
||||
lock (_initGate)
|
||||
{
|
||||
if (_tableInitialized)
|
||||
{
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const string ddl = """
|
||||
CREATE SCHEMA IF NOT EXISTS riskengine;
|
||||
CREATE TABLE IF NOT EXISTS riskengine.risk_score_results (
|
||||
job_id UUID PRIMARY KEY,
|
||||
provider TEXT NOT NULL,
|
||||
subject TEXT NOT NULL,
|
||||
score DOUBLE PRECISION NOT NULL,
|
||||
success BOOLEAN NOT NULL,
|
||||
error TEXT NULL,
|
||||
signals JSONB NOT NULL,
|
||||
completed_at TIMESTAMPTZ NOT NULL
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_risk_score_results_completed_at
|
||||
ON riskengine.risk_score_results (completed_at DESC);
|
||||
""";
|
||||
|
||||
using var connection = _dataSource.OpenConnection();
|
||||
using var command = new NpgsqlCommand(ddl, connection);
|
||||
command.ExecuteNonQuery();
|
||||
|
||||
lock (_initGate)
|
||||
{
|
||||
_tableInitialized = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,421 @@
|
||||
// Licensed under BUSL-1.1. Copyright (C) 2026 StellaOps Contributors.
|
||||
// Integration tests for VulnExplorer endpoints merged into Findings Ledger WebService.
|
||||
// Sprint: SPRINT_20260408_002 Task: VXLM-005
|
||||
|
||||
using System.Net;
|
||||
using System.Net.Http.Headers;
|
||||
using System.Net.Http.Json;
|
||||
using System.Text;
|
||||
using System.Text.Json;
|
||||
using System.Text.Json.Nodes;
|
||||
using StellaOps.TestKit;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Findings.Ledger.Tests.Integration;
|
||||
|
||||
/// <summary>
|
||||
/// Integration tests validating the VulnExplorer endpoints that were merged into
|
||||
/// the Findings Ledger WebService. Tests cover:
|
||||
/// - VEX decision CRUD (create, get, list, update)
|
||||
/// - VEX decision with attestation (signed override + rekor reference)
|
||||
/// - Fix verification workflow (create + state transition)
|
||||
/// - Audit bundle creation from persisted decisions
|
||||
/// - Evidence subgraph retrieval
|
||||
/// - Vulnerability list/detail queries via Ledger projections
|
||||
/// - Input validation (bad request handling)
|
||||
/// </summary>
|
||||
[Trait("Category", TestCategories.Integration)]
|
||||
public sealed class VulnExplorerEndpointsIntegrationTests : IClassFixture<FindingsLedgerWebApplicationFactory>
|
||||
{
|
||||
private static readonly JsonSerializerOptions JsonOptions = new(JsonSerializerDefaults.Web);
|
||||
private readonly FindingsLedgerWebApplicationFactory _factory;
|
||||
|
||||
public VulnExplorerEndpointsIntegrationTests(FindingsLedgerWebApplicationFactory factory)
|
||||
{
|
||||
_factory = factory;
|
||||
}
|
||||
|
||||
// ====================================================================
|
||||
// VEX Decision endpoints
|
||||
// ====================================================================
|
||||
|
||||
[Fact(DisplayName = "POST /v1/vex-decisions creates decision and GET returns it")]
|
||||
public async Task CreateAndGetVexDecision_WorksEndToEnd()
|
||||
{
|
||||
using var client = CreateAuthenticatedClient();
|
||||
var ct = TestContext.Current.CancellationToken;
|
||||
|
||||
var createPayload = BuildVexDecisionPayload("CVE-2025-LEDGER-001", "notAffected", withAttestation: false);
|
||||
var createResponse = await client.PostAsJsonAsync("/v1/vex-decisions", createPayload, JsonOptions, ct);
|
||||
|
||||
Assert.Equal(HttpStatusCode.Created, createResponse.StatusCode);
|
||||
|
||||
var created = await createResponse.Content.ReadFromJsonAsync<JsonObject>(ct);
|
||||
Assert.NotNull(created);
|
||||
var decisionId = created?["id"]?.GetValue<string>();
|
||||
Assert.False(string.IsNullOrWhiteSpace(decisionId), "Created decision should have a non-empty ID");
|
||||
|
||||
// Verify GET by ID
|
||||
var getResponse = await client.GetAsync($"/v1/vex-decisions/{decisionId}", ct);
|
||||
Assert.Equal(HttpStatusCode.OK, getResponse.StatusCode);
|
||||
|
||||
var fetched = await getResponse.Content.ReadFromJsonAsync<JsonObject>(ct);
|
||||
Assert.Equal("CVE-2025-LEDGER-001", fetched?["vulnerabilityId"]?.GetValue<string>());
|
||||
Assert.Equal("notAffected", fetched?["status"]?.GetValue<string>());
|
||||
}
|
||||
|
||||
[Fact(DisplayName = "POST /v1/vex-decisions with attestation returns signed override")]
|
||||
public async Task CreateWithAttestation_ReturnsSignedOverrideAndRekorReference()
|
||||
{
|
||||
using var client = CreateAuthenticatedClient();
|
||||
var ct = TestContext.Current.CancellationToken;
|
||||
|
||||
var payload = BuildVexDecisionPayload("CVE-2025-LEDGER-002", "affectedMitigated", withAttestation: true);
|
||||
var response = await client.PostAsJsonAsync("/v1/vex-decisions", payload, JsonOptions, ct);
|
||||
|
||||
Assert.Equal(HttpStatusCode.Created, response.StatusCode);
|
||||
|
||||
var body = await response.Content.ReadFromJsonAsync<JsonObject>(ct);
|
||||
var signedOverride = body?["signedOverride"]?.AsObject();
|
||||
Assert.NotNull(signedOverride);
|
||||
Assert.False(string.IsNullOrWhiteSpace(signedOverride?["envelopeDigest"]?.GetValue<string>()),
|
||||
"Signed override should contain an envelope digest");
|
||||
Assert.NotNull(signedOverride?["rekorLogIndex"]);
|
||||
}
|
||||
|
||||
[Fact(DisplayName = "GET /v1/vex-decisions lists created decisions")]
|
||||
public async Task ListVexDecisions_ReturnsCreatedDecisions()
|
||||
{
|
||||
using var client = CreateAuthenticatedClient();
|
||||
var ct = TestContext.Current.CancellationToken;
|
||||
|
||||
// Create a decision first
|
||||
var payload = BuildVexDecisionPayload("CVE-2025-LEDGER-LIST", "notAffected", withAttestation: false);
|
||||
var createResponse = await client.PostAsJsonAsync("/v1/vex-decisions", payload, JsonOptions, ct);
|
||||
Assert.Equal(HttpStatusCode.Created, createResponse.StatusCode);
|
||||
|
||||
// List decisions
|
||||
var listResponse = await client.GetAsync("/v1/vex-decisions", ct);
|
||||
Assert.Equal(HttpStatusCode.OK, listResponse.StatusCode);
|
||||
|
||||
var listBody = await listResponse.Content.ReadFromJsonAsync<JsonObject>(ct);
|
||||
Assert.NotNull(listBody?["items"]);
|
||||
var items = listBody!["items"]!.AsArray();
|
||||
Assert.True(items.Count > 0, "Decision list should contain at least one item");
|
||||
}
|
||||
|
||||
[Fact(DisplayName = "PATCH /v1/vex-decisions/{id} updates decision status")]
|
||||
public async Task UpdateVexDecision_ChangesStatus()
|
||||
{
|
||||
using var client = CreateAuthenticatedClient();
|
||||
var ct = TestContext.Current.CancellationToken;
|
||||
|
||||
// Create
|
||||
var payload = BuildVexDecisionPayload("CVE-2025-LEDGER-PATCH", "notAffected", withAttestation: false);
|
||||
var createResponse = await client.PostAsJsonAsync("/v1/vex-decisions", payload, JsonOptions, ct);
|
||||
Assert.Equal(HttpStatusCode.Created, createResponse.StatusCode);
|
||||
|
||||
var created = await createResponse.Content.ReadFromJsonAsync<JsonObject>(ct);
|
||||
var decisionId = created?["id"]?.GetValue<string>();
|
||||
Assert.False(string.IsNullOrWhiteSpace(decisionId));
|
||||
|
||||
// Update
|
||||
var patchResponse = await client.PatchAsync(
|
||||
$"/v1/vex-decisions/{decisionId}",
|
||||
JsonContent.Create(new { status = "affectedMitigated", justificationText = "Mitigation deployed." }),
|
||||
ct);
|
||||
Assert.Equal(HttpStatusCode.OK, patchResponse.StatusCode);
|
||||
|
||||
// Verify
|
||||
var getResponse = await client.GetAsync($"/v1/vex-decisions/{decisionId}", ct);
|
||||
var fetched = await getResponse.Content.ReadFromJsonAsync<JsonObject>(ct);
|
||||
Assert.Equal("affectedMitigated", fetched?["status"]?.GetValue<string>());
|
||||
}
|
||||
|
||||
[Fact(DisplayName = "POST /v1/vex-decisions with invalid status returns 400")]
|
||||
public async Task CreateVexDecision_InvalidStatus_ReturnsBadRequest()
|
||||
{
|
||||
using var client = CreateAuthenticatedClient();
|
||||
var ct = TestContext.Current.CancellationToken;
|
||||
|
||||
const string invalidJson = """
|
||||
{
|
||||
"vulnerabilityId": "CVE-2025-LEDGER-BAD",
|
||||
"subject": {
|
||||
"type": "image",
|
||||
"name": "registry.example/app:9.9.9",
|
||||
"digest": { "sha256": "zzz999" }
|
||||
},
|
||||
"status": "invalidStatusLiteral",
|
||||
"justificationType": "other"
|
||||
}
|
||||
""";
|
||||
|
||||
using var content = new StringContent(invalidJson, Encoding.UTF8, "application/json");
|
||||
var response = await client.PostAsync("/v1/vex-decisions", content, ct);
|
||||
Assert.Equal(HttpStatusCode.BadRequest, response.StatusCode);
|
||||
}
|
||||
|
||||
// ====================================================================
|
||||
// Evidence subgraph endpoint
|
||||
// ====================================================================
|
||||
|
||||
[Fact(DisplayName = "GET /v1/evidence-subgraph/{vulnId} returns subgraph structure")]
|
||||
public async Task EvidenceSubgraph_ReturnsGraphStructure()
|
||||
{
|
||||
using var client = CreateAuthenticatedClient();
|
||||
var ct = TestContext.Current.CancellationToken;
|
||||
|
||||
// Use a non-GUID vulnerability ID to exercise the stub fallback path
|
||||
var response = await client.GetAsync("/v1/evidence-subgraph/CVE-2025-0001", ct);
|
||||
Assert.Equal(HttpStatusCode.OK, response.StatusCode);
|
||||
|
||||
var body = await response.Content.ReadFromJsonAsync<JsonObject>(ct);
|
||||
Assert.NotNull(body?["root"]);
|
||||
Assert.NotNull(body?["edges"]);
|
||||
Assert.NotNull(body?["verdict"]);
|
||||
}
|
||||
|
||||
// ====================================================================
|
||||
// Fix verification endpoints
|
||||
// ====================================================================
|
||||
|
||||
[Fact(DisplayName = "POST + PATCH /v1/fix-verifications tracks state transitions")]
|
||||
public async Task FixVerificationWorkflow_TracksStateTransitions()
|
||||
{
|
||||
using var client = CreateAuthenticatedClient();
|
||||
var ct = TestContext.Current.CancellationToken;
|
||||
|
||||
var createResponse = await client.PostAsJsonAsync(
|
||||
"/v1/fix-verifications",
|
||||
new
|
||||
{
|
||||
cveId = "CVE-2025-LEDGER-FIX-001",
|
||||
componentPurl = "pkg:maven/org.example/app@1.2.3",
|
||||
artifactDigest = "sha256:abc123"
|
||||
},
|
||||
ct);
|
||||
|
||||
Assert.Equal(HttpStatusCode.Created, createResponse.StatusCode);
|
||||
|
||||
var patchResponse = await client.PatchAsync(
|
||||
"/v1/fix-verifications/CVE-2025-LEDGER-FIX-001",
|
||||
JsonContent.Create(new { verdict = "verified_by_scanner" }),
|
||||
ct);
|
||||
|
||||
Assert.Equal(HttpStatusCode.OK, patchResponse.StatusCode);
|
||||
|
||||
var body = await patchResponse.Content.ReadFromJsonAsync<JsonObject>(ct);
|
||||
Assert.Equal("verified_by_scanner", body?["verdict"]?.GetValue<string>());
|
||||
}
|
||||
|
||||
// ====================================================================
|
||||
// Audit bundle endpoint
|
||||
// ====================================================================
|
||||
|
||||
[Fact(DisplayName = "POST /v1/audit-bundles creates bundle from persisted decisions")]
|
||||
public async Task CreateAuditBundle_ReturnsBundleForDecisionSet()
|
||||
{
|
||||
using var client = CreateAuthenticatedClient();
|
||||
var ct = TestContext.Current.CancellationToken;
|
||||
|
||||
// Create a decision first
|
||||
var createPayload = BuildVexDecisionPayload("CVE-2025-LEDGER-BUNDLE", "notAffected", withAttestation: false);
|
||||
var decisionResponse = await client.PostAsJsonAsync("/v1/vex-decisions", createPayload, JsonOptions, ct);
|
||||
Assert.Equal(HttpStatusCode.Created, decisionResponse.StatusCode);
|
||||
|
||||
var decision = await decisionResponse.Content.ReadFromJsonAsync<JsonObject>(ct);
|
||||
var decisionId = decision?["id"]?.GetValue<string>();
|
||||
Assert.False(string.IsNullOrWhiteSpace(decisionId));
|
||||
|
||||
// Create audit bundle
|
||||
var bundleResponse = await client.PostAsJsonAsync(
|
||||
"/v1/audit-bundles",
|
||||
new
|
||||
{
|
||||
tenant = "tenant-qa",
|
||||
decisionIds = new[] { decisionId }
|
||||
},
|
||||
ct);
|
||||
|
||||
Assert.Equal(HttpStatusCode.Created, bundleResponse.StatusCode);
|
||||
|
||||
var bundle = await bundleResponse.Content.ReadFromJsonAsync<JsonObject>(ct);
|
||||
Assert.NotNull(bundle?["bundleId"]);
|
||||
Assert.NotNull(bundle?["decisions"]);
|
||||
}
|
||||
|
||||
// ====================================================================
|
||||
// Vulnerability list/detail endpoints (Ledger projection queries)
|
||||
// ====================================================================
|
||||
|
||||
[Fact(DisplayName = "GET /v1/vulns returns vulnerability list")]
|
||||
public async Task ListVulns_ReturnsListFromLedgerProjection()
|
||||
{
|
||||
using var client = CreateAuthenticatedClient();
|
||||
var ct = TestContext.Current.CancellationToken;
|
||||
|
||||
var response = await client.GetAsync("/v1/vulns", ct);
|
||||
|
||||
// May return OK with empty list or items depending on DB state
|
||||
Assert.Equal(HttpStatusCode.OK, response.StatusCode);
|
||||
|
||||
var body = await response.Content.ReadFromJsonAsync<JsonObject>(ct);
|
||||
Assert.NotNull(body?["items"]);
|
||||
}
|
||||
|
||||
[Fact(DisplayName = "GET /v1/vulns/{id} returns 404 for non-existent finding")]
|
||||
public async Task GetVulnDetail_NonExistent_ReturnsNotFound()
|
||||
{
|
||||
using var client = CreateAuthenticatedClient();
|
||||
var ct = TestContext.Current.CancellationToken;
|
||||
|
||||
var response = await client.GetAsync("/v1/vulns/non-existent-id", ct);
|
||||
Assert.Equal(HttpStatusCode.NotFound, response.StatusCode);
|
||||
}
|
||||
|
||||
// ====================================================================
|
||||
// Full triage workflow (end-to-end sequence)
|
||||
// ====================================================================
|
||||
|
||||
[Fact(DisplayName = "Full triage workflow: VEX decision -> fix verification -> audit bundle")]
|
||||
public async Task FullTriageWorkflow_EndToEnd()
|
||||
{
|
||||
using var client = CreateAuthenticatedClient();
|
||||
var ct = TestContext.Current.CancellationToken;
|
||||
|
||||
// Step 1: Create VEX decision
|
||||
var vexPayload = BuildVexDecisionPayload("CVE-2025-LEDGER-TRIAGE", "affectedMitigated", withAttestation: true);
|
||||
var vexResponse = await client.PostAsJsonAsync("/v1/vex-decisions", vexPayload, JsonOptions, ct);
|
||||
Assert.Equal(HttpStatusCode.Created, vexResponse.StatusCode);
|
||||
|
||||
var vexDecision = await vexResponse.Content.ReadFromJsonAsync<JsonObject>(ct);
|
||||
var vexDecisionId = vexDecision?["id"]?.GetValue<string>();
|
||||
Assert.False(string.IsNullOrWhiteSpace(vexDecisionId));
|
||||
|
||||
// Verify attestation was created
|
||||
Assert.NotNull(vexDecision?["signedOverride"]?.AsObject());
|
||||
|
||||
// Step 2: Create fix verification
|
||||
var fixResponse = await client.PostAsJsonAsync(
|
||||
"/v1/fix-verifications",
|
||||
new
|
||||
{
|
||||
cveId = "CVE-2025-LEDGER-TRIAGE",
|
||||
componentPurl = "pkg:npm/stellaops/core@3.0.0",
|
||||
artifactDigest = "sha256:triage123"
|
||||
},
|
||||
ct);
|
||||
Assert.Equal(HttpStatusCode.Created, fixResponse.StatusCode);
|
||||
|
||||
// Step 3: Update fix verification
|
||||
var fixPatchResponse = await client.PatchAsync(
|
||||
"/v1/fix-verifications/CVE-2025-LEDGER-TRIAGE",
|
||||
JsonContent.Create(new { verdict = "verified_by_scanner" }),
|
||||
ct);
|
||||
Assert.Equal(HttpStatusCode.OK, fixPatchResponse.StatusCode);
|
||||
|
||||
// Step 4: Create audit bundle
|
||||
var bundleResponse = await client.PostAsJsonAsync(
|
||||
"/v1/audit-bundles",
|
||||
new
|
||||
{
|
||||
tenant = "tenant-qa",
|
||||
decisionIds = new[] { vexDecisionId }
|
||||
},
|
||||
ct);
|
||||
Assert.Equal(HttpStatusCode.Created, bundleResponse.StatusCode);
|
||||
|
||||
// Step 5: Retrieve evidence subgraph
|
||||
var subgraphResponse = await client.GetAsync("/v1/evidence-subgraph/CVE-2025-LEDGER-TRIAGE", ct);
|
||||
Assert.Equal(HttpStatusCode.OK, subgraphResponse.StatusCode);
|
||||
|
||||
var subgraph = await subgraphResponse.Content.ReadFromJsonAsync<JsonObject>(ct);
|
||||
Assert.NotNull(subgraph?["root"]);
|
||||
Assert.NotNull(subgraph?["verdict"]);
|
||||
|
||||
// Step 6: Verify all decisions are queryable
|
||||
var listResponse = await client.GetAsync("/v1/vex-decisions?vulnerabilityId=CVE-2025-LEDGER-TRIAGE", ct);
|
||||
Assert.Equal(HttpStatusCode.OK, listResponse.StatusCode);
|
||||
|
||||
var listBody = await listResponse.Content.ReadFromJsonAsync<JsonObject>(ct);
|
||||
var items = listBody!["items"]!.AsArray();
|
||||
Assert.True(items.Count >= 1, "Should find at least the decision we created");
|
||||
}
|
||||
|
||||
// ====================================================================
|
||||
// Authorization checks
|
||||
// ====================================================================
|
||||
|
||||
[Fact(DisplayName = "Unauthenticated requests to VulnExplorer endpoints are rejected")]
|
||||
public async Task UnauthenticatedRequest_IsRejected()
|
||||
{
|
||||
using var client = _factory.CreateClient();
|
||||
var ct = TestContext.Current.CancellationToken;
|
||||
|
||||
// No auth headers at all
|
||||
var response = await client.GetAsync("/v1/vex-decisions", ct);
|
||||
|
||||
// Should be 401 or 403 (depends on auth handler config)
|
||||
Assert.True(
|
||||
response.StatusCode is HttpStatusCode.Unauthorized or HttpStatusCode.Forbidden,
|
||||
$"Expected 401 or 403 but got {(int)response.StatusCode}");
|
||||
}
|
||||
|
||||
// ====================================================================
|
||||
// Helpers
|
||||
// ====================================================================
|
||||
|
||||
private HttpClient CreateAuthenticatedClient()
|
||||
{
|
||||
var client = _factory.CreateClient();
|
||||
client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", "test-token");
|
||||
client.DefaultRequestHeaders.Add("X-Scopes",
|
||||
"vuln:view vuln:investigate vuln:operate vuln:audit findings:read findings:write");
|
||||
client.DefaultRequestHeaders.Add("X-Tenant-Id", "11111111-1111-1111-1111-111111111111");
|
||||
client.DefaultRequestHeaders.Add("X-Stella-Tenant", "tenant-qa");
|
||||
client.DefaultRequestHeaders.Add("x-stella-user-id", "integration-test-user");
|
||||
client.DefaultRequestHeaders.Add("x-stella-user-name", "Integration Test User");
|
||||
return client;
|
||||
}
|
||||
|
||||
/// <summary>
/// Builds the JSON-serializable request body for a VEX decision POST.
/// </summary>
/// <param name="vulnerabilityId">CVE or advisory identifier the decision applies to.</param>
/// <param name="status">VEX status value to submit.</param>
/// <param name="withAttestation">
/// When true, targets the 2.0.0 image and includes attestation options asking the
/// service to create an attestation and anchor it to Rekor; when false, targets the
/// 1.2.3 image with a plain (unattested) decision.
/// </param>
private static object BuildVexDecisionPayload(string vulnerabilityId, string status, bool withAttestation)
{
    if (!withAttestation)
    {
        // Plain decision: no attestation options attached.
        return new
        {
            vulnerabilityId,
            subject = new
            {
                type = "image",
                name = "registry.example/app:1.2.3",
                digest = new Dictionary<string, string> { ["sha256"] = "abc123" }
            },
            status,
            justificationType = "codeNotReachable",
            justificationText = "Guarded by deployment policy."
        };
    }

    // Attested decision: request attestation creation and Rekor anchoring.
    return new
    {
        vulnerabilityId,
        subject = new
        {
            type = "image",
            name = "registry.example/app:2.0.0",
            digest = new Dictionary<string, string> { ["sha256"] = "def456" }
        },
        status,
        justificationType = "runtimeMitigationPresent",
        justificationText = "Runtime guard active.",
        attestationOptions = new
        {
            createAttestation = true,
            anchorToRekor = true,
            signingKeyId = "test-key"
        }
    };
}
|
||||
}
|
||||
@@ -0,0 +1,200 @@
|
||||
-- PacksRegistry Schema Migration 001: Initial Schema (Consolidated)
-- Combines the JobEngine 009_packs_registry.sql DDL with inline EnsureTable DDL
-- from the 6 PacksRegistry repository classes.
-- All statements are idempotent (IF NOT EXISTS / guarded DO blocks) so the
-- migration is safe to re-run.

CREATE SCHEMA IF NOT EXISTS packs;

-- ============================================================================
-- ENUM types
-- ============================================================================

-- CREATE TYPE has no IF NOT EXISTS form, so wrap it in a DO block and swallow
-- duplicate_object (re-run) as well as SQLSTATE '42P17'.
-- NOTE(review): 42P17 is invalid_object_definition; tolerating it can hide a
-- genuinely broken type definition -- confirm this is intentional.
DO $$ BEGIN
    CREATE TYPE packs.pack_status AS ENUM (
        'draft',
        'published',
        'deprecated',
        'archived'
    );
EXCEPTION WHEN duplicate_object OR SQLSTATE '42P17' THEN NULL; END $$;

DO $$ BEGIN
    CREATE TYPE packs.pack_version_status AS ENUM (
        'draft',
        'published',
        'deprecated',
        'archived'
    );
EXCEPTION WHEN duplicate_object OR SQLSTATE '42P17' THEN NULL; END $$;

-- ============================================================================
-- Core tables (from 009_packs_registry.sql)
-- ============================================================================

-- Pack catalog. Tenant-scoped: both the primary key and the name uniqueness
-- constraint include tenant_id.
-- NOTE(review): the inline repository DDL removed elsewhere in this change set
-- declared packs.packs with a TEXT pack_id primary key plus
-- digest/signature/content columns that PostgresPackRepository's SELECT list
-- still references; this UUID/composite-key shape does not match that query --
-- confirm which definition is authoritative.
CREATE TABLE IF NOT EXISTS packs.packs (
    pack_id UUID NOT NULL,
    tenant_id TEXT NOT NULL,
    project_id TEXT,
    name TEXT NOT NULL,
    display_name TEXT NOT NULL,
    description TEXT,
    status packs.pack_status NOT NULL DEFAULT 'draft',
    created_by TEXT NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_by TEXT,
    metadata TEXT,
    tags TEXT,
    icon_uri TEXT,
    -- Denormalized rollups maintained by the application layer.
    version_count INTEGER NOT NULL DEFAULT 0,
    latest_version TEXT,
    published_at TIMESTAMPTZ,
    published_by TEXT,
    CONSTRAINT pk_pack_registry_packs PRIMARY KEY (tenant_id, pack_id),
    CONSTRAINT uq_pack_registry_pack_name UNIQUE (tenant_id, name),
    CONSTRAINT ck_pack_registry_version_count_non_negative CHECK (version_count >= 0)
);

CREATE INDEX IF NOT EXISTS ix_pack_registry_packs_status_updated
    ON packs.packs (tenant_id, status, updated_at DESC);

CREATE INDEX IF NOT EXISTS ix_pack_registry_packs_project_status_updated
    ON packs.packs (tenant_id, project_id, status, updated_at DESC);

CREATE INDEX IF NOT EXISTS ix_pack_registry_packs_published
    ON packs.packs (tenant_id, published_at DESC NULLS LAST, updated_at DESC);

-- Individual published versions of a pack; cascades away with the parent pack.
CREATE TABLE IF NOT EXISTS packs.pack_versions (
    pack_version_id UUID NOT NULL,
    tenant_id TEXT NOT NULL,
    pack_id UUID NOT NULL,
    version TEXT NOT NULL,
    sem_ver TEXT,
    status packs.pack_version_status NOT NULL DEFAULT 'draft',
    artifact_uri TEXT NOT NULL,
    artifact_digest TEXT NOT NULL,
    artifact_mime_type TEXT,
    artifact_size_bytes BIGINT,
    manifest_json TEXT,
    manifest_digest TEXT,
    release_notes TEXT,
    min_engine_version TEXT,
    dependencies TEXT,
    created_by TEXT NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_by TEXT,
    published_at TIMESTAMPTZ,
    published_by TEXT,
    deprecated_at TIMESTAMPTZ,
    deprecated_by TEXT,
    deprecation_reason TEXT,
    -- Signing metadata for the version artifact.
    signature_uri TEXT,
    signature_algorithm TEXT,
    signed_by TEXT,
    signed_at TIMESTAMPTZ,
    metadata TEXT,
    download_count INTEGER NOT NULL DEFAULT 0,
    CONSTRAINT pk_pack_registry_pack_versions PRIMARY KEY (tenant_id, pack_version_id),
    CONSTRAINT uq_pack_registry_pack_version UNIQUE (tenant_id, pack_id, version),
    CONSTRAINT ck_pack_registry_download_count_non_negative CHECK (download_count >= 0),
    CONSTRAINT fk_pack_registry_pack_versions_pack
        FOREIGN KEY (tenant_id, pack_id)
        REFERENCES packs.packs (tenant_id, pack_id)
        ON DELETE CASCADE
);

CREATE INDEX IF NOT EXISTS ix_pack_registry_pack_versions_pack_status_created
    ON packs.pack_versions (tenant_id, pack_id, status, created_at DESC);

CREATE INDEX IF NOT EXISTS ix_pack_registry_pack_versions_status_published
    ON packs.pack_versions (tenant_id, status, published_at DESC NULLS LAST, updated_at DESC);

CREATE INDEX IF NOT EXISTS ix_pack_registry_pack_versions_downloads
    ON packs.pack_versions (tenant_id, pack_id, download_count DESC);

-- ============================================================================
-- Attestations table (from PostgresAttestationRepository inline DDL)
-- ============================================================================

-- One attestation blob per (pack, attestation type).
-- NOTE(review): pack_id here is TEXT (not UUID) and tenant_id is not part of
-- the primary key, unlike the core tables above -- carried over verbatim from
-- the inline DDL; confirm the intended keying.
CREATE TABLE IF NOT EXISTS packs.attestations (
    pack_id TEXT NOT NULL,
    tenant_id TEXT NOT NULL,
    type TEXT NOT NULL,
    digest TEXT NOT NULL,
    content BYTEA NOT NULL,
    notes TEXT,
    created_at TIMESTAMPTZ NOT NULL,
    PRIMARY KEY (pack_id, type)
);

CREATE INDEX IF NOT EXISTS idx_attestations_tenant_id ON packs.attestations (tenant_id);
CREATE INDEX IF NOT EXISTS idx_attestations_created_at ON packs.attestations (created_at DESC);

-- ============================================================================
-- Audit log table (from PostgresAuditRepository inline DDL)
-- ============================================================================

-- Append-only event trail; pack_id is nullable for tenant-level events.
CREATE TABLE IF NOT EXISTS packs.audit_log (
    id TEXT PRIMARY KEY,
    pack_id TEXT,
    tenant_id TEXT NOT NULL,
    event TEXT NOT NULL,
    actor TEXT,
    notes TEXT,
    occurred_at TIMESTAMPTZ NOT NULL
);

CREATE INDEX IF NOT EXISTS idx_audit_log_tenant_id ON packs.audit_log (tenant_id);
CREATE INDEX IF NOT EXISTS idx_audit_log_pack_id ON packs.audit_log (pack_id);
CREATE INDEX IF NOT EXISTS idx_audit_log_occurred_at ON packs.audit_log (occurred_at DESC);

-- ============================================================================
-- Lifecycles table (from PostgresLifecycleRepository inline DDL)
-- ============================================================================

-- Current lifecycle state per pack (one row per pack).
CREATE TABLE IF NOT EXISTS packs.lifecycles (
    pack_id TEXT PRIMARY KEY,
    tenant_id TEXT NOT NULL,
    state TEXT NOT NULL,
    notes TEXT,
    updated_at TIMESTAMPTZ NOT NULL
);

CREATE INDEX IF NOT EXISTS idx_lifecycles_tenant_id ON packs.lifecycles (tenant_id);
CREATE INDEX IF NOT EXISTS idx_lifecycles_state ON packs.lifecycles (state);
CREATE INDEX IF NOT EXISTS idx_lifecycles_updated_at ON packs.lifecycles (updated_at DESC);

-- ============================================================================
-- Mirror sources table (from PostgresMirrorRepository inline DDL)
-- ============================================================================

-- Upstream registries mirrored into this instance.
CREATE TABLE IF NOT EXISTS packs.mirror_sources (
    id TEXT PRIMARY KEY,
    tenant_id TEXT NOT NULL,
    upstream_uri TEXT NOT NULL,
    enabled BOOLEAN NOT NULL DEFAULT true,
    status TEXT NOT NULL,
    notes TEXT,
    updated_at TIMESTAMPTZ NOT NULL,
    last_successful_sync_at TIMESTAMPTZ
);

CREATE INDEX IF NOT EXISTS idx_mirror_sources_tenant_id ON packs.mirror_sources (tenant_id);
CREATE INDEX IF NOT EXISTS idx_mirror_sources_enabled ON packs.mirror_sources (enabled);
CREATE INDEX IF NOT EXISTS idx_mirror_sources_updated_at ON packs.mirror_sources (updated_at DESC);

-- ============================================================================
-- Parities table (from PostgresParityRepository inline DDL)
-- ============================================================================

-- Parity status per pack (one row per pack).
CREATE TABLE IF NOT EXISTS packs.parities (
    pack_id TEXT PRIMARY KEY,
    tenant_id TEXT NOT NULL,
    status TEXT NOT NULL,
    notes TEXT,
    updated_at TIMESTAMPTZ NOT NULL
);

CREATE INDEX IF NOT EXISTS idx_parities_tenant_id ON packs.parities (tenant_id);
CREATE INDEX IF NOT EXISTS idx_parities_status ON packs.parities (status);
CREATE INDEX IF NOT EXISTS idx_parities_updated_at ON packs.parities (updated_at DESC);
|
||||
@@ -14,7 +14,6 @@ public sealed class PostgresAttestationRepository : RepositoryBase<PacksRegistry
|
||||
{
|
||||
private static readonly byte[] EmptyPayload = Array.Empty<byte>();
|
||||
|
||||
private bool _tableInitialized;
|
||||
private readonly IPacksRegistryBlobStore? _blobStore;
|
||||
|
||||
public PostgresAttestationRepository(
|
||||
@@ -31,7 +30,6 @@ public sealed class PostgresAttestationRepository : RepositoryBase<PacksRegistry
|
||||
ArgumentNullException.ThrowIfNull(record);
|
||||
ArgumentNullException.ThrowIfNull(content);
|
||||
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
var dbContent = content;
|
||||
if (_blobStore is not null)
|
||||
@@ -76,7 +74,6 @@ public sealed class PostgresAttestationRepository : RepositoryBase<PacksRegistry
|
||||
ArgumentException.ThrowIfNullOrWhiteSpace(packId);
|
||||
ArgumentException.ThrowIfNullOrWhiteSpace(type);
|
||||
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
const string sql = @"
|
||||
SELECT pack_id, tenant_id, type, digest, notes, created_at
|
||||
@@ -101,7 +98,6 @@ public sealed class PostgresAttestationRepository : RepositoryBase<PacksRegistry
|
||||
{
|
||||
ArgumentException.ThrowIfNullOrWhiteSpace(packId);
|
||||
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
const string sql = @"
|
||||
SELECT pack_id, tenant_id, type, digest, notes, created_at
|
||||
@@ -129,7 +125,6 @@ public sealed class PostgresAttestationRepository : RepositoryBase<PacksRegistry
|
||||
ArgumentException.ThrowIfNullOrWhiteSpace(packId);
|
||||
ArgumentException.ThrowIfNullOrWhiteSpace(type);
|
||||
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
const string sql = """
|
||||
SELECT tenant_id, digest, content
|
||||
@@ -181,34 +176,4 @@ public sealed class PostgresAttestationRepository : RepositoryBase<PacksRegistry
|
||||
Notes: reader.IsDBNull(4) ? null : reader.GetString(4));
|
||||
}
|
||||
|
||||
private async Task EnsureTableAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
if (_tableInitialized)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS packs;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS packs.attestations (
|
||||
pack_id TEXT NOT NULL,
|
||||
tenant_id TEXT NOT NULL,
|
||||
type TEXT NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
content BYTEA NOT NULL,
|
||||
notes TEXT,
|
||||
created_at TIMESTAMPTZ NOT NULL,
|
||||
PRIMARY KEY (pack_id, type)
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_attestations_tenant_id ON packs.attestations (tenant_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_attestations_created_at ON packs.attestations (created_at DESC);";
|
||||
|
||||
await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
|
||||
await using var command = CreateCommand(ddl, connection);
|
||||
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
_tableInitialized = true;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,8 +12,6 @@ namespace StellaOps.PacksRegistry.Persistence.Postgres.Repositories;
|
||||
/// </summary>
|
||||
public sealed class PostgresAuditRepository : RepositoryBase<PacksRegistryDataSource>, IAuditRepository
|
||||
{
|
||||
private bool _tableInitialized;
|
||||
|
||||
public PostgresAuditRepository(PacksRegistryDataSource dataSource, ILogger<PostgresAuditRepository> logger)
|
||||
: base(dataSource, logger)
|
||||
{
|
||||
@@ -23,7 +21,6 @@ public sealed class PostgresAuditRepository : RepositoryBase<PacksRegistryDataSo
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(record);
|
||||
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
const string sql = @"
|
||||
INSERT INTO packs.audit_log (id, pack_id, tenant_id, event, actor, notes, occurred_at)
|
||||
@@ -44,7 +41,6 @@ public sealed class PostgresAuditRepository : RepositoryBase<PacksRegistryDataSo
|
||||
|
||||
public async Task<IReadOnlyList<AuditRecord>> ListAsync(string? tenantId = null, CancellationToken cancellationToken = default)
|
||||
{
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
var sql = @"
|
||||
SELECT pack_id, tenant_id, event, occurred_at, actor, notes
|
||||
@@ -87,34 +83,4 @@ public sealed class PostgresAuditRepository : RepositoryBase<PacksRegistryDataSo
|
||||
Notes: reader.IsDBNull(5) ? null : reader.GetString(5));
|
||||
}
|
||||
|
||||
private async Task EnsureTableAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
if (_tableInitialized)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS packs;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS packs.audit_log (
|
||||
id TEXT PRIMARY KEY,
|
||||
pack_id TEXT,
|
||||
tenant_id TEXT NOT NULL,
|
||||
event TEXT NOT NULL,
|
||||
actor TEXT,
|
||||
notes TEXT,
|
||||
occurred_at TIMESTAMPTZ NOT NULL
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_audit_log_tenant_id ON packs.audit_log (tenant_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_audit_log_pack_id ON packs.audit_log (pack_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_audit_log_occurred_at ON packs.audit_log (occurred_at DESC);";
|
||||
|
||||
await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
|
||||
await using var command = CreateCommand(ddl, connection);
|
||||
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
_tableInitialized = true;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,8 +11,6 @@ namespace StellaOps.PacksRegistry.Persistence.Postgres.Repositories;
|
||||
/// </summary>
|
||||
public sealed class PostgresLifecycleRepository : RepositoryBase<PacksRegistryDataSource>, ILifecycleRepository
|
||||
{
|
||||
private bool _tableInitialized;
|
||||
|
||||
public PostgresLifecycleRepository(PacksRegistryDataSource dataSource, ILogger<PostgresLifecycleRepository> logger)
|
||||
: base(dataSource, logger)
|
||||
{
|
||||
@@ -22,7 +20,6 @@ public sealed class PostgresLifecycleRepository : RepositoryBase<PacksRegistryDa
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(record);
|
||||
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
const string sql = @"
|
||||
INSERT INTO packs.lifecycles (pack_id, tenant_id, state, notes, updated_at)
|
||||
@@ -48,7 +45,6 @@ public sealed class PostgresLifecycleRepository : RepositoryBase<PacksRegistryDa
|
||||
{
|
||||
ArgumentException.ThrowIfNullOrWhiteSpace(packId);
|
||||
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
const string sql = @"
|
||||
SELECT pack_id, tenant_id, state, notes, updated_at
|
||||
@@ -70,7 +66,6 @@ public sealed class PostgresLifecycleRepository : RepositoryBase<PacksRegistryDa
|
||||
|
||||
public async Task<IReadOnlyList<LifecycleRecord>> ListAsync(string? tenantId = null, CancellationToken cancellationToken = default)
|
||||
{
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
var sql = @"
|
||||
SELECT pack_id, tenant_id, state, notes, updated_at
|
||||
@@ -112,32 +107,4 @@ public sealed class PostgresLifecycleRepository : RepositoryBase<PacksRegistryDa
|
||||
UpdatedAtUtc: reader.GetFieldValue<DateTimeOffset>(4));
|
||||
}
|
||||
|
||||
private async Task EnsureTableAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
if (_tableInitialized)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS packs;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS packs.lifecycles (
|
||||
pack_id TEXT PRIMARY KEY,
|
||||
tenant_id TEXT NOT NULL,
|
||||
state TEXT NOT NULL,
|
||||
notes TEXT,
|
||||
updated_at TIMESTAMPTZ NOT NULL
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_lifecycles_tenant_id ON packs.lifecycles (tenant_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_lifecycles_state ON packs.lifecycles (state);
|
||||
CREATE INDEX IF NOT EXISTS idx_lifecycles_updated_at ON packs.lifecycles (updated_at DESC);";
|
||||
|
||||
await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
|
||||
await using var command = CreateCommand(ddl, connection);
|
||||
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
_tableInitialized = true;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,8 +11,6 @@ namespace StellaOps.PacksRegistry.Persistence.Postgres.Repositories;
|
||||
/// </summary>
|
||||
public sealed class PostgresMirrorRepository : RepositoryBase<PacksRegistryDataSource>, IMirrorRepository
|
||||
{
|
||||
private bool _tableInitialized;
|
||||
|
||||
public PostgresMirrorRepository(PacksRegistryDataSource dataSource, ILogger<PostgresMirrorRepository> logger)
|
||||
: base(dataSource, logger)
|
||||
{
|
||||
@@ -22,7 +20,6 @@ public sealed class PostgresMirrorRepository : RepositoryBase<PacksRegistryDataS
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(record);
|
||||
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
const string sql = @"
|
||||
INSERT INTO packs.mirror_sources (id, tenant_id, upstream_uri, enabled, status, notes, updated_at, last_successful_sync_at)
|
||||
@@ -54,7 +51,6 @@ public sealed class PostgresMirrorRepository : RepositoryBase<PacksRegistryDataS
|
||||
{
|
||||
ArgumentException.ThrowIfNullOrWhiteSpace(id);
|
||||
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
const string sql = @"
|
||||
SELECT id, tenant_id, upstream_uri, enabled, status, updated_at, notes, last_successful_sync_at
|
||||
@@ -76,7 +72,6 @@ public sealed class PostgresMirrorRepository : RepositoryBase<PacksRegistryDataS
|
||||
|
||||
public async Task<IReadOnlyList<MirrorSourceRecord>> ListAsync(string? tenantId = null, CancellationToken cancellationToken = default)
|
||||
{
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
var sql = @"
|
||||
SELECT id, tenant_id, upstream_uri, enabled, status, updated_at, notes, last_successful_sync_at
|
||||
@@ -121,35 +116,4 @@ public sealed class PostgresMirrorRepository : RepositoryBase<PacksRegistryDataS
|
||||
LastSuccessfulSyncUtc: reader.IsDBNull(7) ? null : reader.GetFieldValue<DateTimeOffset>(7));
|
||||
}
|
||||
|
||||
private async Task EnsureTableAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
if (_tableInitialized)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS packs;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS packs.mirror_sources (
|
||||
id TEXT PRIMARY KEY,
|
||||
tenant_id TEXT NOT NULL,
|
||||
upstream_uri TEXT NOT NULL,
|
||||
enabled BOOLEAN NOT NULL DEFAULT true,
|
||||
status TEXT NOT NULL,
|
||||
notes TEXT,
|
||||
updated_at TIMESTAMPTZ NOT NULL,
|
||||
last_successful_sync_at TIMESTAMPTZ
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_mirror_sources_tenant_id ON packs.mirror_sources (tenant_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_mirror_sources_enabled ON packs.mirror_sources (enabled);
|
||||
CREATE INDEX IF NOT EXISTS idx_mirror_sources_updated_at ON packs.mirror_sources (updated_at DESC);";
|
||||
|
||||
await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
|
||||
await using var command = CreateCommand(ddl, connection);
|
||||
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
_tableInitialized = true;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ public sealed class PostgresPackRepository : RepositoryBase<PacksRegistryDataSou
|
||||
|
||||
private static readonly byte[] EmptyPayload = Array.Empty<byte>();
|
||||
|
||||
private bool _tableInitialized;
|
||||
private readonly IPacksRegistryBlobStore? _blobStore;
|
||||
|
||||
public PostgresPackRepository(
|
||||
@@ -39,7 +38,6 @@ public sealed class PostgresPackRepository : RepositoryBase<PacksRegistryDataSou
|
||||
ArgumentNullException.ThrowIfNull(record);
|
||||
ArgumentNullException.ThrowIfNull(content);
|
||||
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
var dbContent = content;
|
||||
byte[]? dbProvenance = provenance;
|
||||
@@ -99,7 +97,6 @@ public sealed class PostgresPackRepository : RepositoryBase<PacksRegistryDataSou
|
||||
{
|
||||
ArgumentException.ThrowIfNullOrWhiteSpace(packId);
|
||||
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
const string sql = @"
|
||||
SELECT pack_id, name, version, tenant_id, digest, signature, provenance_uri, provenance_digest, metadata, created_at
|
||||
@@ -121,7 +118,6 @@ public sealed class PostgresPackRepository : RepositoryBase<PacksRegistryDataSou
|
||||
|
||||
public async Task<IReadOnlyList<PackRecord>> ListAsync(string? tenantId = null, CancellationToken cancellationToken = default)
|
||||
{
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
var sql = @"
|
||||
SELECT pack_id, name, version, tenant_id, digest, signature, provenance_uri, provenance_digest, metadata, created_at
|
||||
@@ -157,7 +153,6 @@ public sealed class PostgresPackRepository : RepositoryBase<PacksRegistryDataSou
|
||||
{
|
||||
ArgumentException.ThrowIfNullOrWhiteSpace(packId);
|
||||
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
const string sql = """
|
||||
SELECT tenant_id, digest, content
|
||||
@@ -201,7 +196,6 @@ public sealed class PostgresPackRepository : RepositoryBase<PacksRegistryDataSou
|
||||
{
|
||||
ArgumentException.ThrowIfNullOrWhiteSpace(packId);
|
||||
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
const string sql = """
|
||||
SELECT tenant_id, provenance_digest, provenance
|
||||
@@ -261,39 +255,4 @@ public sealed class PostgresPackRepository : RepositoryBase<PacksRegistryDataSou
|
||||
Metadata: metadata);
|
||||
}
|
||||
|
||||
private async Task EnsureTableAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
if (_tableInitialized)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS packs;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS packs.packs (
|
||||
pack_id TEXT PRIMARY KEY,
|
||||
name TEXT NOT NULL,
|
||||
version TEXT NOT NULL,
|
||||
tenant_id TEXT NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
signature TEXT,
|
||||
provenance_uri TEXT,
|
||||
provenance_digest TEXT,
|
||||
metadata JSONB,
|
||||
content BYTEA NOT NULL,
|
||||
provenance BYTEA,
|
||||
created_at TIMESTAMPTZ NOT NULL
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_packs_tenant_id ON packs.packs (tenant_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_packs_name_version ON packs.packs (name, version);
|
||||
CREATE INDEX IF NOT EXISTS idx_packs_created_at ON packs.packs (created_at DESC);";
|
||||
|
||||
await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
|
||||
await using var command = CreateCommand(ddl, connection);
|
||||
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
_tableInitialized = true;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,8 +11,6 @@ namespace StellaOps.PacksRegistry.Persistence.Postgres.Repositories;
|
||||
/// </summary>
|
||||
public sealed class PostgresParityRepository : RepositoryBase<PacksRegistryDataSource>, IParityRepository
|
||||
{
|
||||
private bool _tableInitialized;
|
||||
|
||||
public PostgresParityRepository(PacksRegistryDataSource dataSource, ILogger<PostgresParityRepository> logger)
|
||||
: base(dataSource, logger)
|
||||
{
|
||||
@@ -22,7 +20,6 @@ public sealed class PostgresParityRepository : RepositoryBase<PacksRegistryDataS
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(record);
|
||||
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
const string sql = @"
|
||||
INSERT INTO packs.parities (pack_id, tenant_id, status, notes, updated_at)
|
||||
@@ -48,7 +45,6 @@ public sealed class PostgresParityRepository : RepositoryBase<PacksRegistryDataS
|
||||
{
|
||||
ArgumentException.ThrowIfNullOrWhiteSpace(packId);
|
||||
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
const string sql = @"
|
||||
SELECT pack_id, tenant_id, status, notes, updated_at
|
||||
@@ -70,7 +66,6 @@ public sealed class PostgresParityRepository : RepositoryBase<PacksRegistryDataS
|
||||
|
||||
public async Task<IReadOnlyList<ParityRecord>> ListAsync(string? tenantId = null, CancellationToken cancellationToken = default)
|
||||
{
|
||||
await EnsureTableAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
var sql = @"
|
||||
SELECT pack_id, tenant_id, status, notes, updated_at
|
||||
@@ -112,32 +107,4 @@ public sealed class PostgresParityRepository : RepositoryBase<PacksRegistryDataS
|
||||
UpdatedAtUtc: reader.GetFieldValue<DateTimeOffset>(4));
|
||||
}
|
||||
|
||||
private async Task EnsureTableAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
if (_tableInitialized)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS packs;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS packs.parities (
|
||||
pack_id TEXT PRIMARY KEY,
|
||||
tenant_id TEXT NOT NULL,
|
||||
status TEXT NOT NULL,
|
||||
notes TEXT,
|
||||
updated_at TIMESTAMPTZ NOT NULL
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_parities_tenant_id ON packs.parities (tenant_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_parities_status ON packs.parities (status);
|
||||
CREATE INDEX IF NOT EXISTS idx_parities_updated_at ON packs.parities (updated_at DESC);";
|
||||
|
||||
await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
|
||||
await using var command = CreateCommand(ddl, connection);
|
||||
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
_tableInitialized = true;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,6 +10,10 @@
|
||||
<Description>Consolidated persistence layer for StellaOps PacksRegistry module</Description>
|
||||
</PropertyGroup>
|
||||
|
||||
<ItemGroup>
|
||||
<EmbeddedResource Include="Migrations\**\*.sql" LogicalName="%(RecursiveDir)%(Filename)%(Extension)" />
|
||||
</ItemGroup>
|
||||
|
||||
<ItemGroup>
|
||||
<PackageReference Include="Npgsql" />
|
||||
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" />
|
||||
|
||||
@@ -10,17 +10,40 @@ namespace StellaOps.Scheduler.WebService.Bootstrap;
|
||||
/// <summary>
|
||||
/// Creates system-managed schedules on startup for each tenant.
|
||||
/// Missing schedules are inserted; existing ones are left untouched.
|
||||
/// Includes both scan schedules and Doctor health check schedules.
|
||||
/// </summary>
|
||||
internal sealed class SystemScheduleBootstrap : BackgroundService
|
||||
{
|
||||
private static readonly (string Slug, string Name, string Cron, ScheduleMode Mode, SelectorScope Scope)[] SystemSchedules =
|
||||
private static readonly (string Slug, string Name, string Cron, ScheduleMode Mode, SelectorScope Scope, string JobKind, ImmutableDictionary<string, object?>? PluginConfig)[] SystemSchedules =
|
||||
[
|
||||
("nightly-vuln-scan", "Nightly Vulnerability Scan", "0 2 * * *", ScheduleMode.AnalysisOnly, SelectorScope.AllImages),
|
||||
("advisory-refresh", "Continuous Advisory Refresh", "0 */4 * * *", ScheduleMode.ContentRefresh, SelectorScope.AllImages),
|
||||
("weekly-compliance-sweep", "Weekly Compliance Sweep", "0 3 * * 0", ScheduleMode.AnalysisOnly, SelectorScope.AllImages),
|
||||
("epss-score-update", "EPSS Score Update", "0 6 * * *", ScheduleMode.ContentRefresh, SelectorScope.AllImages),
|
||||
("reachability-reeval", "Reachability Re-evaluation", "0 5 * * 1-5", ScheduleMode.AnalysisOnly, SelectorScope.AllImages),
|
||||
("registry-sync", "Registry Sync", "0 */2 * * *", ScheduleMode.ContentRefresh, SelectorScope.AllImages),
|
||||
// Scan schedules (jobKind = "scan")
|
||||
("nightly-vuln-scan", "Nightly Vulnerability Scan", "0 2 * * *", ScheduleMode.AnalysisOnly, SelectorScope.AllImages, "scan", null),
|
||||
("advisory-refresh", "Continuous Advisory Refresh", "0 */4 * * *", ScheduleMode.ContentRefresh, SelectorScope.AllImages, "scan", null),
|
||||
("weekly-compliance-sweep", "Weekly Compliance Sweep", "0 3 * * 0", ScheduleMode.AnalysisOnly, SelectorScope.AllImages, "scan", null),
|
||||
("epss-score-update", "EPSS Score Update", "0 6 * * *", ScheduleMode.ContentRefresh, SelectorScope.AllImages, "scan", null),
|
||||
("reachability-reeval", "Reachability Re-evaluation", "0 5 * * 1-5", ScheduleMode.AnalysisOnly, SelectorScope.AllImages, "scan", null),
|
||||
("registry-sync", "Registry Sync", "0 */2 * * *", ScheduleMode.ContentRefresh, SelectorScope.AllImages, "scan", null),
|
||||
|
||||
// Doctor health check schedules (jobKind = "doctor")
|
||||
("doctor-full-daily", "Daily Health Check", "0 4 * * *", ScheduleMode.AnalysisOnly, SelectorScope.AllImages, "doctor",
|
||||
ImmutableDictionary.CreateRange<string, object?>(new KeyValuePair<string, object?>[]
|
||||
{
|
||||
new("doctorMode", "full"),
|
||||
new("timeoutSeconds", 300),
|
||||
})),
|
||||
("doctor-quick-hourly", "Hourly Quick Check", "0 * * * *", ScheduleMode.AnalysisOnly, SelectorScope.AllImages, "doctor",
|
||||
ImmutableDictionary.CreateRange<string, object?>(new KeyValuePair<string, object?>[]
|
||||
{
|
||||
new("doctorMode", "quick"),
|
||||
new("timeoutSeconds", 120),
|
||||
})),
|
||||
("doctor-compliance-weekly", "Weekly Compliance Audit", "0 5 * * 0", ScheduleMode.AnalysisOnly, SelectorScope.AllImages, "doctor",
|
||||
ImmutableDictionary.CreateRange<string, object?>(new KeyValuePair<string, object?>[]
|
||||
{
|
||||
new("doctorMode", "categories"),
|
||||
new("categories", new[] { "compliance" }),
|
||||
new("timeoutSeconds", 600),
|
||||
})),
|
||||
];
|
||||
|
||||
// TODO: Replace with real multi-tenant resolution when available.
|
||||
@@ -65,7 +88,7 @@ internal sealed class SystemScheduleBootstrap : BackgroundService
|
||||
{
|
||||
var now = DateTimeOffset.UtcNow;
|
||||
|
||||
foreach (var (slug, name, cron, mode, selectorScope) in SystemSchedules)
|
||||
foreach (var (slug, name, cron, mode, selectorScope, jobKind, pluginConfig) in SystemSchedules)
|
||||
{
|
||||
var scheduleId = $"sys-{tenantId}-{slug}";
|
||||
|
||||
@@ -96,10 +119,12 @@ internal sealed class SystemScheduleBootstrap : BackgroundService
|
||||
updatedBy: "system-bootstrap",
|
||||
subscribers: null,
|
||||
schemaVersion: SchedulerSchemaVersions.Schedule,
|
||||
source: "system");
|
||||
source: "system",
|
||||
jobKind: jobKind,
|
||||
pluginConfig: pluginConfig);
|
||||
|
||||
await repository.UpsertAsync(schedule, cancellationToken).ConfigureAwait(false);
|
||||
_logger.LogInformation("Created system schedule {ScheduleId} ({Name}) for tenant {TenantId}.", scheduleId, name, tenantId);
|
||||
_logger.LogInformation("Created system schedule {ScheduleId} ({Name}, jobKind={JobKind}) for tenant {TenantId}.", scheduleId, name, jobKind, tenantId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,7 +18,9 @@ internal sealed record ScheduleCreateRequest(
|
||||
[property: JsonPropertyName("limits")] ScheduleLimits? Limits = null,
|
||||
[property: JsonPropertyName("subscribers")] ImmutableArray<string>? Subscribers = null,
|
||||
[property: JsonPropertyName("enabled")] bool Enabled = true,
|
||||
[property: JsonPropertyName("source")] string? Source = null);
|
||||
[property: JsonPropertyName("source")] string? Source = null,
|
||||
[property: JsonPropertyName("jobKind")] string? JobKind = null,
|
||||
[property: JsonPropertyName("pluginConfig")] ImmutableDictionary<string, object?>? PluginConfig = null);
|
||||
|
||||
internal sealed record ScheduleUpdateRequest(
|
||||
[property: JsonPropertyName("name")] string? Name,
|
||||
@@ -29,7 +31,9 @@ internal sealed record ScheduleUpdateRequest(
|
||||
[property: JsonPropertyName("onlyIf")] ScheduleOnlyIf? OnlyIf,
|
||||
[property: JsonPropertyName("notify")] ScheduleNotify? Notify,
|
||||
[property: JsonPropertyName("limits")] ScheduleLimits? Limits,
|
||||
[property: JsonPropertyName("subscribers")] ImmutableArray<string>? Subscribers);
|
||||
[property: JsonPropertyName("subscribers")] ImmutableArray<string>? Subscribers,
|
||||
[property: JsonPropertyName("jobKind")] string? JobKind = null,
|
||||
[property: JsonPropertyName("pluginConfig")] ImmutableDictionary<string, object?>? PluginConfig = null);
|
||||
|
||||
internal sealed record ScheduleCollectionResponse(IReadOnlyList<ScheduleResponse> Schedules);
|
||||
|
||||
|
||||
@@ -26,7 +26,9 @@ public sealed record Schedule
|
||||
string updatedBy,
|
||||
ImmutableArray<string>? subscribers = null,
|
||||
string? schemaVersion = null,
|
||||
string source = "user")
|
||||
string source = "user",
|
||||
string jobKind = "scan",
|
||||
ImmutableDictionary<string, object?>? pluginConfig = null)
|
||||
: this(
|
||||
id,
|
||||
tenantId,
|
||||
@@ -45,7 +47,9 @@ public sealed record Schedule
|
||||
updatedAt,
|
||||
updatedBy,
|
||||
schemaVersion,
|
||||
source)
|
||||
source,
|
||||
jobKind,
|
||||
pluginConfig)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -68,7 +72,9 @@ public sealed record Schedule
|
||||
DateTimeOffset updatedAt,
|
||||
string updatedBy,
|
||||
string? schemaVersion = null,
|
||||
string source = "user")
|
||||
string source = "user",
|
||||
string jobKind = "scan",
|
||||
ImmutableDictionary<string, object?>? pluginConfig = null)
|
||||
{
|
||||
Id = Validation.EnsureId(id, nameof(id));
|
||||
TenantId = Validation.EnsureTenantId(tenantId, nameof(tenantId));
|
||||
@@ -92,6 +98,8 @@ public sealed record Schedule
|
||||
UpdatedBy = Validation.EnsureSimpleIdentifier(updatedBy, nameof(updatedBy));
|
||||
SchemaVersion = SchedulerSchemaVersions.EnsureSchedule(schemaVersion);
|
||||
Source = string.IsNullOrWhiteSpace(source) ? "user" : source.Trim();
|
||||
JobKind = string.IsNullOrWhiteSpace(jobKind) ? "scan" : jobKind.Trim().ToLowerInvariant();
|
||||
PluginConfig = pluginConfig;
|
||||
|
||||
if (Selection.TenantId is not null && !string.Equals(Selection.TenantId, TenantId, StringComparison.Ordinal))
|
||||
{
|
||||
@@ -135,6 +143,20 @@ public sealed record Schedule
|
||||
public string UpdatedBy { get; }
|
||||
|
||||
public string Source { get; } = "user";
|
||||
|
||||
/// <summary>
|
||||
/// Identifies which <see cref="Plugin.ISchedulerJobPlugin"/> handles this schedule.
|
||||
/// Defaults to "scan" for backward compatibility with existing schedules.
|
||||
/// </summary>
|
||||
public string JobKind { get; } = "scan";
|
||||
|
||||
/// <summary>
|
||||
/// Plugin-specific configuration stored as JSON. For scan jobs this is null
|
||||
/// (mode/selector cover everything). For other job kinds (e.g., "doctor") this
|
||||
/// contains plugin-specific settings.
|
||||
/// </summary>
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
|
||||
public ImmutableDictionary<string, object?>? PluginConfig { get; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
|
||||
@@ -0,0 +1,16 @@
|
||||
-- Migration: 007_add_job_kind_plugin_config
-- Adds plugin architecture columns to the schedules table.
-- job_kind: identifies which ISchedulerJobPlugin handles the schedule (default: 'scan')
-- plugin_config: optional JSON blob for plugin-specific configuration
-- Both ALTER statements use IF NOT EXISTS so the migration is idempotent.

-- Existing rows default to 'scan' so pre-plugin schedules keep their behavior.
ALTER TABLE scheduler.schedules
    ADD COLUMN IF NOT EXISTS job_kind TEXT NOT NULL DEFAULT 'scan';

-- Nullable: scan jobs carry no plugin configuration (mode/selector suffice).
ALTER TABLE scheduler.schedules
    ADD COLUMN IF NOT EXISTS plugin_config JSONB;

COMMENT ON COLUMN scheduler.schedules.job_kind IS 'Routes the schedule to the correct ISchedulerJobPlugin implementation (scan, doctor, policy-sweep, etc.)';
COMMENT ON COLUMN scheduler.schedules.plugin_config IS 'Plugin-specific configuration as JSON. Null for scan jobs (mode/selector suffice). Validated by the plugin on create/update.';

-- Index for filtering schedules by job kind (common query for plugin-specific endpoints).
-- Partial index: soft-deleted schedules (deleted_at set) are excluded.
CREATE INDEX IF NOT EXISTS idx_schedules_job_kind ON scheduler.schedules(job_kind) WHERE deleted_at IS NULL;
|
||||
@@ -0,0 +1,43 @@
|
||||
-- Migration: 008_doctor_trends_table
-- Creates the doctor_trends table for the Doctor scheduler plugin.
-- Stores health check trend data points from Doctor scheduled runs.
-- Idempotent: CREATE TABLE/INDEX use IF NOT EXISTS; the RLS policy is
-- dropped and recreated so re-running the migration converges.

CREATE TABLE IF NOT EXISTS scheduler.doctor_trends (
    id BIGSERIAL PRIMARY KEY,
    timestamp TIMESTAMPTZ NOT NULL,              -- when the data point was recorded
    tenant_id TEXT NOT NULL,                     -- owning tenant (enforced by RLS below)
    check_id TEXT NOT NULL,                      -- individual health check identifier
    plugin_id TEXT NOT NULL,                     -- Doctor plugin that produced the check
    category TEXT NOT NULL,                      -- check category (e.g. compliance)
    run_id TEXT NOT NULL,                        -- Doctor run that produced this point
    status TEXT NOT NULL,                        -- check outcome status
    health_score INT NOT NULL DEFAULT 0,
    duration_ms INT NOT NULL DEFAULT 0,
    evidence_values JSONB NOT NULL DEFAULT '{}'  -- raw evidence payload for the check
);

-- Performance indexes for common query patterns
CREATE INDEX IF NOT EXISTS idx_doctor_trends_tenant_check
    ON scheduler.doctor_trends(tenant_id, check_id, timestamp DESC);

CREATE INDEX IF NOT EXISTS idx_doctor_trends_tenant_category
    ON scheduler.doctor_trends(tenant_id, category, timestamp DESC);

CREATE INDEX IF NOT EXISTS idx_doctor_trends_tenant_timestamp
    ON scheduler.doctor_trends(tenant_id, timestamp DESC);

CREATE INDEX IF NOT EXISTS idx_doctor_trends_run
    ON scheduler.doctor_trends(run_id);

-- Plain ascending index on timestamp supports retention pruning scans.
CREATE INDEX IF NOT EXISTS idx_doctor_trends_timestamp_prune
    ON scheduler.doctor_trends(timestamp);

-- Row-Level Security: FORCE applies the policy even to the table owner.
ALTER TABLE scheduler.doctor_trends ENABLE ROW LEVEL SECURITY;
ALTER TABLE scheduler.doctor_trends FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS doctor_trends_tenant_isolation ON scheduler.doctor_trends;
CREATE POLICY doctor_trends_tenant_isolation ON scheduler.doctor_trends FOR ALL
    USING (tenant_id = scheduler_app.require_current_tenant())
    WITH CHECK (tenant_id = scheduler_app.require_current_tenant());

COMMENT ON TABLE scheduler.doctor_trends IS 'Health check trend data points from Doctor plugin scheduled runs. Retained per configurable retention period (default 365 days).';
|
||||
@@ -30,11 +30,13 @@ public sealed class ScheduleRepository : RepositoryBase<SchedulerDataSource>, IS
|
||||
INSERT INTO scheduler.schedules (
|
||||
id, tenant_id, name, description, enabled, cron_expression, timezone, mode,
|
||||
selection, only_if, notify, limits, subscribers, created_at, created_by,
|
||||
updated_at, updated_by, deleted_at, deleted_by, schema_version, source)
|
||||
updated_at, updated_by, deleted_at, deleted_by, schema_version, source,
|
||||
job_kind, plugin_config)
|
||||
VALUES (
|
||||
@id, @tenant_id, @name, @description, @enabled, @cron_expression, @timezone, @mode,
|
||||
@selection, @only_if, @notify, @limits, @subscribers, @created_at, @created_by,
|
||||
@updated_at, @updated_by, NULL, NULL, @schema_version, @source)
|
||||
@updated_at, @updated_by, NULL, NULL, @schema_version, @source,
|
||||
@job_kind, @plugin_config)
|
||||
ON CONFLICT (id) DO UPDATE SET
|
||||
name = EXCLUDED.name,
|
||||
description = EXCLUDED.description,
|
||||
@@ -51,7 +53,9 @@ public sealed class ScheduleRepository : RepositoryBase<SchedulerDataSource>, IS
|
||||
updated_by = EXCLUDED.updated_by,
|
||||
schema_version = EXCLUDED.schema_version,
|
||||
deleted_at = NULL,
|
||||
deleted_by = NULL
|
||||
deleted_by = NULL,
|
||||
job_kind = EXCLUDED.job_kind,
|
||||
plugin_config = EXCLUDED.plugin_config
|
||||
""";
|
||||
|
||||
await using var command = CreateCommand(sql, conn);
|
||||
@@ -75,6 +79,10 @@ public sealed class ScheduleRepository : RepositoryBase<SchedulerDataSource>, IS
|
||||
AddParameter(command, "updated_by", schedule.UpdatedBy);
|
||||
AddParameter(command, "schema_version", schedule.SchemaVersion ?? (object)DBNull.Value);
|
||||
AddParameter(command, "source", schedule.Source);
|
||||
AddParameter(command, "job_kind", schedule.JobKind);
|
||||
AddJsonbParameter(command, "plugin_config", schedule.PluginConfig is not null
|
||||
? JsonSerializer.Serialize(schedule.PluginConfig, _serializer)
|
||||
: null);
|
||||
|
||||
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
@@ -162,6 +170,18 @@ public sealed class ScheduleRepository : RepositoryBase<SchedulerDataSource>, IS
|
||||
|
||||
private Schedule MapSchedule(NpgsqlDataReader reader)
|
||||
{
|
||||
// Read plugin_config as nullable JSON string
|
||||
var pluginConfigOrdinal = reader.GetOrdinal("plugin_config");
|
||||
ImmutableDictionary<string, object?>? pluginConfig = null;
|
||||
if (!reader.IsDBNull(pluginConfigOrdinal))
|
||||
{
|
||||
var pluginConfigJson = reader.GetString(pluginConfigOrdinal);
|
||||
if (!string.IsNullOrWhiteSpace(pluginConfigJson))
|
||||
{
|
||||
pluginConfig = JsonSerializer.Deserialize<ImmutableDictionary<string, object?>>(pluginConfigJson, _serializer);
|
||||
}
|
||||
}
|
||||
|
||||
return new Schedule(
|
||||
reader.GetString(reader.GetOrdinal("id")),
|
||||
reader.GetString(reader.GetOrdinal("tenant_id")),
|
||||
@@ -180,6 +200,8 @@ public sealed class ScheduleRepository : RepositoryBase<SchedulerDataSource>, IS
|
||||
DateTime.SpecifyKind(reader.GetDateTime(reader.GetOrdinal("updated_at")), DateTimeKind.Utc),
|
||||
reader.GetString(reader.GetOrdinal("updated_by")),
|
||||
GetNullableString(reader, reader.GetOrdinal("schema_version")),
|
||||
source: GetNullableString(reader, reader.GetOrdinal("source")) ?? "user");
|
||||
source: GetNullableString(reader, reader.GetOrdinal("source")) ?? "user",
|
||||
jobKind: GetNullableString(reader, reader.GetOrdinal("job_kind")) ?? "scan",
|
||||
pluginConfig: pluginConfig);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,36 @@
|
||||
using StellaOps.Scheduler.Models;

namespace StellaOps.Scheduler.Plugin;

/// <summary>
/// Callback interface for plugins to report progress and update Run state.
/// Implementations are provided by the Scheduler infrastructure and persist
/// progress updates to storage.
/// </summary>
public interface IRunProgressReporter
{
    /// <summary>
    /// Reports progress as a fraction of estimated steps.
    /// </summary>
    /// <param name="completed">Number of steps completed so far.</param>
    /// <param name="total">Total number of steps expected.</param>
    /// <param name="message">Optional human-readable progress message.</param>
    /// <param name="ct">Cancellation token.</param>
    /// <returns>A task that completes once the progress update has been recorded.</returns>
    Task ReportProgressAsync(int completed, int total, string? message = null, CancellationToken ct = default);

    /// <summary>
    /// Transitions the Run to a new state (e.g., Running, Completed, Error).
    /// </summary>
    /// <param name="newState">Target state.</param>
    /// <param name="error">Error message when transitioning to Error state.</param>
    /// <param name="ct">Cancellation token.</param>
    /// <returns>A task that completes once the state transition has been recorded.</returns>
    Task TransitionStateAsync(RunState newState, string? error = null, CancellationToken ct = default);

    /// <summary>
    /// Appends a log entry to the Run's execution log.
    /// </summary>
    /// <param name="message">Log message.</param>
    /// <param name="level">Log level (info, warn, error).</param>
    /// <param name="ct">Cancellation token.</param>
    /// <returns>A task that completes once the log entry has been recorded.</returns>
    Task AppendLogAsync(string message, string level = "info", CancellationToken ct = default);
}
|
||||
@@ -0,0 +1,71 @@
|
||||
using Microsoft.AspNetCore.Routing;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Scheduler.Models;

namespace StellaOps.Scheduler.Plugin;

/// <summary>
/// Defines a pluggable job type for the Scheduler service.
/// Each implementation handles a specific <see cref="JobKind"/> (e.g., "scan", "doctor", "policy-sweep").
/// The Scheduler routes cron triggers and manual runs to the correct plugin based on
/// <see cref="Schedule.JobKind"/>.
/// </summary>
public interface ISchedulerJobPlugin
{
    /// <summary>
    /// Unique, stable identifier for this job kind (e.g., "scan", "doctor", "policy-sweep").
    /// Stored in the Schedule record; must be immutable once published.
    /// </summary>
    string JobKind { get; }

    /// <summary>
    /// Human-readable display name for the UI.
    /// </summary>
    string DisplayName { get; }

    /// <summary>
    /// Plugin version for compatibility checking.
    /// </summary>
    Version Version { get; }

    /// <summary>
    /// Creates a typed execution plan from a Schedule + Run.
    /// Called when the cron fires or a manual run is created.
    /// Returns a plan object that the Scheduler persists as the Run's plan payload.
    /// </summary>
    Task<JobPlan> CreatePlanAsync(JobPlanContext context, CancellationToken ct);

    /// <summary>
    /// Executes the plan. Called by the Worker Host.
    /// Must be idempotent and support cancellation.
    /// Updates Run state via the provided <see cref="IRunProgressReporter"/>.
    /// </summary>
    Task ExecuteAsync(JobExecutionContext context, CancellationToken ct);

    /// <summary>
    /// Validates plugin-specific configuration stored in <see cref="Schedule.PluginConfig"/>.
    /// Called on schedule create/update.
    /// </summary>
    Task<JobConfigValidationResult> ValidateConfigAsync(
        IReadOnlyDictionary<string, object?> pluginConfig,
        CancellationToken ct);

    /// <summary>
    /// Returns the JSON schema for plugin-specific configuration, enabling UI-driven forms.
    /// Returns null if the plugin requires no configuration.
    /// </summary>
    string? GetConfigJsonSchema();

    /// <summary>
    /// Registers plugin-specific services into DI.
    /// Called once during host startup.
    /// </summary>
    void ConfigureServices(IServiceCollection services, IConfiguration configuration);

    /// <summary>
    /// Registers plugin-specific HTTP endpoints (optional).
    /// Called during app.Map* phase.
    /// </summary>
    void MapEndpoints(IEndpointRouteBuilder routes);
}
|
||||
@@ -0,0 +1,25 @@
|
||||
namespace StellaOps.Scheduler.Plugin;

/// <summary>
/// Registry of available scheduler job plugins keyed by <see cref="ISchedulerJobPlugin.JobKind"/>.
/// Used by the Scheduler to route schedule triggers and manual runs to the correct plugin.
/// </summary>
public interface ISchedulerPluginRegistry
{
    /// <summary>
    /// Registers a plugin. Throws if a plugin with the same <see cref="ISchedulerJobPlugin.JobKind"/>
    /// is already registered.
    /// </summary>
    void Register(ISchedulerJobPlugin plugin);

    /// <summary>
    /// Resolves the plugin for the given job kind.
    /// Returns null if no plugin is registered for the kind.
    /// </summary>
    ISchedulerJobPlugin? Resolve(string jobKind);

    /// <summary>
    /// Returns all registered plugin summaries.
    /// </summary>
    IReadOnlyList<(string JobKind, string DisplayName)> ListRegistered();
}
|
||||
@@ -0,0 +1,21 @@
|
||||
namespace StellaOps.Scheduler.Plugin;

/// <summary>
/// Result of plugin configuration validation.
/// Returned by <see cref="ISchedulerJobPlugin.ValidateConfigAsync"/>.
/// </summary>
public sealed record JobConfigValidationResult(
    bool IsValid,
    IReadOnlyList<string> Errors)
{
    /// <summary>
    /// Shared successful validation result carrying no errors.
    /// </summary>
    public static JobConfigValidationResult Success { get; } =
        new(IsValid: true, Errors: Array.Empty<string>());

    /// <summary>
    /// Builds a failed validation result from the supplied error messages.
    /// </summary>
    public static JobConfigValidationResult Failure(params string[] errors)
    {
        return new JobConfigValidationResult(IsValid: false, Errors: errors);
    }
}
|
||||
@@ -0,0 +1,16 @@
|
||||
using StellaOps.Scheduler.Models;

namespace StellaOps.Scheduler.Plugin;

/// <summary>
/// Context passed to <see cref="ISchedulerJobPlugin.ExecuteAsync"/>.
/// Provides access to the schedule, run, plan, a progress reporter for
/// updating run state, the DI container, and a deterministic time source.
/// </summary>
/// <param name="Schedule">The schedule definition being executed.</param>
/// <param name="Run">The run record tracking this execution.</param>
/// <param name="Plan">The plan previously produced by <see cref="ISchedulerJobPlugin.CreatePlanAsync"/>.</param>
/// <param name="Reporter">Callback for progress, state transitions, and run logging.</param>
/// <param name="Services">DI container for resolving plugin-registered services.</param>
/// <param name="TimeProvider">Injected time source so executions stay testable/deterministic.</param>
public sealed record JobExecutionContext(
    Schedule Schedule,
    Run Run,
    JobPlan Plan,
    IRunProgressReporter Reporter,
    IServiceProvider Services,
    TimeProvider TimeProvider);
|
||||
@@ -0,0 +1,11 @@
|
||||
namespace StellaOps.Scheduler.Plugin;

/// <summary>
/// The plan produced by a plugin. Serialized to JSON and stored on the Run.
/// Contains the <see cref="JobKind"/> to identify which plugin created it,
/// a typed payload dictionary, and an estimated step count for progress tracking.
/// </summary>
/// <param name="JobKind">Job kind of the plugin that produced this plan.</param>
/// <param name="Payload">Plugin-defined plan data, serialized with the Run.</param>
/// <param name="EstimatedSteps">Expected step count for progress reporting (defaults to 1).</param>
public sealed record JobPlan(
    string JobKind,
    IReadOnlyDictionary<string, object?> Payload,
    int EstimatedSteps = 1);
|
||||
@@ -0,0 +1,14 @@
|
||||
using StellaOps.Scheduler.Models;

namespace StellaOps.Scheduler.Plugin;

/// <summary>
/// Immutable context passed to <see cref="ISchedulerJobPlugin.CreatePlanAsync"/>.
/// Provides access to the schedule definition, the newly created run record,
/// the DI container, and a deterministic time source.
/// </summary>
/// <param name="Schedule">The schedule being planned.</param>
/// <param name="Run">The newly created run record for this trigger.</param>
/// <param name="Services">DI container for resolving plugin-registered services.</param>
/// <param name="TimeProvider">Injected time source so planning stays testable/deterministic.</param>
public sealed record JobPlanContext(
    Schedule Schedule,
    Run Run,
    IServiceProvider Services,
    TimeProvider TimeProvider);
|
||||
@@ -0,0 +1,80 @@
|
||||
using Microsoft.AspNetCore.Routing;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Scheduler.Models;

namespace StellaOps.Scheduler.Plugin;

/// <summary>
/// Built-in plugin wrapping the existing scan scheduling logic.
/// This is the default plugin for all existing schedules (JobKind = "scan").
/// It delegates to the existing run-planning and worker-segment pipeline
/// with zero behavioral change.
/// </summary>
public sealed class ScanJobPlugin : ISchedulerJobPlugin
{
    /// <inheritdoc />
    public string JobKind => "scan";

    /// <inheritdoc />
    public string DisplayName => "Vulnerability Scan";

    /// <inheritdoc />
    public Version Version { get; } = new(1, 0, 0);

    /// <inheritdoc />
    public Task<JobPlan> CreatePlanAsync(JobPlanContext context, CancellationToken ct)
    {
        // The existing run-planning pipeline handles scan jobs end-to-end;
        // the plan payload only captures mode/selector scope for traceability.
        var schedule = context.Schedule;

        var planPayload = new Dictionary<string, object?>(capacity: 3)
        {
            ["mode"] = schedule.Mode.ToString(),
            ["selectorScope"] = schedule.Selection.Scope.ToString(),
            ["scheduleId"] = schedule.Id,
        };

        return Task.FromResult(new JobPlan("scan", planPayload, EstimatedSteps: 1));
    }

    /// <inheritdoc />
    public Task ExecuteAsync(JobExecutionContext context, CancellationToken ct)
        // Scan execution stays with the existing Worker Host segment processing;
        // the Scheduler's built-in run-planning and queue-dispatch pipeline remains
        // the execution path, so this method is a deliberate no-op pass-through.
        => Task.CompletedTask;

    /// <inheritdoc />
    public Task<JobConfigValidationResult> ValidateConfigAsync(
        IReadOnlyDictionary<string, object?> pluginConfig,
        CancellationToken ct)
        // Scan jobs are configured via the standard Mode/Selector fields, not
        // PluginConfig; any PluginConfig present on a scan schedule is ignored
        // but still considered valid.
        => Task.FromResult(JobConfigValidationResult.Success);

    /// <inheritdoc />
    public string? GetConfigJsonSchema() => null;

    /// <inheritdoc />
    public void ConfigureServices(IServiceCollection services, IConfiguration configuration)
    {
        // Intentionally empty: scan services are already registered in Program.cs.
    }

    /// <inheritdoc />
    public void MapEndpoints(IEndpointRouteBuilder routes)
    {
        // Intentionally empty: scan endpoints are already registered in Program.cs.
    }
}
|
||||
@@ -0,0 +1,52 @@
|
||||
using System.Collections.Concurrent;

namespace StellaOps.Scheduler.Plugin;

/// <summary>
/// Thread-safe in-memory registry for scheduler job plugins.
/// Plugins are registered at startup and resolved at trigger time.
/// Job-kind lookup is case-insensitive.
/// </summary>
public sealed class SchedulerPluginRegistry : ISchedulerPluginRegistry
{
    private readonly ConcurrentDictionary<string, ISchedulerJobPlugin> _registrations =
        new(StringComparer.OrdinalIgnoreCase);

    /// <inheritdoc />
    public void Register(ISchedulerJobPlugin plugin)
    {
        ArgumentNullException.ThrowIfNull(plugin);

        if (string.IsNullOrWhiteSpace(plugin.JobKind))
        {
            throw new ArgumentException("Plugin JobKind must not be null or whitespace.", nameof(plugin));
        }

        if (_registrations.TryAdd(plugin.JobKind, plugin))
        {
            return;
        }

        // Duplicate registration: surface both implementations in the error.
        throw new InvalidOperationException(
            $"A scheduler plugin with JobKind '{plugin.JobKind}' is already registered " +
            $"(existing: {_registrations[plugin.JobKind].GetType().FullName}, " +
            $"new: {plugin.GetType().FullName}).");
    }

    /// <inheritdoc />
    public ISchedulerJobPlugin? Resolve(string jobKind)
    {
        if (string.IsNullOrWhiteSpace(jobKind))
        {
            return null;
        }

        _registrations.TryGetValue(jobKind, out var match);
        return match;
    }

    /// <inheritdoc />
    public IReadOnlyList<(string JobKind, string DisplayName)> ListRegistered()
    {
        var summaries = _registrations.Values
            .Select(registered => (registered.JobKind, registered.DisplayName))
            .OrderBy(entry => entry.JobKind, StringComparer.OrdinalIgnoreCase)
            .ToList();

        return summaries.AsReadOnly();
    }
}
|
||||
@@ -0,0 +1,20 @@
|
||||
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <LangVersion>preview</LangVersion>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
    <!-- Namespace is the shared plugin contract namespace; assembly name carries the
         ".Abstractions" suffix so runtime plugin DLL scanning can distinguish it. -->
    <RootNamespace>StellaOps.Scheduler.Plugin</RootNamespace>
    <AssemblyName>StellaOps.Scheduler.Plugin.Abstractions</AssemblyName>
    <Description>Plugin contract abstractions for the StellaOps Scheduler job plugin architecture</Description>
  </PropertyGroup>

  <ItemGroup>
    <ProjectReference Include="..\StellaOps.Scheduler.Models\StellaOps.Scheduler.Models.csproj" />
  </ItemGroup>

  <ItemGroup>
    <!-- ASP.NET Core framework reference needed for IEndpointRouteBuilder in ISchedulerJobPlugin.MapEndpoints. -->
    <FrameworkReference Include="Microsoft.AspNetCore.App" />
  </ItemGroup>
</Project>
|
||||
54
src/JobEngine/StellaOps.Scheduler.plugins/AGENTS.md
Normal file
54
src/JobEngine/StellaOps.Scheduler.plugins/AGENTS.md
Normal file
@@ -0,0 +1,54 @@
|
||||
# AGENTS.md -- Scheduler Plugins
|
||||
|
||||
## Overview
|
||||
|
||||
This directory contains **scheduler job plugins** that extend the Scheduler service
|
||||
with new job types. Each plugin implements `ISchedulerJobPlugin` from the
|
||||
`StellaOps.Scheduler.Plugin.Abstractions` library.
|
||||
|
||||
## Plugin Architecture
|
||||
|
||||
Plugins are discovered in two ways:
|
||||
1. **Built-in**: `ScanJobPlugin` is registered unconditionally in `Program.cs`.
|
||||
2. **Assembly-loaded**: The `PluginHost.LoadPlugins()` pipeline scans `plugins/scheduler/`
|
||||
for DLLs matching `StellaOps.Scheduler.Plugin.*.dll`. Any type implementing
|
||||
`ISchedulerJobPlugin` is instantiated and registered.
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
StellaOps.Scheduler.plugins/
|
||||
scheduler/ # Runtime plugin DLLs (empty in dev; populated by build)
|
||||
StellaOps.Scheduler.Plugin.Doctor/ # Doctor health check plugin (source)
|
||||
```
|
||||
|
||||
## Creating a New Plugin
|
||||
|
||||
1. Create a new class library under `StellaOps.Scheduler.plugins/`.
|
||||
2. Reference `StellaOps.Scheduler.Plugin.Abstractions`.
|
||||
3. Implement `ISchedulerJobPlugin`:
|
||||
- `JobKind`: unique string identifier (stored in `Schedule.job_kind`).
|
||||
- `CreatePlanAsync`: build an execution plan from the schedule config.
|
||||
- `ExecuteAsync`: run the plan (HTTP calls, computations, etc.).
|
||||
- `ValidateConfigAsync`: validate the `Schedule.PluginConfig` JSON.
|
||||
- `ConfigureServices`: register plugin-specific DI services.
|
||||
- `MapEndpoints`: register plugin-specific HTTP endpoints.
|
||||
4. Build the DLL and place it in `plugins/scheduler/` (or add a project reference
|
||||
in the WebService csproj for development).
|
||||
|
||||
## Existing Plugins
|
||||
|
||||
| Plugin | JobKind | Description |
|
||||
|--------|---------|-------------|
|
||||
| ScanJobPlugin | `scan` | Built-in; wraps existing scan scheduling logic |
|
||||
| DoctorJobPlugin | `doctor` | Doctor health check scheduling via HTTP to Doctor WebService |
|
||||
|
||||
## Schedule Model Extensions
|
||||
|
||||
- `Schedule.JobKind` (string, default "scan"): routes to the correct plugin.
|
||||
- `Schedule.PluginConfig` (JSONB, nullable): plugin-specific configuration.
|
||||
|
||||
## Testing
|
||||
|
||||
Plugin tests should be placed in `StellaOps.Scheduler.__Tests/` alongside
|
||||
the existing Scheduler test projects.
|
||||
@@ -0,0 +1,361 @@
|
||||
using System.Diagnostics;
|
||||
using System.Net.Http.Json;
|
||||
using System.Text.Json;
|
||||
using Microsoft.AspNetCore.Http;
|
||||
using Microsoft.AspNetCore.Routing;
|
||||
using Microsoft.Extensions.Configuration;
|
||||
using Microsoft.Extensions.DependencyInjection;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using StellaOps.Scheduler.Models;
|
||||
using StellaOps.Scheduler.Plugin.Doctor.Endpoints;
|
||||
using StellaOps.Scheduler.Plugin.Doctor.Models;
|
||||
using StellaOps.Scheduler.Plugin.Doctor.Persistence;
|
||||
using StellaOps.Scheduler.Plugin.Doctor.Services;
|
||||
|
||||
namespace StellaOps.Scheduler.Plugin.Doctor;
|
||||
|
||||
/// <summary>
|
||||
/// Scheduler job plugin for Doctor health checks.
|
||||
/// Replaces the standalone Doctor Scheduler service by integrating Doctor
|
||||
/// scheduling, execution, trend storage, and alert evaluation directly
|
||||
/// into the Scheduler service.
|
||||
/// </summary>
|
||||
public sealed class DoctorJobPlugin : ISchedulerJobPlugin
|
||||
{
|
||||
/// <inheritdoc />
|
||||
public string JobKind => "doctor";
|
||||
|
||||
/// <inheritdoc />
|
||||
public string DisplayName => "Doctor Health Checks";
|
||||
|
||||
/// <inheritdoc />
|
||||
public Version Version { get; } = new(1, 0, 0);
|
||||
|
||||
/// <inheritdoc />
|
||||
public Task<JobPlan> CreatePlanAsync(JobPlanContext context, CancellationToken ct)
|
||||
{
|
||||
var config = DoctorScheduleConfig.FromPluginConfig(context.Schedule.PluginConfig);
|
||||
|
||||
var payload = new Dictionary<string, object?>
|
||||
{
|
||||
["doctorMode"] = config.DoctorMode,
|
||||
["categories"] = config.Categories,
|
||||
["plugins"] = config.Plugins,
|
||||
["timeoutSeconds"] = config.TimeoutSeconds,
|
||||
["scheduleId"] = context.Schedule.Id,
|
||||
};
|
||||
|
||||
var plan = new JobPlan(
|
||||
JobKind: "doctor",
|
||||
Payload: payload,
|
||||
EstimatedSteps: 3); // trigger, poll, store trends
|
||||
|
||||
return Task.FromResult(plan);
|
||||
}
|
||||
|
||||
/// <inheritdoc />
public async Task ExecuteAsync(JobExecutionContext context, CancellationToken ct)
{
    // Resolve collaborators from the job's service provider; all are required,
    // so a mis-registered plugin fails fast here rather than mid-run.
    var logger = context.Services.GetRequiredService<ILoggerFactory>().CreateLogger<DoctorJobPlugin>();
    var httpClientFactory = context.Services.GetRequiredService<IHttpClientFactory>();
    var trendRepository = context.Services.GetRequiredService<IDoctorTrendRepository>();
    var alertService = context.Services.GetRequiredService<IDoctorAlertService>();

    var config = DoctorScheduleConfig.FromPluginConfig(context.Schedule.PluginConfig);
    var httpClient = httpClientFactory.CreateClient("DoctorApi"); // named client registered in ConfigureServices

    await context.Reporter.TransitionStateAsync(RunState.Running, ct: ct);
    await context.Reporter.AppendLogAsync($"Starting Doctor run (mode={config.DoctorMode})", ct: ct);

    try
    {
        // Step 1: Trigger Doctor run
        await context.Reporter.ReportProgressAsync(0, 3, "Triggering Doctor run", ct);
        var runId = await TriggerDoctorRunAsync(httpClient, config, ct);
        await context.Reporter.AppendLogAsync($"Doctor run triggered: {runId}", ct: ct);

        // Step 2: Wait for completion (polls until done or config.TimeoutSeconds elapses)
        await context.Reporter.ReportProgressAsync(1, 3, "Waiting for Doctor run completion", ct);
        var result = await WaitForRunCompletionAsync(httpClient, runId, config.TimeoutSeconds, ct);
        await context.Reporter.AppendLogAsync(
            $"Doctor run completed: {result.Status} (passed={result.PassedChecks}, warned={result.WarnedChecks}, failed={result.FailedChecks})",
            ct: ct);

        // Step 3: Store trend data and evaluate alerts
        await context.Reporter.ReportProgressAsync(2, 3, "Storing trend data", ct);
        await StoreTrendDataAsync(httpClient, trendRepository, runId, context.Schedule.TenantId, ct);
        await alertService.EvaluateAndSendAsync(config, result, ct);

        await context.Reporter.ReportProgressAsync(3, 3, "Completed", ct);
        await context.Reporter.TransitionStateAsync(RunState.Completed, ct: ct);
    }
    catch (Exception ex) when (!ct.IsCancellationRequested)
    {
        // Cancellation is deliberately excluded by the filter so that
        // OperationCanceledException propagates to the scheduler runtime.
        logger.LogError(ex, "Doctor plugin execution failed for schedule {ScheduleId}", context.Schedule.Id);
        await context.Reporter.AppendLogAsync($"Error: {ex.Message}", "error", ct);
        await context.Reporter.TransitionStateAsync(RunState.Error, ex.Message, ct);
    }
}
|
||||
|
||||
/// <inheritdoc />
/// <summary>
/// Validates plugin configuration values before a schedule is accepted:
/// doctorMode must be one of full/quick/categories/plugins; the mode-specific
/// list (categories/plugins) must be present for those modes; timeoutSeconds
/// must be a positive integer.
/// </summary>
public Task<JobConfigValidationResult> ValidateConfigAsync(
    IReadOnlyDictionary<string, object?> pluginConfig,
    CancellationToken ct)
{
    var errors = new List<string>();

    if (pluginConfig.TryGetValue("doctorMode", out var modeObj))
    {
        var mode = modeObj?.ToString()?.ToLowerInvariant();
        if (mode is not ("full" or "quick" or "categories" or "plugins"))
        {
            errors.Add($"Invalid doctorMode '{mode}'. Must be one of: full, quick, categories, plugins.");
        }

        if (mode == "categories" &&
            (!pluginConfig.TryGetValue("categories", out var cats) || cats is null))
        {
            errors.Add("categories list is required when doctorMode is 'categories'.");
        }

        if (mode == "plugins" &&
            (!pluginConfig.TryGetValue("plugins", out var plugins) || plugins is null))
        {
            errors.Add("plugins list is required when doctorMode is 'plugins'.");
        }
    }

    if (pluginConfig.TryGetValue("timeoutSeconds", out var timeoutObj) && timeoutObj is not null)
    {
        // FIX: the previous check only handled a boxed `int`. Config that has
        // round-tripped through JSON arrives as JsonElement (and other binders
        // may produce long/string), so invalid or negative timeouts silently
        // passed validation before.
        if (!TryReadInt(timeoutObj, out var timeout))
        {
            errors.Add("timeoutSeconds must be an integer.");
        }
        else if (timeout <= 0)
        {
            errors.Add("timeoutSeconds must be a positive integer.");
        }
    }

    return Task.FromResult(errors.Count == 0
        ? JobConfigValidationResult.Success
        : new JobConfigValidationResult(false, errors));
}

/// <summary>
/// Extracts an int from the loosely-typed value shapes plugin config may carry:
/// boxed int/long, a JSON number element, or a numeric string.
/// </summary>
private static bool TryReadInt(object value, out int result)
{
    switch (value)
    {
        case int i:
            result = i;
            return true;
        case long l when l >= int.MinValue && l <= int.MaxValue:
            result = (int)l;
            return true;
        case JsonElement { ValueKind: JsonValueKind.Number } element when element.TryGetInt32(out var n):
            result = n;
            return true;
        case string s when int.TryParse(s, out var parsed):
            result = parsed;
            return true;
        default:
            result = 0;
            return false;
    }
}
|
||||
|
||||
/// <inheritdoc />
public string? GetConfigJsonSchema()
{
    // Keep this schema in sync with DoctorScheduleConfig / DoctorAlertConfig and
    // the checks in ValidateConfigAsync (doctorMode enum, positive timeoutSeconds).
    return """
    {
        "$schema": "https://json-schema.org/draft/2020-12/schema",
        "type": "object",
        "properties": {
            "doctorMode": {
                "type": "string",
                "enum": ["full", "quick", "categories", "plugins"],
                "default": "full"
            },
            "categories": {
                "type": "array",
                "items": { "type": "string" }
            },
            "plugins": {
                "type": "array",
                "items": { "type": "string" }
            },
            "timeoutSeconds": {
                "type": "integer",
                "minimum": 1,
                "default": 300
            },
            "alerts": {
                "type": "object",
                "properties": {
                    "enabled": { "type": "boolean", "default": true },
                    "alertOnFail": { "type": "boolean", "default": true },
                    "alertOnWarn": { "type": "boolean", "default": false },
                    "alertOnStatusChange": { "type": "boolean", "default": true },
                    "channels": { "type": "array", "items": { "type": "string" } },
                    "emailRecipients": { "type": "array", "items": { "type": "string" } },
                    "webhookUrls": { "type": "array", "items": { "type": "string" } },
                    "minSeverity": { "type": "string", "default": "Fail" }
                }
            }
        }
    }
    """;
}
|
||||
|
||||
/// <inheritdoc />
public void ConfigureServices(IServiceCollection services, IConfiguration configuration)
{
    // Register the named HttpClient for the Doctor API.
    // The 600s client timeout is an upper bound for a single HTTP call; the
    // per-schedule run timeout (timeoutSeconds) is enforced separately by the
    // polling loop in WaitForRunCompletionAsync.
    var doctorApiUrl = configuration["Scheduler:Doctor:ApiUrl"] ?? "http://doctor-web.stella-ops.local";
    services.AddHttpClient("DoctorApi", client =>
    {
        client.BaseAddress = new Uri(doctorApiUrl);
        client.Timeout = TimeSpan.FromSeconds(600);
    });

    // Register trend repository.
    // NOTE(review): falls back to an empty connection string when neither setting
    // is present, deferring failure to the first database call — confirm intended.
    var connectionString = configuration["Scheduler:Storage:ConnectionString"]
        ?? configuration.GetConnectionString("Default")
        ?? "";
    services.AddSingleton<IDoctorTrendRepository>(sp =>
        new PostgresDoctorTrendRepository(connectionString, sp.GetRequiredService<ILogger<PostgresDoctorTrendRepository>>()));

    // Register alert service (console/logging implementation by default).
    services.AddSingleton<IDoctorAlertService, ConsoleAlertService>();
}
|
||||
|
||||
/// <inheritdoc />
public void MapEndpoints(IEndpointRouteBuilder routes)
{
    // Exposes the trend read API under /api/v1/scheduler/doctor/trends/*
    // (see DoctorTrendEndpoints for the individual routes).
    DoctorTrendEndpoints.Map(routes);
}
|
||||
|
||||
// --- Private helpers porting logic from ScheduleExecutor ---
|
||||
|
||||
/// <summary>
/// Starts an asynchronous Doctor run via the Doctor API and returns the new run's ID.
/// Throws InvalidOperationException when the API responds without a run ID.
/// </summary>
private static async Task<string> TriggerDoctorRunAsync(
    HttpClient httpClient, DoctorScheduleConfig config, CancellationToken ct)
{
    // @async = true asks the API for a fire-and-poll run rather than a blocking one.
    var runRequest = new
    {
        mode = config.DoctorMode,
        categories = config.Categories,
        plugins = config.Plugins,
        @async = true,
    };

    var triggerResponse = await httpClient.PostAsJsonAsync("/api/v1/doctor/run", runRequest, ct);
    triggerResponse.EnsureSuccessStatusCode();

    var body = await triggerResponse.Content.ReadFromJsonAsync<RunTriggerResponse>(cancellationToken: ct);

    return body?.RunId
        ?? throw new InvalidOperationException("No run ID returned from Doctor API");
}
|
||||
|
||||
/// <summary>
/// Polls the Doctor API until run <paramref name="runId"/> reports "completed"
/// or <paramref name="timeoutSeconds"/> elapses (then throws TimeoutException).
/// The completed run's check counts are mapped to an overall status:
/// any failed check => "failed", else any warned check => "warning", else "success".
/// </summary>
private static async Task<DoctorExecutionResult> WaitForRunCompletionAsync(
    HttpClient httpClient, string runId, int timeoutSeconds, CancellationToken ct)
{
    var timeout = TimeSpan.FromSeconds(timeoutSeconds);
    var sw = Stopwatch.StartNew();

    while (sw.Elapsed < timeout)
    {
        ct.ThrowIfCancellationRequested();

        var response = await httpClient.GetAsync($"/api/v1/doctor/run/{runId}", ct);
        if (!response.IsSuccessStatusCode)
        {
            // Transient API errors: back off longer (5s) and keep polling
            // until the overall timeout expires.
            await Task.Delay(TimeSpan.FromSeconds(5), ct);
            continue;
        }

        var result = await response.Content.ReadFromJsonAsync<RunStatusResponse>(cancellationToken: ct);
        if (result?.Status == "completed")
        {
            // Derive the overall outcome from check counts.
            var status = result.FailedChecks > 0 ? "failed"
                : result.WarnedChecks > 0 ? "warning"
                : "success";

            return new DoctorExecutionResult
            {
                RunId = runId,
                Status = status,
                TotalChecks = result.TotalChecks,
                PassedChecks = result.PassedChecks,
                WarnedChecks = result.WarnedChecks,
                FailedChecks = result.FailedChecks,
                SkippedChecks = result.SkippedChecks,
                HealthScore = result.HealthScore,
                CategoriesWithIssues = result.CategoriesWithIssues ?? [],
            };
        }

        // Run still in progress: poll again in 2 seconds.
        await Task.Delay(TimeSpan.FromSeconds(2), ct);
    }

    throw new TimeoutException($"Doctor run {runId} did not complete within {timeout.TotalSeconds}s");
}
|
||||
|
||||
/// <summary>
/// Fetches per-check results for a run and stores one trend data point per check.
/// Best-effort: returns silently when the results endpoint fails or yields no
/// results — the run outcome and alerting are handled independently in ExecuteAsync.
/// </summary>
private static async Task StoreTrendDataAsync(
    HttpClient httpClient,
    IDoctorTrendRepository trendRepository,
    string runId,
    string tenantId,
    CancellationToken ct)
{
    var response = await httpClient.GetAsync($"/api/v1/doctor/run/{runId}/results", ct);
    if (!response.IsSuccessStatusCode)
    {
        return;
    }

    var results = await response.Content.ReadFromJsonAsync<RunResultsResponse>(cancellationToken: ct);
    if (results?.Results is null || results.Results.Count == 0)
    {
        return;
    }

    // One shared timestamp so all points of the run group as a single sample.
    var timestamp = DateTimeOffset.UtcNow;
    var dataPoints = results.Results.Select(r => new DoctorTrendDataPoint
    {
        Timestamp = timestamp,
        TenantId = tenantId,
        CheckId = r.CheckId,
        PluginId = r.PluginId,
        Category = r.Category,
        RunId = runId,
        Status = r.Status,
        HealthScore = CalculateHealthScore(r.Status),
        DurationMs = r.DurationMs,
        EvidenceValues = ExtractTrendEvidence(r.Evidence),
    }).ToList();

    await trendRepository.StoreTrendDataAsync(dataPoints, ct);
}
|
||||
|
||||
/// <summary>
/// Maps a Doctor check status (case-insensitive) to a numeric health score:
/// pass=100, warn=50, fail=0, skip=-1 (sentinel for "not run"); anything else scores 0.
/// </summary>
private static int CalculateHealthScore(string status)
{
    var normalized = status.ToLowerInvariant();
    if (normalized == "pass")
    {
        return 100;
    }
    if (normalized == "warn")
    {
        return 50;
    }
    if (normalized == "skip")
    {
        return -1;
    }
    return 0; // "fail" and unrecognized statuses both score zero
}
|
||||
|
||||
/// <summary>
/// Selects up to 10 simple evidence values (numbers, strings, raw JSON elements)
/// suitable for trending, skipping keys containing "url" or "message"
/// (case-insensitive) since those are noisy and not useful as a time series.
/// Returns an empty map when evidence is null.
/// </summary>
private static IReadOnlyDictionary<string, string> ExtractTrendEvidence(
    Dictionary<string, object>? evidence)
{
    var selected = new Dictionary<string, string>();
    if (evidence is null)
    {
        return selected;
    }

    foreach (var (key, value) in evidence)
    {
        if (selected.Count == 10)
        {
            break; // cap matches the first-10-qualifying behavior of the original Take(10)
        }

        // Only keep scalar-ish values; composite objects do not trend well.
        if (value is not (int or long or double or string or JsonElement))
        {
            continue;
        }

        if (key.Contains("url", StringComparison.OrdinalIgnoreCase) ||
            key.Contains("message", StringComparison.OrdinalIgnoreCase))
        {
            continue;
        }

        selected[key] = value?.ToString() ?? string.Empty;
    }

    return selected;
}
|
||||
|
||||
// Response DTOs for the Doctor API (deserialized via ReadFromJsonAsync).

/// <summary>Response from POST /api/v1/doctor/run; carries the new run's ID.</summary>
private sealed record RunTriggerResponse(string RunId);

/// <summary>Response from GET /api/v1/doctor/run/{runId}; aggregate status and check counts.</summary>
private sealed record RunStatusResponse(
    string Status,
    int TotalChecks,
    int PassedChecks,
    int WarnedChecks,
    int FailedChecks,
    int SkippedChecks,
    int HealthScore,
    IReadOnlyList<string>? CategoriesWithIssues);

/// <summary>Response from GET /api/v1/doctor/run/{runId}/results; per-check results.</summary>
private sealed record RunResultsResponse(IReadOnlyList<CheckResult>? Results);

/// <summary>A single health-check result within a Doctor run.</summary>
private sealed record CheckResult(
    string CheckId,
    string PluginId,
    string Category,
    string Status,
    int DurationMs,
    Dictionary<string, object>? Evidence);
}
|
||||
@@ -0,0 +1,180 @@
|
||||
using Microsoft.AspNetCore.Builder;
|
||||
using Microsoft.AspNetCore.Http;
|
||||
using Microsoft.AspNetCore.Routing;
|
||||
using StellaOps.Scheduler.Plugin.Doctor.Services;
|
||||
|
||||
namespace StellaOps.Scheduler.Plugin.Doctor.Endpoints;
|
||||
|
||||
/// <summary>
/// Registers Doctor trend HTTP endpoints in the Scheduler service.
/// These endpoints serve the same data shapes as the former Doctor Scheduler service,
/// enabling the Doctor UI to work without code changes.
/// All routes accept optional from/to query parameters; when omitted the window
/// defaults to the last 30 days (see ResolveWindow).
/// </summary>
public static class DoctorTrendEndpoints
{
    public static void Map(IEndpointRouteBuilder routes)
    {
        var group = routes.MapGroup("/api/v1/scheduler/doctor/trends")
            .WithTags("Doctor", "Trends");

        // GET / — aggregated summaries across all checks in the window.
        group.MapGet("/", async (
            DateTimeOffset? from,
            DateTimeOffset? to,
            IDoctorTrendRepository repository,
            TimeProvider timeProvider,
            HttpContext httpContext,
            CancellationToken ct) =>
        {
            var tenantId = ResolveTenantId(httpContext);
            var window = ResolveWindow(from, to, timeProvider);
            if (window is null)
            {
                return Results.BadRequest(new { message = "Invalid time window: 'from' must be before 'to'." });
            }

            var summaries = await repository.GetTrendSummariesAsync(tenantId, window.Value.From, window.Value.To, ct);
            return Results.Ok(new
            {
                window = new { from = window.Value.From, to = window.Value.To },
                summaries,
            });
        })
        .WithName("GetDoctorPluginTrends")
        .WithDescription("Returns aggregated health-check trend summaries across all checks for the specified time window.");

        // GET /checks/{checkId} — raw data points plus summary for one check.
        group.MapGet("/checks/{checkId}", async (
            string checkId,
            DateTimeOffset? from,
            DateTimeOffset? to,
            IDoctorTrendRepository repository,
            TimeProvider timeProvider,
            HttpContext httpContext,
            CancellationToken ct) =>
        {
            if (string.IsNullOrWhiteSpace(checkId))
            {
                return Results.BadRequest(new { message = "checkId is required." });
            }

            var tenantId = ResolveTenantId(httpContext);
            var window = ResolveWindow(from, to, timeProvider);
            if (window is null)
            {
                return Results.BadRequest(new { message = "Invalid time window." });
            }

            var data = await repository.GetTrendDataAsync(tenantId, checkId, window.Value.From, window.Value.To, ct);
            var summary = await repository.GetCheckTrendSummaryAsync(tenantId, checkId, window.Value.From, window.Value.To, ct);
            return Results.Ok(new
            {
                window = new { from = window.Value.From, to = window.Value.To },
                summary,
                dataPoints = data,
            });
        })
        .WithName("GetDoctorPluginCheckTrend")
        .WithDescription("Returns detailed trend data and summary statistics for a specific Doctor health check.");

        // GET /categories/{category} — data points for every check in a category.
        group.MapGet("/categories/{category}", async (
            string category,
            DateTimeOffset? from,
            DateTimeOffset? to,
            IDoctorTrendRepository repository,
            TimeProvider timeProvider,
            HttpContext httpContext,
            CancellationToken ct) =>
        {
            if (string.IsNullOrWhiteSpace(category))
            {
                return Results.BadRequest(new { message = "category is required." });
            }

            var tenantId = ResolveTenantId(httpContext);
            var window = ResolveWindow(from, to, timeProvider);
            if (window is null)
            {
                return Results.BadRequest(new { message = "Invalid time window." });
            }

            var data = await repository.GetCategoryTrendDataAsync(tenantId, category, window.Value.From, window.Value.To, ct);
            return Results.Ok(new
            {
                window = new { from = window.Value.From, to = window.Value.To },
                category,
                dataPoints = data,
            });
        })
        .WithName("GetDoctorPluginCategoryTrend")
        .WithDescription("Returns trend data points for all checks within a specific Doctor check category.");

        // GET /degrading — checks degrading beyond a threshold (default 0.1).
        group.MapGet("/degrading", async (
            DateTimeOffset? from,
            DateTimeOffset? to,
            double? threshold,
            IDoctorTrendRepository repository,
            TimeProvider timeProvider,
            HttpContext httpContext,
            CancellationToken ct) =>
        {
            var tenantId = ResolveTenantId(httpContext);
            var window = ResolveWindow(from, to, timeProvider);
            if (window is null)
            {
                return Results.BadRequest(new { message = "Invalid time window." });
            }

            var effectiveThreshold = threshold ?? 0.1d;
            if (effectiveThreshold < 0 || double.IsNaN(effectiveThreshold))
            {
                return Results.BadRequest(new { message = "threshold must be a non-negative number." });
            }

            var degrading = await repository.GetDegradingChecksAsync(
                tenantId, window.Value.From, window.Value.To, effectiveThreshold, ct);
            return Results.Ok(new
            {
                window = new { from = window.Value.From, to = window.Value.To },
                threshold = effectiveThreshold,
                checks = degrading,
            });
        })
        .WithName("GetDoctorPluginDegradingChecks")
        .WithDescription("Returns the set of Doctor health checks that have been degrading over the specified time window.");
    }

    /// <summary>
    /// Normalizes the optional from/to query parameters: `to` defaults to now,
    /// `from` defaults to 30 days before `to`. Returns null when from &gt; to.
    /// </summary>
    private static (DateTimeOffset From, DateTimeOffset To)? ResolveWindow(
        DateTimeOffset? from,
        DateTimeOffset? to,
        TimeProvider timeProvider)
    {
        var end = to ?? timeProvider.GetUtcNow();
        var start = from ?? end.AddDays(-30);

        if (start > end)
        {
            return null;
        }

        return (start, end);
    }

    /// <summary>
    /// Resolves the tenant from the "stellaops:tenant"/"tenant" claim, then the
    /// X-Tenant-Id header, then the literal "demo-prod".
    /// NOTE(review): the "demo-prod" fallback looks like a development default —
    /// confirm unauthenticated requests cannot reach these routes in production.
    /// </summary>
    private static string ResolveTenantId(HttpContext httpContext)
    {
        // Try to get tenant from the StellaOps tenant context (set by middleware)
        var tenantClaim = httpContext.User?.FindFirst("stellaops:tenant")
            ?? httpContext.User?.FindFirst("tenant");
        if (tenantClaim is not null && !string.IsNullOrWhiteSpace(tenantClaim.Value))
        {
            return tenantClaim.Value;
        }

        // Fall back to header (development mode)
        if (httpContext.Request.Headers.TryGetValue("X-Tenant-Id", out var tenantHeader)
            && !string.IsNullOrWhiteSpace(tenantHeader))
        {
            return tenantHeader.ToString();
        }

        return "demo-prod";
    }
}
|
||||
@@ -0,0 +1,44 @@
|
||||
-- Doctor Trends Table: Stores health check trend data points
-- Created by the DoctorJobPlugin (scheduler plugin architecture)
-- Uses the scheduler schema to share the same database/schema as the Scheduler service.

CREATE TABLE IF NOT EXISTS scheduler.doctor_trends (
    id BIGSERIAL PRIMARY KEY,
    timestamp TIMESTAMPTZ NOT NULL,              -- sample time; all points of one run share a timestamp
    tenant_id TEXT NOT NULL,                     -- multi-tenant isolation key (enforced by RLS below)
    check_id TEXT NOT NULL,
    plugin_id TEXT NOT NULL,
    category TEXT NOT NULL,
    run_id TEXT NOT NULL,
    status TEXT NOT NULL,                        -- pass / warn / fail / skip
    health_score INT NOT NULL DEFAULT 0,         -- 0-100; -1 is a sentinel for skipped checks
    duration_ms INT NOT NULL DEFAULT 0,
    evidence_values JSONB NOT NULL DEFAULT '{}'  -- selected scalar evidence values for trending
);

-- Performance indexes for common query patterns
CREATE INDEX IF NOT EXISTS idx_doctor_trends_tenant_check
    ON scheduler.doctor_trends(tenant_id, check_id, timestamp DESC);

CREATE INDEX IF NOT EXISTS idx_doctor_trends_tenant_category
    ON scheduler.doctor_trends(tenant_id, category, timestamp DESC);

CREATE INDEX IF NOT EXISTS idx_doctor_trends_tenant_timestamp
    ON scheduler.doctor_trends(tenant_id, timestamp DESC);

CREATE INDEX IF NOT EXISTS idx_doctor_trends_run
    ON scheduler.doctor_trends(run_id);

-- Retention pruning index (used by PruneOldDataAsync)
CREATE INDEX IF NOT EXISTS idx_doctor_trends_timestamp_prune
    ON scheduler.doctor_trends(timestamp);

-- Row-Level Security: every statement (including from table owners, via FORCE)
-- is restricted to rows matching the session's current tenant, as resolved by
-- scheduler_app.require_current_tenant().
ALTER TABLE scheduler.doctor_trends ENABLE ROW LEVEL SECURITY;
ALTER TABLE scheduler.doctor_trends FORCE ROW LEVEL SECURITY;
DROP POLICY IF EXISTS doctor_trends_tenant_isolation ON scheduler.doctor_trends;
CREATE POLICY doctor_trends_tenant_isolation ON scheduler.doctor_trends FOR ALL
    USING (tenant_id = scheduler_app.require_current_tenant())
    WITH CHECK (tenant_id = scheduler_app.require_current_tenant());

COMMENT ON TABLE scheduler.doctor_trends IS 'Health check trend data points from Doctor plugin scheduled runs. Retained per configurable retention period (default 365 days).';
|
||||
@@ -0,0 +1,86 @@
|
||||
using System.Text.Json;
|
||||
using System.Text.Json.Serialization;
|
||||
|
||||
namespace StellaOps.Scheduler.Plugin.Doctor.Models;
|
||||
|
||||
/// <summary>
/// Plugin-specific configuration for Doctor job schedules.
/// Deserialized from <see cref="StellaOps.Scheduler.Models.Schedule.PluginConfig"/>.
/// </summary>
public sealed record DoctorScheduleConfig
{
    /// <summary>
    /// Doctor run mode: full, quick, categories, plugins.
    /// Validated by DoctorJobPlugin.ValidateConfigAsync.
    /// </summary>
    [JsonPropertyName("doctorMode")]
    public string DoctorMode { get; init; } = "full";

    /// <summary>
    /// Optional list of categories to include (empty = all).
    /// Required when <see cref="DoctorMode"/> is "categories".
    /// </summary>
    [JsonPropertyName("categories")]
    public IReadOnlyList<string> Categories { get; init; } = [];

    /// <summary>
    /// Optional list of specific plugins to run (empty = all).
    /// Required when <see cref="DoctorMode"/> is "plugins".
    /// </summary>
    [JsonPropertyName("plugins")]
    public IReadOnlyList<string> Plugins { get; init; } = [];

    /// <summary>
    /// Timeout in seconds for the Doctor run (default 300).
    /// Enforced by the polling loop in the plugin, not by the HTTP client.
    /// </summary>
    [JsonPropertyName("timeoutSeconds")]
    public int TimeoutSeconds { get; init; } = 300;

    /// <summary>
    /// Alert configuration for this schedule; may be null when the schedule
    /// does not override alert settings.
    /// </summary>
    [JsonPropertyName("alerts")]
    public DoctorAlertConfig? Alerts { get; init; }

    /// <summary>
    /// Deserializes a DoctorScheduleConfig from the Schedule's PluginConfig dictionary.
    /// A null or empty dictionary yields all defaults.
    /// </summary>
    public static DoctorScheduleConfig FromPluginConfig(IReadOnlyDictionary<string, object?>? pluginConfig)
    {
        if (pluginConfig is null || pluginConfig.Count == 0)
        {
            return new DoctorScheduleConfig();
        }

        // Round-trip through JSON to correctly deserialize typed properties.
        // Keys must match the camelCase JsonPropertyName values (System.Text.Json
        // property matching is case-sensitive by default); unknown keys are ignored.
        var json = JsonSerializer.Serialize(pluginConfig);
        return JsonSerializer.Deserialize<DoctorScheduleConfig>(json) ?? new DoctorScheduleConfig();
    }
}
|
||||
|
||||
/// <summary>
/// Alert configuration for Doctor scheduled runs.
/// Consumed by IDoctorAlertService implementations after each run.
/// </summary>
public sealed record DoctorAlertConfig
{
    /// <summary>Master switch for alerting on this schedule (default true).</summary>
    [JsonPropertyName("enabled")]
    public bool Enabled { get; init; } = true;

    /// <summary>Alert on failed checks (default true).</summary>
    [JsonPropertyName("alertOnFail")]
    public bool AlertOnFail { get; init; } = true;

    /// <summary>Alert on warning checks (default false).</summary>
    [JsonPropertyName("alertOnWarn")]
    public bool AlertOnWarn { get; init; }

    /// <summary>Alert when the overall status changes between runs (default true).</summary>
    [JsonPropertyName("alertOnStatusChange")]
    public bool AlertOnStatusChange { get; init; } = true;

    /// <summary>Logical channel names for alert delivery.</summary>
    [JsonPropertyName("channels")]
    public IReadOnlyList<string> Channels { get; init; } = [];

    /// <summary>Email recipients for alert notifications.</summary>
    [JsonPropertyName("emailRecipients")]
    public IReadOnlyList<string> EmailRecipients { get; init; } = [];

    /// <summary>Webhook URLs to deliver alert payloads to.</summary>
    [JsonPropertyName("webhookUrls")]
    public IReadOnlyList<string> WebhookUrls { get; init; } = [];

    /// <summary>Minimum severity that triggers an alert (default "Fail").</summary>
    [JsonPropertyName("minSeverity")]
    public string MinSeverity { get; init; } = "Fail";
}
|
||||
@@ -0,0 +1,79 @@
|
||||
namespace StellaOps.Scheduler.Plugin.Doctor.Models;
|
||||
|
||||
/// <summary>
/// Represents a single data point in a Doctor health trend.
/// Stored in the scheduler.doctor_trends table (one row per check per run).
/// </summary>
public sealed record DoctorTrendDataPoint
{
    /// <summary>
    /// Timestamp of the data point. All points from one run share the same value.
    /// </summary>
    public DateTimeOffset Timestamp { get; init; }

    /// <summary>
    /// Check ID this data point is for.
    /// </summary>
    public required string CheckId { get; init; }

    /// <summary>
    /// Plugin ID the check belongs to.
    /// </summary>
    public required string PluginId { get; init; }

    /// <summary>
    /// Category of the check.
    /// </summary>
    public required string Category { get; init; }

    /// <summary>
    /// Run ID that generated this data point.
    /// </summary>
    public required string RunId { get; init; }

    /// <summary>
    /// Tenant ID for multi-tenant isolation.
    /// </summary>
    public required string TenantId { get; init; }

    /// <summary>
    /// Status of the check at this point (pass, warn, fail, skip).
    /// </summary>
    public required string Status { get; init; }

    /// <summary>
    /// Health score at this point: 100 = pass, 50 = warn, 0 = fail/unknown,
    /// -1 = skipped (sentinel; see DoctorJobPlugin.CalculateHealthScore).
    /// </summary>
    public int HealthScore { get; init; }

    /// <summary>
    /// Duration of the check in milliseconds.
    /// </summary>
    public int DurationMs { get; init; }

    /// <summary>
    /// Selected scalar evidence values for trending (persisted as JSONB).
    /// </summary>
    public IReadOnlyDictionary<string, string> EvidenceValues { get; init; } =
        new Dictionary<string, string>();
}
|
||||
|
||||
/// <summary>
/// Aggregated trend summary for one check over a time period.
/// </summary>
public sealed record DoctorTrendSummary
{
    /// <summary>Stable identifier of the check.</summary>
    public required string CheckId { get; init; }

    /// <summary>Display name of the check (the Postgres repository currently reuses the check ID).</summary>
    public required string CheckName { get; init; }

    /// <summary>Start of the aggregation window.</summary>
    public DateTimeOffset PeriodStart { get; init; }

    /// <summary>End of the aggregation window.</summary>
    public DateTimeOffset PeriodEnd { get; init; }

    /// <summary>Total recorded runs of the check in the window.</summary>
    public int TotalRuns { get; init; }

    /// <summary>Runs with pass/success status.</summary>
    public int PassCount { get; init; }

    /// <summary>Runs with warn/warning status.</summary>
    public int WarnCount { get; init; }

    /// <summary>Runs with fail/failed/error status.</summary>
    public int FailCount { get; init; }

    /// <summary>Pass fraction of all runs; 0 when there were no runs. Computed, not stored.</summary>
    public double SuccessRate => TotalRuns > 0 ? (double)PassCount / TotalRuns : 0;

    /// <summary>Mean health score over the window.</summary>
    public double AvgHealthScore { get; init; }

    /// <summary>Trend direction: "stable", "improving", or "degrading".</summary>
    public string Direction { get; init; } = "stable";

    /// <summary>Relative score change over the window; the Postgres repository currently always reports 0.</summary>
    public double ChangePercent { get; init; }

    /// <summary>Mean check duration in milliseconds.</summary>
    public int AvgDurationMs { get; init; }
}
|
||||
@@ -0,0 +1,236 @@
|
||||
using System.Text.Json;
|
||||
using Dapper;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Npgsql;
|
||||
using StellaOps.Scheduler.Plugin.Doctor.Models;
|
||||
using StellaOps.Scheduler.Plugin.Doctor.Services;
|
||||
|
||||
namespace StellaOps.Scheduler.Plugin.Doctor.Persistence;
|
||||
|
||||
/// <summary>
|
||||
/// Postgres-backed implementation of <see cref="IDoctorTrendRepository"/>.
|
||||
/// Uses the scheduler.doctor_trends table via Dapper.
|
||||
/// </summary>
|
||||
public sealed class PostgresDoctorTrendRepository : IDoctorTrendRepository
|
||||
{
|
||||
private readonly string _connectionString;
|
||||
private readonly ILogger<PostgresDoctorTrendRepository> _logger;
|
||||
|
||||
public PostgresDoctorTrendRepository(string connectionString, ILogger<PostgresDoctorTrendRepository> logger)
|
||||
{
|
||||
_connectionString = connectionString;
|
||||
_logger = logger;
|
||||
}
|
||||
|
||||
public async Task StoreTrendDataAsync(IEnumerable<DoctorTrendDataPoint> dataPoints, CancellationToken ct)
|
||||
{
|
||||
const string sql = """
|
||||
INSERT INTO scheduler.doctor_trends
|
||||
(timestamp, tenant_id, check_id, plugin_id, category, run_id, status, health_score, duration_ms, evidence_values)
|
||||
VALUES
|
||||
(@Timestamp, @TenantId, @CheckId, @PluginId, @Category, @RunId, @Status, @HealthScore, @DurationMs, @EvidenceValues::jsonb)
|
||||
ON CONFLICT DO NOTHING
|
||||
""";
|
||||
|
||||
await using var connection = new NpgsqlConnection(_connectionString);
|
||||
await connection.OpenAsync(ct);
|
||||
|
||||
foreach (var point in dataPoints)
|
||||
{
|
||||
var evidenceJson = JsonSerializer.Serialize(point.EvidenceValues);
|
||||
await connection.ExecuteAsync(new CommandDefinition(sql, new
|
||||
{
|
||||
point.Timestamp,
|
||||
point.TenantId,
|
||||
point.CheckId,
|
||||
point.PluginId,
|
||||
point.Category,
|
||||
point.RunId,
|
||||
point.Status,
|
||||
point.HealthScore,
|
||||
point.DurationMs,
|
||||
EvidenceValues = evidenceJson,
|
||||
}, cancellationToken: ct));
|
||||
}
|
||||
}
|
||||
|
||||
public async Task<IReadOnlyList<DoctorTrendDataPoint>> GetTrendDataAsync(
|
||||
string tenantId, string checkId, DateTimeOffset from, DateTimeOffset to, CancellationToken ct)
|
||||
{
|
||||
const string sql = """
|
||||
SELECT timestamp, tenant_id, check_id, plugin_id, category, run_id, status,
|
||||
health_score, duration_ms, evidence_values
|
||||
FROM scheduler.doctor_trends
|
||||
WHERE tenant_id = @TenantId AND check_id = @CheckId
|
||||
AND timestamp >= @From AND timestamp <= @To
|
||||
ORDER BY timestamp ASC, run_id ASC
|
||||
""";
|
||||
|
||||
await using var connection = new NpgsqlConnection(_connectionString);
|
||||
await connection.OpenAsync(ct);
|
||||
|
||||
var rows = await connection.QueryAsync<TrendRow>(new CommandDefinition(sql, new
|
||||
{
|
||||
TenantId = tenantId,
|
||||
CheckId = checkId,
|
||||
From = from,
|
||||
To = to,
|
||||
}, cancellationToken: ct));
|
||||
|
||||
return rows.Select(MapToDataPoint).ToList().AsReadOnly();
|
||||
}
|
||||
|
||||
public async Task<IReadOnlyList<DoctorTrendDataPoint>> GetCategoryTrendDataAsync(
|
||||
string tenantId, string category, DateTimeOffset from, DateTimeOffset to, CancellationToken ct)
|
||||
{
|
||||
const string sql = """
|
||||
SELECT timestamp, tenant_id, check_id, plugin_id, category, run_id, status,
|
||||
health_score, duration_ms, evidence_values
|
||||
FROM scheduler.doctor_trends
|
||||
WHERE tenant_id = @TenantId AND category = @Category
|
||||
AND timestamp >= @From AND timestamp <= @To
|
||||
ORDER BY timestamp ASC, check_id ASC, run_id ASC
|
||||
""";
|
||||
|
||||
await using var connection = new NpgsqlConnection(_connectionString);
|
||||
await connection.OpenAsync(ct);
|
||||
|
||||
var rows = await connection.QueryAsync<TrendRow>(new CommandDefinition(sql, new
|
||||
{
|
||||
TenantId = tenantId,
|
||||
Category = category,
|
||||
From = from,
|
||||
To = to,
|
||||
}, cancellationToken: ct));
|
||||
|
||||
return rows.Select(MapToDataPoint).ToList().AsReadOnly();
|
||||
}
|
||||
|
||||
public async Task<IReadOnlyList<DoctorTrendSummary>> GetTrendSummariesAsync(
|
||||
string tenantId, DateTimeOffset from, DateTimeOffset to, CancellationToken ct)
|
||||
{
|
||||
const string sql = """
|
||||
SELECT check_id,
|
||||
check_id AS check_name,
|
||||
COUNT(*) AS total_runs,
|
||||
COUNT(*) FILTER (WHERE LOWER(status) IN ('pass','success')) AS pass_count,
|
||||
COUNT(*) FILTER (WHERE LOWER(status) IN ('warn','warning')) AS warn_count,
|
||||
COUNT(*) FILTER (WHERE LOWER(status) IN ('fail','failed','error')) AS fail_count,
|
||||
AVG(health_score) AS avg_health_score,
|
||||
AVG(duration_ms)::int AS avg_duration_ms
|
||||
FROM scheduler.doctor_trends
|
||||
WHERE tenant_id = @TenantId AND timestamp >= @From AND timestamp <= @To
|
||||
GROUP BY check_id
|
||||
ORDER BY check_id
|
||||
""";
|
||||
|
||||
await using var connection = new NpgsqlConnection(_connectionString);
|
||||
await connection.OpenAsync(ct);
|
||||
|
||||
var rows = await connection.QueryAsync<SummaryRow>(new CommandDefinition(sql, new
|
||||
{
|
||||
TenantId = tenantId,
|
||||
From = from,
|
||||
To = to,
|
||||
}, cancellationToken: ct));
|
||||
|
||||
return rows.Select(r => new DoctorTrendSummary
|
||||
{
|
||||
CheckId = r.check_id,
|
||||
CheckName = r.check_name,
|
||||
PeriodStart = from,
|
||||
PeriodEnd = to,
|
||||
TotalRuns = r.total_runs,
|
||||
PassCount = r.pass_count,
|
||||
WarnCount = r.warn_count,
|
||||
FailCount = r.fail_count,
|
||||
AvgHealthScore = r.avg_health_score,
|
||||
Direction = DetermineDirection(r),
|
||||
ChangePercent = 0, // Simplified: full implementation would compare first/last scores
|
||||
AvgDurationMs = r.avg_duration_ms,
|
||||
}).ToList().AsReadOnly();
|
||||
}
|
||||
|
||||
public async Task<DoctorTrendSummary?> GetCheckTrendSummaryAsync(
|
||||
string tenantId, string checkId, DateTimeOffset from, DateTimeOffset to, CancellationToken ct)
|
||||
{
|
||||
var summaries = await GetTrendSummariesAsync(tenantId, from, to, ct);
|
||||
return summaries.FirstOrDefault(s => string.Equals(s.CheckId, checkId, StringComparison.Ordinal));
|
||||
}
|
||||
|
||||
public async Task<IReadOnlyList<DoctorTrendSummary>> GetDegradingChecksAsync(
|
||||
string tenantId, DateTimeOffset from, DateTimeOffset to, double degradationThreshold, CancellationToken ct)
|
||||
{
|
||||
var summaries = await GetTrendSummariesAsync(tenantId, from, to, ct);
|
||||
return summaries
|
||||
.Where(s => string.Equals(s.Direction, "degrading", StringComparison.OrdinalIgnoreCase))
|
||||
.ToList()
|
||||
.AsReadOnly();
|
||||
}
|
||||
|
||||
public async Task PruneOldDataAsync(DateTimeOffset olderThan, CancellationToken ct)
|
||||
{
|
||||
const string sql = "DELETE FROM scheduler.doctor_trends WHERE timestamp < @OlderThan";
|
||||
|
||||
await using var connection = new NpgsqlConnection(_connectionString);
|
||||
await connection.OpenAsync(ct);
|
||||
|
||||
var deleted = await connection.ExecuteAsync(new CommandDefinition(sql, new { OlderThan = olderThan }, cancellationToken: ct));
|
||||
_logger.LogInformation("Pruned {Count} old Doctor trend data points (older than {OlderThan}).", deleted, olderThan);
|
||||
}
|
||||
|
||||
private static string DetermineDirection(SummaryRow row)
|
||||
{
|
||||
if (row.total_runs < 2) return "stable";
|
||||
var failRate = row.total_runs > 0 ? (double)row.fail_count / row.total_runs : 0;
|
||||
return failRate > 0.3 ? "degrading" : failRate < 0.05 ? "improving" : "stable";
|
||||
}
|
||||
|
||||
/// <summary>
/// Converts a raw Dapper row into a <see cref="DoctorTrendDataPoint"/>,
/// deserializing the JSON evidence payload (falls back to an empty map when
/// the column is null/blank or deserializes to null).
/// </summary>
private static DoctorTrendDataPoint MapToDataPoint(TrendRow row)
{
    Dictionary<string, string> evidence;
    if (string.IsNullOrWhiteSpace(row.evidence_values))
    {
        evidence = new Dictionary<string, string>();
    }
    else
    {
        evidence = JsonSerializer.Deserialize<Dictionary<string, string>>(row.evidence_values)
                   ?? new Dictionary<string, string>();
    }

    return new DoctorTrendDataPoint
    {
        Timestamp = row.timestamp,
        TenantId = row.tenant_id,
        CheckId = row.check_id,
        PluginId = row.plugin_id,
        Category = row.category,
        RunId = row.run_id,
        Status = row.status,
        HealthScore = row.health_score,
        DurationMs = row.duration_ms,
        EvidenceValues = evidence,
    };
}
|
||||
|
||||
// Dapper row mapping types. Property names are intentionally snake_case so
// Dapper can bind them directly to the column names returned by the queries
// against scheduler.doctor_trends — do not rename.
private sealed record TrendRow
{
    public DateTimeOffset timestamp { get; init; }
    public string tenant_id { get; init; } = "";
    public string check_id { get; init; } = "";
    public string plugin_id { get; init; } = "";
    public string category { get; init; } = "";
    public string run_id { get; init; } = "";
    public string status { get; init; } = "";
    public int health_score { get; init; }
    public int duration_ms { get; init; }
    // JSON-encoded dictionary of evidence key/value pairs; may be null.
    public string? evidence_values { get; init; }
}
|
||||
|
||||
// Aggregated per-check row used by the trend-summary queries. snake_case
// property names bind directly to the SQL result columns — do not rename.
private sealed record SummaryRow
{
    public string check_id { get; init; } = "";
    public string check_name { get; init; } = "";
    // Run counts over the queried window; pass/warn/fail should partition
    // total_runs, but this is not enforced here.
    public int total_runs { get; init; }
    public int pass_count { get; init; }
    public int warn_count { get; init; }
    public int fail_count { get; init; }
    public double avg_health_score { get; init; }
    public int avg_duration_ms { get; init; }
}
|
||||
}
|
||||
@@ -0,0 +1,63 @@
|
||||
using Microsoft.Extensions.Logging;
|
||||
using StellaOps.Scheduler.Plugin.Doctor.Models;
|
||||
|
||||
namespace StellaOps.Scheduler.Plugin.Doctor.Services;
|
||||
|
||||
/// <summary>
/// Console/logging-based alert service for Doctor scheduled runs.
/// Logs alerts to the application logger. In production, this would be replaced
/// with a notification channel (email, webhook, etc.) implementation.
/// </summary>
public sealed class ConsoleAlertService : IDoctorAlertService
{
    private readonly ILogger<ConsoleAlertService> _logger;

    public ConsoleAlertService(ILogger<ConsoleAlertService> logger)
    {
        _logger = logger;
    }

    /// <summary>
    /// Evaluates the configured alert rules against <paramref name="result"/>
    /// and, when any rule fires, emits a single warning log entry. Completes
    /// synchronously; never throws for a non-alerting run.
    /// </summary>
    public Task EvaluateAndSendAsync(
        DoctorScheduleConfig config,
        DoctorExecutionResult result,
        CancellationToken ct)
    {
        // Alerts disabled (or not configured at all): nothing to evaluate.
        if (config.Alerts is null || !config.Alerts.Enabled)
        {
            return Task.CompletedTask;
        }

        // A run alerts when a configured severity rule matches, or when the
        // run itself reported an error message.
        var shouldAlert =
            (config.Alerts.AlertOnFail && result.FailedChecks > 0)
            || (config.Alerts.AlertOnWarn && result.WarnedChecks > 0)
            || !string.IsNullOrEmpty(result.ErrorMessage);

        if (shouldAlert)
        {
            _logger.LogWarning(
                "Doctor alert triggered for run {RunId}: Status={Status}, " +
                "Failed={Failed}, Warned={Warned}, HealthScore={Score}, " +
                "Categories with issues: [{Categories}]",
                result.RunId,
                result.Status,
                result.FailedChecks,
                result.WarnedChecks,
                result.HealthScore,
                string.Join(", ", result.CategoriesWithIssues));
        }

        return Task.CompletedTask;
    }
}
|
||||
@@ -0,0 +1,34 @@
|
||||
using StellaOps.Scheduler.Plugin.Doctor.Models;
|
||||
|
||||
namespace StellaOps.Scheduler.Plugin.Doctor.Services;
|
||||
|
||||
/// <summary>
/// Service for sending alerts based on Doctor schedule execution results.
/// Implementations decide both whether an alert rule fired and how the
/// notification is delivered (e.g. <see cref="ConsoleAlertService"/> logs it).
/// </summary>
public interface IDoctorAlertService
{
    /// <summary>
    /// Evaluates alert rules and sends notifications if triggered.
    /// </summary>
    /// <param name="config">Schedule configuration carrying the alert rules.</param>
    /// <param name="result">Outcome of the Doctor run to evaluate.</param>
    /// <param name="ct">Cancellation token for the send operation.</param>
    Task EvaluateAndSendAsync(
        DoctorScheduleConfig config,
        DoctorExecutionResult result,
        CancellationToken ct = default);
}
|
||||
|
||||
/// <summary>
/// Result of a Doctor execution for alert evaluation.
/// Immutable snapshot of a single run's counters and status.
/// </summary>
public sealed record DoctorExecutionResult
{
    // Identifier of the Doctor run this result belongs to.
    public required string RunId { get; init; }
    // Overall run status label (exact value set is defined by the producer).
    public required string Status { get; init; }
    public int TotalChecks { get; init; }
    public int PassedChecks { get; init; }
    public int WarnedChecks { get; init; }
    public int FailedChecks { get; init; }
    public int SkippedChecks { get; init; }
    // Aggregate health score for the run; scale defined by the producer.
    public int HealthScore { get; init; }
    // Category names that had at least one non-passing check; defaults to empty.
    public IReadOnlyList<string> CategoriesWithIssues { get; init; } = [];
    // Non-null when the run itself errored; a non-empty value triggers an alert.
    public string? ErrorMessage { get; init; }
}
|
||||
@@ -0,0 +1,69 @@
|
||||
using StellaOps.Scheduler.Plugin.Doctor.Models;
|
||||
|
||||
namespace StellaOps.Scheduler.Plugin.Doctor.Services;
|
||||
|
||||
/// <summary>
/// Repository for persisting and querying Doctor health trend data.
/// Backed by the scheduler.doctor_trends table.
/// </summary>
public interface IDoctorTrendRepository
{
    /// <summary>
    /// Stores trend data points from a Doctor run.
    /// </summary>
    Task StoreTrendDataAsync(IEnumerable<DoctorTrendDataPoint> dataPoints, CancellationToken ct = default);

    /// <summary>
    /// Gets trend data points for a specific check over a time range.
    /// </summary>
    /// <param name="tenantId">Tenant whose data is queried.</param>
    /// <param name="checkId">Identifier of the health check.</param>
    /// <param name="from">Inclusive start of the window.</param>
    /// <param name="to">End of the window.</param>
    Task<IReadOnlyList<DoctorTrendDataPoint>> GetTrendDataAsync(
        string tenantId,
        string checkId,
        DateTimeOffset from,
        DateTimeOffset to,
        CancellationToken ct = default);

    /// <summary>
    /// Gets trend data points for a category over a time range.
    /// </summary>
    Task<IReadOnlyList<DoctorTrendDataPoint>> GetCategoryTrendDataAsync(
        string tenantId,
        string category,
        DateTimeOffset from,
        DateTimeOffset to,
        CancellationToken ct = default);

    /// <summary>
    /// Gets aggregated trend summaries for all checks over a time range.
    /// </summary>
    Task<IReadOnlyList<DoctorTrendSummary>> GetTrendSummariesAsync(
        string tenantId,
        DateTimeOffset from,
        DateTimeOffset to,
        CancellationToken ct = default);

    /// <summary>
    /// Gets trend summary for a specific check, or null when the check has no
    /// data in the window.
    /// </summary>
    Task<DoctorTrendSummary?> GetCheckTrendSummaryAsync(
        string tenantId,
        string checkId,
        DateTimeOffset from,
        DateTimeOffset to,
        CancellationToken ct = default);

    /// <summary>
    /// Gets checks with degrading trends.
    /// NOTE(review): the Postgres implementation currently ignores
    /// <paramref name="degradationThreshold"/> — confirm intended semantics.
    /// </summary>
    Task<IReadOnlyList<DoctorTrendSummary>> GetDegradingChecksAsync(
        string tenantId,
        DateTimeOffset from,
        DateTimeOffset to,
        double degradationThreshold = 0.1,
        CancellationToken ct = default);

    /// <summary>
    /// Prunes old trend data beyond retention period (rows older than
    /// <paramref name="olderThan"/> are deleted).
    /// </summary>
    Task PruneOldDataAsync(DateTimeOffset olderThan, CancellationToken ct = default);
}
|
||||
@@ -0,0 +1,36 @@
|
||||
<Project Sdk="Microsoft.NET.Sdk">
  <!-- Doctor health-check plugin for the StellaOps Scheduler. -->
  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <LangVersion>preview</LangVersion>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
    <RootNamespace>StellaOps.Scheduler.Plugin.Doctor</RootNamespace>
    <AssemblyName>StellaOps.Scheduler.Plugin.Doctor</AssemblyName>
    <Description>Doctor health check plugin for the StellaOps Scheduler</Description>
  </PropertyGroup>

  <ItemGroup>
    <!-- ASP.NET Core shared framework (logging, DI abstractions, etc.). -->
    <FrameworkReference Include="Microsoft.AspNetCore.App" />
  </ItemGroup>

  <ItemGroup>
    <!-- Scheduler plugin contract and shared models. -->
    <ProjectReference Include="../../StellaOps.Scheduler.__Libraries/StellaOps.Scheduler.Plugin.Abstractions/StellaOps.Scheduler.Plugin.Abstractions.csproj" />
    <ProjectReference Include="../../StellaOps.Scheduler.__Libraries/StellaOps.Scheduler.Models/StellaOps.Scheduler.Models.csproj" />
  </ItemGroup>

  <ItemGroup>
    <!-- Postgres access for the trend repository (versions centrally managed). -->
    <PackageReference Include="Npgsql" />
    <PackageReference Include="Dapper" />
  </ItemGroup>

  <!-- Embed SQL migrations as resources -->
  <ItemGroup>
    <EmbeddedResource Include="Migrations\**\*.sql" />
  </ItemGroup>

  <PropertyGroup Label="StellaOpsReleaseVersion">
    <Version>1.0.0-alpha1</Version>
    <InformationalVersion>1.0.0-alpha1</InformationalVersion>
  </PropertyGroup>
</Project>
|
||||
@@ -10,7 +10,10 @@ using StellaOps.Authority.Persistence.Postgres;
|
||||
using StellaOps.Concelier.Persistence.Postgres;
|
||||
using StellaOps.Evidence.Persistence.Postgres;
|
||||
using StellaOps.Excititor.Persistence.Postgres;
|
||||
using StellaOps.IssuerDirectory.Persistence.Postgres;
|
||||
using StellaOps.Notify.Persistence.Postgres;
|
||||
using StellaOps.OpsMemory.Storage;
|
||||
using StellaOps.PacksRegistry.Persistence.Postgres;
|
||||
using StellaOps.Plugin.Registry;
|
||||
using StellaOps.Policy.Persistence.Postgres;
|
||||
using StellaOps.ReachGraph.Persistence.Postgres;
|
||||
@@ -19,6 +22,7 @@ using StellaOps.SbomService.Lineage.Persistence;
|
||||
using StellaOps.Scanner.Storage.Postgres;
|
||||
using StellaOps.Scanner.Triage;
|
||||
using StellaOps.Scheduler.Persistence.Postgres;
|
||||
using StellaOps.Signer.KeyManagement.EfCore.Context;
|
||||
using StellaOps.Timeline.Core.Postgres;
|
||||
using StellaOps.TimelineIndexer.Infrastructure;
|
||||
using StellaOps.Verdict.Persistence.Postgres;
|
||||
@@ -27,7 +31,12 @@ using StellaOps.Graph.Indexer.Persistence.Postgres;
|
||||
using StellaOps.Unknowns.Persistence.Postgres;
|
||||
using StellaOps.VexHub.Persistence.Postgres;
|
||||
using StellaOps.VexLens.Persistence.Postgres;
|
||||
using StellaOps.Workflow.DataStore.PostgreSQL;
|
||||
using StellaOps.ExportCenter.Infrastructure.Db;
|
||||
using StellaOps.Findings.Ledger.Infrastructure.Postgres;
|
||||
using StellaOps.Integrations.Persistence;
|
||||
using StellaOps.Replay.WebService;
|
||||
using StellaOps.RiskEngine.Infrastructure.Stores;
|
||||
|
||||
namespace StellaOps.Platform.Database;
|
||||
|
||||
@@ -278,11 +287,87 @@ public sealed class VerdictMigrationModulePlugin : IMigrationModulePlugin
|
||||
}
|
||||
|
||||
|
||||
/// <summary>
/// Registers the Signer module with the central migration runner; migrations are
/// discovered from the assembly containing <see cref="KeyManagementDbContext"/>
/// and applied to the "signer" schema.
/// </summary>
public sealed class SignerMigrationModulePlugin : IMigrationModulePlugin
{
    public MigrationModuleInfo Module { get; } = new(
        name: "Signer",
        schemaName: "signer",
        migrationsAssembly: typeof(KeyManagementDbContext).Assembly);
}
|
||||
|
||||
/// <summary>
/// Registers the IssuerDirectory module with the central migration runner,
/// targeting the "issuer" schema.
/// </summary>
public sealed class IssuerDirectoryMigrationModulePlugin : IMigrationModulePlugin
{
    public MigrationModuleInfo Module { get; } = new(
        name: "IssuerDirectory",
        schemaName: "issuer",
        migrationsAssembly: typeof(IssuerDirectoryDataSource).Assembly);
}
|
||||
|
||||
/// <summary>
/// Registers the Workflow module with the central migration runner,
/// targeting the "workflow" schema.
/// </summary>
public sealed class WorkflowMigrationModulePlugin : IMigrationModulePlugin
{
    public MigrationModuleInfo Module { get; } = new(
        name: "Workflow",
        schemaName: "workflow",
        migrationsAssembly: typeof(PostgresWorkflowDatabase).Assembly);
}
|
||||
|
||||
/// <summary>
/// Registers the PacksRegistry module with the central migration runner,
/// targeting the "packs" schema.
/// </summary>
public sealed class PacksRegistryMigrationModulePlugin : IMigrationModulePlugin
{
    public MigrationModuleInfo Module { get; } = new(
        name: "PacksRegistry",
        schemaName: "packs",
        migrationsAssembly: typeof(PacksRegistryDataSource).Assembly);
}
|
||||
|
||||
/// <summary>
/// Registers the OpsMemory module with the central migration runner,
/// targeting the "opsmemory" schema.
/// </summary>
public sealed class OpsMemoryMigrationModulePlugin : IMigrationModulePlugin
{
    public MigrationModuleInfo Module { get; } = new(
        name: "OpsMemory",
        schemaName: "opsmemory",
        migrationsAssembly: typeof(PostgresOpsMemoryStore).Assembly);
}
|
||||
|
||||
/// <summary>
/// Registers the ExportCenter module with the central migration runner,
/// targeting the "export_center" schema. Migration SQL scripts are embedded
/// resources located under the given resource prefix.
/// </summary>
public sealed class ExportCenterMigrationModulePlugin : IMigrationModulePlugin
{
    public MigrationModuleInfo Module { get; } = new(
        name: "ExportCenter",
        schemaName: "export_center",
        migrationsAssembly: typeof(ExportCenterDataSource).Assembly,
        resourcePrefix: "StellaOps.ExportCenter.Infrastructure.Db.Migrations");
}
|
||||
|
||||
/// <summary>
/// Registers the Integrations module with the central migration runner,
/// targeting the "integrations" schema; migration scripts are embedded
/// resources under the given prefix.
/// </summary>
public sealed class IntegrationsMigrationModulePlugin : IMigrationModulePlugin
{
    public MigrationModuleInfo Module { get; } = new(
        name: "Integrations",
        schemaName: "integrations",
        migrationsAssembly: typeof(IntegrationDbContext).Assembly,
        resourcePrefix: "StellaOps.Integrations.Persistence.Migrations");
}
|
||||
|
||||
/// <summary>
/// Registers the Replay module with the central migration runner,
/// targeting the "replay" schema; migration scripts are embedded
/// resources under the given prefix.
/// </summary>
public sealed class ReplayMigrationModulePlugin : IMigrationModulePlugin
{
    public MigrationModuleInfo Module { get; } = new(
        name: "Replay",
        schemaName: "replay",
        migrationsAssembly: typeof(PostgresFeedSnapshotIndexStore).Assembly,
        resourcePrefix: "StellaOps.Replay.WebService.Migrations");
}
|
||||
|
||||
/// <summary>
/// Registers the RiskEngine module with the central migration runner,
/// targeting the "riskengine" schema; migration scripts are embedded
/// resources under the given prefix.
/// </summary>
public sealed class RiskEngineMigrationModulePlugin : IMigrationModulePlugin
{
    public MigrationModuleInfo Module { get; } = new(
        name: "RiskEngine",
        schemaName: "riskengine",
        migrationsAssembly: typeof(PostgresRiskScoreResultStore).Assembly,
        resourcePrefix: "StellaOps.RiskEngine.Infrastructure.Migrations");
}
|
||||
|
||||
/// <summary>
/// Registers the FindingsLedger module with the central migration runner,
/// targeting the "findings" schema (moved off the shared "public" schema,
/// change V3-01); migration scripts are embedded resources under the given
/// prefix.
/// </summary>
public sealed class FindingsLedgerMigrationModulePlugin : IMigrationModulePlugin
{
    // Fix: the 'schemaName:' named argument was supplied twice ("public" then
    // "findings"), which is a compile error (CS1740). Only the new "findings"
    // schema is kept, matching the V3-01 schema migration.
    public MigrationModuleInfo Module { get; } = new(
        name: "FindingsLedger",
        schemaName: "findings",
        migrationsAssembly: typeof(LedgerDataSource).Assembly,
        resourcePrefix: "StellaOps.Findings.Ledger.migrations");
}
|
||||
|
||||
@@ -17,7 +17,9 @@
|
||||
<ProjectReference Include="..\..\..\BinaryIndex\__Libraries\StellaOps.BinaryIndex.Persistence\StellaOps.BinaryIndex.Persistence.csproj" />
|
||||
<ProjectReference Include="..\..\..\BinaryIndex\__Libraries\StellaOps.BinaryIndex.GoldenSet\StellaOps.BinaryIndex.GoldenSet.csproj" />
|
||||
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Artifact.Infrastructure\StellaOps.Artifact.Infrastructure.csproj" />
|
||||
<ProjectReference Include="..\..\..\Attestor\__Libraries\StellaOps.Signer.KeyManagement\StellaOps.Signer.KeyManagement.csproj" />
|
||||
<ProjectReference Include="..\..\..\Authority\__Libraries\StellaOps.Authority.Persistence\StellaOps.Authority.Persistence.csproj" />
|
||||
<ProjectReference Include="..\..\..\Authority\__Libraries\StellaOps.IssuerDirectory.Persistence\StellaOps.IssuerDirectory.Persistence.csproj" />
|
||||
<ProjectReference Include="..\..\..\Concelier\__Libraries\StellaOps.Concelier.Persistence\StellaOps.Concelier.Persistence.csproj" />
|
||||
<ProjectReference Include="..\..\..\Graph\__Libraries\StellaOps.Graph.Indexer.Persistence\StellaOps.Graph.Indexer.Persistence.csproj" />
|
||||
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Evidence.Persistence\StellaOps.Evidence.Persistence.csproj" />
|
||||
@@ -41,7 +43,14 @@
|
||||
<ProjectReference Include="..\..\..\EvidenceLocker\StellaOps.EvidenceLocker\StellaOps.EvidenceLocker.Infrastructure\StellaOps.EvidenceLocker.Infrastructure.csproj" />
|
||||
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Eventing\StellaOps.Eventing.csproj" />
|
||||
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
|
||||
<ProjectReference Include="..\..\..\ExportCenter\StellaOps.ExportCenter\StellaOps.ExportCenter.Infrastructure\StellaOps.ExportCenter.Infrastructure.csproj" />
|
||||
<ProjectReference Include="..\..\..\Findings\StellaOps.Findings.Ledger\StellaOps.Findings.Ledger.csproj" />
|
||||
<ProjectReference Include="..\..\..\Findings\__Libraries\StellaOps.RiskEngine.Infrastructure\StellaOps.RiskEngine.Infrastructure.csproj" />
|
||||
<ProjectReference Include="..\..\..\Integrations\__Libraries\StellaOps.Integrations.Persistence\StellaOps.Integrations.Persistence.csproj" />
|
||||
<ProjectReference Include="..\..\..\JobEngine\StellaOps.PacksRegistry.__Libraries\StellaOps.PacksRegistry.Persistence\StellaOps.PacksRegistry.Persistence.csproj" />
|
||||
<ProjectReference Include="..\..\..\Replay\StellaOps.Replay.WebService\StellaOps.Replay.WebService.csproj" />
|
||||
<ProjectReference Include="..\..\..\AdvisoryAI\__Libraries\StellaOps.OpsMemory\StellaOps.OpsMemory.csproj" />
|
||||
<ProjectReference Include="..\..\..\Workflow\__Libraries\StellaOps.Workflow.DataStore.PostgreSQL\StellaOps.Workflow.DataStore.PostgreSQL.csproj" />
|
||||
</ItemGroup>
|
||||
|
||||
<ItemGroup>
|
||||
|
||||
@@ -0,0 +1,15 @@
|
||||
-- 001_initial_schema.sql
-- Replay: schema and feed_snapshot_index table.
-- All statements are idempotent (IF NOT EXISTS) so the migration can be
-- re-applied safely.

CREATE SCHEMA IF NOT EXISTS replay;

-- One row per captured feed snapshot; a provider may have multiple digests
-- at the same capture time, hence the composite primary key.
CREATE TABLE IF NOT EXISTS replay.feed_snapshot_index (
    provider_id TEXT NOT NULL,
    digest TEXT NOT NULL,
    captured_at TIMESTAMPTZ NOT NULL,
    epoch_timestamp TIMESTAMPTZ NOT NULL,
    PRIMARY KEY (provider_id, captured_at, digest)
);

-- Supports "latest snapshots for a provider" lookups (newest first).
CREATE INDEX IF NOT EXISTS idx_replay_snapshot_index_lookup
    ON replay.feed_snapshot_index (provider_id, captured_at DESC, digest ASC);
|
||||
@@ -8,8 +8,6 @@ namespace StellaOps.Replay.WebService;
|
||||
public sealed class PostgresFeedSnapshotIndexStore : IFeedSnapshotIndexStore, IAsyncDisposable
|
||||
{
|
||||
private readonly NpgsqlDataSource _dataSource;
|
||||
private readonly object _initGate = new();
|
||||
private bool _tableInitialized;
|
||||
|
||||
public PostgresFeedSnapshotIndexStore(string connectionString)
|
||||
{
|
||||
@@ -26,7 +24,6 @@ public sealed class PostgresFeedSnapshotIndexStore : IFeedSnapshotIndexStore, IA
|
||||
public async Task IndexSnapshotAsync(FeedSnapshotIndexEntry entry, CancellationToken ct = default)
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(entry);
|
||||
await EnsureTableAsync(ct).ConfigureAwait(false);
|
||||
|
||||
const string sql = """
|
||||
INSERT INTO replay.feed_snapshot_index (
|
||||
@@ -58,7 +55,6 @@ public sealed class PostgresFeedSnapshotIndexStore : IFeedSnapshotIndexStore, IA
|
||||
CancellationToken ct = default)
|
||||
{
|
||||
ArgumentException.ThrowIfNullOrWhiteSpace(providerId);
|
||||
await EnsureTableAsync(ct).ConfigureAwait(false);
|
||||
|
||||
const string sql = """
|
||||
SELECT provider_id, digest, captured_at, epoch_timestamp
|
||||
@@ -97,7 +93,6 @@ public sealed class PostgresFeedSnapshotIndexStore : IFeedSnapshotIndexStore, IA
|
||||
CancellationToken ct = default)
|
||||
{
|
||||
ArgumentException.ThrowIfNullOrWhiteSpace(providerId);
|
||||
await EnsureTableAsync(ct).ConfigureAwait(false);
|
||||
|
||||
const string sql = """
|
||||
SELECT provider_id, digest, captured_at, epoch_timestamp
|
||||
@@ -139,38 +134,6 @@ public sealed class PostgresFeedSnapshotIndexStore : IFeedSnapshotIndexStore, IA
|
||||
return _dataSource.DisposeAsync();
|
||||
}
|
||||
|
||||
private async Task EnsureTableAsync(CancellationToken ct)
|
||||
{
|
||||
lock (_initGate)
|
||||
{
|
||||
if (_tableInitialized)
|
||||
{
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const string ddl = """
|
||||
CREATE SCHEMA IF NOT EXISTS replay;
|
||||
CREATE TABLE IF NOT EXISTS replay.feed_snapshot_index (
|
||||
provider_id TEXT NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
captured_at TIMESTAMPTZ NOT NULL,
|
||||
epoch_timestamp TIMESTAMPTZ NOT NULL,
|
||||
PRIMARY KEY (provider_id, captured_at, digest)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_replay_snapshot_index_lookup
|
||||
ON replay.feed_snapshot_index (provider_id, captured_at DESC, digest ASC);
|
||||
""";
|
||||
|
||||
await using var connection = await _dataSource.OpenConnectionAsync(ct).ConfigureAwait(false);
|
||||
await using var command = new NpgsqlCommand(ddl, connection);
|
||||
await command.ExecuteNonQueryAsync(ct).ConfigureAwait(false);
|
||||
|
||||
lock (_initGate)
|
||||
{
|
||||
_tableInitialized = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public sealed class SeedFsFeedSnapshotBlobStore : IFeedSnapshotBlobStore
|
||||
|
||||
@@ -30,6 +30,11 @@
|
||||
<EmbeddedResource Include="Translations\*.json" />
|
||||
</ItemGroup>
|
||||
|
||||
<ItemGroup>
|
||||
<!-- Embed SQL migrations as resources -->
|
||||
<EmbeddedResource Include="Migrations\**\*.sql" />
|
||||
</ItemGroup>
|
||||
|
||||
<PropertyGroup Label="StellaOpsReleaseVersion">
|
||||
<Version>1.0.0-alpha1</Version>
|
||||
<InformationalVersion>1.0.0-alpha1</InformationalVersion>
|
||||
|
||||
@@ -94,8 +94,8 @@
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/evidence(.*)", "IsRegex": true, "TranslatesTo": "http://evidencelocker.stella-ops.local/api/v1/evidence$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/proofs(.*)", "IsRegex": true, "TranslatesTo": "http://evidencelocker.stella-ops.local/api/v1/proofs$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/verdicts(.*)", "IsRegex": true, "TranslatesTo": "http://evidencelocker.stella-ops.local/api/v1/verdicts$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/release-orchestrator(.*)", "IsRegex": true, "TranslatesTo": "http://jobengine.stella-ops.local/api/v1/release-orchestrator$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/approvals(.*)", "IsRegex": true, "TranslatesTo": "http://jobengine.stella-ops.local/api/v1/approvals$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/release-orchestrator(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/v1/release-orchestrator$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/approvals(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/v1/approvals$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/attestations(.*)", "IsRegex": true, "TranslatesTo": "http://attestor.stella-ops.local/api/v1/attestations$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/sbom(.*)", "IsRegex": true, "TranslatesTo": "http://sbomservice.stella-ops.local/api/v1/sbom$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/lineage(.*)", "IsRegex": true, "TranslatesTo": "http://sbomservice.stella-ops.local/api/v1/lineage$1" },
|
||||
@@ -104,7 +104,7 @@
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/policy(.*)", "IsRegex": true, "TranslatesTo": "http://policy-engine.stella-ops.local/api/v1/policy$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/governance(.*)", "IsRegex": true, "TranslatesTo": "http://policy-engine.stella-ops.local/api/v1/governance$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/determinization(.*)", "IsRegex": true, "TranslatesTo": "http://policy-engine.stella-ops.local/api/v1/determinization$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/workflows(.*)", "IsRegex": true, "TranslatesTo": "http://jobengine.stella-ops.local/api/v1/workflows$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/workflows(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/v1/workflows$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/aoc(.*)", "IsRegex": true, "TranslatesTo": "http://platform.stella-ops.local/api/v1/aoc$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/administration(.*)", "IsRegex": true, "TranslatesTo": "http://platform.stella-ops.local/api/v1/administration$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/authority/quotas(.*)", "IsRegex": true, "TranslatesTo": "http://platform.stella-ops.local/api/v1/authority/quotas$1" },
|
||||
@@ -124,6 +124,18 @@
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/doctor/scheduler(.*)", "IsRegex": true, "TranslatesTo": "http://doctor-scheduler.stella-ops.local/api/v1/doctor/scheduler$1" },
|
||||
{ "Type": "ReverseProxy", "Path": "^/api/v1/registries(.*)", "IsRegex": true, "TranslatesTo": "http://platform.stella-ops.local/api/v1/registries$1", "PreserveAuthHeaders": true },
|
||||
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/jobengine/registry/packs(.*)", "IsRegex": true, "TranslatesTo": "http://packsregistry.stella-ops.local/api/v1/jobengine/registry/packs$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/jobengine/quotas(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/v1/jobengine/quotas$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/jobengine/deadletter(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/v1/jobengine/deadletter$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/jobengine/jobs(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/v1/jobengine/jobs$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/jobengine/runs(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/v1/jobengine/runs$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/jobengine/dag(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/v1/jobengine/dag$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/jobengine/pack-runs(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/v1/jobengine/pack-runs$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/jobengine/stream(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/v1/jobengine/stream$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/jobengine/audit(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/v1/jobengine/audit$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/jobengine/sources(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/v1/jobengine/sources$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v1/jobengine/slos(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/v1/jobengine/slos$1" },
|
||||
|
||||
{ "Type": "Microservice", "Path": "^/api/v2/context(.*)", "IsRegex": true, "TranslatesTo": "http://platform.stella-ops.local/api/v2/context$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v2/releases(.*)", "IsRegex": true, "TranslatesTo": "http://platform.stella-ops.local/api/v2/releases$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/v2/security(.*)", "IsRegex": true, "TranslatesTo": "http://platform.stella-ops.local/api/v2/security$1" },
|
||||
@@ -136,7 +148,7 @@
|
||||
|
||||
{ "Type": "Microservice", "Path": "^/api/(cvss|gate|exceptions|policy)(.*)", "IsRegex": true, "TranslatesTo": "http://policy-engine.stella-ops.local/api/$1$2" },
|
||||
{ "Type": "Microservice", "Path": "^/api/(risk|risk-budget)(.*)", "IsRegex": true, "TranslatesTo": "http://policy-engine.stella-ops.local/api/$1$2" },
|
||||
{ "Type": "Microservice", "Path": "^/api/(release-orchestrator|releases|approvals)(.*)", "IsRegex": true, "TranslatesTo": "http://jobengine.stella-ops.local/api/$1$2" },
|
||||
{ "Type": "Microservice", "Path": "^/api/(release-orchestrator|releases|approvals)(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/$1$2" },
|
||||
{ "Type": "Microservice", "Path": "^/api/(compare|change-traces|sbomservice)(.*)", "IsRegex": true, "TranslatesTo": "http://sbomservice.stella-ops.local/api/$1$2" },
|
||||
{ "Type": "Microservice", "Path": "^/api/fix-verification(.*)", "IsRegex": true, "TranslatesTo": "http://scanner.stella-ops.local/api/fix-verification$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/verdicts(.*)", "IsRegex": true, "TranslatesTo": "http://evidencelocker.stella-ops.local/api/verdicts$1" },
|
||||
@@ -147,8 +159,8 @@
|
||||
{ "Type": "Microservice", "Path": "^/api/analytics(.*)", "IsRegex": true, "TranslatesTo": "http://platform.stella-ops.local/api/analytics$1" },
|
||||
{ "Type": "Microservice", "Path": "^/scheduler(?=/|$)(.*)", "IsRegex": true, "TranslatesTo": "http://scheduler.stella-ops.local$1" },
|
||||
{ "Type": "Microservice", "Path": "^/doctor(?=/|$)(.*)", "IsRegex": true, "TranslatesTo": "http://doctor.stella-ops.local$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/orchestrator(.*)", "IsRegex": true, "TranslatesTo": "http://jobengine.stella-ops.local/api/orchestrator$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/jobengine(.*)", "IsRegex": true, "TranslatesTo": "http://jobengine.stella-ops.local/api/jobengine$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/orchestrator(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/orchestrator$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/jobengine(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/api/jobengine$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/scheduler(.*)", "IsRegex": true, "TranslatesTo": "http://scheduler.stella-ops.local/api/scheduler$1" },
|
||||
{ "Type": "Microservice", "Path": "^/api/doctor(.*)", "IsRegex": true, "TranslatesTo": "http://doctor.stella-ops.local/api/doctor$1" },
|
||||
|
||||
@@ -157,7 +169,7 @@
|
||||
{ "Type": "Microservice", "Path": "^/policy(?=/|$)(.*)", "IsRegex": true, "TranslatesTo": "http://policy-engine.stella-ops.local/policy$1" },
|
||||
|
||||
{ "Type": "Microservice", "Path": "^/v1/evidence-packs(.*)", "IsRegex": true, "TranslatesTo": "http://advisoryai.stella-ops.local/v1/evidence-packs$1" },
|
||||
{ "Type": "Microservice", "Path": "^/v1/runs(.*)", "IsRegex": true, "TranslatesTo": "http://jobengine.stella-ops.local/v1/runs$1" },
|
||||
{ "Type": "Microservice", "Path": "^/v1/runs(.*)", "IsRegex": true, "TranslatesTo": "http://release-orchestrator.stella-ops.local/v1/runs$1" },
|
||||
{ "Type": "Microservice", "Path": "^/v1/advisory-ai(.*)", "IsRegex": true, "TranslatesTo": "http://advisoryai.stella-ops.local/v1/advisory-ai$1" },
|
||||
{ "Type": "Microservice", "Path": "^/v1/audit-bundles(.*)", "IsRegex": true, "TranslatesTo": "http://exportcenter.stella-ops.local/v1/audit-bundles$1" },
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ public sealed class GatewayRouteSearchMappingsTests
|
||||
("^/api/v2/integrations(.*)", "http://platform.stella-ops.local/api/v2/integrations$1", "Microservice", true),
|
||||
("^/scheduler(?=/|$)(.*)", "http://scheduler.stella-ops.local$1", "Microservice", true),
|
||||
("^/doctor(?=/|$)(.*)", "http://doctor.stella-ops.local$1", "Microservice", true),
|
||||
("^/api/jobengine(.*)", "http://jobengine.stella-ops.local/api/jobengine$1", "Microservice", true),
|
||||
("^/api/jobengine(.*)", "http://release-orchestrator.stella-ops.local/api/jobengine$1", "Microservice", true),
|
||||
("^/api/scheduler(.*)", "http://scheduler.stella-ops.local/api/scheduler$1", "Microservice", true)
|
||||
];
|
||||
|
||||
|
||||
@@ -152,8 +152,6 @@ public sealed class PostgresCatalogRepository : RepositoryBase<SbomServiceDataSo
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS sbom;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS sbom.catalog (
|
||||
id TEXT PRIMARY KEY,
|
||||
artifact TEXT NOT NULL,
|
||||
|
||||
@@ -90,8 +90,6 @@ public sealed class PostgresComponentLookupRepository : RepositoryBase<SbomServi
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS sbom;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS sbom.component_lookups (
|
||||
id TEXT PRIMARY KEY,
|
||||
artifact TEXT NOT NULL,
|
||||
|
||||
@@ -90,8 +90,6 @@ public sealed class PostgresEntrypointRepository : RepositoryBase<SbomServiceDat
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS sbom;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS sbom.entrypoints (
|
||||
tenant_id TEXT NOT NULL,
|
||||
artifact TEXT NOT NULL,
|
||||
|
||||
@@ -115,8 +115,6 @@ public sealed class PostgresOrchestratorControlRepository : RepositoryBase<SbomS
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS sbom;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS sbom.orchestrator_control (
|
||||
tenant_id TEXT PRIMARY KEY,
|
||||
paused BOOLEAN NOT NULL DEFAULT false,
|
||||
|
||||
@@ -128,8 +128,6 @@ public sealed class PostgresOrchestratorRepository : RepositoryBase<SbomServiceD
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS sbom;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS sbom.orchestrator_sources (
|
||||
tenant_id TEXT NOT NULL,
|
||||
source_id TEXT NOT NULL,
|
||||
|
||||
@@ -92,8 +92,6 @@ public sealed class PostgresProjectionRepository : RepositoryBase<SbomServiceDat
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS sbom;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS sbom.projections (
|
||||
snapshot_id TEXT NOT NULL,
|
||||
tenant_id TEXT NOT NULL,
|
||||
|
||||
@@ -315,8 +315,6 @@ public sealed class PostgresSbomLineageEdgeRepository : RepositoryBase<SbomServi
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS sbom;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS sbom.lineage_edges (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
parent_digest TEXT NOT NULL,
|
||||
|
||||
@@ -305,8 +305,6 @@ public sealed class PostgresSbomVerdictLinkRepository : RepositoryBase<SbomServi
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS sbom;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS sbom.verdict_links (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
sbom_version_id UUID NOT NULL,
|
||||
|
||||
@@ -105,8 +105,6 @@ public sealed class PostgresCallgraphRepository : RepositoryBase<SignalsDataSour
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS signals;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS signals.callgraphs (
|
||||
id TEXT PRIMARY KEY,
|
||||
language TEXT NOT NULL,
|
||||
|
||||
@@ -234,8 +234,6 @@ public sealed class PostgresDeploymentRefsRepository : RepositoryBase<SignalsDat
|
||||
return;
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS signals;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS signals.deploy_refs (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
purl TEXT NOT NULL,
|
||||
|
||||
@@ -261,8 +261,6 @@ public sealed class PostgresGraphMetricsRepository : RepositoryBase<SignalsDataS
|
||||
return;
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS signals;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS signals.graph_metrics (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
node_id TEXT NOT NULL,
|
||||
|
||||
@@ -206,8 +206,6 @@ public sealed class PostgresReachabilityFactRepository : RepositoryBase<SignalsD
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS signals;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS signals.reachability_facts (
|
||||
subject_key TEXT PRIMARY KEY,
|
||||
id TEXT NOT NULL,
|
||||
|
||||
@@ -325,8 +325,6 @@ public sealed class PostgresReachabilityStoreRepository : RepositoryBase<Signals
|
||||
}
|
||||
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS signals;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS signals.func_nodes (
|
||||
id TEXT PRIMARY KEY,
|
||||
graph_hash TEXT NOT NULL,
|
||||
|
||||
@@ -458,10 +458,8 @@ public sealed class PostgresUnknownsRepository : RepositoryBase<SignalsDataSourc
|
||||
return;
|
||||
}
|
||||
|
||||
// Create schema and base table
|
||||
// Create base table (schema created by central migration runner)
|
||||
const string ddl = @"
|
||||
CREATE SCHEMA IF NOT EXISTS signals;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS signals.unknowns (
|
||||
id TEXT NOT NULL,
|
||||
subject_key TEXT NOT NULL,
|
||||
|
||||
@@ -41,7 +41,7 @@ describe('AppConfigService', () => {
|
||||
apiBaseUrls: {
|
||||
gateway: 'http://router.stella-ops.local',
|
||||
scanner: 'http://scanner.stella-ops.local',
|
||||
policy: 'http://policy-gateway.stella-ops.local',
|
||||
policy: 'http://policy-engine.stella-ops.local',
|
||||
concelier: 'http://concelier.stella-ops.local',
|
||||
attestor: 'http://attestor.stella-ops.local',
|
||||
authority: 'http://authority.stella-ops.local',
|
||||
|
||||
Reference in New Issue
Block a user