release orchestrator v1 draft and build fixes

This commit is contained in:
master
2026-01-12 12:24:17 +02:00
parent f3de858c59
commit 9873f80830
1598 changed files with 240385 additions and 5944 deletions

View File

@@ -0,0 +1,651 @@
// <copyright file="EvidenceThreadEndpoints.cs" company="StellaOps">
// Copyright (c) StellaOps. Licensed under the AGPL-3.0-or-later.
// </copyright>
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Mvc;
using Microsoft.AspNetCore.Routing;
using StellaOps.Platform.WebService.Services;
using StellaOps.ReleaseOrchestrator.EvidenceThread.Export;
using StellaOps.ReleaseOrchestrator.EvidenceThread.Models;
using StellaOps.ReleaseOrchestrator.EvidenceThread.Services;
using StellaOps.ReleaseOrchestrator.EvidenceThread.Transcript;
namespace StellaOps.Platform.WebService.Endpoints;
/// <summary>
/// REST endpoints for Evidence Thread operations.
/// </summary>
public static class EvidenceThreadEndpoints
{
    /// <summary>
    /// Maps Evidence Thread API endpoints under <c>/api/v1/evidence</c>.
    /// </summary>
    /// <param name="app">The endpoint route builder to register routes on.</param>
    /// <returns>The same <paramref name="app"/> instance, for fluent chaining.</returns>
    public static IEndpointRouteBuilder MapEvidenceThreadEndpoints(this IEndpointRouteBuilder app)
    {
        var evidence = app.MapGroup("/api/v1/evidence")
            .WithTags("Evidence Thread");

        // GET /api/v1/evidence/{artifactDigest} - Get evidence thread for artifact
        evidence.MapGet("/{artifactDigest}", GetEvidenceThread)
            .WithName("GetEvidenceThread")
            .WithSummary("Get evidence thread for an artifact")
            .WithDescription("Retrieves the full evidence thread graph for an artifact by its digest.")
            .Produces<EvidenceThreadResponse>(StatusCodes.Status200OK)
            .Produces(StatusCodes.Status404NotFound)
            .Produces(StatusCodes.Status400BadRequest);

        // POST /api/v1/evidence/{artifactDigest}/export - Export thread as DSSE bundle
        evidence.MapPost("/{artifactDigest}/export", ExportEvidenceThread)
            .WithName("ExportEvidenceThread")
            .WithSummary("Export evidence thread as DSSE bundle")
            .WithDescription("Exports the evidence thread as a signed DSSE envelope for offline verification.")
            .Produces<EvidenceExportResponse>(StatusCodes.Status200OK)
            .Produces(StatusCodes.Status404NotFound)
            .Produces(StatusCodes.Status400BadRequest);

        // POST /api/v1/evidence/{artifactDigest}/transcript - Generate transcript
        evidence.MapPost("/{artifactDigest}/transcript", GenerateTranscript)
            .WithName("GenerateEvidenceTranscript")
            .WithSummary("Generate natural language transcript")
            .WithDescription("Generates a natural language transcript explaining the evidence thread.")
            .Produces<EvidenceTranscriptResponse>(StatusCodes.Status200OK)
            .Produces(StatusCodes.Status404NotFound)
            .Produces(StatusCodes.Status400BadRequest);

        // GET /api/v1/evidence/{artifactDigest}/nodes - Get evidence nodes
        evidence.MapGet("/{artifactDigest}/nodes", GetEvidenceNodes)
            .WithName("GetEvidenceNodes")
            .WithSummary("Get evidence nodes for an artifact")
            .WithDescription("Retrieves all evidence nodes in the thread.")
            .Produces<EvidenceNodeListResponse>(StatusCodes.Status200OK)
            .Produces(StatusCodes.Status404NotFound)
            .Produces(StatusCodes.Status400BadRequest);

        // GET /api/v1/evidence/{artifactDigest}/links - Get evidence links
        evidence.MapGet("/{artifactDigest}/links", GetEvidenceLinks)
            .WithName("GetEvidenceLinks")
            .WithSummary("Get evidence links for an artifact")
            .WithDescription("Retrieves all evidence links in the thread.")
            .Produces<EvidenceLinkListResponse>(StatusCodes.Status200OK)
            .Produces(StatusCodes.Status404NotFound)
            .Produces(StatusCodes.Status400BadRequest);

        // POST /api/v1/evidence/{artifactDigest}/collect - Trigger evidence collection
        evidence.MapPost("/{artifactDigest}/collect", CollectEvidence)
            .WithName("CollectEvidence")
            .WithSummary("Collect evidence for an artifact")
            .WithDescription("Triggers collection of all available evidence for an artifact.")
            .Produces<EvidenceCollectionResponse>(StatusCodes.Status200OK)
            .Produces(StatusCodes.Status400BadRequest);

        return app;
    }

    /// <summary>
    /// GET handler: returns a summary of the evidence thread (status, verdict, node/link counts)
    /// for the given artifact digest, scoped to the resolved tenant.
    /// </summary>
    private static async Task<IResult> GetEvidenceThread(
        HttpContext context,
        PlatformRequestContextResolver resolver,
        IEvidenceThreadService service,
        string artifactDigest,
        [FromQuery] bool? includeContent,
        CancellationToken ct)
    {
        // All evidence data is tenant-scoped; resolve the tenant context first.
        if (!TryResolveContext(context, resolver, out var requestContext, out var failure))
        {
            return failure!;
        }
        if (string.IsNullOrWhiteSpace(artifactDigest))
        {
            return Results.BadRequest(new { error = "artifact_digest_required" });
        }
        // Node content is included by default; callers opt out with ?includeContent=false.
        var options = new EvidenceThreadOptions
        {
            IncludeContent = includeContent ?? true
        };
        var graph = await service.GetThreadGraphAsync(
            requestContext!.TenantId,
            artifactDigest,
            options,
            ct).ConfigureAwait(false);
        if (graph is null)
        {
            return Results.NotFound(new { error = "thread_not_found", artifactDigest });
        }
        return Results.Ok(new EvidenceThreadResponse
        {
            ThreadId = graph.Thread.Id,
            TenantId = graph.Thread.TenantId,
            ArtifactDigest = graph.Thread.ArtifactDigest,
            ArtifactName = graph.Thread.ArtifactName,
            Status = graph.Thread.Status.ToString(),
            Verdict = graph.Thread.Verdict?.ToString(),
            RiskScore = graph.Thread.RiskScore,
            ReachabilityMode = graph.Thread.ReachabilityMode?.ToString(),
            NodeCount = graph.Nodes.Length,
            LinkCount = graph.Links.Length,
            CreatedAt = graph.Thread.CreatedAt,
            UpdatedAt = graph.Thread.UpdatedAt
        });
    }

    /// <summary>
    /// POST handler: exports the evidence thread via <see cref="IDsseThreadExporter"/>.
    /// The request body is optional; missing fields default to DSSE format, with
    /// transcript included and signing enabled.
    /// </summary>
    private static async Task<IResult> ExportEvidenceThread(
        HttpContext context,
        PlatformRequestContextResolver resolver,
        IEvidenceThreadService threadService,
        IDsseThreadExporter exporter,
        string artifactDigest,
        [FromBody] EvidenceExportRequest? request,
        CancellationToken ct)
    {
        if (!TryResolveContext(context, resolver, out var requestContext, out var failure))
        {
            return failure!;
        }
        if (string.IsNullOrWhiteSpace(artifactDigest))
        {
            return Results.BadRequest(new { error = "artifact_digest_required" });
        }
        // null options -> service default thread-graph options.
        var graph = await threadService.GetThreadGraphAsync(
            requestContext!.TenantId,
            artifactDigest,
            null,
            ct).ConfigureAwait(false);
        if (graph is null)
        {
            return Results.NotFound(new { error = "thread_not_found", artifactDigest });
        }
        var options = new ThreadExportOptions
        {
            Format = ParseExportFormat(request?.Format),
            IncludeTranscript = request?.IncludeTranscript ?? true,
            Sign = request?.Sign ?? true,
            SigningKeyId = request?.SigningKeyId
        };
        var result = await exporter.ExportAsync(graph, options, ct).ConfigureAwait(false);
        if (!result.Success)
        {
            // Exporter failures surface as 400 with the exporter's own error code/message.
            return Results.BadRequest(new
            {
                error = result.ErrorCode,
                message = result.ErrorMessage
            });
        }
        return Results.Ok(new EvidenceExportResponse
        {
            ThreadId = result.ThreadId,
            Format = result.Format.ToString(),
            ContentDigest = result.ContentDigest,
            SizeBytes = result.SizeBytes,
            SigningKeyId = result.SigningKeyId,
            ExportedAt = result.ExportedAt,
            DurationMs = (long)result.Duration.TotalMilliseconds,
            // Envelope is only present for signed/DSSE exports; map it when available.
            Envelope = result.Envelope is not null
                ? new DsseEnvelopeResponse
                {
                    PayloadType = result.Envelope.PayloadType,
                    Payload = result.Envelope.Payload,
                    PayloadDigest = result.Envelope.PayloadDigest,
                    Signatures = result.Envelope.Signatures.Select(s => new DsseSignatureResponse
                    {
                        KeyId = s.KeyId,
                        Sig = s.Sig,
                        Algorithm = s.Algorithm
                    }).ToList()
                }
                : null
        });
    }

    /// <summary>
    /// POST handler: generates a natural-language transcript for the thread.
    /// Missing request fields default to a detailed transcript with LLM rationale enabled.
    /// </summary>
    private static async Task<IResult> GenerateTranscript(
        HttpContext context,
        PlatformRequestContextResolver resolver,
        IEvidenceThreadService threadService,
        ITranscriptGenerator transcriptGenerator,
        string artifactDigest,
        [FromBody] EvidenceTranscriptRequest? request,
        CancellationToken ct)
    {
        if (!TryResolveContext(context, resolver, out var requestContext, out var failure))
        {
            return failure!;
        }
        if (string.IsNullOrWhiteSpace(artifactDigest))
        {
            return Results.BadRequest(new { error = "artifact_digest_required" });
        }
        var graph = await threadService.GetThreadGraphAsync(
            requestContext!.TenantId,
            artifactDigest,
            null,
            ct).ConfigureAwait(false);
        if (graph is null)
        {
            return Results.NotFound(new { error = "thread_not_found", artifactDigest });
        }
        var options = new TranscriptOptions
        {
            Type = ParseTranscriptType(request?.Type),
            IncludeLlmRationale = request?.IncludeLlmRationale ?? true,
            RationalePromptHint = request?.RationalePromptHint,
            MaxLength = request?.MaxLength
        };
        var transcript = await transcriptGenerator.GenerateTranscriptAsync(graph, options, ct).ConfigureAwait(false);
        return Results.Ok(new EvidenceTranscriptResponse
        {
            TranscriptId = transcript.Id,
            ThreadId = transcript.ThreadId,
            Type = transcript.TranscriptType.ToString(),
            TemplateVersion = transcript.TemplateVersion,
            LlmModel = transcript.LlmModel,
            Content = transcript.Content,
            AnchorCount = transcript.Anchors.Length,
            GeneratedAt = transcript.GeneratedAt
        });
    }

    /// <summary>
    /// GET handler: returns the evidence nodes of the thread, optionally filtered by a
    /// comma-separated list of node kinds (case-insensitive) via <c>?kind=</c>.
    /// </summary>
    private static async Task<IResult> GetEvidenceNodes(
        HttpContext context,
        PlatformRequestContextResolver resolver,
        IEvidenceThreadService service,
        string artifactDigest,
        [FromQuery] string? kind,
        CancellationToken ct)
    {
        if (!TryResolveContext(context, resolver, out var requestContext, out var failure))
        {
            return failure!;
        }
        if (string.IsNullOrWhiteSpace(artifactDigest))
        {
            return Results.BadRequest(new { error = "artifact_digest_required" });
        }
        // Unrecognized kind tokens are silently dropped.
        // NOTE(review): if every token is invalid this produces an empty (non-null)
        // filter list rather than a 400 or "no filter" — confirm the service treats an
        // empty NodeKinds list the way callers expect.
        var filterKinds = string.IsNullOrWhiteSpace(kind)
            ? null
            : kind.Split(',')
                .Select(k => Enum.TryParse<EvidenceNodeKind>(k.Trim(), true, out var nk) ? nk : (EvidenceNodeKind?)null)
                .Where(k => k.HasValue)
                .Select(k => k!.Value)
                .ToList();
        var options = new EvidenceThreadOptions
        {
            IncludeContent = true,
            NodeKinds = filterKinds
        };
        var graph = await service.GetThreadGraphAsync(
            requestContext!.TenantId,
            artifactDigest,
            options,
            ct).ConfigureAwait(false);
        if (graph is null)
        {
            return Results.NotFound(new { error = "thread_not_found", artifactDigest });
        }
        var nodes = graph.Nodes.Select(n => new EvidenceNodeResponse
        {
            Id = n.Id,
            Kind = n.Kind.ToString(),
            RefId = n.RefId,
            RefDigest = n.RefDigest,
            Title = n.Title,
            Summary = n.Summary,
            Confidence = n.Confidence,
            AnchorCount = n.Anchors.Length,
            CreatedAt = n.CreatedAt
        }).ToList();
        return Results.Ok(new EvidenceNodeListResponse
        {
            ThreadId = graph.Thread.Id,
            ArtifactDigest = artifactDigest,
            Nodes = nodes,
            TotalCount = nodes.Count
        });
    }

    /// <summary>
    /// GET handler: returns all evidence links (edges) of the thread graph.
    /// </summary>
    private static async Task<IResult> GetEvidenceLinks(
        HttpContext context,
        PlatformRequestContextResolver resolver,
        IEvidenceThreadService service,
        string artifactDigest,
        CancellationToken ct)
    {
        if (!TryResolveContext(context, resolver, out var requestContext, out var failure))
        {
            return failure!;
        }
        if (string.IsNullOrWhiteSpace(artifactDigest))
        {
            return Results.BadRequest(new { error = "artifact_digest_required" });
        }
        var graph = await service.GetThreadGraphAsync(
            requestContext!.TenantId,
            artifactDigest,
            null,
            ct).ConfigureAwait(false);
        if (graph is null)
        {
            return Results.NotFound(new { error = "thread_not_found", artifactDigest });
        }
        var links = graph.Links.Select(l => new EvidenceLinkResponse
        {
            Id = l.Id,
            SrcNodeId = l.SrcNodeId,
            DstNodeId = l.DstNodeId,
            Relation = l.Relation.ToString(),
            Weight = l.Weight,
            CreatedAt = l.CreatedAt
        }).ToList();
        return Results.Ok(new EvidenceLinkListResponse
        {
            ThreadId = graph.Thread.Id,
            ArtifactDigest = artifactDigest,
            Links = links,
            TotalCount = links.Count
        });
    }

    /// <summary>
    /// POST handler: triggers evidence collection for an artifact. Creates the thread on
    /// first use, then runs all collectors enabled in the (optional) request body; each
    /// collection flag defaults to true.
    /// </summary>
    private static async Task<IResult> CollectEvidence(
        HttpContext context,
        PlatformRequestContextResolver resolver,
        IEvidenceThreadService threadService,
        IEvidenceNodeCollector collector,
        string artifactDigest,
        [FromBody] EvidenceCollectionRequest? request,
        CancellationToken ct)
    {
        if (!TryResolveContext(context, resolver, out var requestContext, out var failure))
        {
            return failure!;
        }
        if (string.IsNullOrWhiteSpace(artifactDigest))
        {
            return Results.BadRequest(new { error = "artifact_digest_required" });
        }
        // Get or create the thread
        var thread = await threadService.GetOrCreateThreadAsync(
            requestContext!.TenantId,
            artifactDigest,
            artifactName: null,
            ct).ConfigureAwait(false);
        var options = new EvidenceCollectionOptions
        {
            BaseArtifactDigest = request?.BaseArtifactDigest,
            CollectSbomDiff = request?.CollectSbomDiff ?? true,
            CollectReachability = request?.CollectReachability ?? true,
            CollectVex = request?.CollectVex ?? true,
            CollectAttestations = request?.CollectAttestations ?? true
        };
        var result = await collector.CollectAllAsync(thread.Id, artifactDigest, options, ct).ConfigureAwait(false);
        // Partial failures are reported in Errors; the endpoint still returns 200.
        return Results.Ok(new EvidenceCollectionResponse
        {
            ThreadId = thread.Id,
            ArtifactDigest = artifactDigest,
            NodesCollected = result.Nodes.Count,
            LinksCreated = result.Links.Count,
            ErrorCount = result.Errors.Count,
            Errors = result.Errors.Select(e => new EvidenceCollectionErrorResponse
            {
                Source = e.Source,
                Message = e.Message,
                ExceptionType = e.ExceptionType
            }).ToList(),
            DurationMs = result.DurationMs
        });
    }

    /// <summary>
    /// Maps a caller-supplied format string (case-insensitive) to <see cref="ThreadExportFormat"/>.
    /// NOTE(review): unknown or missing values silently fall back to DSSE rather than
    /// rejecting the request — confirm this lenient default is intended.
    /// </summary>
    private static ThreadExportFormat ParseExportFormat(string? format) => format?.ToLowerInvariant() switch
    {
        "dsse" => ThreadExportFormat.Dsse,
        "json" => ThreadExportFormat.Json,
        "markdown" => ThreadExportFormat.Markdown,
        "pdf" => ThreadExportFormat.Pdf,
        _ => ThreadExportFormat.Dsse
    };

    /// <summary>
    /// Maps a caller-supplied transcript type string (case-insensitive) to
    /// <see cref="TranscriptType"/>; unknown or missing values fall back to Detailed.
    /// </summary>
    private static TranscriptType ParseTranscriptType(string? type) => type?.ToLowerInvariant() switch
    {
        "summary" => TranscriptType.Summary,
        "detailed" => TranscriptType.Detailed,
        "audit" => TranscriptType.Audit,
        _ => TranscriptType.Detailed
    };

    /// <summary>
    /// Resolves the platform request context (tenant, etc.) from the HTTP context.
    /// On failure, <paramref name="failure"/> holds a 400 result carrying the resolver's
    /// error code (or "tenant_missing" when none is provided).
    /// </summary>
    private static bool TryResolveContext(
        HttpContext context,
        PlatformRequestContextResolver resolver,
        out PlatformRequestContext? requestContext,
        out IResult? failure)
    {
        if (resolver.TryResolve(context, out requestContext, out var error))
        {
            failure = null;
            return true;
        }
        failure = Results.BadRequest(new { error = error ?? "tenant_missing" });
        return false;
    }
}
#region Request/Response DTOs
/// <summary>
/// Response for evidence thread query.
/// </summary>
public sealed record EvidenceThreadResponse
{
    /// <summary>Identifier of the evidence thread.</summary>
    public Guid ThreadId { get; init; }
    /// <summary>Owning tenant identifier.</summary>
    public string? TenantId { get; init; }
    /// <summary>Digest of the artifact this thread describes.</summary>
    public string? ArtifactDigest { get; init; }
    /// <summary>Human-readable artifact name, if known.</summary>
    public string? ArtifactName { get; init; }
    /// <summary>Thread status, rendered as the enum member name.</summary>
    public string? Status { get; init; }
    /// <summary>Thread verdict name, or null when no verdict has been reached.</summary>
    public string? Verdict { get; init; }
    /// <summary>Computed risk score, if available.</summary>
    public decimal? RiskScore { get; init; }
    /// <summary>Reachability analysis mode name, if set.</summary>
    public string? ReachabilityMode { get; init; }
    /// <summary>Number of evidence nodes in the thread graph.</summary>
    public int NodeCount { get; init; }
    /// <summary>Number of evidence links in the thread graph.</summary>
    public int LinkCount { get; init; }
    /// <summary>When the thread was created.</summary>
    public DateTimeOffset CreatedAt { get; init; }
    /// <summary>When the thread was last updated.</summary>
    public DateTimeOffset UpdatedAt { get; init; }
}
/// <summary>
/// Request for evidence export.
/// </summary>
public sealed record EvidenceExportRequest
{
    /// <summary>Export format name ("dsse", "json", "markdown", "pdf"); defaults to DSSE when omitted.</summary>
    public string? Format { get; init; }
    /// <summary>Whether to embed the transcript in the export; defaults to true when omitted.</summary>
    public bool? IncludeTranscript { get; init; }
    /// <summary>Whether to sign the export; defaults to true when omitted.</summary>
    public bool? Sign { get; init; }
    /// <summary>Optional signing key identifier to use.</summary>
    public string? SigningKeyId { get; init; }
}
/// <summary>
/// Response for evidence export.
/// </summary>
public sealed record EvidenceExportResponse
{
    /// <summary>Identifier of the exported thread.</summary>
    public Guid ThreadId { get; init; }
    /// <summary>Export format name actually used.</summary>
    public string? Format { get; init; }
    /// <summary>Digest of the exported content.</summary>
    public string? ContentDigest { get; init; }
    /// <summary>Size of the exported content in bytes.</summary>
    public long SizeBytes { get; init; }
    /// <summary>Signing key identifier used, if signed.</summary>
    public string? SigningKeyId { get; init; }
    /// <summary>Timestamp of the export.</summary>
    public DateTimeOffset ExportedAt { get; init; }
    /// <summary>Export duration in whole milliseconds.</summary>
    public long DurationMs { get; init; }
    /// <summary>DSSE envelope, when the export produced one; otherwise null.</summary>
    public DsseEnvelopeResponse? Envelope { get; init; }
}
/// <summary>
/// DSSE envelope response DTO.
/// </summary>
public sealed record DsseEnvelopeResponse
{
    /// <summary>DSSE payload type identifier.</summary>
    public string? PayloadType { get; init; }
    /// <summary>Envelope payload as produced by the exporter.</summary>
    public string? Payload { get; init; }
    /// <summary>Digest of the payload.</summary>
    public string? PayloadDigest { get; init; }
    /// <summary>Signatures attached to the envelope.</summary>
    public List<DsseSignatureResponse>? Signatures { get; init; }
}
/// <summary>
/// DSSE signature response DTO.
/// </summary>
public sealed record DsseSignatureResponse
{
    /// <summary>Identifier of the key that produced the signature.</summary>
    public string? KeyId { get; init; }
    /// <summary>Signature value.</summary>
    public string? Sig { get; init; }
    /// <summary>Signature algorithm name.</summary>
    public string? Algorithm { get; init; }
}
/// <summary>
/// Request for transcript generation.
/// </summary>
public sealed record EvidenceTranscriptRequest
{
    /// <summary>Transcript type ("summary", "detailed", "audit"); defaults to detailed when omitted.</summary>
    public string? Type { get; init; }
    /// <summary>Whether to include LLM-generated rationale; defaults to true when omitted.</summary>
    public bool? IncludeLlmRationale { get; init; }
    /// <summary>Optional hint passed to the rationale prompt.</summary>
    public string? RationalePromptHint { get; init; }
    /// <summary>Optional maximum transcript length.</summary>
    public int? MaxLength { get; init; }
}
/// <summary>
/// Response for transcript generation.
/// </summary>
public sealed record EvidenceTranscriptResponse
{
    /// <summary>Identifier of the generated transcript.</summary>
    public Guid TranscriptId { get; init; }
    /// <summary>Identifier of the thread the transcript describes.</summary>
    public Guid ThreadId { get; init; }
    /// <summary>Transcript type name.</summary>
    public string? Type { get; init; }
    /// <summary>Version of the template used to render the transcript.</summary>
    public string? TemplateVersion { get; init; }
    /// <summary>LLM model name used for rationale, if any.</summary>
    public string? LlmModel { get; init; }
    /// <summary>Transcript text content.</summary>
    public string? Content { get; init; }
    /// <summary>Number of anchors embedded in the transcript.</summary>
    public int AnchorCount { get; init; }
    /// <summary>When the transcript was generated.</summary>
    public DateTimeOffset GeneratedAt { get; init; }
}
/// <summary>
/// Response for evidence node query.
/// </summary>
public sealed record EvidenceNodeResponse
{
    /// <summary>Node identifier.</summary>
    public Guid Id { get; init; }
    /// <summary>Node kind, rendered as the enum member name.</summary>
    public string? Kind { get; init; }
    /// <summary>Identifier of the referenced evidence source.</summary>
    public string? RefId { get; init; }
    /// <summary>Digest of the referenced evidence content.</summary>
    public string? RefDigest { get; init; }
    /// <summary>Short node title.</summary>
    public string? Title { get; init; }
    /// <summary>Node summary text.</summary>
    public string? Summary { get; init; }
    /// <summary>Confidence value for the evidence, if available.</summary>
    public decimal? Confidence { get; init; }
    /// <summary>Number of anchors attached to the node.</summary>
    public int AnchorCount { get; init; }
    /// <summary>When the node was created.</summary>
    public DateTimeOffset CreatedAt { get; init; }
}
/// <summary>
/// Response for evidence node list.
/// </summary>
public sealed record EvidenceNodeListResponse
{
    /// <summary>Identifier of the thread the nodes belong to.</summary>
    public Guid ThreadId { get; init; }
    /// <summary>Digest of the artifact queried.</summary>
    public string? ArtifactDigest { get; init; }
    /// <summary>Evidence nodes (after any kind filtering).</summary>
    public List<EvidenceNodeResponse>? Nodes { get; init; }
    /// <summary>Number of nodes returned.</summary>
    public int TotalCount { get; init; }
}
/// <summary>
/// Response for evidence link query.
/// </summary>
public sealed record EvidenceLinkResponse
{
    /// <summary>Link identifier.</summary>
    public Guid Id { get; init; }
    /// <summary>Identifier of the source node.</summary>
    public Guid SrcNodeId { get; init; }
    /// <summary>Identifier of the destination node.</summary>
    public Guid DstNodeId { get; init; }
    /// <summary>Link relation, rendered as the enum member name.</summary>
    public string? Relation { get; init; }
    /// <summary>Link weight, if available.</summary>
    public decimal? Weight { get; init; }
    /// <summary>When the link was created.</summary>
    public DateTimeOffset CreatedAt { get; init; }
}
/// <summary>
/// Response for evidence link list.
/// </summary>
public sealed record EvidenceLinkListResponse
{
    /// <summary>Identifier of the thread the links belong to.</summary>
    public Guid ThreadId { get; init; }
    /// <summary>Digest of the artifact queried.</summary>
    public string? ArtifactDigest { get; init; }
    /// <summary>Evidence links in the thread graph.</summary>
    public List<EvidenceLinkResponse>? Links { get; init; }
    /// <summary>Number of links returned.</summary>
    public int TotalCount { get; init; }
}
/// <summary>
/// Request for evidence collection.
/// </summary>
public sealed record EvidenceCollectionRequest
{
    /// <summary>Optional base artifact digest used for diff-based collection.</summary>
    public string? BaseArtifactDigest { get; init; }
    /// <summary>Whether to collect SBOM diff evidence; defaults to true when omitted.</summary>
    public bool? CollectSbomDiff { get; init; }
    /// <summary>Whether to collect reachability evidence; defaults to true when omitted.</summary>
    public bool? CollectReachability { get; init; }
    /// <summary>Whether to collect VEX evidence; defaults to true when omitted.</summary>
    public bool? CollectVex { get; init; }
    /// <summary>Whether to collect attestation evidence; defaults to true when omitted.</summary>
    public bool? CollectAttestations { get; init; }
}
/// <summary>
/// Response for evidence collection.
/// </summary>
public sealed record EvidenceCollectionResponse
{
    /// <summary>Identifier of the thread collection ran against.</summary>
    public Guid ThreadId { get; init; }
    /// <summary>Digest of the artifact collected for.</summary>
    public string? ArtifactDigest { get; init; }
    /// <summary>Number of evidence nodes collected.</summary>
    public int NodesCollected { get; init; }
    /// <summary>Number of evidence links created.</summary>
    public int LinksCreated { get; init; }
    /// <summary>Number of collector errors encountered (collection may partially succeed).</summary>
    public int ErrorCount { get; init; }
    /// <summary>Per-collector error details, if any.</summary>
    public List<EvidenceCollectionErrorResponse>? Errors { get; init; }
    /// <summary>Total collection duration in milliseconds.</summary>
    public long DurationMs { get; init; }
}
/// <summary>
/// Error response for evidence collection.
/// </summary>
public sealed record EvidenceCollectionErrorResponse
{
    /// <summary>Name of the collector/source that failed.</summary>
    public string? Source { get; init; }
    /// <summary>Error message.</summary>
    public string? Message { get; init; }
    /// <summary>Type name of the exception that caused the failure, if any.</summary>
    public string? ExceptionType { get; init; }
}
#endregion

View File

@@ -18,6 +18,7 @@
<ProjectReference Include="..\..\__Libraries\StellaOps.Configuration\StellaOps.Configuration.csproj" />
<ProjectReference Include="..\..\Telemetry\StellaOps.Telemetry.Core\StellaOps.Telemetry.Core\StellaOps.Telemetry.Core.csproj" />
<ProjectReference Include="..\..\Router\__Libraries\StellaOps.Router.AspNet\StellaOps.Router.AspNet.csproj" />
<ProjectReference Include="..\..\ReleaseOrchestrator\__Libraries\StellaOps.ReleaseOrchestrator.EvidenceThread\StellaOps.ReleaseOrchestrator.EvidenceThread.csproj" />
</ItemGroup>
</Project>

View File

@@ -0,0 +1,154 @@
-- Release Orchestrator Schema Migration 001: Integration Hub Tables
-- Creates the release schema and integration hub tables for external system connections.
-- Compliant with docs/db/SPECIFICATION.md
-- ============================================================================
-- Schema Creation
-- ============================================================================
-- release: data tables; release_app: helper functions used by RLS policies.
CREATE SCHEMA IF NOT EXISTS release;
CREATE SCHEMA IF NOT EXISTS release_app;

-- ============================================================================
-- Helper Functions (per SPECIFICATION.md Section 2.3)
-- ============================================================================
-- Resolves the tenant UUID for row-level-security checks from the per-connection
-- setting app.current_tenant_id.
-- NOTE(review): when the setting is absent or empty this silently falls back to
-- the default tenant in shared.tenants instead of failing closed — confirm this
-- is the intended behavior for RLS, since a connection with no tenant context
-- will then see the default tenant's rows.
CREATE OR REPLACE FUNCTION release_app.require_current_tenant()
RETURNS UUID
LANGUAGE sql
STABLE
AS $$
    SELECT COALESCE(
        NULLIF(current_setting('app.current_tenant_id', true), '')::UUID,
        (SELECT id FROM shared.tenants WHERE is_default = true LIMIT 1)
    )
$$;
-- ============================================================================
-- Integration Type Registry (Enum-like)
-- ============================================================================
-- Global (non-tenant-scoped) registry; no RLS is applied to this table.
CREATE TABLE IF NOT EXISTS release.integration_types (
    id                TEXT PRIMARY KEY,
    name              TEXT NOT NULL,
    category          TEXT NOT NULL CHECK (category IN ('scm', 'ci', 'registry', 'vault', 'notify')),
    description       TEXT,
    config_schema     JSONB NOT NULL DEFAULT '{}',
    icon              TEXT,
    documentation_url TEXT,
    is_builtin        BOOLEAN NOT NULL DEFAULT false,
    created_at        TIMESTAMPTZ NOT NULL DEFAULT now()
);
COMMENT ON TABLE release.integration_types IS 'Registry of available integration types (SCM, CI, Registry, Vault, Notify)';
COMMENT ON COLUMN release.integration_types.id IS 'Unique identifier (e.g., github, gitlab, azdo, docker-hub)';
COMMENT ON COLUMN release.integration_types.category IS 'Integration category: scm, ci, registry, vault, notify';
COMMENT ON COLUMN release.integration_types.config_schema IS 'JSON Schema for configuration validation';

-- Seed built-in integration types.
-- ON CONFLICT DO NOTHING keeps the seed idempotent and preserves any local edits
-- to existing rows on re-run.
INSERT INTO release.integration_types (id, name, category, description, is_builtin) VALUES
    ('github', 'GitHub', 'scm', 'GitHub.com and GitHub Enterprise integration', true),
    ('gitlab', 'GitLab', 'scm', 'GitLab.com and GitLab self-hosted integration', true),
    ('azdo', 'Azure DevOps', 'scm', 'Azure DevOps Services and Server integration', true),
    ('gitea', 'Gitea', 'scm', 'Gitea self-hosted Git service integration', true),
    ('docker-hub', 'Docker Hub', 'registry', 'Docker Hub container registry', true),
    ('acr', 'Azure Container Registry', 'registry', 'Azure Container Registry integration', true),
    ('ecr', 'Amazon ECR', 'registry', 'Amazon Elastic Container Registry', true),
    ('gcr', 'Google Container Registry', 'registry', 'Google Cloud Container Registry', true),
    ('ghcr', 'GitHub Container Registry', 'registry', 'GitHub Container Registry integration', true),
    ('harbor', 'Harbor', 'registry', 'Harbor open-source registry', true),
    ('vault', 'HashiCorp Vault', 'vault', 'HashiCorp Vault secrets management', true),
    ('azure-keyvault', 'Azure Key Vault', 'vault', 'Azure Key Vault secrets management', true),
    ('aws-secrets', 'AWS Secrets Manager', 'vault', 'AWS Secrets Manager integration', true),
    ('slack', 'Slack', 'notify', 'Slack webhook notifications', true),
    ('teams', 'Microsoft Teams', 'notify', 'Microsoft Teams webhook notifications', true),
    ('email', 'Email', 'notify', 'Email notifications via SMTP', true),
    ('webhook', 'Webhook', 'notify', 'Generic webhook notifications', true),
    ('github-actions', 'GitHub Actions', 'ci', 'GitHub Actions CI/CD', true),
    ('gitlab-ci', 'GitLab CI', 'ci', 'GitLab CI/CD pipelines', true),
    ('azdo-pipelines', 'Azure Pipelines', 'ci', 'Azure DevOps Pipelines', true),
    ('jenkins', 'Jenkins', 'ci', 'Jenkins CI server', true)
ON CONFLICT (id) DO NOTHING;
-- ============================================================================
-- Integrations Table
-- ============================================================================
-- Tenant-scoped configured connections to external systems.
CREATE TABLE IF NOT EXISTS release.integrations (
    id                  UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id           UUID NOT NULL,
    type_id             TEXT NOT NULL REFERENCES release.integration_types(id),
    name                TEXT NOT NULL,
    display_name        TEXT NOT NULL,
    description         TEXT,
    config_encrypted    BYTEA NOT NULL,
    is_enabled          BOOLEAN NOT NULL DEFAULT true,
    health_status       TEXT NOT NULL DEFAULT 'unknown'
        CHECK (health_status IN ('unknown', 'healthy', 'degraded', 'unhealthy')),
    last_health_check   TIMESTAMPTZ,
    last_health_message TEXT,
    created_at          TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at          TIMESTAMPTZ NOT NULL DEFAULT now(),
    created_by          UUID NOT NULL,
    UNIQUE (tenant_id, name)
);

-- IF NOT EXISTS keeps index creation idempotent, matching the CREATE TABLE
-- IF NOT EXISTS above (otherwise a re-run fails at the first index).
CREATE INDEX IF NOT EXISTS idx_integrations_tenant_type ON release.integrations(tenant_id, type_id);
CREATE INDEX IF NOT EXISTS idx_integrations_tenant_enabled ON release.integrations(tenant_id) WHERE is_enabled = true;
-- Partial index: only unhealthy/degraded/unknown rows, for health dashboards.
CREATE INDEX IF NOT EXISTS idx_integrations_health ON release.integrations(tenant_id, health_status)
    WHERE health_status != 'healthy';

COMMENT ON TABLE release.integrations IS 'Configured integrations with external systems per tenant';
COMMENT ON COLUMN release.integrations.config_encrypted IS 'Encrypted connection configuration (AES-256-GCM)';
COMMENT ON COLUMN release.integrations.health_status IS 'Last known health status from periodic checks';
-- ============================================================================
-- Integration Health Checks (Append-only audit)
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.integration_health_checks (
    id               UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id        UUID NOT NULL,
    integration_id   UUID NOT NULL REFERENCES release.integrations(id) ON DELETE CASCADE,
    status           TEXT NOT NULL CHECK (status IN ('healthy', 'degraded', 'unhealthy', 'timeout')),
    response_time_ms INT,
    message          TEXT,
    details          JSONB,
    checked_at       TIMESTAMPTZ NOT NULL DEFAULT now()
    -- No updated_at - append only
);

-- IF NOT EXISTS keeps index creation idempotent, matching the table definition.
CREATE INDEX IF NOT EXISTS idx_integration_health_integration ON release.integration_health_checks(integration_id, checked_at DESC);
CREATE INDEX IF NOT EXISTS idx_integration_health_tenant ON release.integration_health_checks(tenant_id, checked_at DESC);

COMMENT ON TABLE release.integration_health_checks IS 'Health check history for integration monitoring';
-- ============================================================================
-- Row Level Security
-- ============================================================================
ALTER TABLE release.integrations ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.integration_health_checks ENABLE ROW LEVEL SECURITY;

-- CREATE POLICY has no IF NOT EXISTS; drop-then-create keeps the migration
-- idempotent, consistent with the IF NOT EXISTS style used for the tables.
-- With no WITH CHECK clause, the USING expression also governs writes.
DROP POLICY IF EXISTS tenant_isolation ON release.integrations;
CREATE POLICY tenant_isolation ON release.integrations
    USING (tenant_id = release_app.require_current_tenant());

DROP POLICY IF EXISTS tenant_isolation ON release.integration_health_checks;
CREATE POLICY tenant_isolation ON release.integration_health_checks
    USING (tenant_id = release_app.require_current_tenant());
-- ============================================================================
-- Updated At Trigger
-- ============================================================================
-- Generic trigger function: stamps updated_at on every UPDATE.
CREATE OR REPLACE FUNCTION release.update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = now();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- CREATE TRIGGER has no IF NOT EXISTS; drop-then-create keeps the migration
-- idempotent, consistent with the IF NOT EXISTS style used for the tables.
DROP TRIGGER IF EXISTS update_integrations_updated_at ON release.integrations;
CREATE TRIGGER update_integrations_updated_at
    BEFORE UPDATE ON release.integrations
    FOR EACH ROW
    EXECUTE FUNCTION release.update_updated_at_column();

View File

@@ -0,0 +1,153 @@
-- Release Orchestrator Schema Migration 002: Environment Tables
-- Creates environment and deployment target tables.
-- Compliant with docs/db/SPECIFICATION.md
-- ============================================================================
-- Environments Table
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.environments (
    id                            UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id                     UUID NOT NULL,
    name                          TEXT NOT NULL,
    display_name                  TEXT NOT NULL,
    description                   TEXT,
    order_index                   INT NOT NULL,
    is_production                 BOOLEAN NOT NULL DEFAULT false,
    required_approvals            INT NOT NULL DEFAULT 0,
    require_separation_of_duties  BOOLEAN NOT NULL DEFAULT false,
    auto_promote_from             UUID REFERENCES release.environments(id),
    deployment_timeout_seconds    INT NOT NULL DEFAULT 600,
    notification_channels         JSONB NOT NULL DEFAULT '[]',
    metadata                      JSONB NOT NULL DEFAULT '{}',
    created_at                    TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at                    TIMESTAMPTZ NOT NULL DEFAULT now(),
    created_by                    UUID NOT NULL,
    UNIQUE (tenant_id, name),
    UNIQUE (tenant_id, order_index)
);

-- IF NOT EXISTS keeps index creation idempotent, matching the CREATE TABLE
-- IF NOT EXISTS above (otherwise a re-run fails at the first index).
CREATE INDEX IF NOT EXISTS idx_environments_tenant_order ON release.environments(tenant_id, order_index);
CREATE INDEX IF NOT EXISTS idx_environments_auto_promote ON release.environments(auto_promote_from)
    WHERE auto_promote_from IS NOT NULL;

COMMENT ON TABLE release.environments IS 'Deployment environments with promotion ordering';
COMMENT ON COLUMN release.environments.order_index IS 'Promotion order (lower = earlier in pipeline)';
COMMENT ON COLUMN release.environments.is_production IS 'Marks production environments for special handling';
COMMENT ON COLUMN release.environments.required_approvals IS 'Number of approvals required for promotion';
COMMENT ON COLUMN release.environments.require_separation_of_duties IS 'Requester cannot be approver';
COMMENT ON COLUMN release.environments.auto_promote_from IS 'Auto-promote from this environment on success';
-- ============================================================================
-- Deployment Targets Table
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.targets (
    id                          UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id                   UUID NOT NULL,
    environment_id              UUID NOT NULL REFERENCES release.environments(id) ON DELETE CASCADE,
    name                        TEXT NOT NULL,
    display_name                TEXT NOT NULL,
    description                 TEXT,
    type                        TEXT NOT NULL CHECK (type IN ('docker_host', 'compose_host', 'ecs_service', 'nomad_job', 'ssh_host')),
    connection_config_encrypted BYTEA NOT NULL,
    agent_id                    UUID,
    health_status               TEXT NOT NULL DEFAULT 'unknown'
        CHECK (health_status IN ('unknown', 'healthy', 'degraded', 'unhealthy', 'offline')),
    last_health_check           TIMESTAMPTZ,
    last_health_message         TEXT,
    last_sync_at                TIMESTAMPTZ,
    inventory_snapshot          JSONB,
    labels                      JSONB NOT NULL DEFAULT '{}',
    created_at                  TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at                  TIMESTAMPTZ NOT NULL DEFAULT now(),
    UNIQUE (tenant_id, environment_id, name)
);

-- IF NOT EXISTS keeps index creation idempotent, matching the CREATE TABLE
-- IF NOT EXISTS above (otherwise a re-run fails at the first index).
CREATE INDEX IF NOT EXISTS idx_targets_environment ON release.targets(environment_id);
CREATE INDEX IF NOT EXISTS idx_targets_agent ON release.targets(agent_id) WHERE agent_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_targets_tenant_type ON release.targets(tenant_id, type);
CREATE INDEX IF NOT EXISTS idx_targets_health ON release.targets(tenant_id, health_status)
    WHERE health_status != 'healthy';

-- Generated column for current deployed digest from inventory.
-- NOTE: adding a STORED generated column via ALTER TABLE requires PostgreSQL 12+.
ALTER TABLE release.targets
    ADD COLUMN IF NOT EXISTS current_digest TEXT GENERATED ALWAYS AS (
        inventory_snapshot->>'digest'
    ) STORED;
CREATE INDEX IF NOT EXISTS idx_targets_current_digest ON release.targets(current_digest)
    WHERE current_digest IS NOT NULL;

COMMENT ON TABLE release.targets IS 'Deployment targets within environments';
COMMENT ON COLUMN release.targets.type IS 'Target type: docker_host, compose_host, ecs_service, nomad_job, ssh_host';
COMMENT ON COLUMN release.targets.connection_config_encrypted IS 'Encrypted connection details';
COMMENT ON COLUMN release.targets.agent_id IS 'Assigned agent for this target';
COMMENT ON COLUMN release.targets.inventory_snapshot IS 'Last known deployed state';
COMMENT ON COLUMN release.targets.current_digest IS 'Generated: current OCI digest from inventory';
-- ============================================================================
-- Freeze Windows Table
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.freeze_windows (
    id              UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id       UUID NOT NULL,
    environment_id  UUID REFERENCES release.environments(id) ON DELETE CASCADE,
    name            TEXT NOT NULL,
    description     TEXT,
    start_at        TIMESTAMPTZ NOT NULL,
    end_at          TIMESTAMPTZ NOT NULL,
    is_active       BOOLEAN NOT NULL DEFAULT true,
    allow_emergency BOOLEAN NOT NULL DEFAULT true,
    created_by      UUID NOT NULL,
    created_at      TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at      TIMESTAMPTZ NOT NULL DEFAULT now(),
    CONSTRAINT valid_window CHECK (end_at > start_at)
);

-- IF NOT EXISTS keeps index creation idempotent, matching the CREATE TABLE
-- IF NOT EXISTS above (otherwise a re-run fails at the first index).
CREATE INDEX IF NOT EXISTS idx_freeze_windows_tenant ON release.freeze_windows(tenant_id);
CREATE INDEX IF NOT EXISTS idx_freeze_windows_environment ON release.freeze_windows(environment_id)
    WHERE environment_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_freeze_windows_active ON release.freeze_windows(tenant_id, start_at, end_at)
    WHERE is_active = true;

COMMENT ON TABLE release.freeze_windows IS 'Deployment freeze periods';
COMMENT ON COLUMN release.freeze_windows.environment_id IS 'NULL applies to all environments';
COMMENT ON COLUMN release.freeze_windows.allow_emergency IS 'Allow emergency deployments during freeze';
-- ============================================================================
-- Row Level Security
-- ============================================================================
ALTER TABLE release.environments ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.targets ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.freeze_windows ENABLE ROW LEVEL SECURITY;

-- CREATE POLICY has no IF NOT EXISTS; drop-then-create keeps the migration
-- idempotent, consistent with the IF NOT EXISTS style used for the tables.
-- With no WITH CHECK clause, the USING expression also governs writes.
DROP POLICY IF EXISTS tenant_isolation ON release.environments;
CREATE POLICY tenant_isolation ON release.environments
    USING (tenant_id = release_app.require_current_tenant());

DROP POLICY IF EXISTS tenant_isolation ON release.targets;
CREATE POLICY tenant_isolation ON release.targets
    USING (tenant_id = release_app.require_current_tenant());

DROP POLICY IF EXISTS tenant_isolation ON release.freeze_windows;
CREATE POLICY tenant_isolation ON release.freeze_windows
    USING (tenant_id = release_app.require_current_tenant());
-- ============================================================================
-- Triggers
-- ============================================================================
-- CREATE TRIGGER has no IF NOT EXISTS; drop-then-create keeps the migration
-- idempotent, consistent with the IF NOT EXISTS style used for the tables.
-- The trigger function release.update_updated_at_column() is created in
-- migration 001.
DROP TRIGGER IF EXISTS update_environments_updated_at ON release.environments;
CREATE TRIGGER update_environments_updated_at
    BEFORE UPDATE ON release.environments
    FOR EACH ROW
    EXECUTE FUNCTION release.update_updated_at_column();

DROP TRIGGER IF EXISTS update_targets_updated_at ON release.targets;
CREATE TRIGGER update_targets_updated_at
    BEFORE UPDATE ON release.targets
    FOR EACH ROW
    EXECUTE FUNCTION release.update_updated_at_column();

DROP TRIGGER IF EXISTS update_freeze_windows_updated_at ON release.freeze_windows;
CREATE TRIGGER update_freeze_windows_updated_at
    BEFORE UPDATE ON release.freeze_windows
    FOR EACH ROW
    EXECUTE FUNCTION release.update_updated_at_column();

-- ---------------------------------------------------------------------------
-- [diff artifact] New file boundary (was: "View File" / "@@ -0,0 +1,198 @@")
-- ---------------------------------------------------------------------------
-- Release Orchestrator Schema Migration 003: Release Management Tables
-- Creates components, versions, and release bundle tables.
-- Compliant with docs/db/SPECIFICATION.md
-- ============================================================================
-- Components Table
-- ============================================================================
-- A "component" is a container image tracked for release, linked to a
-- registry integration and optionally to an SCM integration.
CREATE TABLE IF NOT EXISTS release.components (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL,
name TEXT NOT NULL,
display_name TEXT NOT NULL,
description TEXT,
registry_integration_id UUID NOT NULL REFERENCES release.integrations(id),
repository TEXT NOT NULL,
default_tag TEXT NOT NULL DEFAULT 'latest',
scm_integration_id UUID REFERENCES release.integrations(id),
scm_repository TEXT,
scm_branch TEXT,
build_config JSONB NOT NULL DEFAULT '{}',
labels JSONB NOT NULL DEFAULT '{}',
is_archived BOOLEAN NOT NULL DEFAULT false,
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
created_by UUID NOT NULL,
-- Component names are unique per tenant, not globally.
UNIQUE (tenant_id, name)
);
CREATE INDEX idx_components_tenant ON release.components(tenant_id);
CREATE INDEX idx_components_registry ON release.components(registry_integration_id);
CREATE INDEX idx_components_scm ON release.components(scm_integration_id)
WHERE scm_integration_id IS NOT NULL;
-- Partial index for the common "list non-archived components" query.
CREATE INDEX idx_components_active ON release.components(tenant_id)
WHERE is_archived = false;
COMMENT ON TABLE release.components IS 'Container image components tracked for release';
COMMENT ON COLUMN release.components.registry_integration_id IS 'Container registry integration';
COMMENT ON COLUMN release.components.repository IS 'Full repository path (e.g., myorg/myapp)';
COMMENT ON COLUMN release.components.scm_integration_id IS 'Optional SCM integration for source linking';
-- ============================================================================
-- Component Versions Table
-- ============================================================================
-- Immutable, digest-addressed snapshots of a component; no updated_at column
-- and no update trigger since rows are never modified after discovery.
CREATE TABLE IF NOT EXISTS release.component_versions (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL,
component_id UUID NOT NULL REFERENCES release.components(id) ON DELETE CASCADE,
digest TEXT NOT NULL,
tag TEXT,
semver TEXT,
size_bytes BIGINT,
architecture TEXT,
os TEXT,
source_commit_sha TEXT,
source_branch TEXT,
ci_build_id TEXT,
ci_pipeline_url TEXT,
sbom_digest TEXT,
vulnerability_scan_digest TEXT,
attestation_digests TEXT[] NOT NULL DEFAULT '{}',
labels JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
discovered_at TIMESTAMPTZ NOT NULL DEFAULT now(),
-- Same digest may appear under multiple components, but only once per component.
UNIQUE (tenant_id, component_id, digest)
);
CREATE INDEX idx_component_versions_component ON release.component_versions(component_id);
CREATE INDEX idx_component_versions_tenant ON release.component_versions(tenant_id);
CREATE INDEX idx_component_versions_digest ON release.component_versions(digest);
CREATE INDEX idx_component_versions_tag ON release.component_versions(component_id, tag)
WHERE tag IS NOT NULL;
CREATE INDEX idx_component_versions_semver ON release.component_versions(component_id, semver)
WHERE semver IS NOT NULL;
-- Supports "latest versions of a component" listings.
CREATE INDEX idx_component_versions_recent ON release.component_versions(component_id, created_at DESC);
COMMENT ON TABLE release.component_versions IS 'Immutable snapshots of component versions';
COMMENT ON COLUMN release.component_versions.digest IS 'OCI content-addressable digest (sha256:...)';
COMMENT ON COLUMN release.component_versions.sbom_digest IS 'Digest of associated SBOM document';
COMMENT ON COLUMN release.component_versions.attestation_digests IS 'Array of attestation document digests';
-- ============================================================================
-- Releases Table
-- ============================================================================
-- A release is a named bundle of component versions; lifecycle is tracked in
-- status (see CHECK constraint and the COMMENT below).
CREATE TABLE IF NOT EXISTS release.releases (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL,
name TEXT NOT NULL,
display_name TEXT NOT NULL,
description TEXT,
status TEXT NOT NULL DEFAULT 'draft'
CHECK (status IN ('draft', 'ready', 'promoting', 'deployed', 'deprecated', 'cancelled')),
source_commit_sha TEXT,
source_branch TEXT,
source_tag TEXT,
ci_build_id TEXT,
ci_pipeline_url TEXT,
notes TEXT,
changelog TEXT,
labels JSONB NOT NULL DEFAULT '{}',
finalized_at TIMESTAMPTZ,
finalized_by UUID,
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
created_by UUID NOT NULL,
UNIQUE (tenant_id, name)
);
CREATE INDEX idx_releases_tenant ON release.releases(tenant_id);
CREATE INDEX idx_releases_tenant_status ON release.releases(tenant_id, status);
CREATE INDEX idx_releases_recent ON release.releases(tenant_id, created_at DESC);
-- Partial index covering the non-terminal lifecycle states.
CREATE INDEX idx_releases_active ON release.releases(tenant_id)
WHERE status IN ('draft', 'ready', 'promoting');
COMMENT ON TABLE release.releases IS 'Release bundles containing component versions';
COMMENT ON COLUMN release.releases.status IS 'Lifecycle: draft -> ready -> promoting -> deployed/deprecated';
COMMENT ON COLUMN release.releases.finalized_at IS 'Timestamp when release was locked for deployment';
-- ============================================================================
-- Release Components (Junction Table)
-- ============================================================================
-- Many-to-many link between releases and component_versions; composite PK
-- prevents the same version being added to a release twice. Note: the
-- component_versions FK has no ON DELETE action, so a version referenced by
-- a release cannot be deleted.
CREATE TABLE IF NOT EXISTS release.release_components (
release_id UUID NOT NULL REFERENCES release.releases(id) ON DELETE CASCADE,
component_version_id UUID NOT NULL REFERENCES release.component_versions(id),
is_primary BOOLEAN NOT NULL DEFAULT false,
deploy_order INT NOT NULL DEFAULT 0,
deploy_config JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
PRIMARY KEY (release_id, component_version_id)
);
CREATE INDEX idx_release_components_version ON release.release_components(component_version_id);
CREATE INDEX idx_release_components_primary ON release.release_components(release_id)
WHERE is_primary = true;
COMMENT ON TABLE release.release_components IS 'Components included in a release bundle';
COMMENT ON COLUMN release.release_components.is_primary IS 'Primary component for release identification';
COMMENT ON COLUMN release.release_components.deploy_order IS 'Deployment ordering within release';
COMMENT ON COLUMN release.release_components.deploy_config IS 'Component-specific deployment configuration';
-- ============================================================================
-- Release Tags Table
-- ============================================================================
-- Maps semantic tags (latest, stable, ...) to releases, optionally scoped to a
-- single environment. environment_id IS NULL means the tag is tenant-global.
CREATE TABLE IF NOT EXISTS release.release_tags (
tenant_id UUID NOT NULL,
tag TEXT NOT NULL,
release_id UUID NOT NULL REFERENCES release.releases(id) ON DELETE CASCADE,
environment_id UUID REFERENCES release.environments(id),
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
created_by UUID NOT NULL
);
-- PostgreSQL does not allow expressions in a PRIMARY KEY constraint, so the
-- original PRIMARY KEY (tenant_id, tag, COALESCE(environment_id, ...)) would
-- fail at migration time. Enforce the same invariant -- one row per
-- (tenant, tag, scope), where NULL environment_id is folded into a sentinel
-- UUID so a global tag and same-named env-specific tags stay distinct --
-- with a UNIQUE expression index instead.
CREATE UNIQUE INDEX idx_release_tags_unique ON release.release_tags (
tenant_id, tag, COALESCE(environment_id, '00000000-0000-0000-0000-000000000000'::UUID)
);
CREATE INDEX idx_release_tags_release ON release.release_tags(release_id);
CREATE INDEX idx_release_tags_environment ON release.release_tags(environment_id)
WHERE environment_id IS NOT NULL;
COMMENT ON TABLE release.release_tags IS 'Semantic tags for releases (latest, stable, etc.)';
COMMENT ON COLUMN release.release_tags.environment_id IS 'NULL = global tag, non-null = environment-specific';
-- ============================================================================
-- Row Level Security
-- ============================================================================
-- All four tables carry tenant_id; the no-FOR-clause policies apply to every
-- command, with WITH CHECK defaulting to the USING expression.
ALTER TABLE release.components ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.component_versions ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.releases ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.release_tags ENABLE ROW LEVEL SECURITY;
CREATE POLICY tenant_isolation ON release.components
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.component_versions
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.releases
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.release_tags
USING (tenant_id = release_app.require_current_tenant());
-- ============================================================================
-- Triggers
-- ============================================================================
-- Only tables with an updated_at column get the touch trigger
-- (component_versions and release_tags are immutable/append-style).
CREATE TRIGGER update_components_updated_at
BEFORE UPDATE ON release.components
FOR EACH ROW
EXECUTE FUNCTION release.update_updated_at_column();
CREATE TRIGGER update_releases_updated_at
BEFORE UPDATE ON release.releases
FOR EACH ROW
EXECUTE FUNCTION release.update_updated_at_column();

-- ---------------------------------------------------------------------------
-- [diff artifact] New file boundary (was: "View File" / "@@ -0,0 +1,169 @@")
-- ---------------------------------------------------------------------------
-- Release Orchestrator Schema Migration 004: Workflow Tables
-- Creates workflow template and execution tables.
-- Compliant with docs/db/SPECIFICATION.md
-- ============================================================================
-- Workflow Templates Table
-- ============================================================================
-- Versioned DAG templates; (tenant, name, version) is unique so new versions
-- are inserted rather than overwriting old ones.
CREATE TABLE IF NOT EXISTS release.workflow_templates (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL,
name TEXT NOT NULL,
display_name TEXT NOT NULL,
description TEXT,
definition JSONB NOT NULL,
version INT NOT NULL DEFAULT 1,
is_active BOOLEAN NOT NULL DEFAULT true,
is_system BOOLEAN NOT NULL DEFAULT false,
trigger_type TEXT NOT NULL DEFAULT 'manual'
CHECK (trigger_type IN ('manual', 'promotion', 'schedule', 'webhook', 'release_created')),
labels JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
created_by UUID NOT NULL,
UNIQUE (tenant_id, name, version)
);
CREATE INDEX idx_workflow_templates_tenant ON release.workflow_templates(tenant_id);
CREATE INDEX idx_workflow_templates_active ON release.workflow_templates(tenant_id, is_active)
WHERE is_active = true;
CREATE INDEX idx_workflow_templates_trigger ON release.workflow_templates(tenant_id, trigger_type);
-- Generated column for step count
-- NOTE(review): jsonb_array_length errors if definition->'steps' is a non-array
-- JSON value (COALESCE only covers the missing/NULL case) -- confirm the
-- definition schema guarantees 'steps' is always an array.
ALTER TABLE release.workflow_templates
ADD COLUMN IF NOT EXISTS step_count INT GENERATED ALWAYS AS (
jsonb_array_length(COALESCE(definition->'steps', '[]'::JSONB))
) STORED;
COMMENT ON TABLE release.workflow_templates IS 'DAG workflow templates for release automation';
COMMENT ON COLUMN release.workflow_templates.definition IS 'JSON DAG definition with steps and dependencies';
COMMENT ON COLUMN release.workflow_templates.version IS 'Template version for versioned workflows';
COMMENT ON COLUMN release.workflow_templates.trigger_type IS 'What triggers this workflow';
COMMENT ON COLUMN release.workflow_templates.step_count IS 'Generated: number of steps in workflow';
-- ============================================================================
-- Workflow Runs Table
-- ============================================================================
-- One row per workflow execution. context_type/context_id form a polymorphic
-- reference (no FK) to the entity the run operates on.
CREATE TABLE IF NOT EXISTS release.workflow_runs (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL,
template_id UUID NOT NULL REFERENCES release.workflow_templates(id),
template_version INT NOT NULL,
context_type TEXT NOT NULL CHECK (context_type IN ('release', 'promotion', 'deployment', 'rollback', 'manual')),
context_id UUID NOT NULL,
status TEXT NOT NULL DEFAULT 'pending'
CHECK (status IN ('pending', 'running', 'paused', 'succeeded', 'failed', 'cancelled', 'timed_out')),
trigger_type TEXT NOT NULL DEFAULT 'manual',
triggered_by UUID,
input_parameters JSONB NOT NULL DEFAULT '{}',
output_data JSONB,
started_at TIMESTAMPTZ,
completed_at TIMESTAMPTZ,
timeout_at TIMESTAMPTZ,
error_message TEXT,
error_details JSONB,
created_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
CREATE INDEX idx_workflow_runs_tenant ON release.workflow_runs(tenant_id);
CREATE INDEX idx_workflow_runs_template ON release.workflow_runs(template_id);
CREATE INDEX idx_workflow_runs_context ON release.workflow_runs(context_type, context_id);
CREATE INDEX idx_workflow_runs_status ON release.workflow_runs(tenant_id, status);
-- Partial index over the in-flight states for scheduler polling.
CREATE INDEX idx_workflow_runs_active ON release.workflow_runs(tenant_id)
WHERE status IN ('pending', 'running', 'paused');
CREATE INDEX idx_workflow_runs_recent ON release.workflow_runs(tenant_id, created_at DESC);
COMMENT ON TABLE release.workflow_runs IS 'Workflow execution instances';
COMMENT ON COLUMN release.workflow_runs.context_type IS 'What this workflow is operating on';
COMMENT ON COLUMN release.workflow_runs.context_id IS 'ID of the context entity (release, promotion, etc.)';
COMMENT ON COLUMN release.workflow_runs.template_version IS 'Snapshot of template version used';
-- ============================================================================
-- Workflow Steps Table
-- ============================================================================
-- Per-step execution state within a run; step_key is the stable identifier
-- from the template definition, unique within a run.
CREATE TABLE IF NOT EXISTS release.workflow_steps (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL,
run_id UUID NOT NULL REFERENCES release.workflow_runs(id) ON DELETE CASCADE,
step_key TEXT NOT NULL,
step_type TEXT NOT NULL,
step_config JSONB NOT NULL DEFAULT '{}',
status TEXT NOT NULL DEFAULT 'pending'
CHECK (status IN ('pending', 'waiting', 'running', 'succeeded', 'failed', 'skipped', 'cancelled')),
depends_on TEXT[] NOT NULL DEFAULT '{}',
input_data JSONB,
output_data JSONB,
started_at TIMESTAMPTZ,
completed_at TIMESTAMPTZ,
duration_ms INT,
error_message TEXT,
error_details JSONB,
retry_count INT NOT NULL DEFAULT 0,
max_retries INT NOT NULL DEFAULT 0,
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
UNIQUE (run_id, step_key)
);
CREATE INDEX idx_workflow_steps_run ON release.workflow_steps(run_id);
CREATE INDEX idx_workflow_steps_status ON release.workflow_steps(run_id, status);
-- Partial index over unfinished steps for the step scheduler.
CREATE INDEX idx_workflow_steps_pending ON release.workflow_steps(run_id)
WHERE status IN ('pending', 'waiting', 'running');
COMMENT ON TABLE release.workflow_steps IS 'Individual step executions within a workflow run';
COMMENT ON COLUMN release.workflow_steps.step_key IS 'Unique key within the workflow (from template)';
COMMENT ON COLUMN release.workflow_steps.step_type IS 'Step provider type (e.g., scan, approve, deploy)';
COMMENT ON COLUMN release.workflow_steps.depends_on IS 'Array of step_keys this step depends on';
-- ============================================================================
-- Workflow Step Logs Table (Append-only)
-- ============================================================================
-- Structured log lines for a step; rows are only ever inserted.
CREATE TABLE IF NOT EXISTS release.workflow_step_logs (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL,
step_id UUID NOT NULL REFERENCES release.workflow_steps(id) ON DELETE CASCADE,
level TEXT NOT NULL DEFAULT 'info' CHECK (level IN ('trace', 'debug', 'info', 'warn', 'error')),
message TEXT NOT NULL,
data JSONB,
logged_at TIMESTAMPTZ NOT NULL DEFAULT now()
-- No updated_at - append only
);
CREATE INDEX idx_workflow_step_logs_step ON release.workflow_step_logs(step_id, logged_at);
-- Partial index so "show me the problems" queries skip info/debug rows.
CREATE INDEX idx_workflow_step_logs_level ON release.workflow_step_logs(step_id, level)
WHERE level IN ('warn', 'error');
COMMENT ON TABLE release.workflow_step_logs IS 'Structured logs from workflow step execution';
-- ============================================================================
-- Row Level Security
-- ============================================================================
ALTER TABLE release.workflow_templates ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.workflow_runs ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.workflow_steps ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.workflow_step_logs ENABLE ROW LEVEL SECURITY;
CREATE POLICY tenant_isolation ON release.workflow_templates
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.workflow_runs
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.workflow_steps
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.workflow_step_logs
USING (tenant_id = release_app.require_current_tenant());
-- ============================================================================
-- Triggers
-- ============================================================================
-- Only workflow_templates has updated_at in this migration; runs/steps track
-- progress via started_at/completed_at instead.
CREATE TRIGGER update_workflow_templates_updated_at
BEFORE UPDATE ON release.workflow_templates
FOR EACH ROW
EXECUTE FUNCTION release.update_updated_at_column();

-- ---------------------------------------------------------------------------
-- [diff artifact] New file boundary (was: "View File" / "@@ -0,0 +1,177 @@")
-- ---------------------------------------------------------------------------
-- Release Orchestrator Schema Migration 005: Promotion Tables
-- Creates promotion requests, approvals, and gate results tables.
-- Compliant with docs/db/SPECIFICATION.md
-- ============================================================================
-- Promotions Table
-- ============================================================================
-- A request to move a release into a target environment; status tracks the
-- full lifecycle from request through gates, approval, and deployment.
CREATE TABLE IF NOT EXISTS release.promotions (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL,
release_id UUID NOT NULL REFERENCES release.releases(id),
source_environment_id UUID REFERENCES release.environments(id),
target_environment_id UUID NOT NULL REFERENCES release.environments(id),
status TEXT NOT NULL DEFAULT 'pending'
CHECK (status IN (
'pending', 'awaiting_approval', 'approved', 'rejected',
'gates_running', 'gates_passed', 'gates_failed',
'deploying', 'deployed', 'failed', 'cancelled', 'rolled_back'
)),
priority TEXT NOT NULL DEFAULT 'normal' CHECK (priority IN ('low', 'normal', 'high', 'emergency')),
is_emergency BOOLEAN NOT NULL DEFAULT false,
requested_by UUID NOT NULL,
requested_at TIMESTAMPTZ NOT NULL DEFAULT now(),
request_reason TEXT,
decision TEXT CHECK (decision IN ('allow', 'block')),
decision_reason TEXT,
decided_at TIMESTAMPTZ,
decided_by UUID,
-- NOTE(review): deployment_job_id and workflow_run_id carry no FK
-- constraints -- presumably to avoid circular dependencies with later
-- migrations; confirm referential integrity is enforced in the application.
deployment_job_id UUID,
workflow_run_id UUID,
expires_at TIMESTAMPTZ,
metadata JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
CREATE INDEX idx_promotions_tenant ON release.promotions(tenant_id);
CREATE INDEX idx_promotions_release ON release.promotions(release_id);
CREATE INDEX idx_promotions_tenant_status ON release.promotions(tenant_id, status);
CREATE INDEX idx_promotions_target ON release.promotions(target_environment_id);
CREATE INDEX idx_promotions_source ON release.promotions(source_environment_id)
WHERE source_environment_id IS NOT NULL;
-- Partial index over in-flight promotions per target environment.
CREATE INDEX idx_promotions_pending ON release.promotions(tenant_id, target_environment_id)
WHERE status IN ('pending', 'awaiting_approval', 'gates_running');
CREATE INDEX idx_promotions_recent ON release.promotions(tenant_id, created_at DESC);
COMMENT ON TABLE release.promotions IS 'Promotion requests from one environment to another';
COMMENT ON COLUMN release.promotions.source_environment_id IS 'Source env (NULL for initial deployment)';
COMMENT ON COLUMN release.promotions.status IS 'Promotion lifecycle status';
COMMENT ON COLUMN release.promotions.is_emergency IS 'Bypasses freeze windows if allowed';
COMMENT ON COLUMN release.promotions.decision IS 'Final allow/block decision';
-- ============================================================================
-- Approvals Table (Append-only)
-- ============================================================================
-- Immutable audit trail of approve/reject decisions on a promotion.
-- Approver name/email are denormalized at decision time so the record stays
-- accurate even if the user account later changes.
CREATE TABLE IF NOT EXISTS release.approvals (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL,
promotion_id UUID NOT NULL REFERENCES release.promotions(id) ON DELETE CASCADE,
approver_id UUID NOT NULL,
approver_name TEXT NOT NULL,
approver_email TEXT,
decision TEXT NOT NULL CHECK (decision IN ('approved', 'rejected')),
comment TEXT,
conditions JSONB,
created_at TIMESTAMPTZ NOT NULL DEFAULT now()
-- No updated_at - approvals are immutable
);
CREATE INDEX idx_approvals_promotion ON release.approvals(promotion_id);
CREATE INDEX idx_approvals_approver ON release.approvals(approver_id);
CREATE INDEX idx_approvals_tenant ON release.approvals(tenant_id);
-- Prevent modifications to approvals (append-only)
-- Note: This is enforced at the application level as REVOKE may not apply to the app role
COMMENT ON TABLE release.approvals IS 'Immutable approval records for promotions';
COMMENT ON COLUMN release.approvals.conditions IS 'Any conditions attached to approval';
-- ============================================================================
-- Gate Types Table (Enum-like)
-- ============================================================================
-- Global (non-tenant) registry of gate implementations; note it has no
-- tenant_id and is deliberately excluded from the RLS section below.
CREATE TABLE IF NOT EXISTS release.gate_types (
id TEXT PRIMARY KEY,
name TEXT NOT NULL,
category TEXT NOT NULL CHECK (category IN ('security', 'quality', 'compliance', 'custom')),
description TEXT,
config_schema JSONB NOT NULL DEFAULT '{}',
is_builtin BOOLEAN NOT NULL DEFAULT false,
created_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
COMMENT ON TABLE release.gate_types IS 'Registry of available gate types';
-- Seed built-in gate types
-- ON CONFLICT DO NOTHING keeps this migration idempotent and preserves any
-- local edits to previously-seeded rows.
INSERT INTO release.gate_types (id, name, category, description, is_builtin) VALUES
('security-scan', 'Security Scan', 'security', 'Vulnerability scan gate', true),
('sbom-required', 'SBOM Required', 'compliance', 'Requires valid SBOM', true),
('attestation-required', 'Attestation Required', 'compliance', 'Requires signed attestation', true),
('approval-required', 'Manual Approval', 'compliance', 'Requires manual approval', true),
('freeze-window', 'Freeze Window', 'compliance', 'Checks deployment freeze windows', true),
('test-results', 'Test Results', 'quality', 'Requires passing test results', true),
('policy-check', 'Policy Check', 'security', 'Runs policy engine evaluation', true),
('risk-score', 'Risk Score', 'security', 'Checks risk score threshold', true),
('vex-status', 'VEX Status', 'security', 'Checks VEX exploitability status', true),
('reachability', 'Reachability Analysis', 'security', 'Checks reachability analysis results', true)
ON CONFLICT (id) DO NOTHING;
-- ============================================================================
-- Gate Results Table
-- ============================================================================
-- One row per gate evaluation for a promotion; status is the execution state,
-- passed is the boolean outcome (NULL until evaluation completes).
CREATE TABLE IF NOT EXISTS release.gate_results (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL,
promotion_id UUID NOT NULL REFERENCES release.promotions(id) ON DELETE CASCADE,
gate_type_id TEXT NOT NULL REFERENCES release.gate_types(id),
gate_config JSONB NOT NULL DEFAULT '{}',
status TEXT NOT NULL DEFAULT 'pending'
CHECK (status IN ('pending', 'running', 'passed', 'failed', 'skipped', 'error')),
passed BOOLEAN,
score NUMERIC(5,2),
threshold NUMERIC(5,2),
message TEXT,
details JSONB,
evidence_refs TEXT[] NOT NULL DEFAULT '{}',
started_at TIMESTAMPTZ,
completed_at TIMESTAMPTZ,
duration_ms INT,
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
CREATE INDEX idx_gate_results_promotion ON release.gate_results(promotion_id);
CREATE INDEX idx_gate_results_type ON release.gate_results(gate_type_id);
CREATE INDEX idx_gate_results_status ON release.gate_results(promotion_id, status);
-- Partial index for "why was this promotion blocked?" queries.
CREATE INDEX idx_gate_results_failed ON release.gate_results(promotion_id)
WHERE passed = false;
COMMENT ON TABLE release.gate_results IS 'Gate evaluation results for promotions';
COMMENT ON COLUMN release.gate_results.score IS 'Numeric score if applicable';
COMMENT ON COLUMN release.gate_results.threshold IS 'Threshold used for pass/fail decision';
COMMENT ON COLUMN release.gate_results.evidence_refs IS 'References to evidence documents';
-- ============================================================================
-- Row Level Security
-- ============================================================================
-- gate_types is intentionally absent: it has no tenant_id (global registry).
ALTER TABLE release.promotions ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.approvals ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.gate_results ENABLE ROW LEVEL SECURITY;
CREATE POLICY tenant_isolation ON release.promotions
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.approvals
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.gate_results
USING (tenant_id = release_app.require_current_tenant());
-- ============================================================================
-- Triggers
-- ============================================================================
-- approvals has no trigger: append-only, no updated_at column.
CREATE TRIGGER update_promotions_updated_at
BEFORE UPDATE ON release.promotions
FOR EACH ROW
EXECUTE FUNCTION release.update_updated_at_column();
CREATE TRIGGER update_gate_results_updated_at
BEFORE UPDATE ON release.gate_results
FOR EACH ROW
EXECUTE FUNCTION release.update_updated_at_column();

-- ---------------------------------------------------------------------------
-- [diff artifact] New file boundary (was: "View File" / "@@ -0,0 +1,184 @@")
-- ---------------------------------------------------------------------------
-- Release Orchestrator Schema Migration 006: Deployment Tables
-- Creates deployment jobs, tasks, and artifacts tables.
-- Compliant with docs/db/SPECIFICATION.md
-- ============================================================================
-- Deployment Jobs Table
-- ============================================================================
-- One job per promotion execution; progress is aggregated in the
-- *_targets counters while per-target detail lives in deployment_tasks.
CREATE TABLE IF NOT EXISTS release.deployment_jobs (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL,
promotion_id UUID NOT NULL REFERENCES release.promotions(id),
environment_id UUID NOT NULL REFERENCES release.environments(id),
strategy TEXT NOT NULL DEFAULT 'rolling'
CHECK (strategy IN ('rolling', 'blue_green', 'canary', 'all_at_once', 'recreate')),
status TEXT NOT NULL DEFAULT 'pending'
CHECK (status IN (
'pending', 'preparing', 'pulling', 'deploying', 'verifying',
'succeeded', 'failed', 'rolling_back', 'rolled_back', 'cancelled'
)),
strategy_config JSONB NOT NULL DEFAULT '{}',
total_targets INT NOT NULL DEFAULT 0,
completed_targets INT NOT NULL DEFAULT 0,
failed_targets INT NOT NULL DEFAULT 0,
started_at TIMESTAMPTZ,
completed_at TIMESTAMPTZ,
timeout_at TIMESTAMPTZ,
duration_ms INT,
error_message TEXT,
error_details JSONB,
-- Self-referencing FK: points at the compensating rollback job, if any.
rollback_job_id UUID REFERENCES release.deployment_jobs(id),
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
CREATE INDEX idx_deployment_jobs_tenant ON release.deployment_jobs(tenant_id);
CREATE INDEX idx_deployment_jobs_promotion ON release.deployment_jobs(promotion_id);
CREATE INDEX idx_deployment_jobs_environment ON release.deployment_jobs(environment_id);
CREATE INDEX idx_deployment_jobs_status ON release.deployment_jobs(tenant_id, status);
-- Partial index over all non-terminal states for the job poller.
CREATE INDEX idx_deployment_jobs_active ON release.deployment_jobs(tenant_id)
WHERE status IN ('pending', 'preparing', 'pulling', 'deploying', 'verifying', 'rolling_back');
CREATE INDEX idx_deployment_jobs_recent ON release.deployment_jobs(tenant_id, created_at DESC);
COMMENT ON TABLE release.deployment_jobs IS 'Deployment job executions for promotions';
COMMENT ON COLUMN release.deployment_jobs.strategy IS 'Deployment strategy: rolling, blue_green, canary, all_at_once';
COMMENT ON COLUMN release.deployment_jobs.strategy_config IS 'Strategy-specific configuration (batch size, etc.)';
COMMENT ON COLUMN release.deployment_jobs.rollback_job_id IS 'Reference to rollback job if this job was rolled back';
-- ============================================================================
-- Deployment Tasks Table
-- ============================================================================
-- One task per (job, target); batch_index groups targets for rolling
-- strategies. agent_id has no FK -- the agents table is created in a later
-- migration (007), so integrity is presumably application-enforced.
CREATE TABLE IF NOT EXISTS release.deployment_tasks (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL,
job_id UUID NOT NULL REFERENCES release.deployment_jobs(id) ON DELETE CASCADE,
target_id UUID NOT NULL REFERENCES release.targets(id),
agent_id UUID,
batch_index INT NOT NULL DEFAULT 0,
status TEXT NOT NULL DEFAULT 'pending'
CHECK (status IN (
'pending', 'assigned', 'pulling', 'stopping_old', 'starting_new',
'verifying', 'succeeded', 'failed', 'skipped', 'cancelled'
)),
old_digest TEXT,
new_digest TEXT,
digest_deployed TEXT,
sticker_written BOOLEAN NOT NULL DEFAULT false,
health_check_passed BOOLEAN,
started_at TIMESTAMPTZ,
completed_at TIMESTAMPTZ,
duration_ms INT,
error_message TEXT,
error_details JSONB,
logs_ref TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
CREATE INDEX idx_deployment_tasks_job ON release.deployment_tasks(job_id);
CREATE INDEX idx_deployment_tasks_target ON release.deployment_tasks(target_id);
CREATE INDEX idx_deployment_tasks_agent ON release.deployment_tasks(agent_id)
WHERE agent_id IS NOT NULL;
CREATE INDEX idx_deployment_tasks_status ON release.deployment_tasks(job_id, status);
CREATE INDEX idx_deployment_tasks_batch ON release.deployment_tasks(job_id, batch_index);
-- Partial index over unfinished tasks for per-job progress polling.
CREATE INDEX idx_deployment_tasks_pending ON release.deployment_tasks(job_id)
WHERE status IN ('pending', 'assigned', 'pulling', 'stopping_old', 'starting_new', 'verifying');
COMMENT ON TABLE release.deployment_tasks IS 'Per-target deployment task executions';
COMMENT ON COLUMN release.deployment_tasks.batch_index IS 'Batch number for rolling deployments';
COMMENT ON COLUMN release.deployment_tasks.old_digest IS 'Digest before deployment';
COMMENT ON COLUMN release.deployment_tasks.new_digest IS 'Target digest to deploy';
COMMENT ON COLUMN release.deployment_tasks.digest_deployed IS 'Actually deployed digest (verified)';
COMMENT ON COLUMN release.deployment_tasks.sticker_written IS 'Version sticker was written successfully';
COMMENT ON COLUMN release.deployment_tasks.logs_ref IS 'Reference to task execution logs';
-- ============================================================================
-- Deployment Artifacts Table
-- ============================================================================
-- Content-addressed metadata for generated deployment files; the bytes
-- themselves live in external storage behind storage_ref.
CREATE TABLE IF NOT EXISTS release.deployment_artifacts (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL,
job_id UUID NOT NULL REFERENCES release.deployment_jobs(id) ON DELETE CASCADE,
type TEXT NOT NULL CHECK (type IN (
'compose_file', 'task_definition', 'nomad_job', 'helm_values',
'env_file', 'config_map', 'script', 'manifest', 'other'
)),
name TEXT NOT NULL,
content_hash TEXT NOT NULL,
storage_ref TEXT NOT NULL,
size_bytes INT NOT NULL,
metadata JSONB NOT NULL DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
CREATE INDEX idx_deployment_artifacts_job ON release.deployment_artifacts(job_id);
CREATE INDEX idx_deployment_artifacts_type ON release.deployment_artifacts(job_id, type);
-- Hash index enables dedup lookups across jobs by content.
CREATE INDEX idx_deployment_artifacts_hash ON release.deployment_artifacts(content_hash);
COMMENT ON TABLE release.deployment_artifacts IS 'Generated deployment artifacts (compose files, etc.)';
COMMENT ON COLUMN release.deployment_artifacts.storage_ref IS 'Reference to artifact storage';
COMMENT ON COLUMN release.deployment_artifacts.content_hash IS 'SHA-256 of artifact content';
-- ============================================================================
-- Deployment Events Table (Append-only timeline)
-- ============================================================================
-- Insert-only timeline; task_id is NULL for job-level events.
CREATE TABLE IF NOT EXISTS release.deployment_events (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL,
job_id UUID NOT NULL REFERENCES release.deployment_jobs(id) ON DELETE CASCADE,
task_id UUID REFERENCES release.deployment_tasks(id),
event_type TEXT NOT NULL,
severity TEXT NOT NULL DEFAULT 'info' CHECK (severity IN ('trace', 'debug', 'info', 'warn', 'error')),
message TEXT NOT NULL,
data JSONB,
occurred_at TIMESTAMPTZ NOT NULL DEFAULT now()
-- No updated_at - events are immutable
);
CREATE INDEX idx_deployment_events_job ON release.deployment_events(job_id, occurred_at);
CREATE INDEX idx_deployment_events_task ON release.deployment_events(task_id, occurred_at)
WHERE task_id IS NOT NULL;
CREATE INDEX idx_deployment_events_type ON release.deployment_events(job_id, event_type);
-- Partial index for fast error-timeline queries.
CREATE INDEX idx_deployment_events_errors ON release.deployment_events(job_id)
WHERE severity = 'error';
COMMENT ON TABLE release.deployment_events IS 'Timeline of deployment events';
-- ============================================================================
-- Row Level Security
-- ============================================================================
ALTER TABLE release.deployment_jobs ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.deployment_tasks ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.deployment_artifacts ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.deployment_events ENABLE ROW LEVEL SECURITY;
CREATE POLICY tenant_isolation ON release.deployment_jobs
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.deployment_tasks
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.deployment_artifacts
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.deployment_events
USING (tenant_id = release_app.require_current_tenant());
-- ============================================================================
-- Triggers
-- ============================================================================
-- artifacts and events are append-only (no updated_at), hence no triggers.
CREATE TRIGGER update_deployment_jobs_updated_at
BEFORE UPDATE ON release.deployment_jobs
FOR EACH ROW
EXECUTE FUNCTION release.update_updated_at_column();
CREATE TRIGGER update_deployment_tasks_updated_at
BEFORE UPDATE ON release.deployment_tasks
FOR EACH ROW
EXECUTE FUNCTION release.update_updated_at_column();

-- ---------------------------------------------------------------------------
-- [diff artifact] New file boundary (was: "View File" / "@@ -0,0 +1,191 @@")
-- ---------------------------------------------------------------------------
-- Release Orchestrator Schema Migration 007: Agent Tables
-- Creates agent registration and health tables.
-- Compliant with docs/db/SPECIFICATION.md
-- ============================================================================
-- Agents Table
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.agents (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID NOT NULL,
    name TEXT NOT NULL,
    display_name TEXT NOT NULL,
    description TEXT,
    version TEXT NOT NULL,
    status TEXT NOT NULL DEFAULT 'pending'
        CHECK (status IN ('pending', 'active', 'inactive', 'revoked', 'offline')),
    host_info JSONB NOT NULL DEFAULT '{}',
    -- Either mTLS (certificate_*) or API key (api_key_hash) authentication.
    certificate_thumbprint TEXT,
    certificate_expires_at TIMESTAMPTZ,
    api_key_hash TEXT,
    last_heartbeat_at TIMESTAMPTZ,
    last_heartbeat_status JSONB,
    heartbeat_interval_seconds INT NOT NULL DEFAULT 30,
    -- Scheduling capacity: current_task_count is tracked against the cap.
    max_concurrent_tasks INT NOT NULL DEFAULT 5,
    current_task_count INT NOT NULL DEFAULT 0,
    labels JSONB NOT NULL DEFAULT '{}',
    registered_at TIMESTAMPTZ,
    registered_by UUID,
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    UNIQUE (tenant_id, name)
);
CREATE INDEX idx_agents_tenant ON release.agents(tenant_id);
CREATE INDEX idx_agents_tenant_status ON release.agents(tenant_id, status);
CREATE INDEX idx_agents_active ON release.agents(tenant_id)
    WHERE status = 'active';
CREATE INDEX idx_agents_certificate ON release.agents(certificate_thumbprint)
    WHERE certificate_thumbprint IS NOT NULL;
-- Supports stale-heartbeat sweeps over active agents.
CREATE INDEX idx_agents_heartbeat ON release.agents(last_heartbeat_at)
    WHERE status = 'active';
-- Generated column for primary capability
-- Extracted from the last heartbeat payload so it can be indexed directly.
ALTER TABLE release.agents
    ADD COLUMN IF NOT EXISTS primary_capability TEXT GENERATED ALWAYS AS (
        last_heartbeat_status->>'primaryCapability'
    ) STORED;
CREATE INDEX idx_agents_capability ON release.agents(primary_capability)
    WHERE primary_capability IS NOT NULL;
COMMENT ON TABLE release.agents IS 'Registered deployment agents';
COMMENT ON COLUMN release.agents.status IS 'Agent lifecycle: pending -> active -> inactive/revoked';
COMMENT ON COLUMN release.agents.certificate_thumbprint IS 'mTLS certificate thumbprint for authentication';
COMMENT ON COLUMN release.agents.api_key_hash IS 'Hashed API key for authentication';
COMMENT ON COLUMN release.agents.host_info IS 'Host information (OS, arch, IP, etc.)';
COMMENT ON COLUMN release.agents.last_heartbeat_status IS 'Last heartbeat status payload';
COMMENT ON COLUMN release.agents.primary_capability IS 'Generated: primary capability from heartbeat';
-- ============================================================================
-- Agent Capabilities Table
-- ============================================================================
-- NOTE(review): this table has no tenant_id and no RLS policy; tenant
-- isolation appears to rely solely on the FK to release.agents — confirm
-- this is intended.
CREATE TABLE IF NOT EXISTS release.agent_capabilities (
    agent_id UUID NOT NULL REFERENCES release.agents(id) ON DELETE CASCADE,
    capability TEXT NOT NULL CHECK (capability IN ('docker', 'compose', 'ssh', 'winrm', 'ecs', 'nomad')),
    version TEXT,
    config JSONB NOT NULL DEFAULT '{}',
    is_healthy BOOLEAN NOT NULL DEFAULT true,
    last_check_at TIMESTAMPTZ,
    PRIMARY KEY (agent_id, capability)
);
CREATE INDEX idx_agent_capabilities_capability ON release.agent_capabilities(capability);
CREATE INDEX idx_agent_capabilities_healthy ON release.agent_capabilities(capability)
    WHERE is_healthy = true;
COMMENT ON TABLE release.agent_capabilities IS 'Capabilities declared by each agent';
COMMENT ON COLUMN release.agent_capabilities.capability IS 'Capability type: docker, compose, ssh, winrm, ecs, nomad';
COMMENT ON COLUMN release.agent_capabilities.version IS 'Version of the capability (e.g., Docker version)';
-- ============================================================================
-- Agent Heartbeats Table (Append-only, partitioned)
-- ============================================================================
-- NOTE: on a partitioned table, every PRIMARY KEY / UNIQUE constraint must
-- include all partition key columns, so the PK here is (id, created_at)
-- rather than (id) alone — a bare "id UUID PRIMARY KEY" would fail to create.
CREATE TABLE IF NOT EXISTS release.agent_heartbeats (
    id UUID NOT NULL DEFAULT gen_random_uuid(),
    tenant_id UUID NOT NULL,
    agent_id UUID NOT NULL,
    received_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    status JSONB NOT NULL,
    latency_ms INT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    -- No FK to agents for partition performance
    PRIMARY KEY (id, created_at)
) PARTITION BY RANGE (created_at);
-- A DEFAULT partition guarantees inserts always land somewhere; time-ranged
-- partitions can be added later by a partition-maintenance job.
CREATE TABLE IF NOT EXISTS release.agent_heartbeats_default
    PARTITION OF release.agent_heartbeats DEFAULT;
-- Latest-heartbeat lookups per agent / per tenant.
CREATE INDEX idx_agent_heartbeats_agent ON release.agent_heartbeats(agent_id, received_at DESC);
CREATE INDEX idx_agent_heartbeats_tenant ON release.agent_heartbeats(tenant_id, received_at DESC);
COMMENT ON TABLE release.agent_heartbeats IS 'Agent heartbeat history (partitioned by time)';
-- ============================================================================
-- Agent Tasks Table (active task assignments)
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.agent_tasks (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID NOT NULL,
    agent_id UUID NOT NULL REFERENCES release.agents(id),
    task_type TEXT NOT NULL CHECK (task_type IN ('deployment', 'health_check', 'inventory_sync', 'cleanup')),
    -- Polymorphic reference: (task_ref_type, task_ref_id) points at the
    -- concrete task row (e.g. a deployment_task), so no FK is possible here.
    task_ref_type TEXT NOT NULL,
    task_ref_id UUID NOT NULL,
    status TEXT NOT NULL DEFAULT 'pending'
        CHECK (status IN ('pending', 'assigned', 'running', 'completed', 'failed', 'cancelled')),
    priority INT NOT NULL DEFAULT 0,
    assigned_at TIMESTAMPTZ,
    started_at TIMESTAMPTZ,
    completed_at TIMESTAMPTZ,
    timeout_at TIMESTAMPTZ,
    result JSONB,
    error_message TEXT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
CREATE INDEX idx_agent_tasks_agent ON release.agent_tasks(agent_id);
CREATE INDEX idx_agent_tasks_status ON release.agent_tasks(agent_id, status);
-- Partial index over non-terminal states for the scheduler's hot path.
CREATE INDEX idx_agent_tasks_pending ON release.agent_tasks(agent_id)
    WHERE status IN ('pending', 'assigned', 'running');
CREATE INDEX idx_agent_tasks_ref ON release.agent_tasks(task_ref_type, task_ref_id);
COMMENT ON TABLE release.agent_tasks IS 'Task assignments to agents';
COMMENT ON COLUMN release.agent_tasks.task_ref_type IS 'Type of referenced task (deployment_task, etc.)';
COMMENT ON COLUMN release.agent_tasks.task_ref_id IS 'ID of the referenced task';
-- ============================================================================
-- Row Level Security
-- ============================================================================
ALTER TABLE release.agents ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.agent_heartbeats ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.agent_tasks ENABLE ROW LEVEL SECURITY;
CREATE POLICY tenant_isolation ON release.agents
    USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.agent_heartbeats
    USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.agent_tasks
    USING (tenant_id = release_app.require_current_tenant());
-- ============================================================================
-- Triggers
-- ============================================================================
CREATE TRIGGER update_agents_updated_at
    BEFORE UPDATE ON release.agents
    FOR EACH ROW
    EXECUTE FUNCTION release.update_updated_at_column();
CREATE TRIGGER update_agent_tasks_updated_at
    BEFORE UPDATE ON release.agent_tasks
    FOR EACH ROW
    EXECUTE FUNCTION release.update_updated_at_column();
-- ============================================================================
-- Heartbeat cleanup function (call periodically)
-- ============================================================================
-- Deletes heartbeat rows older than the retention window and reports how
-- many were removed. Intended to be invoked by a scheduled maintenance job.
CREATE OR REPLACE FUNCTION release.cleanup_old_heartbeats(retention_days INT DEFAULT 7)
RETURNS INT
LANGUAGE plpgsql
AS $$
DECLARE
    rows_removed INT;
BEGIN
    -- Purge everything created before now() minus the retention window.
    DELETE FROM release.agent_heartbeats hb
    WHERE hb.created_at < now() - make_interval(days => retention_days);

    GET DIAGNOSTICS rows_removed = ROW_COUNT;
    RETURN rows_removed;
END;
$$;
COMMENT ON FUNCTION release.cleanup_old_heartbeats IS 'Cleanup old agent heartbeats (default 7 day retention)';

View File

@@ -0,0 +1,221 @@
-- Release Orchestrator Schema Migration 008: Evidence Tables
-- Creates evidence packets table for immutable audit records.
-- Compliant with docs/db/SPECIFICATION.md
-- ============================================================================
-- Evidence Packets Table (Append-only, immutable)
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.evidence_packets (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID NOT NULL,
    promotion_id UUID NOT NULL REFERENCES release.promotions(id),
    deployment_job_id UUID REFERENCES release.deployment_jobs(id),
    type TEXT NOT NULL CHECK (type IN (
        'release_decision', 'gate_evaluation', 'approval_record',
        'deployment', 'rollback', 'ab_promotion', 'canary_promotion',
        'security_scan', 'policy_evaluation', 'audit_snapshot'
    )),
    version TEXT NOT NULL DEFAULT '1.0',
    content JSONB NOT NULL,
    -- Signature covers content_hash; signer metadata enables key rotation audit.
    content_hash TEXT NOT NULL,
    signature TEXT NOT NULL,
    signature_algorithm TEXT NOT NULL,
    signer_key_ref TEXT NOT NULL,
    signer_key_fingerprint TEXT,
    generated_at TIMESTAMPTZ NOT NULL,
    generator_version TEXT NOT NULL,
    -- Optional hash-chain linkage to the previous packet.
    predecessor_id UUID REFERENCES release.evidence_packets(id),
    chain_position INT,
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT now()
    -- No updated_at - packets are immutable
);
CREATE INDEX idx_evidence_packets_tenant ON release.evidence_packets(tenant_id);
CREATE INDEX idx_evidence_packets_promotion ON release.evidence_packets(promotion_id);
CREATE INDEX idx_evidence_packets_job ON release.evidence_packets(deployment_job_id)
    WHERE deployment_job_id IS NOT NULL;
CREATE INDEX idx_evidence_packets_type ON release.evidence_packets(tenant_id, type);
CREATE INDEX idx_evidence_packets_hash ON release.evidence_packets(content_hash);
CREATE INDEX idx_evidence_packets_chain ON release.evidence_packets(predecessor_id)
    WHERE predecessor_id IS NOT NULL;
CREATE INDEX idx_evidence_packets_recent ON release.evidence_packets(tenant_id, created_at DESC);
-- Generated columns for JSONB hot paths
-- NOTE(review): the ::UUID cast makes inserts fail if content.releaseId is
-- present but not a valid UUID — confirm upstream always writes a UUID there.
ALTER TABLE release.evidence_packets
    ADD COLUMN IF NOT EXISTS release_id UUID GENERATED ALWAYS AS (
        (content->>'releaseId')::UUID
    ) STORED;
ALTER TABLE release.evidence_packets
    ADD COLUMN IF NOT EXISTS evidence_what_type TEXT GENERATED ALWAYS AS (
        content->'what'->>'type'
    ) STORED;
ALTER TABLE release.evidence_packets
    ADD COLUMN IF NOT EXISTS verdict TEXT GENERATED ALWAYS AS (
        content->>'verdict'
    ) STORED;
CREATE INDEX idx_evidence_packets_release ON release.evidence_packets(release_id)
    WHERE release_id IS NOT NULL;
CREATE INDEX idx_evidence_packets_what_type ON release.evidence_packets(tenant_id, evidence_what_type)
    WHERE evidence_what_type IS NOT NULL;
CREATE INDEX idx_evidence_packets_verdict ON release.evidence_packets(tenant_id, verdict)
    WHERE verdict IS NOT NULL;
COMMENT ON TABLE release.evidence_packets IS 'Immutable signed evidence packets for audit';
COMMENT ON COLUMN release.evidence_packets.content IS 'JSON evidence payload';
COMMENT ON COLUMN release.evidence_packets.content_hash IS 'SHA-256 of canonical JSON content';
COMMENT ON COLUMN release.evidence_packets.signature IS 'Digital signature of content_hash';
COMMENT ON COLUMN release.evidence_packets.predecessor_id IS 'Previous packet in chain (for linked evidence)';
COMMENT ON COLUMN release.evidence_packets.chain_position IS 'Position in evidence chain';
COMMENT ON COLUMN release.evidence_packets.release_id IS 'Generated: extracted releaseId from content';
COMMENT ON COLUMN release.evidence_packets.evidence_what_type IS 'Generated: extracted what.type from content';
COMMENT ON COLUMN release.evidence_packets.verdict IS 'Generated: extracted verdict from content';
-- ============================================================================
-- Evidence Exports Table
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.evidence_exports (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID NOT NULL,
    name TEXT NOT NULL,
    description TEXT,
    export_type TEXT NOT NULL CHECK (export_type IN ('pdf', 'json', 'bundle', 'archive')),
    status TEXT NOT NULL DEFAULT 'pending'
        CHECK (status IN ('pending', 'generating', 'completed', 'failed', 'expired')),
    filter_criteria JSONB NOT NULL DEFAULT '{}',
    packet_count INT,
    file_size_bytes BIGINT,
    -- Result artifact: storage reference, optional signed URL, and expiry.
    storage_ref TEXT,
    download_url TEXT,
    expires_at TIMESTAMPTZ,
    generated_at TIMESTAMPTZ,
    requested_by UUID NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
CREATE INDEX idx_evidence_exports_tenant ON release.evidence_exports(tenant_id);
CREATE INDEX idx_evidence_exports_status ON release.evidence_exports(tenant_id, status);
CREATE INDEX idx_evidence_exports_recent ON release.evidence_exports(tenant_id, created_at DESC);
COMMENT ON TABLE release.evidence_exports IS 'Evidence export jobs and results';
-- ============================================================================
-- Evidence Export Packets (Junction table for export contents)
-- ============================================================================
-- NOTE(review): junction table has no tenant_id and no RLS policy; isolation
-- relies on the FKs to RLS-protected tables — confirm this is intended.
CREATE TABLE IF NOT EXISTS release.evidence_export_packets (
    export_id UUID NOT NULL REFERENCES release.evidence_exports(id) ON DELETE CASCADE,
    packet_id UUID NOT NULL REFERENCES release.evidence_packets(id),
    include_order INT NOT NULL,
    PRIMARY KEY (export_id, packet_id)
);
CREATE INDEX idx_evidence_export_packets_packet ON release.evidence_export_packets(packet_id);
COMMENT ON TABLE release.evidence_export_packets IS 'Packets included in an export';
-- ============================================================================
-- Row Level Security
-- ============================================================================
ALTER TABLE release.evidence_packets ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.evidence_exports ENABLE ROW LEVEL SECURITY;
CREATE POLICY tenant_isolation ON release.evidence_packets
    USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.evidence_exports
    USING (tenant_id = release_app.require_current_tenant());
-- ============================================================================
-- Triggers
-- ============================================================================
CREATE TRIGGER update_evidence_exports_updated_at
    BEFORE UPDATE ON release.evidence_exports
    FOR EACH ROW
    EXECUTE FUNCTION release.update_updated_at_column();
-- ============================================================================
-- Prevent modifications to evidence packets (append-only enforcement)
-- ============================================================================
-- Trigger function that unconditionally aborts the statement; attached to
-- both UPDATE and DELETE below so packets can only ever be inserted.
CREATE OR REPLACE FUNCTION release.prevent_evidence_modification()
RETURNS TRIGGER AS $$
BEGIN
    RAISE EXCEPTION 'Evidence packets are immutable and cannot be modified or deleted';
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER prevent_evidence_update
    BEFORE UPDATE ON release.evidence_packets
    FOR EACH ROW
    EXECUTE FUNCTION release.prevent_evidence_modification();
CREATE TRIGGER prevent_evidence_delete
    BEFORE DELETE ON release.evidence_packets
    FOR EACH ROW
    EXECUTE FUNCTION release.prevent_evidence_modification();
-- ============================================================================
-- Evidence verification function
-- ============================================================================
-- Walks the predecessor chain from the given packet back to its root and
-- returns one row per packet in the chain (newest-first by chain_position).
-- NOTE(review): is_valid is always true and validation_error always NULL in
-- this version — hash/signature verification is presumably done by the
-- application layer; confirm before relying on these columns.
CREATE OR REPLACE FUNCTION release.verify_evidence_chain(p_packet_id UUID)
RETURNS TABLE (
    packet_id UUID,
    chain_position INT,
    content_hash TEXT,
    is_valid BOOLEAN,
    validation_error TEXT
)
LANGUAGE plpgsql
STABLE
AS $$
BEGIN
    RETURN QUERY
    WITH RECURSIVE chain AS (
        -- Start from the given packet
        SELECT
            ep.id,
            ep.chain_position,
            ep.content_hash,
            ep.predecessor_id,
            ep.content,
            true AS is_valid,
            NULL::TEXT AS validation_error
        FROM release.evidence_packets ep
        WHERE ep.id = p_packet_id
        UNION ALL
        -- Walk up the chain
        SELECT
            ep.id,
            ep.chain_position,
            ep.content_hash,
            ep.predecessor_id,
            ep.content,
            true AS is_valid,
            NULL::TEXT AS validation_error
        FROM release.evidence_packets ep
        INNER JOIN chain c ON ep.id = c.predecessor_id
    )
    SELECT
        c.id AS packet_id,
        c.chain_position,
        c.content_hash,
        c.is_valid,
        c.validation_error
    FROM chain c
    ORDER BY c.chain_position DESC NULLS LAST;
END;
$$;
COMMENT ON FUNCTION release.verify_evidence_chain IS 'Verify evidence chain integrity for a given packet';

View File

@@ -0,0 +1,206 @@
-- Release Orchestrator Schema Migration 009: Plugin Tables
-- Creates plugin registration and version tables for Release Orchestrator extensions.
-- Compliant with docs/db/SPECIFICATION.md
-- Note: This extends the platform.plugins table from Phase 100 with release-specific plugin types.
-- ============================================================================
-- Release Plugin Types Table (Enum-like)
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.plugin_types (
    id TEXT PRIMARY KEY,
    name TEXT NOT NULL,
    category TEXT NOT NULL CHECK (category IN ('connector', 'step', 'gate', 'strategy', 'notifier')),
    description TEXT,
    interface_contract TEXT NOT NULL,
    -- JSON Schemas describing plugin configuration and I/O payloads.
    config_schema JSONB NOT NULL DEFAULT '{}',
    input_schema JSONB NOT NULL DEFAULT '{}',
    output_schema JSONB NOT NULL DEFAULT '{}',
    is_builtin BOOLEAN NOT NULL DEFAULT false,
    created_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
COMMENT ON TABLE release.plugin_types IS 'Registry of Release Orchestrator plugin types';
COMMENT ON COLUMN release.plugin_types.interface_contract IS 'Interface name (IStepProvider, IGateProvider, etc.)';
-- Seed built-in plugin types
-- Idempotent via ON CONFLICT DO NOTHING so the migration can re-run safely.
INSERT INTO release.plugin_types (id, name, category, interface_contract, description, is_builtin) VALUES
    -- Step providers
    ('step.scan', 'Security Scan Step', 'step', 'IStepProvider', 'Trigger security scan', true),
    ('step.approval', 'Approval Step', 'step', 'IStepProvider', 'Wait for manual approval', true),
    ('step.deploy', 'Deploy Step', 'step', 'IStepProvider', 'Execute deployment', true),
    ('step.rollback', 'Rollback Step', 'step', 'IStepProvider', 'Execute rollback', true),
    ('step.notify', 'Notification Step', 'step', 'IStepProvider', 'Send notifications', true),
    ('step.webhook', 'Webhook Step', 'step', 'IStepProvider', 'Call external webhook', true),
    ('step.script', 'Script Step', 'step', 'IStepProvider', 'Execute custom script', true),
    ('step.wait', 'Wait Step', 'step', 'IStepProvider', 'Wait for condition or time', true),
    ('step.parallel', 'Parallel Step', 'step', 'IStepProvider', 'Run steps in parallel', true),
    ('step.conditional', 'Conditional Step', 'step', 'IStepProvider', 'Conditional branching', true),
    -- Gate providers
    ('gate.security', 'Security Gate', 'gate', 'IGateProvider', 'Security scan gate', true),
    ('gate.approval', 'Approval Gate', 'gate', 'IGateProvider', 'Manual approval gate', true),
    ('gate.policy', 'Policy Gate', 'gate', 'IGateProvider', 'Policy engine gate', true),
    ('gate.freeze', 'Freeze Window Gate', 'gate', 'IGateProvider', 'Deployment freeze check', true),
    ('gate.risk', 'Risk Score Gate', 'gate', 'IGateProvider', 'Risk threshold gate', true),
    ('gate.test', 'Test Results Gate', 'gate', 'IGateProvider', 'Test pass rate gate', true),
    ('gate.sbom', 'SBOM Gate', 'gate', 'IGateProvider', 'SBOM requirement gate', true),
    ('gate.attestation', 'Attestation Gate', 'gate', 'IGateProvider', 'Attestation requirement gate', true),
    -- Deployment strategies
    ('strategy.rolling', 'Rolling Deployment', 'strategy', 'IDeploymentStrategy', 'Rolling update strategy', true),
    ('strategy.blue_green', 'Blue/Green Deployment', 'strategy', 'IDeploymentStrategy', 'Blue/green switch strategy', true),
    ('strategy.canary', 'Canary Deployment', 'strategy', 'IDeploymentStrategy', 'Canary release strategy', true),
    ('strategy.recreate', 'Recreate Deployment', 'strategy', 'IDeploymentStrategy', 'Stop-then-start strategy', true),
    -- Connectors (reference to integration types)
    ('connector.scm', 'SCM Connector', 'connector', 'IScmConnector', 'Source control connector', true),
    ('connector.registry', 'Registry Connector', 'connector', 'IRegistryConnector', 'Container registry connector', true),
    ('connector.vault', 'Vault Connector', 'connector', 'IVaultConnector', 'Secrets vault connector', true),
    ('connector.ci', 'CI Connector', 'connector', 'ICiConnector', 'CI system connector', true),
    -- Notifiers
    ('notifier.slack', 'Slack Notifier', 'notifier', 'INotifier', 'Slack webhook notifications', true),
    ('notifier.teams', 'Teams Notifier', 'notifier', 'INotifier', 'Microsoft Teams notifications', true),
    ('notifier.email', 'Email Notifier', 'notifier', 'INotifier', 'Email notifications', true),
    ('notifier.webhook', 'Webhook Notifier', 'notifier', 'INotifier', 'Generic webhook notifications', true)
ON CONFLICT (id) DO NOTHING;
-- ============================================================================
-- Plugins Table (Release Orchestrator specific)
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.plugins (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID, -- NULL for system/global plugins
    plugin_type_id TEXT NOT NULL REFERENCES release.plugin_types(id),
    name TEXT NOT NULL,
    display_name TEXT NOT NULL,
    description TEXT,
    is_builtin BOOLEAN NOT NULL DEFAULT false,
    is_enabled BOOLEAN NOT NULL DEFAULT true,
    trust_level TEXT NOT NULL DEFAULT 'untrusted'
        CHECK (trust_level IN ('builtin', 'trusted', 'untrusted')),
    source TEXT NOT NULL DEFAULT 'discovered'
        CHECK (source IN ('bundled', 'installed', 'discovered')),
    assembly_path TEXT,
    entry_point TEXT,
    config_defaults JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
-- PostgreSQL does not allow expressions inside a table-level UNIQUE
-- constraint, so per-tenant name uniqueness (with NULL tenant_id collapsed to
-- the zero UUID so global plugin names are unique among themselves) is
-- enforced with a unique expression index instead.
CREATE UNIQUE INDEX IF NOT EXISTS uq_plugins_tenant_name
    ON release.plugins (COALESCE(tenant_id, '00000000-0000-0000-0000-000000000000'::UUID), name);
CREATE INDEX idx_plugins_tenant ON release.plugins(tenant_id);
CREATE INDEX idx_plugins_type ON release.plugins(plugin_type_id);
CREATE INDEX idx_plugins_enabled ON release.plugins(tenant_id, is_enabled)
    WHERE is_enabled = true;
CREATE INDEX idx_plugins_builtin ON release.plugins(is_builtin)
    WHERE is_builtin = true;
COMMENT ON TABLE release.plugins IS 'Registered Release Orchestrator plugins';
COMMENT ON COLUMN release.plugins.tenant_id IS 'NULL for system plugins, UUID for tenant plugins';
COMMENT ON COLUMN release.plugins.trust_level IS 'Plugin trust level: builtin, trusted, untrusted';
COMMENT ON COLUMN release.plugins.entry_point IS 'Type name for activation';
-- ============================================================================
-- Plugin Versions Table
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.plugin_versions (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    plugin_id UUID NOT NULL REFERENCES release.plugins(id) ON DELETE CASCADE,
    version TEXT NOT NULL,
    manifest JSONB NOT NULL,
    -- Package provenance: content hash plus optional download location/size.
    package_hash TEXT NOT NULL,
    package_url TEXT,
    package_size_bytes BIGINT,
    is_active BOOLEAN NOT NULL DEFAULT false,
    is_deprecated BOOLEAN NOT NULL DEFAULT false,
    min_platform_version TEXT,
    release_notes TEXT,
    published_at TIMESTAMPTZ,
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    UNIQUE (plugin_id, version)
);
CREATE INDEX idx_plugin_versions_plugin ON release.plugin_versions(plugin_id);
CREATE INDEX idx_plugin_versions_active ON release.plugin_versions(plugin_id)
    WHERE is_active = true;
CREATE INDEX idx_plugin_versions_hash ON release.plugin_versions(package_hash);
COMMENT ON TABLE release.plugin_versions IS 'Plugin version history and manifests';
COMMENT ON COLUMN release.plugin_versions.manifest IS 'Full plugin manifest JSON';
COMMENT ON COLUMN release.plugin_versions.package_hash IS 'SHA-256 of plugin package';
COMMENT ON COLUMN release.plugin_versions.is_active IS 'Currently active version';
-- ============================================================================
-- Plugin Instances Table (per-tenant configurations)
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.plugin_instances (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID NOT NULL,
    plugin_id UUID NOT NULL REFERENCES release.plugins(id),
    plugin_version_id UUID REFERENCES release.plugin_versions(id),
    instance_name TEXT,
    config JSONB NOT NULL DEFAULT '{}',
    secrets_path TEXT,
    is_enabled BOOLEAN NOT NULL DEFAULT true,
    status TEXT NOT NULL DEFAULT 'pending'
        CHECK (status IN ('pending', 'active', 'failed', 'disabled')),
    status_message TEXT,
    last_used_at TIMESTAMPTZ,
    invocation_count BIGINT NOT NULL DEFAULT 0,
    error_count BIGINT NOT NULL DEFAULT 0,
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
-- PostgreSQL does not allow expressions inside a table-level UNIQUE
-- constraint, so uniqueness of (tenant, plugin, instance name) — with a NULL
-- instance_name collapsed to '' so each tenant gets at most one unnamed
-- instance per plugin — is enforced with a unique expression index instead.
CREATE UNIQUE INDEX IF NOT EXISTS uq_plugin_instances_identity
    ON release.plugin_instances (tenant_id, plugin_id, COALESCE(instance_name, ''));
CREATE INDEX idx_plugin_instances_tenant ON release.plugin_instances(tenant_id);
CREATE INDEX idx_plugin_instances_plugin ON release.plugin_instances(plugin_id);
CREATE INDEX idx_plugin_instances_enabled ON release.plugin_instances(tenant_id, is_enabled)
    WHERE is_enabled = true;
CREATE INDEX idx_plugin_instances_status ON release.plugin_instances(tenant_id, status);
COMMENT ON TABLE release.plugin_instances IS 'Tenant-specific plugin instances and configurations';
COMMENT ON COLUMN release.plugin_instances.secrets_path IS 'Vault path for plugin secrets';
-- ============================================================================
-- Row Level Security
-- ============================================================================
ALTER TABLE release.plugins ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.plugin_instances ENABLE ROW LEVEL SECURITY;
-- Plugins: allow access to system plugins (tenant_id IS NULL) plus tenant's own plugins
CREATE POLICY plugin_access ON release.plugins
    USING (tenant_id IS NULL OR tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.plugin_instances
    USING (tenant_id = release_app.require_current_tenant());
-- ============================================================================
-- Triggers
-- ============================================================================
CREATE TRIGGER update_plugins_updated_at
    BEFORE UPDATE ON release.plugins
    FOR EACH ROW
    EXECUTE FUNCTION release.update_updated_at_column();
CREATE TRIGGER update_plugin_instances_updated_at
    BEFORE UPDATE ON release.plugin_instances
    FOR EACH ROW
    EXECUTE FUNCTION release.update_updated_at_column();
-- ============================================================================
-- Seed built-in plugins
-- ============================================================================
-- This would be done via the application during startup, but we can seed some examples
-- INSERT INTO release.plugins (tenant_id, plugin_type_id, name, display_name, is_builtin, trust_level, source)
-- SELECT NULL, id, id, name, true, 'builtin', 'bundled'
-- FROM release.plugin_types
-- WHERE is_builtin = true;

View File

@@ -0,0 +1,318 @@
-- Release Orchestrator Schema Migration 010: Evidence Thread Tables
-- Creates evidence thread, nodes, links, exports, and transcripts tables.
-- Compliant with docs/db/SPECIFICATION.md
-- Sprint: Evidence Thread & Air-Gap Policy Gates - Sprint A
-- ============================================================================
-- Evidence Threads Table
-- Groups evidence for an artifact into a unified thread
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.evidence_threads (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID NOT NULL,
    -- One thread per (tenant, artifact digest); see UNIQUE below.
    artifact_digest TEXT NOT NULL,
    artifact_name TEXT,
    thread_status TEXT NOT NULL DEFAULT 'active'
        CHECK (thread_status IN ('active', 'archived', 'exported')),
    verdict TEXT CHECK (verdict IN ('allow', 'warn', 'block', 'pending', 'unknown')),
    risk_score NUMERIC(5,2),
    reachability_mode TEXT CHECK (reachability_mode IN (
        'exploitable', 'likely_exploitable', 'possibly_exploitable', 'unreachable', 'unknown'
    )),
    knowledge_snapshot_hash TEXT,
    engine_version TEXT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    UNIQUE (tenant_id, artifact_digest)
);
CREATE INDEX idx_evidence_threads_tenant ON release.evidence_threads(tenant_id);
CREATE INDEX idx_evidence_threads_artifact ON release.evidence_threads(artifact_digest);
CREATE INDEX idx_evidence_threads_verdict ON release.evidence_threads(tenant_id, verdict)
    WHERE verdict IS NOT NULL;
CREATE INDEX idx_evidence_threads_status ON release.evidence_threads(tenant_id, thread_status);
CREATE INDEX idx_evidence_threads_recent ON release.evidence_threads(tenant_id, updated_at DESC);
COMMENT ON TABLE release.evidence_threads IS 'Groups evidence for an artifact into a unified thread';
COMMENT ON COLUMN release.evidence_threads.artifact_digest IS 'OCI content digest (sha256:...)';
COMMENT ON COLUMN release.evidence_threads.verdict IS 'Overall thread verdict: allow, warn, block, pending, unknown';
COMMENT ON COLUMN release.evidence_threads.risk_score IS 'Computed risk score (0.00-100.00)';
COMMENT ON COLUMN release.evidence_threads.reachability_mode IS 'Overall reachability assessment';
COMMENT ON COLUMN release.evidence_threads.knowledge_snapshot_hash IS 'Hash of input knowledge (SBOM, VEX, etc.) for determinism';
COMMENT ON COLUMN release.evidence_threads.engine_version IS 'Version of evidence thread engine';
-- ============================================================================
-- Evidence Nodes Table
-- Individual evidence pieces within a thread
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.evidence_nodes (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID NOT NULL,
    thread_id UUID NOT NULL REFERENCES release.evidence_threads(id) ON DELETE CASCADE,
    kind TEXT NOT NULL CHECK (kind IN (
        'sbom_diff', 'reachability', 'vex', 'attestation', 'policy_eval',
        'runtime_observation', 'patch_verification', 'approval', 'ai_rationale'
    )),
    -- Loose reference to the source evidence (no FK: may point outside this schema).
    ref_id TEXT NOT NULL,
    ref_digest TEXT,
    title TEXT,
    summary TEXT,
    confidence NUMERIC(3,2) CHECK (confidence IS NULL OR (confidence >= 0 AND confidence <= 1)),
    anchors JSONB NOT NULL DEFAULT '[]',
    content JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
CREATE INDEX idx_evidence_nodes_thread ON release.evidence_nodes(thread_id);
CREATE INDEX idx_evidence_nodes_tenant ON release.evidence_nodes(tenant_id);
CREATE INDEX idx_evidence_nodes_kind ON release.evidence_nodes(thread_id, kind);
CREATE INDEX idx_evidence_nodes_ref ON release.evidence_nodes(ref_id);
CREATE INDEX idx_evidence_nodes_ref_digest ON release.evidence_nodes(ref_digest)
    WHERE ref_digest IS NOT NULL;
COMMENT ON TABLE release.evidence_nodes IS 'Individual evidence pieces within a thread';
COMMENT ON COLUMN release.evidence_nodes.kind IS 'Type of evidence: sbom_diff, reachability, vex, attestation, etc.';
COMMENT ON COLUMN release.evidence_nodes.ref_id IS 'Reference to source evidence (e.g., evidence_packet.id, sbom digest)';
COMMENT ON COLUMN release.evidence_nodes.ref_digest IS 'Content digest of referenced evidence';
COMMENT ON COLUMN release.evidence_nodes.anchors IS 'JSON array of anchor references [{type, value, label, href}]';
COMMENT ON COLUMN release.evidence_nodes.content IS 'Full node content (type-specific JSONB)';
COMMENT ON COLUMN release.evidence_nodes.confidence IS 'Confidence score 0.00-1.00';
-- ============================================================================
-- Evidence Links Table
-- Relationships between nodes
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.evidence_links (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID NOT NULL,
    thread_id UUID NOT NULL REFERENCES release.evidence_threads(id) ON DELETE CASCADE,
    src_node_id UUID NOT NULL REFERENCES release.evidence_nodes(id) ON DELETE CASCADE,
    dst_node_id UUID NOT NULL REFERENCES release.evidence_nodes(id) ON DELETE CASCADE,
    relation TEXT NOT NULL CHECK (relation IN (
        'supports', 'contradicts', 'precedes', 'triggers', 'derived_from', 'references'
    )),
    weight NUMERIC(3,2) DEFAULT 1.0 CHECK (weight IS NULL OR (weight >= 0 AND weight <= 1)),
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    -- At most one edge of a given relation between any ordered node pair.
    UNIQUE (src_node_id, dst_node_id, relation)
);
CREATE INDEX idx_evidence_links_thread ON release.evidence_links(thread_id);
CREATE INDEX idx_evidence_links_tenant ON release.evidence_links(tenant_id);
CREATE INDEX idx_evidence_links_src ON release.evidence_links(src_node_id);
CREATE INDEX idx_evidence_links_dst ON release.evidence_links(dst_node_id);
CREATE INDEX idx_evidence_links_relation ON release.evidence_links(thread_id, relation);
COMMENT ON TABLE release.evidence_links IS 'Relationships between evidence nodes';
COMMENT ON COLUMN release.evidence_links.relation IS 'Relationship type: supports, contradicts, precedes, triggers, derived_from, references';
COMMENT ON COLUMN release.evidence_links.weight IS 'Relationship weight 0.00-1.00';
-- ============================================================================
-- Evidence Thread Exports Table
-- DSSE bundle exports of threads
-- ============================================================================
-- No ON DELETE CASCADE from threads: exports are audit artifacts and must
-- outlive the thread row (deleting a thread with exports will fail).
CREATE TABLE IF NOT EXISTS release.evidence_thread_exports (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID NOT NULL,
    thread_id UUID NOT NULL REFERENCES release.evidence_threads(id),
    export_format TEXT NOT NULL CHECK (export_format IN ('dsse', 'json', 'pdf', 'markdown')),
    content_hash TEXT NOT NULL,
    -- Optional signing metadata (present when the export is DSSE-signed).
    signature TEXT,
    signature_algorithm TEXT,
    signer_key_ref TEXT,
    signer_key_fingerprint TEXT,
    storage_path TEXT NOT NULL,
    storage_size_bytes BIGINT,
    download_url TEXT,
    expires_at TIMESTAMPTZ,
    created_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
CREATE INDEX idx_evidence_thread_exports_thread ON release.evidence_thread_exports(thread_id);
CREATE INDEX idx_evidence_thread_exports_tenant ON release.evidence_thread_exports(tenant_id);
CREATE INDEX idx_evidence_thread_exports_hash ON release.evidence_thread_exports(content_hash);
CREATE INDEX idx_evidence_thread_exports_recent ON release.evidence_thread_exports(tenant_id, created_at DESC);
COMMENT ON TABLE release.evidence_thread_exports IS 'DSSE bundle exports of evidence threads';
COMMENT ON COLUMN release.evidence_thread_exports.content_hash IS 'SHA-256 of exported content';
COMMENT ON COLUMN release.evidence_thread_exports.storage_path IS 'Path to stored export (object store or filesystem)';
-- ============================================================================
-- Evidence Transcripts Table
-- Natural language explanations with anchors
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.evidence_transcripts (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID NOT NULL,
    thread_id UUID NOT NULL REFERENCES release.evidence_threads(id) ON DELETE CASCADE,
    transcript_type TEXT NOT NULL CHECK (transcript_type IN ('summary', 'detailed', 'audit')),
    template_version TEXT NOT NULL,
    llm_model TEXT,
    content TEXT NOT NULL,
    anchors JSONB NOT NULL DEFAULT '[]',
    generated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    expires_at TIMESTAMPTZ
);
CREATE INDEX idx_evidence_transcripts_thread ON release.evidence_transcripts(thread_id);
CREATE INDEX idx_evidence_transcripts_tenant ON release.evidence_transcripts(tenant_id);
CREATE INDEX idx_evidence_transcripts_type ON release.evidence_transcripts(thread_id, transcript_type);
CREATE INDEX idx_evidence_transcripts_recent ON release.evidence_transcripts(tenant_id, generated_at DESC);
COMMENT ON TABLE release.evidence_transcripts IS 'Natural language explanations with anchor references';
COMMENT ON COLUMN release.evidence_transcripts.transcript_type IS 'Type: summary (brief), detailed (full), audit (compliance)';
COMMENT ON COLUMN release.evidence_transcripts.template_version IS 'Version of transcript template used';
COMMENT ON COLUMN release.evidence_transcripts.llm_model IS 'LLM model used for rationale (if hybrid generation)';
COMMENT ON COLUMN release.evidence_transcripts.anchors IS 'JSON array of anchor references used in transcript';
-- ============================================================================
-- Row Level Security
-- ============================================================================
ALTER TABLE release.evidence_threads ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.evidence_nodes ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.evidence_links ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.evidence_thread_exports ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.evidence_transcripts ENABLE ROW LEVEL SECURITY;
-- Policies default to FOR ALL; with no explicit WITH CHECK clause PostgreSQL
-- reuses the USING expression for writes, so rows can only be read or written
-- for the tenant returned by release_app.require_current_tenant().
CREATE POLICY tenant_isolation ON release.evidence_threads
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.evidence_nodes
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.evidence_links
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.evidence_thread_exports
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.evidence_transcripts
USING (tenant_id = release_app.require_current_tenant());
-- ============================================================================
-- Triggers
-- ============================================================================
-- Keeps updated_at current on every UPDATE; release.update_updated_at_column()
-- is assumed to be defined by an earlier migration -- TODO confirm.
CREATE TRIGGER update_evidence_threads_updated_at
BEFORE UPDATE ON release.evidence_threads
FOR EACH ROW
EXECUTE FUNCTION release.update_updated_at_column();
-- ============================================================================
-- Functions
-- ============================================================================
-- Get full thread graph with nodes and links
-- Returns one row per (node, incident link) pair. A link whose src and dst are
-- both nodes of this thread therefore appears on BOTH endpoint rows -- callers
-- must de-duplicate links by link_id. A node with no links yields one row with
-- NULL link columns; a thread with zero nodes yields no rows at all, even if
-- the thread itself exists (the CROSS JOIN below has nothing to pair with).
CREATE OR REPLACE FUNCTION release.get_evidence_thread_graph(p_thread_id UUID)
RETURNS TABLE (
thread_id UUID,
thread_verdict TEXT,
thread_risk_score NUMERIC,
node_id UUID,
node_kind TEXT,
node_ref_id TEXT,
node_title TEXT,
node_summary TEXT,
node_confidence NUMERIC,
node_anchors JSONB,
node_content JSONB,
link_id UUID,
link_src_node_id UUID,
link_dst_node_id UUID,
link_relation TEXT,
link_weight NUMERIC
)
LANGUAGE sql
STABLE
AS $$
-- Thread header: single row, or empty if the thread does not exist.
WITH thread_data AS (
SELECT id, verdict, risk_score
FROM release.evidence_threads
WHERE id = p_thread_id
),
-- All nodes belonging to the thread.
nodes AS (
SELECT
n.id AS node_id,
n.kind AS node_kind,
n.ref_id AS node_ref_id,
n.title AS node_title,
n.summary AS node_summary,
n.confidence AS node_confidence,
n.anchors AS node_anchors,
n.content AS node_content
FROM release.evidence_nodes n
WHERE n.thread_id = p_thread_id
),
-- All links belonging to the thread.
links AS (
SELECT
l.id AS link_id,
l.src_node_id,
l.dst_node_id,
l.relation AS link_relation,
l.weight AS link_weight
FROM release.evidence_links l
WHERE l.thread_id = p_thread_id
)
SELECT
t.id AS thread_id,
t.verdict AS thread_verdict,
t.risk_score AS thread_risk_score,
n.node_id,
n.node_kind,
n.node_ref_id,
n.node_title,
n.node_summary,
n.node_confidence,
n.node_anchors,
n.node_content,
l.link_id,
l.src_node_id AS link_src_node_id,
l.dst_node_id AS link_dst_node_id,
l.link_relation,
l.link_weight
FROM thread_data t
-- thread_data has at most one row, so this fans out to one row per node.
CROSS JOIN nodes n
-- Attach every link touching the node from either end (source of the
-- duplicate-link-per-endpoint behaviour documented above).
LEFT JOIN links l ON (l.src_node_id = n.node_id OR l.dst_node_id = n.node_id)
ORDER BY n.node_kind, n.node_id;
$$;
COMMENT ON FUNCTION release.get_evidence_thread_graph IS 'Returns full thread graph with nodes and links';
-- Count nodes by kind for a thread
-- Returns a single summary row, or no rows when the thread id does not exist.
-- Node/link totals are computed via scalar subqueries against p_thread_id, and
-- node_counts collapses to '{}' when the thread has no nodes.
CREATE OR REPLACE FUNCTION release.get_evidence_thread_summary(p_thread_id UUID)
RETURNS TABLE (
thread_id UUID,
verdict TEXT,
risk_score NUMERIC,
total_nodes INT,
total_links INT,
node_counts JSONB
)
LANGUAGE sql
STABLE
AS $$
SELECT
t.id AS thread_id,
t.verdict,
t.risk_score,
(SELECT COUNT(*)::INT FROM release.evidence_nodes WHERE thread_id = p_thread_id) AS total_nodes,
(SELECT COUNT(*)::INT FROM release.evidence_links WHERE thread_id = p_thread_id) AS total_links,
(
-- Per-kind histogram, e.g. {"sbom": 2, "vex": 1}; COALESCE guards the
-- zero-node case where jsonb_object_agg would return NULL.
SELECT COALESCE(jsonb_object_agg(kind, cnt), '{}'::JSONB)
FROM (
SELECT kind, COUNT(*)::INT AS cnt
FROM release.evidence_nodes
WHERE thread_id = p_thread_id
GROUP BY kind
) counts
) AS node_counts
FROM release.evidence_threads t
WHERE t.id = p_thread_id;
$$;
COMMENT ON FUNCTION release.get_evidence_thread_summary IS 'Returns summary statistics for an evidence thread';

View File

@@ -0,0 +1,417 @@
-- Release Orchestrator Schema Migration 011: Policy Profile Tables
-- Creates policy profiles, simulations, and feed freshness tables.
-- Compliant with docs/db/SPECIFICATION.md
-- Sprint: Evidence Thread & Air-Gap Policy Gates - Sprint C
-- ============================================================================
-- Policy Profiles Table
-- Hierarchical instance/tenant policy profile settings
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.policy_profiles (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID, -- NULL = instance-level default
    name TEXT NOT NULL,
    display_name TEXT NOT NULL,
    description TEXT,
    profile_type TEXT NOT NULL CHECK (profile_type IN (
        'lenient_dev', 'standard', 'strict_prod', 'gov_defense', 'custom'
    )),
    is_default BOOLEAN NOT NULL DEFAULT false,
    is_builtin BOOLEAN NOT NULL DEFAULT false,
    policy_yaml TEXT NOT NULL,
    policy_digest TEXT NOT NULL,
    attestation_requirements JSONB NOT NULL DEFAULT '[]',
    vex_requirements JSONB NOT NULL DEFAULT '{}',
    reachability_requirements JSONB NOT NULL DEFAULT '{}',
    on_fail_soft TEXT[] NOT NULL DEFAULT '{}',
    on_fail_hard TEXT[] NOT NULL DEFAULT '{}',
    created_by UUID,
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
-- Ensure unique names within tenant scope (NULL tenant = instance level).
-- PostgreSQL does not allow expressions in a table-level UNIQUE constraint, so
-- this must be a unique expression index: NULL tenant_id collapses to the zero
-- UUID so instance-level profile names are unique too. The seed INSERT's
-- ON CONFLICT clause infers this index by matching the same expression.
CREATE UNIQUE INDEX IF NOT EXISTS uq_policy_profiles_tenant_name
    ON release.policy_profiles (COALESCE(tenant_id, '00000000-0000-0000-0000-000000000000'::UUID), name);
-- IF NOT EXISTS keeps index creation idempotent, matching the CREATE TABLE
-- guard above so a partially applied migration can be re-run safely.
CREATE INDEX IF NOT EXISTS idx_policy_profiles_tenant ON release.policy_profiles(tenant_id);
CREATE INDEX IF NOT EXISTS idx_policy_profiles_type ON release.policy_profiles(profile_type);
CREATE INDEX IF NOT EXISTS idx_policy_profiles_default ON release.policy_profiles(tenant_id)
    WHERE is_default = true;
CREATE INDEX IF NOT EXISTS idx_policy_profiles_builtin ON release.policy_profiles(is_builtin)
    WHERE is_builtin = true;
CREATE INDEX IF NOT EXISTS idx_policy_profiles_digest ON release.policy_profiles(policy_digest);
COMMENT ON TABLE release.policy_profiles IS 'Hierarchical policy profiles for gate evaluation';
COMMENT ON COLUMN release.policy_profiles.tenant_id IS 'NULL = instance-level default, UUID = tenant-specific';
COMMENT ON COLUMN release.policy_profiles.profile_type IS 'Profile category: lenient_dev, standard, strict_prod, gov_defense, custom';
COMMENT ON COLUMN release.policy_profiles.is_default IS 'Whether this is the default profile for the tenant/instance';
COMMENT ON COLUMN release.policy_profiles.is_builtin IS 'Whether this is a built-in profile (cannot be deleted)';
COMMENT ON COLUMN release.policy_profiles.policy_yaml IS 'Full policy YAML definition';
COMMENT ON COLUMN release.policy_profiles.policy_digest IS 'SHA-256 digest of policy_yaml for change detection';
COMMENT ON COLUMN release.policy_profiles.attestation_requirements IS 'JSON array of attestation requirements';
COMMENT ON COLUMN release.policy_profiles.vex_requirements IS 'JSON object with VEX status requirements';
COMMENT ON COLUMN release.policy_profiles.reachability_requirements IS 'JSON object with reachability requirements';
COMMENT ON COLUMN release.policy_profiles.on_fail_soft IS 'Array of soft failure conditions (warnings)';
COMMENT ON COLUMN release.policy_profiles.on_fail_hard IS 'Array of hard failure conditions (blocking)';
-- ============================================================================
-- Policy Simulations Table
-- Cached policy gate simulation results
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.policy_simulations (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID NOT NULL,
    promotion_id UUID REFERENCES release.promotions(id),
    profile_id UUID NOT NULL REFERENCES release.policy_profiles(id),
    simulation_type TEXT NOT NULL CHECK (simulation_type IN ('preview', 'whatif', 'bundle')),
    input_digest TEXT NOT NULL,
    result_status TEXT NOT NULL CHECK (result_status IN ('pass', 'fail', 'warn', 'error')),
    result_summary JSONB NOT NULL,
    gate_results JSONB NOT NULL,
    missing_evidence TEXT[] NOT NULL DEFAULT '{}',
    soft_failures TEXT[] NOT NULL DEFAULT '{}',
    hard_failures TEXT[] NOT NULL DEFAULT '{}',
    executed_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    duration_ms INT,
    expires_at TIMESTAMPTZ,
    requested_by UUID
);
-- IF NOT EXISTS keeps index creation idempotent, matching the CREATE TABLE
-- guard above so a partially applied migration can be re-run safely.
CREATE INDEX IF NOT EXISTS idx_policy_simulations_tenant ON release.policy_simulations(tenant_id);
CREATE INDEX IF NOT EXISTS idx_policy_simulations_promotion ON release.policy_simulations(promotion_id)
    WHERE promotion_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_policy_simulations_profile ON release.policy_simulations(profile_id);
CREATE INDEX IF NOT EXISTS idx_policy_simulations_status ON release.policy_simulations(tenant_id, result_status);
CREATE INDEX IF NOT EXISTS idx_policy_simulations_recent ON release.policy_simulations(tenant_id, executed_at DESC);
CREATE INDEX IF NOT EXISTS idx_policy_simulations_input ON release.policy_simulations(input_digest);
COMMENT ON TABLE release.policy_simulations IS 'Cached policy gate simulation results';
COMMENT ON COLUMN release.policy_simulations.simulation_type IS 'Type: preview (quick), whatif (full), bundle (promotion)';
COMMENT ON COLUMN release.policy_simulations.input_digest IS 'SHA-256 of simulation inputs for caching';
COMMENT ON COLUMN release.policy_simulations.result_status IS 'Overall result: pass, fail, warn, error';
COMMENT ON COLUMN release.policy_simulations.result_summary IS 'Summary of simulation results';
COMMENT ON COLUMN release.policy_simulations.gate_results IS 'Detailed per-gate results';
COMMENT ON COLUMN release.policy_simulations.missing_evidence IS 'List of missing evidence types';
COMMENT ON COLUMN release.policy_simulations.soft_failures IS 'List of soft failures (warnings)';
COMMENT ON COLUMN release.policy_simulations.hard_failures IS 'List of hard failures (blocking)';
COMMENT ON COLUMN release.policy_simulations.duration_ms IS 'Simulation execution time in milliseconds';
-- ============================================================================
-- Feed Freshness Table
-- Cache staleness status for vulnerability feeds
-- ============================================================================
CREATE TABLE IF NOT EXISTS release.feed_freshness (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id UUID NOT NULL,
    feed_name TEXT NOT NULL,
    feed_url TEXT,
    last_sync_at TIMESTAMPTZ,
    last_success_at TIMESTAMPTZ,
    staleness_seconds BIGINT,
    staleness_status TEXT NOT NULL CHECK (staleness_status IN ('fresh', 'stale', 'warning', 'unknown')),
    mirror_status TEXT CHECK (mirror_status IN ('active', 'offline', 'error')),
    entry_count BIGINT,
    error_message TEXT,
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    -- One freshness row per feed per tenant; upserts key on this pair.
    UNIQUE (tenant_id, feed_name)
);
-- IF NOT EXISTS keeps index creation idempotent, matching the CREATE TABLE
-- guard above so a partially applied migration can be re-run safely.
CREATE INDEX IF NOT EXISTS idx_feed_freshness_tenant ON release.feed_freshness(tenant_id);
CREATE INDEX IF NOT EXISTS idx_feed_freshness_status ON release.feed_freshness(staleness_status);
CREATE INDEX IF NOT EXISTS idx_feed_freshness_feed ON release.feed_freshness(feed_name);
COMMENT ON TABLE release.feed_freshness IS 'Cache staleness status for vulnerability feeds';
COMMENT ON COLUMN release.feed_freshness.feed_name IS 'Feed identifier: NVD, GHSA, OSV, EPSS, Rekor-Mirror, etc.';
COMMENT ON COLUMN release.feed_freshness.last_sync_at IS 'Last sync attempt timestamp';
COMMENT ON COLUMN release.feed_freshness.last_success_at IS 'Last successful sync timestamp';
COMMENT ON COLUMN release.feed_freshness.staleness_seconds IS 'Seconds since last successful sync';
COMMENT ON COLUMN release.feed_freshness.staleness_status IS 'Status: fresh, stale, warning, unknown';
COMMENT ON COLUMN release.feed_freshness.mirror_status IS 'Mirror status: active, offline, error';
COMMENT ON COLUMN release.feed_freshness.entry_count IS 'Number of entries in the feed';
-- ============================================================================
-- Row Level Security
-- ============================================================================
ALTER TABLE release.policy_profiles ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.policy_simulations ENABLE ROW LEVEL SECURITY;
ALTER TABLE release.feed_freshness ENABLE ROW LEVEL SECURITY;
-- Policy profiles: allow tenant + instance-level defaults (tenant_id IS NULL)
-- NOTE(review): policies default to FOR ALL, and with no WITH CHECK clause
-- PostgreSQL reuses the USING expression for writes -- so this policy also
-- permits any tenant session to INSERT/UPDATE rows with tenant_id IS NULL
-- (instance-level profiles). Confirm that is intended, or add a stricter
-- WITH CHECK limiting writes to the tenant's own rows.
CREATE POLICY profile_access ON release.policy_profiles
USING (tenant_id IS NULL OR tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.policy_simulations
USING (tenant_id = release_app.require_current_tenant());
CREATE POLICY tenant_isolation ON release.feed_freshness
USING (tenant_id = release_app.require_current_tenant());
-- ============================================================================
-- Triggers
-- ============================================================================
-- Keeps updated_at current on every UPDATE; release.update_updated_at_column()
-- is assumed to be defined by an earlier migration -- TODO confirm.
CREATE TRIGGER update_policy_profiles_updated_at
BEFORE UPDATE ON release.policy_profiles
FOR EACH ROW
EXECUTE FUNCTION release.update_updated_at_column();
CREATE TRIGGER update_feed_freshness_updated_at
BEFORE UPDATE ON release.feed_freshness
FOR EACH ROW
EXECUTE FUNCTION release.update_updated_at_column();
-- ============================================================================
-- Functions
-- ============================================================================
-- Get effective profile for tenant (tenant-specific or instance default)
-- Resolution order: a tenant-owned profile beats an instance-level one
-- (tenant_id IS NULL), then a default-flagged profile beats a non-default one.
-- Ties beyond those two keys are unordered, so LIMIT 1 picks an arbitrary row
-- among equals. Returns no rows when nothing matches.
CREATE OR REPLACE FUNCTION release.get_effective_policy_profile(
p_tenant_id UUID,
p_profile_name TEXT DEFAULT NULL
)
RETURNS TABLE (
profile_id UUID,
profile_name TEXT,
display_name TEXT,
profile_type TEXT,
is_tenant_specific BOOLEAN,
policy_yaml TEXT,
attestation_requirements JSONB,
vex_requirements JSONB,
reachability_requirements JSONB,
on_fail_soft TEXT[],
on_fail_hard TEXT[]
)
LANGUAGE sql
STABLE
AS $$
SELECT
p.id AS profile_id,
p.name AS profile_name,
p.display_name,
p.profile_type,
(p.tenant_id IS NOT NULL) AS is_tenant_specific,
p.policy_yaml,
p.attestation_requirements,
p.vex_requirements,
p.reachability_requirements,
p.on_fail_soft,
p.on_fail_hard
FROM release.policy_profiles p
WHERE
-- Match by name if provided
(p_profile_name IS NULL OR p.name = p_profile_name)
-- Tenant-specific or instance-level
AND (p.tenant_id = p_tenant_id OR p.tenant_id IS NULL)
ORDER BY
-- Prefer tenant-specific over instance-level
-- (for NULL tenant_id the comparison yields NULL, so CASE falls to ELSE 1)
CASE WHEN p.tenant_id = p_tenant_id THEN 0 ELSE 1 END,
-- Then prefer default profile
CASE WHEN p.is_default THEN 0 ELSE 1 END
LIMIT 1;
$$;
COMMENT ON FUNCTION release.get_effective_policy_profile IS 'Gets effective profile for tenant (tenant-specific or instance default)';
-- List all available profiles for tenant (tenant + instance-level)
-- Returns both tenant-owned profiles and instance-level defaults
-- (tenant_id IS NULL), grouped by profile_type with tenant-owned rows first
-- inside each group, then alphabetical by name.
CREATE OR REPLACE FUNCTION release.list_policy_profiles_for_tenant(p_tenant_id UUID)
RETURNS TABLE (
profile_id UUID,
profile_name TEXT,
display_name TEXT,
profile_type TEXT,
is_default BOOLEAN,
is_builtin BOOLEAN,
is_tenant_specific BOOLEAN,
description TEXT
)
LANGUAGE sql
STABLE
AS $$
SELECT
p.id AS profile_id,
p.name AS profile_name,
p.display_name,
p.profile_type,
p.is_default,
p.is_builtin,
(p.tenant_id IS NOT NULL) AS is_tenant_specific,
p.description
FROM release.policy_profiles p
WHERE p.tenant_id = p_tenant_id OR p.tenant_id IS NULL
ORDER BY
p.profile_type,
-- Tenant-owned rows sort before instance-level defaults within a type.
CASE WHEN p.tenant_id = p_tenant_id THEN 0 ELSE 1 END,
p.name;
$$;
COMMENT ON FUNCTION release.list_policy_profiles_for_tenant IS 'Lists all available profiles for a tenant';
-- Get feed freshness summary for tenant
-- Always returns exactly one row (aggregate with no GROUP BY). overall_status
-- is the worst status present: stale > warning > unknown > fresh; a tenant
-- with zero feed rows reports 'unknown' rather than a misleading 'fresh'.
CREATE OR REPLACE FUNCTION release.get_feed_freshness_summary(p_tenant_id UUID)
RETURNS TABLE (
total_feeds INT,
fresh_count INT,
stale_count INT,
warning_count INT,
unknown_count INT,
overall_status TEXT,
oldest_sync_at TIMESTAMPTZ
)
LANGUAGE sql
STABLE
AS $$
SELECT
COUNT(*)::INT AS total_feeds,
COUNT(*) FILTER (WHERE staleness_status = 'fresh')::INT AS fresh_count,
COUNT(*) FILTER (WHERE staleness_status = 'stale')::INT AS stale_count,
COUNT(*) FILTER (WHERE staleness_status = 'warning')::INT AS warning_count,
COUNT(*) FILTER (WHERE staleness_status = 'unknown')::INT AS unknown_count,
CASE
-- No feeds tracked at all: freshness cannot be asserted.
WHEN COUNT(*) = 0 THEN 'unknown'
WHEN COUNT(*) FILTER (WHERE staleness_status = 'stale') > 0 THEN 'stale'
WHEN COUNT(*) FILTER (WHERE staleness_status = 'warning') > 0 THEN 'warning'
WHEN COUNT(*) FILTER (WHERE staleness_status = 'unknown') > 0 THEN 'unknown'
ELSE 'fresh'
END AS overall_status,
MIN(last_success_at) AS oldest_sync_at
FROM release.feed_freshness
WHERE tenant_id = p_tenant_id;
$$;
COMMENT ON FUNCTION release.get_feed_freshness_summary IS 'Gets feed freshness summary for a tenant';
-- ============================================================================
-- Seed Built-in Profiles
-- ============================================================================
-- Insert built-in profiles (instance-level defaults)
-- NOTE(review): the ON CONFLICT clause at the bottom targets the expression
-- (COALESCE(tenant_id, zero-uuid), name); PostgreSQL can only infer that from
-- a UNIQUE index on exactly this expression -- confirm the table DDL provides
-- one.
-- NOTE(review): policy_digest values below are placeholders, not real SHA-256
-- digests of policy_yaml -- TODO compute and backfill real digests.
INSERT INTO release.policy_profiles (
tenant_id, name, display_name, description, profile_type,
is_default, is_builtin, policy_yaml, policy_digest,
attestation_requirements, vex_requirements, reachability_requirements,
on_fail_soft, on_fail_hard
) VALUES
-- Lenient Dev
(
NULL, 'lenient-dev', 'Lenient (Development)',
'Relaxed policy for development environments. Allows affected CVEs with warnings.',
'lenient_dev', false, true,
'name: lenient-dev
display_name: "Lenient (Development)"
profile_type: lenient_dev
requires:
- attestation: sbom.cyclonedx
required: true
- attestation: provenance.intoto
required: false
- vex.status:
allow: ["affected", "not_affected", "under_investigation"]
- reachability.runtime.max_age_days: 30
on_fail:
soft: ["missing_runtime_evidence", "missing_vex", "stale_reachability"]
hard: []',
'sha256:lenient-dev-placeholder',
'[{"type": "sbom.cyclonedx", "required": true}, {"type": "provenance.intoto", "required": false}]'::JSONB,
'{"allow": ["affected", "not_affected", "under_investigation"]}'::JSONB,
'{"runtime_max_age_days": 30}'::JSONB,
ARRAY['missing_runtime_evidence', 'missing_vex', 'stale_reachability'],
ARRAY[]::TEXT[]
),
-- Standard
(
NULL, 'standard', 'Standard',
'Balanced policy for staging environments. Requires basic attestations.',
'standard', true, true,
'name: standard
display_name: "Standard"
profile_type: standard
requires:
- attestation: sbom.cyclonedx
required: true
- attestation: provenance.intoto
required: true
- vex.status:
allow: ["not_affected", "under_investigation"]
- reachability.runtime.max_age_days: 14
on_fail:
soft: ["missing_runtime_evidence", "stale_reachability"]
hard: ["unsigned_sbom", "affected_cve"]',
'sha256:standard-placeholder',
'[{"type": "sbom.cyclonedx", "required": true}, {"type": "provenance.intoto", "required": true}]'::JSONB,
'{"allow": ["not_affected", "under_investigation"]}'::JSONB,
'{"runtime_max_age_days": 14}'::JSONB,
ARRAY['missing_runtime_evidence', 'stale_reachability'],
ARRAY['unsigned_sbom', 'affected_cve']
),
-- Strict Prod
(
NULL, 'strict-prod', 'Strict (Production)',
'Strict policy for production environments. Requires signed attestations and SLSA L2+.',
'strict_prod', false, true,
'name: strict-prod
display_name: "Strict (Production)"
profile_type: strict_prod
requires:
- attestation: sbom.cyclonedx
signer: ["stella-authority-prod"]
- attestation: provenance.intoto
slsa_level: "L2+"
- vex.status:
allow: ["not_affected:component_not_present", "not_affected:fix_applied"]
require_rationale: true
- reachability.runtime.max_age_days: 7
- rekor_mirror.sync_lag_minutes: 60
on_fail:
soft: ["missing_runtime_evidence"]
hard: ["unsigned_provenance", "rekor_unverifiable", "affected_cve"]',
'sha256:strict-prod-placeholder',
'[{"type": "sbom.cyclonedx", "signer": ["stella-authority-prod"]}, {"type": "provenance.intoto", "slsa_level": "L2+"}]'::JSONB,
'{"allow": ["not_affected:component_not_present", "not_affected:fix_applied"], "require_rationale": true}'::JSONB,
'{"runtime_max_age_days": 7, "rekor_sync_lag_minutes": 60}'::JSONB,
ARRAY['missing_runtime_evidence'],
ARRAY['unsigned_provenance', 'rekor_unverifiable', 'affected_cve']
),
-- Gov/Defense
(
NULL, 'gov-defense', 'Government/Defense (PQ-Ready)',
'Maximum security for government/defense deployments. Requires SLSA L3+, PQ-ready signatures.',
'gov_defense', false, true,
'name: gov-defense
display_name: "Government/Defense (PQ-Ready)"
profile_type: gov_defense
requires:
- attestation: sbom.cyclonedx
signer: ["stella-authority-prod"]
algorithm: ["ecdsa-p384", "dilithium2?"]
- attestation: provenance.intoto
slsa_level: "L3+"
steps: ["build", "scan", "vex", "review"]
- vex.status:
allow: ["not_affected:component_not_present", "not_affected:fix_applied"]
require_rationale: true
require_justification: true
- reachability.runtime.max_age_days: 3
- rekor_mirror.sync_lag_minutes: 30
on_fail:
soft: []
hard: ["unsigned_provenance", "missing_review", "affected_cve", "stale_evidence"]',
'sha256:gov-defense-placeholder',
'[{"type": "sbom.cyclonedx", "signer": ["stella-authority-prod"], "algorithm": ["ecdsa-p384", "dilithium2?"]}, {"type": "provenance.intoto", "slsa_level": "L3+", "steps": ["build", "scan", "vex", "review"]}]'::JSONB,
'{"allow": ["not_affected:component_not_present", "not_affected:fix_applied"], "require_rationale": true, "require_justification": true}'::JSONB,
'{"runtime_max_age_days": 3, "rekor_sync_lag_minutes": 30}'::JSONB,
ARRAY[]::TEXT[],
ARRAY['unsigned_provenance', 'missing_review', 'affected_cve', 'stale_evidence']
)
-- On re-run, refresh the content of built-in profiles in place.
-- NOTE(review): is_default and is_builtin are deliberately NOT updated here,
-- which preserves any later change to those flags -- confirm intended.
ON CONFLICT (COALESCE(tenant_id, '00000000-0000-0000-0000-000000000000'::UUID), name) DO UPDATE SET
display_name = EXCLUDED.display_name,
description = EXCLUDED.description,
policy_yaml = EXCLUDED.policy_yaml,
attestation_requirements = EXCLUDED.attestation_requirements,
vex_requirements = EXCLUDED.vex_requirements,
reachability_requirements = EXCLUDED.reachability_requirements,
on_fail_soft = EXCLUDED.on_fail_soft,
on_fail_hard = EXCLUDED.on_fail_hard,
updated_at = now();

View File

@@ -0,0 +1,118 @@
namespace StellaOps.Platform.Database;
using System.Reflection;
using Microsoft.Extensions.Logging;
using StellaOps.Infrastructure.Postgres.Migrations;
/// <summary>
/// Migration runner for the Release Orchestrator schema.
/// Wraps the shared <see cref="IMigrationRunner"/> infrastructure and loads
/// SQL migration scripts embedded in this assembly under
/// <c>StellaOps.Platform.Database.Migrations.Release</c>.
/// </summary>
public sealed class ReleaseMigrationRunner
{
    private const string SchemaName = "release";
    private const string ModuleName = "ReleaseOrchestrator";
    private const string MigrationResourcePrefix = "StellaOps.Platform.Database.Migrations.Release";

    private readonly IMigrationRunner _runner;
    private readonly ILogger<ReleaseMigrationRunner> _logger;

    /// <summary>
    /// Creates a new instance of the Release migration runner.
    /// </summary>
    /// <param name="connectionString">PostgreSQL connection string.</param>
    /// <param name="logger">Logger instance.</param>
    /// <exception cref="ArgumentException">Thrown when <paramref name="connectionString"/> is null, empty, or whitespace.</exception>
    /// <exception cref="ArgumentNullException">Thrown when <paramref name="logger"/> is null.</exception>
    public ReleaseMigrationRunner(string connectionString, ILogger<ReleaseMigrationRunner> logger)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(connectionString);
        ArgumentNullException.ThrowIfNull(logger);

        _runner = new MigrationRunner(
            connectionString,
            SchemaName,
            ModuleName,
            logger);
        _logger = logger;
    }

    /// <summary>
    /// Creates a new instance using an existing migration runner.
    /// </summary>
    /// <param name="runner">Migration runner instance.</param>
    /// <param name="logger">Logger instance.</param>
    /// <exception cref="ArgumentNullException">Thrown when <paramref name="runner"/> or <paramref name="logger"/> is null.</exception>
    public ReleaseMigrationRunner(IMigrationRunner runner, ILogger<ReleaseMigrationRunner> logger)
    {
        _runner = runner ?? throw new ArgumentNullException(nameof(runner));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <summary>
    /// Runs all pending migrations from embedded resources.
    /// </summary>
    /// <param name="options">Migration run options.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Migration result.</returns>
    public async Task<MigrationResult> RunAsync(
        MigrationRunOptions? options = null,
        CancellationToken cancellationToken = default)
    {
        _logger.LogInformation("Starting Release Orchestrator migrations...");

        // ConfigureAwait(false): library code with no synchronization context
        // to resume on (CA2007).
        var result = await _runner.RunFromAssemblyAsync(
            Assembly.GetExecutingAssembly(),
            MigrationResourcePrefix,
            options,
            cancellationToken).ConfigureAwait(false);

        if (result.Success)
        {
            _logger.LogInformation(
                "Release Orchestrator migrations completed: {Applied} applied, {Skipped} skipped in {Duration}ms",
                result.AppliedCount,
                result.SkippedCount,
                result.DurationMs);
        }
        else
        {
            _logger.LogError(
                "Release Orchestrator migrations failed: {Error}",
                result.ErrorMessage);
        }

        return result;
    }

    /// <summary>
    /// Gets the current schema version.
    /// </summary>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Current version or null if no migrations applied.</returns>
    public Task<string?> GetCurrentVersionAsync(CancellationToken cancellationToken = default)
    {
        return _runner.GetCurrentVersionAsync(cancellationToken);
    }

    /// <summary>
    /// Gets information about all applied migrations.
    /// </summary>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>List of applied migrations.</returns>
    public Task<IReadOnlyList<MigrationInfo>> GetAppliedMigrationsAsync(
        CancellationToken cancellationToken = default)
    {
        return _runner.GetAppliedMigrationInfoAsync(cancellationToken);
    }

    /// <summary>
    /// Validates checksums of applied migrations.
    /// </summary>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>List of checksum errors, empty if all valid.</returns>
    public Task<IReadOnlyList<string>> ValidateChecksumsAsync(
        CancellationToken cancellationToken = default)
    {
        return _runner.ValidateChecksumsAsync(
            Assembly.GetExecutingAssembly(),
            MigrationResourcePrefix,
            cancellationToken);
    }
}

View File

@@ -0,0 +1,53 @@
namespace StellaOps.Platform.Database;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
/// <summary>
/// Extension methods for registering Release Orchestrator database services.
/// </summary>
public static class ServiceCollectionExtensions
{
    /// <summary>
    /// Adds the Release Orchestrator migration runner to the service collection.
    /// </summary>
    /// <param name="services">Service collection.</param>
    /// <param name="connectionString">PostgreSQL connection string.</param>
    /// <returns>Service collection for chaining.</returns>
    public static IServiceCollection AddReleaseMigrations(
        this IServiceCollection services,
        string connectionString)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(connectionString);

        // One runner per container; the logger is resolved from the provider
        // the first time the singleton is materialized.
        services.AddSingleton(provider => new ReleaseMigrationRunner(
            connectionString,
            provider.GetRequiredService<ILogger<ReleaseMigrationRunner>>()));

        return services;
    }

    /// <summary>
    /// Adds the Release Orchestrator migration runner with a connection string factory.
    /// </summary>
    /// <param name="services">Service collection.</param>
    /// <param name="connectionStringFactory">Factory function to get connection string.</param>
    /// <returns>Service collection for chaining.</returns>
    public static IServiceCollection AddReleaseMigrations(
        this IServiceCollection services,
        Func<IServiceProvider, string> connectionStringFactory)
    {
        ArgumentNullException.ThrowIfNull(connectionStringFactory);

        // The connection string is produced at resolution time, not at
        // registration time, so it can depend on other registered services.
        services.AddSingleton(provider => new ReleaseMigrationRunner(
            connectionStringFactory(provider),
            provider.GetRequiredService<ILogger<ReleaseMigrationRunner>>()));

        return services;
    }
}

View File

@@ -0,0 +1,26 @@
<!-- Release Orchestrator database project: schema migration scripts plus the
     runner/registration classes that apply them. -->
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<LangVersion>preview</LangVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
<RootNamespace>StellaOps.Platform.Database</RootNamespace>
<Description>Release Orchestrator database schema and migrations</Description>
</PropertyGroup>
<ItemGroup>
<!-- Shared PostgreSQL migration infrastructure (IMigrationRunner, MigrationRunner). -->
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" />
</ItemGroup>
<ItemGroup>
<!-- Embed all SQL migrations so ReleaseMigrationRunner can discover them via
     the assembly resource prefix StellaOps.Platform.Database.Migrations.Release. -->
<EmbeddedResource Include="Migrations\**\*.sql" />
</ItemGroup>
</Project>
</Project>