save progress
This commit is contained in:
@@ -0,0 +1,177 @@
|
||||
-- HLC Queue Chain: Hybrid Logical Clock Ordering with Cryptographic Sequence Proofs
|
||||
-- SPRINT_20260105_002_002_SCHEDULER: SQC-002, SQC-003, SQC-004
|
||||
--
|
||||
-- Adds HLC-based ordering with hash chain at enqueue time for audit-safe job queue ordering.
|
||||
-- See: Product Advisory "Audit-safe job queue ordering using monotonic timestamps"
|
||||
|
||||
BEGIN;

-- ============================================================================
-- SECTION 1: Scheduler Log Table (SQC-002)
-- ============================================================================
-- HLC-ordered, chain-linked job entries. This is the authoritative order.
-- Jobs are linked via: link_i = Hash(link_{i-1} || job_id || t_hlc || payload_hash)
-- Ordering relies on t_hlc being a fixed-width, lexicographically sortable
-- string (see the column comment below), so plain TEXT comparison works.

CREATE TABLE IF NOT EXISTS scheduler.scheduler_log (
    seq_bigint BIGSERIAL PRIMARY KEY, -- Storage order (not authoritative)
    tenant_id TEXT NOT NULL,
    t_hlc TEXT NOT NULL, -- HLC timestamp: "0001704067200000-node-1-000042"
    partition_key TEXT NOT NULL DEFAULT '', -- Optional queue partition
    job_id UUID NOT NULL,
    payload_hash BYTEA NOT NULL, -- SHA-256 of canonical payload JSON
    prev_link BYTEA, -- Previous chain link (null for first)
    link BYTEA NOT NULL, -- Hash(prev_link || job_id || t_hlc || payload_hash)
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Ensure HLC order is unique within tenant/partition
    -- NOTE(review): because t_hlc participates in this key, the same job_id can
    -- appear twice with different timestamps. If the "idempotency checks" index
    -- below is meant to guarantee exactly-once enqueue, a separate UNIQUE
    -- constraint on (tenant_id, job_id) would be needed — confirm intent.
    CONSTRAINT uq_scheduler_log_order UNIQUE (tenant_id, partition_key, t_hlc, job_id)
);

COMMENT ON TABLE scheduler.scheduler_log IS
    'HLC-ordered job log with cryptographic chain linking for audit-safe ordering';
COMMENT ON COLUMN scheduler.scheduler_log.t_hlc IS
    'Hybrid Logical Clock timestamp in sortable string format';
COMMENT ON COLUMN scheduler.scheduler_log.link IS
    'SHA-256 chain link: Hash(prev_link || job_id || t_hlc || payload_hash)';

-- Index for tenant + HLC ordered queries (primary query path)
CREATE INDEX IF NOT EXISTS idx_scheduler_log_tenant_hlc
    ON scheduler.scheduler_log(tenant_id, t_hlc);

-- Index for partition-scoped queries
CREATE INDEX IF NOT EXISTS idx_scheduler_log_partition
    ON scheduler.scheduler_log(tenant_id, partition_key, t_hlc);

-- Index for job_id lookups (idempotency checks)
CREATE INDEX IF NOT EXISTS idx_scheduler_log_job_id
    ON scheduler.scheduler_log(job_id);
|
||||
|
||||
-- ============================================================================
-- SECTION 2: Batch Snapshot Table (SQC-003)
-- ============================================================================
-- Captures chain state at specific points for audit anchors and attestation.

CREATE TABLE IF NOT EXISTS scheduler.batch_snapshot (
    -- NOTE(review): gen_random_uuid() is built in on PostgreSQL 13+; earlier
    -- versions require the pgcrypto extension — confirm the minimum target.
    batch_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id TEXT NOT NULL,
    range_start_t TEXT NOT NULL, -- HLC range start (inclusive)
    range_end_t TEXT NOT NULL, -- HLC range end (inclusive)
    head_link BYTEA NOT NULL, -- Chain head at snapshot time
    job_count INT NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    signed_by TEXT, -- Optional: signing key ID for DSSE
    signature BYTEA -- Optional: DSSE signature bytes
);

COMMENT ON TABLE scheduler.batch_snapshot IS
    'Audit anchors capturing chain state at specific HLC ranges';
COMMENT ON COLUMN scheduler.batch_snapshot.head_link IS
    'The chain link at range_end_t - can be used to verify chain integrity';

-- Index for tenant + time ordered queries
CREATE INDEX IF NOT EXISTS idx_batch_snapshot_tenant
    ON scheduler.batch_snapshot(tenant_id, created_at DESC);

-- Index for HLC range queries. Range containment tests compare HLC strings
-- lexicographically, which assumes the fixed-width sortable encoding used for
-- scheduler_log.t_hlc — TODO confirm every producer emits that encoding.
CREATE INDEX IF NOT EXISTS idx_batch_snapshot_hlc_range
    ON scheduler.batch_snapshot(tenant_id, range_start_t, range_end_t);
|
||||
|
||||
-- ============================================================================
-- SECTION 3: Chain Heads Table (SQC-004)
-- ============================================================================
-- Tracks the last chain link per tenant/partition for efficient append.

CREATE TABLE IF NOT EXISTS scheduler.chain_heads (
    tenant_id TEXT NOT NULL,
    partition_key TEXT NOT NULL DEFAULT '',
    last_link BYTEA NOT NULL,
    last_t_hlc TEXT NOT NULL,
    last_job_id UUID NOT NULL,
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Exactly one head row per tenant/partition; upserts key on this pair.
    PRIMARY KEY (tenant_id, partition_key)
);

COMMENT ON TABLE scheduler.chain_heads IS
    'Per-partition chain head tracking for efficient chain append operations';

-- Trigger to update updated_at on chain_heads modifications
-- NOTE(review): CREATE OR REPLACE TRIGGER requires PostgreSQL 14+, and
-- scheduler.update_updated_at() is not defined in this migration — confirm an
-- earlier migration creates it, otherwise this statement fails at apply time.
CREATE OR REPLACE TRIGGER update_chain_heads_updated_at
    BEFORE UPDATE ON scheduler.chain_heads
    FOR EACH ROW
    EXECUTE FUNCTION scheduler.update_updated_at();
|
||||
|
||||
-- ============================================================================
-- SECTION 4: Helper Functions
-- ============================================================================

-- Returns the current chain head (link, HLC timestamp, job id) for one
-- tenant/partition, or zero rows when no chain exists yet. A simple SQL
-- function suffices here — no procedural logic is involved.
CREATE OR REPLACE FUNCTION scheduler.get_chain_head(
    p_tenant_id TEXT,
    p_partition_key TEXT DEFAULT ''
)
RETURNS TABLE (
    last_link BYTEA,
    last_t_hlc TEXT,
    last_job_id UUID
)
LANGUAGE sql STABLE
AS $$
    SELECT ch.last_link, ch.last_t_hlc, ch.last_job_id
    FROM scheduler.chain_heads ch
    WHERE ch.tenant_id = p_tenant_id
      AND ch.partition_key = p_partition_key;
$$;
|
||||
|
||||
-- Appends one log row and advances the per-partition chain head in a single
-- call. The caller supplies p_link = Hash(p_prev_link || job_id || t_hlc ||
-- payload_hash); both statements run in the caller's transaction, so they
-- commit or roll back together. Returns the storage sequence of the new row.
CREATE OR REPLACE FUNCTION scheduler.insert_log_with_chain_update(
    p_tenant_id TEXT,
    p_t_hlc TEXT,
    p_partition_key TEXT,
    p_job_id UUID,
    p_payload_hash BYTEA,
    p_prev_link BYTEA,
    p_link BYTEA
)
RETURNS BIGINT
LANGUAGE plpgsql
AS $$
DECLARE
    v_seq BIGINT; -- storage sequence assigned to the new log row
BEGIN
    -- Append the log entry; uq_scheduler_log_order rejects duplicate
    -- (tenant, partition, t_hlc, job_id) combinations.
    INSERT INTO scheduler.scheduler_log (
        tenant_id, t_hlc, partition_key, job_id,
        payload_hash, prev_link, link
    )
    VALUES (
        p_tenant_id, p_t_hlc, p_partition_key, p_job_id,
        p_payload_hash, p_prev_link, p_link
    )
    RETURNING seq_bigint INTO v_seq;

    -- Upsert the chain head. The WHERE guard keeps the head monotonic: without
    -- it, a concurrent or replayed append carrying an OLDER t_hlc would
    -- silently overwrite a newer head and corrupt subsequent chain links.
    -- HLC strings are sortable, so plain text comparison is sufficient.
    INSERT INTO scheduler.chain_heads (
        tenant_id, partition_key, last_link, last_t_hlc, last_job_id
    )
    VALUES (
        p_tenant_id, p_partition_key, p_link, p_t_hlc, p_job_id
    )
    ON CONFLICT (tenant_id, partition_key)
    DO UPDATE SET
        last_link = EXCLUDED.last_link,
        last_t_hlc = EXCLUDED.last_t_hlc,
        last_job_id = EXCLUDED.last_job_id,
        updated_at = NOW()
    WHERE chain_heads.last_t_hlc < EXCLUDED.last_t_hlc;

    RETURN v_seq;
END;
$$;

COMMENT ON FUNCTION scheduler.insert_log_with_chain_update IS
    'Atomically inserts a scheduler log entry and updates the chain head';

COMMIT;
|
||||
@@ -0,0 +1,56 @@
|
||||
// <copyright file="BatchSnapshot.cs" company="StellaOps">
// Copyright (c) StellaOps. Licensed under AGPL-3.0-or-later.
// </copyright>

namespace StellaOps.Scheduler.Persistence.Postgres.Models;

/// <summary>
/// Audit anchor describing the chain state over a closed HLC range;
/// maps one row of <c>scheduler.batch_snapshot</c>.
/// </summary>
public sealed record BatchSnapshot
{
    /// <summary>Unique identifier of this snapshot batch.</summary>
    public Guid BatchId { get; init; }

    /// <summary>Owning tenant.</summary>
    public required string TenantId { get; init; }

    /// <summary>Inclusive lower bound of the covered HLC range.</summary>
    public required string RangeStartT { get; init; }

    /// <summary>Inclusive upper bound of the covered HLC range.</summary>
    public required string RangeEndT { get; init; }

    /// <summary>Chain link at <see cref="RangeEndT"/> when the snapshot was taken.</summary>
    public required byte[] HeadLink { get; init; }

    /// <summary>Number of jobs falling inside the range.</summary>
    public int JobCount { get; init; }

    /// <summary>Creation time of the snapshot row.</summary>
    public DateTimeOffset CreatedAt { get; init; }

    /// <summary>Signing key identifier when a DSSE signature is attached; otherwise null.</summary>
    public string? SignedBy { get; init; }

    /// <summary>Raw DSSE signature bytes when present; otherwise null.</summary>
    public byte[]? Signature { get; init; }
}
|
||||
@@ -0,0 +1,41 @@
|
||||
// <copyright file="ChainHead.cs" company="StellaOps">
// Copyright (c) StellaOps. Licensed under AGPL-3.0-or-later.
// </copyright>

namespace StellaOps.Scheduler.Persistence.Postgres.Models;

/// <summary>
/// Latest link of the hash chain for one tenant/partition; maps one row of
/// <c>scheduler.chain_heads</c>.
/// </summary>
public sealed record ChainHead
{
    /// <summary>Owning tenant.</summary>
    public required string TenantId { get; init; }

    /// <summary>Queue partition; the empty string denotes the default partition.</summary>
    public string PartitionKey { get; init; } = string.Empty;

    /// <summary>Most recent chain link bytes.</summary>
    public required byte[] LastLink { get; init; }

    /// <summary>HLC timestamp of the most recent chained entry.</summary>
    public required string LastTHlc { get; init; }

    /// <summary>Job identifier of the most recent chained entry.</summary>
    public required Guid LastJobId { get; init; }

    /// <summary>When this head row was last modified.</summary>
    public DateTimeOffset UpdatedAt { get; init; }
}
|
||||
@@ -0,0 +1,56 @@
|
||||
// <copyright file="SchedulerLogEntry.cs" company="StellaOps">
// Copyright (c) StellaOps. Licensed under AGPL-3.0-or-later.
// </copyright>

namespace StellaOps.Scheduler.Persistence.Postgres.Models;

/// <summary>
/// One row of <c>scheduler.scheduler_log</c>: an HLC-ordered job entry whose
/// <see cref="Link"/> chains it cryptographically to its predecessor.
/// </summary>
public sealed record SchedulerLogEntry
{
    /// <summary>Storage sequence number; reflects insertion order only, not the authoritative HLC order.</summary>
    public long SeqBigint { get; init; }

    /// <summary>Owning tenant.</summary>
    public required string TenantId { get; init; }

    /// <summary>Hybrid Logical Clock timestamp as a sortable string.</summary>
    public required string THlc { get; init; }

    /// <summary>Queue partition; the empty string denotes the default partition.</summary>
    public string PartitionKey { get; init; } = string.Empty;

    /// <summary>Job identifier (deterministic from payload).</summary>
    public required Guid JobId { get; init; }

    /// <summary>SHA-256 over the canonical payload JSON.</summary>
    public required byte[] PayloadHash { get; init; }

    /// <summary>Link of the preceding entry, or null for the first entry in a chain.</summary>
    public byte[]? PrevLink { get; init; }

    /// <summary>Chain link: Hash(prev_link || job_id || t_hlc || payload_hash).</summary>
    public required byte[] Link { get; init; }

    /// <summary>Row creation time.</summary>
    public DateTimeOffset CreatedAt { get; init; }
}
|
||||
@@ -0,0 +1,65 @@
|
||||
// <copyright file="IBatchSnapshotRepository.cs" company="StellaOps">
// Copyright (c) StellaOps. Licensed under AGPL-3.0-or-later.
// </copyright>

using StellaOps.Scheduler.Persistence.Postgres.Models;

namespace StellaOps.Scheduler.Persistence.Postgres.Repositories;

/// <summary>
/// Repository interface for batch snapshot operations. Batch snapshots are
/// audit anchors recording the chain head over a closed HLC range
/// (see <see cref="BatchSnapshot"/>).
/// </summary>
public interface IBatchSnapshotRepository
{
    /// <summary>
    /// Inserts a new batch snapshot.
    /// </summary>
    /// <param name="snapshot">The snapshot to insert.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>A task representing the operation.</returns>
    Task InsertAsync(BatchSnapshot snapshot, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets a batch snapshot by ID.
    /// </summary>
    /// <remarks>
    /// NOTE(review): this lookup takes no tenant id — presumably batch ids are
    /// globally unique and cross-tenant resolution is intended; confirm.
    /// </remarks>
    /// <param name="batchId">The batch identifier.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The snapshot if found.</returns>
    Task<BatchSnapshot?> GetByIdAsync(Guid batchId, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets the most recent batch snapshot for a tenant.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The most recent snapshot if found.</returns>
    Task<BatchSnapshot?> GetLatestAsync(string tenantId, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets batch snapshots for a tenant within a time range.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="startTime">Start time (inclusive).</param>
    /// <param name="endTime">End time (inclusive).</param>
    /// <param name="limit">Maximum snapshots to return.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Snapshots in the specified range.</returns>
    Task<IReadOnlyList<BatchSnapshot>> GetByTimeRangeAsync(
        string tenantId,
        DateTimeOffset startTime,
        DateTimeOffset endTime,
        int limit = 100,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets batch snapshots containing a specific HLC timestamp.
    /// </summary>
    /// <remarks>
    /// Containment is evaluated by string comparison against the stored range
    /// bounds, so <paramref name="tHlc"/> must use the same sortable HLC
    /// encoding the snapshots were written with.
    /// </remarks>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="tHlc">The HLC timestamp to search for.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Snapshots containing the timestamp.</returns>
    Task<IReadOnlyList<BatchSnapshot>> GetContainingHlcAsync(
        string tenantId,
        string tHlc,
        CancellationToken cancellationToken = default);
}
|
||||
@@ -0,0 +1,47 @@
|
||||
// <copyright file="IChainHeadRepository.cs" company="StellaOps">
// Copyright (c) StellaOps. Licensed under AGPL-3.0-or-later.
// </copyright>

using StellaOps.Scheduler.Persistence.Postgres.Models;

namespace StellaOps.Scheduler.Persistence.Postgres.Repositories;

/// <summary>
/// Repository interface for chain head operations. A chain head is the last
/// link of the per-tenant/partition hash chain (see <see cref="ChainHead"/>);
/// it is the input needed to compute the next chain link at append time.
/// </summary>
public interface IChainHeadRepository
{
    /// <summary>
    /// Gets the last chain link for a tenant/partition.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="partitionKey">Partition key (empty string for default).</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The last link bytes, or null if no chain exists.</returns>
    Task<byte[]?> GetLastLinkAsync(
        string tenantId,
        string partitionKey,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets the full chain head for a tenant/partition.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="partitionKey">Partition key (empty string for default).</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The chain head, or null if no chain exists.</returns>
    Task<ChainHead?> GetAsync(
        string tenantId,
        string partitionKey,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets all chain heads for a tenant (one per partition that has entries).
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>All chain heads for the tenant.</returns>
    Task<IReadOnlyList<ChainHead>> GetAllForTenantAsync(
        string tenantId,
        CancellationToken cancellationToken = default);
}
|
||||
@@ -0,0 +1,109 @@
|
||||
// <copyright file="ISchedulerLogRepository.cs" company="StellaOps">
// Copyright (c) StellaOps. Licensed under AGPL-3.0-or-later.
// </copyright>

using StellaOps.Scheduler.Persistence.Postgres.Models;

namespace StellaOps.Scheduler.Persistence.Postgres.Repositories;

/// <summary>
/// Repository interface for HLC-ordered scheduler log operations. HLC
/// timestamps are sortable strings, so all range/cursor parameters below are
/// compared lexicographically.
/// </summary>
public interface ISchedulerLogRepository
{
    /// <summary>
    /// Inserts a log entry and atomically updates the chain head.
    /// </summary>
    /// <param name="entry">The log entry to insert.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The sequence number of the inserted entry.</returns>
    Task<long> InsertWithChainUpdateAsync(
        SchedulerLogEntry entry,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets log entries ordered by HLC timestamp.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="partitionKey">Optional partition key (null for all partitions).</param>
    /// <param name="limit">Maximum entries to return.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Log entries in HLC order.</returns>
    Task<IReadOnlyList<SchedulerLogEntry>> GetByHlcOrderAsync(
        string tenantId,
        string? partitionKey,
        int limit,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets log entries within an HLC timestamp range.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="startTHlc">Start timestamp (inclusive, null for unbounded).</param>
    /// <param name="endTHlc">End timestamp (inclusive, null for unbounded).</param>
    /// <param name="limit">Maximum entries to return (0 for unlimited).</param>
    /// <param name="partitionKey">Optional partition key (null for all partitions).</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Log entries in the specified range.</returns>
    Task<IReadOnlyList<SchedulerLogEntry>> GetByHlcRangeAsync(
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        int limit = 0,
        string? partitionKey = null,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets log entries after an HLC timestamp (cursor-based pagination).
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="afterTHlc">Start after this timestamp (exclusive).</param>
    /// <param name="limit">Maximum entries to return.</param>
    /// <param name="partitionKey">Optional partition key (null for all partitions).</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Log entries after the specified timestamp.</returns>
    Task<IReadOnlyList<SchedulerLogEntry>> GetAfterHlcAsync(
        string tenantId,
        string afterTHlc,
        int limit,
        string? partitionKey = null,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Counts log entries within an HLC timestamp range.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="startTHlc">Start timestamp (inclusive, null for unbounded).</param>
    /// <param name="endTHlc">End timestamp (inclusive, null for unbounded).</param>
    /// <param name="partitionKey">Optional partition key (null for all partitions).</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Count of entries in the range.</returns>
    Task<int> CountByHlcRangeAsync(
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        string? partitionKey = null,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets a log entry by job ID.
    /// </summary>
    /// <remarks>
    /// NOTE(review): not tenant-scoped, even though the log table's uniqueness
    /// constraint only guarantees job_id uniqueness together with tenant and
    /// HLC — confirm job IDs are globally unique, or scope at the call site.
    /// </remarks>
    /// <param name="jobId">Job identifier.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The log entry if found.</returns>
    Task<SchedulerLogEntry?> GetByJobIdAsync(
        Guid jobId,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Checks if a job ID already exists in the log (idempotency check).
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="jobId">Job identifier.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>True if the job exists.</returns>
    Task<bool> ExistsAsync(
        string tenantId,
        Guid jobId,
        CancellationToken cancellationToken = default);
}
|
||||
@@ -1,4 +1,5 @@
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Microsoft.Extensions.Options;
|
||||
using Npgsql;
|
||||
using StellaOps.Determinism;
|
||||
using StellaOps.Infrastructure.Postgres.Repositories;
|
||||
@@ -13,6 +14,7 @@ public sealed class JobRepository : RepositoryBase<SchedulerDataSource>, IJobRep
|
||||
{
|
||||
private readonly TimeProvider _timeProvider;
|
||||
private readonly IGuidProvider _guidProvider;
|
||||
private readonly bool _enableHlcOrdering;
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new job repository.
|
||||
@@ -20,12 +22,14 @@ public sealed class JobRepository : RepositoryBase<SchedulerDataSource>, IJobRep
|
||||
public JobRepository(
|
||||
SchedulerDataSource dataSource,
|
||||
ILogger<JobRepository> logger,
|
||||
IOptions<JobRepositoryOptions>? options = null,
|
||||
TimeProvider? timeProvider = null,
|
||||
IGuidProvider? guidProvider = null)
|
||||
: base(dataSource, logger)
|
||||
{
|
||||
_timeProvider = timeProvider ?? TimeProvider.System;
|
||||
_guidProvider = guidProvider ?? SystemGuidProvider.Instance;
|
||||
_enableHlcOrdering = options?.Value.EnableHlcOrdering ?? false;
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
@@ -102,15 +106,28 @@ public sealed class JobRepository : RepositoryBase<SchedulerDataSource>, IJobRep
|
||||
int limit = 10,
|
||||
CancellationToken cancellationToken = default)
|
||||
{
|
||||
const string sql = """
|
||||
SELECT * FROM scheduler.jobs
|
||||
WHERE tenant_id = @tenant_id
|
||||
AND status = 'scheduled'
|
||||
AND (not_before IS NULL OR not_before <= NOW())
|
||||
AND job_type = ANY(@job_types)
|
||||
ORDER BY priority DESC, created_at
|
||||
LIMIT @limit
|
||||
""";
|
||||
// When HLC ordering is enabled, join with scheduler_log and order by t_hlc
|
||||
// This provides deterministic global ordering based on Hybrid Logical Clock timestamps
|
||||
var sql = _enableHlcOrdering
|
||||
? """
|
||||
SELECT j.* FROM scheduler.jobs j
|
||||
INNER JOIN scheduler.scheduler_log sl ON j.id = sl.job_id AND j.tenant_id = sl.tenant_id
|
||||
WHERE j.tenant_id = @tenant_id
|
||||
AND j.status = 'scheduled'
|
||||
AND (j.not_before IS NULL OR j.not_before <= NOW())
|
||||
AND j.job_type = ANY(@job_types)
|
||||
ORDER BY sl.t_hlc
|
||||
LIMIT @limit
|
||||
"""
|
||||
: """
|
||||
SELECT * FROM scheduler.jobs
|
||||
WHERE tenant_id = @tenant_id
|
||||
AND status = 'scheduled'
|
||||
AND (not_before IS NULL OR not_before <= NOW())
|
||||
AND job_type = ANY(@job_types)
|
||||
ORDER BY priority DESC, created_at
|
||||
LIMIT @limit
|
||||
""";
|
||||
|
||||
return await QueryAsync(
|
||||
tenantId,
|
||||
@@ -350,12 +367,22 @@ public sealed class JobRepository : RepositoryBase<SchedulerDataSource>, IJobRep
|
||||
int offset = 0,
|
||||
CancellationToken cancellationToken = default)
|
||||
{
|
||||
const string sql = """
|
||||
SELECT * FROM scheduler.jobs
|
||||
WHERE tenant_id = @tenant_id AND status = @status::scheduler.job_status
|
||||
ORDER BY created_at DESC, id
|
||||
LIMIT @limit OFFSET @offset
|
||||
""";
|
||||
// When HLC ordering is enabled, join with scheduler_log and order by t_hlc DESC
|
||||
// This maintains consistent ordering across all job retrieval methods
|
||||
var sql = _enableHlcOrdering
|
||||
? """
|
||||
SELECT j.* FROM scheduler.jobs j
|
||||
LEFT JOIN scheduler.scheduler_log sl ON j.id = sl.job_id AND j.tenant_id = sl.tenant_id
|
||||
WHERE j.tenant_id = @tenant_id AND j.status = @status::scheduler.job_status
|
||||
ORDER BY COALESCE(sl.t_hlc, to_char(j.created_at AT TIME ZONE 'UTC', 'YYYYMMDDHH24MISS')) DESC, j.id
|
||||
LIMIT @limit OFFSET @offset
|
||||
"""
|
||||
: """
|
||||
SELECT * FROM scheduler.jobs
|
||||
WHERE tenant_id = @tenant_id AND status = @status::scheduler.job_status
|
||||
ORDER BY created_at DESC, id
|
||||
LIMIT @limit OFFSET @offset
|
||||
""";
|
||||
|
||||
return await QueryAsync(
|
||||
tenantId,
|
||||
|
||||
@@ -0,0 +1,18 @@
|
||||
// <copyright file="JobRepositoryOptions.cs" company="StellaOps">
// Copyright (c) StellaOps. Licensed under AGPL-3.0-or-later.
// </copyright>

namespace StellaOps.Scheduler.Persistence.Postgres.Repositories;

/// <summary>
/// Tunable behavior for the job repository.
/// </summary>
public sealed class JobRepositoryOptions
{
    /// <summary>
    /// Enables HLC (Hybrid Logical Clock) ordering for job retrieval. When true,
    /// jobs are returned in the order recorded in the scheduler_log table (by
    /// HLC timestamp); when false (the default), the legacy
    /// (priority, created_at) ordering is used.
    /// </summary>
    public bool EnableHlcOrdering { get; set; }
}
|
||||
@@ -0,0 +1,183 @@
|
||||
// <copyright file="PostgresBatchSnapshotRepository.cs" company="StellaOps">
|
||||
// Copyright (c) StellaOps. Licensed under AGPL-3.0-or-later.
|
||||
// </copyright>
|
||||
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Npgsql;
|
||||
using StellaOps.Infrastructure.Postgres.Repositories;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Models;
|
||||
|
||||
namespace StellaOps.Scheduler.Persistence.Postgres.Repositories;
|
||||
|
||||
/// <summary>
|
||||
/// PostgreSQL repository for batch snapshot operations.
|
||||
/// </summary>
|
||||
public sealed class PostgresBatchSnapshotRepository : RepositoryBase<SchedulerDataSource>, IBatchSnapshotRepository
|
||||
{
|
||||
    /// <summary>
    /// Creates a new batch snapshot repository.
    /// </summary>
    /// <param name="dataSource">Scheduler Postgres data source used to open tenant-scoped connections.</param>
    /// <param name="logger">Logger for repository diagnostics.</param>
    public PostgresBatchSnapshotRepository(SchedulerDataSource dataSource, ILogger<PostgresBatchSnapshotRepository> logger)
        : base(dataSource, logger)
    {
    }
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task InsertAsync(BatchSnapshot snapshot, CancellationToken cancellationToken = default)
|
||||
{
|
||||
const string sql = """
|
||||
INSERT INTO scheduler.batch_snapshot (
|
||||
batch_id, tenant_id, range_start_t, range_end_t, head_link,
|
||||
job_count, created_at, signed_by, signature
|
||||
) VALUES (
|
||||
@batch_id, @tenant_id, @range_start_t, @range_end_t, @head_link,
|
||||
@job_count, @created_at, @signed_by, @signature
|
||||
)
|
||||
""";
|
||||
|
||||
await using var connection = await DataSource.OpenConnectionAsync(snapshot.TenantId, "writer", cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
await using var command = CreateCommand(sql, connection);
|
||||
|
||||
AddParameter(command, "batch_id", snapshot.BatchId);
|
||||
AddParameter(command, "tenant_id", snapshot.TenantId);
|
||||
AddParameter(command, "range_start_t", snapshot.RangeStartT);
|
||||
AddParameter(command, "range_end_t", snapshot.RangeEndT);
|
||||
AddParameter(command, "head_link", snapshot.HeadLink);
|
||||
AddParameter(command, "job_count", snapshot.JobCount);
|
||||
AddParameter(command, "created_at", snapshot.CreatedAt);
|
||||
AddParameter(command, "signed_by", snapshot.SignedBy ?? (object)DBNull.Value);
|
||||
AddParameter(command, "signature", snapshot.Signature ?? (object)DBNull.Value);
|
||||
|
||||
await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
    /// <inheritdoc />
    public async Task<BatchSnapshot?> GetByIdAsync(Guid batchId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT batch_id, tenant_id, range_start_t, range_end_t, head_link,
                   job_count, created_at, signed_by, signature
            FROM scheduler.batch_snapshot
            WHERE batch_id = @batch_id
            """;

        // NOTE(review): opens the system connection (no tenant scoping) because
        // the signature carries no tenant id — unlike the other read methods,
        // which use tenant-scoped "reader" connections. Confirm this
        // cross-tenant lookup is intended.
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "batch_id", batchId);

        // At most one row (batch_id is the primary key): map it or return null.
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        return await reader.ReadAsync(cancellationToken).ConfigureAwait(false) ? MapSnapshot(reader) : null;
    }
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task<BatchSnapshot?> GetLatestAsync(string tenantId, CancellationToken cancellationToken = default)
|
||||
{
|
||||
const string sql = """
|
||||
SELECT batch_id, tenant_id, range_start_t, range_end_t, head_link,
|
||||
job_count, created_at, signed_by, signature
|
||||
FROM scheduler.batch_snapshot
|
||||
WHERE tenant_id = @tenant_id
|
||||
ORDER BY created_at DESC
|
||||
LIMIT 1
|
||||
""";
|
||||
|
||||
await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
await using var command = CreateCommand(sql, connection);
|
||||
AddParameter(command, "tenant_id", tenantId);
|
||||
|
||||
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
|
||||
return await reader.ReadAsync(cancellationToken).ConfigureAwait(false) ? MapSnapshot(reader) : null;
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task<IReadOnlyList<BatchSnapshot>> GetByTimeRangeAsync(
    string tenantId,
    DateTimeOffset startTime,
    DateTimeOffset endTime,
    int limit = 100,
    CancellationToken cancellationToken = default)
{
    // Inclusive created_at window, newest first, capped at @limit rows.
    const string sql = """
        SELECT batch_id, tenant_id, range_start_t, range_end_t, head_link,
               job_count, created_at, signed_by, signature
        FROM scheduler.batch_snapshot
        WHERE tenant_id = @tenant_id
        AND created_at >= @start_time
        AND created_at <= @end_time
        ORDER BY created_at DESC
        LIMIT @limit
        """;

    await using var conn = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var cmd = CreateCommand(sql, conn);

    AddParameter(cmd, "tenant_id", tenantId);
    AddParameter(cmd, "start_time", startTime);
    AddParameter(cmd, "end_time", endTime);
    AddParameter(cmd, "limit", limit);

    var results = new List<BatchSnapshot>();
    await using var row = await cmd.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
    while (await row.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        results.Add(MapSnapshot(row));
    }

    return results;
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public async Task<IReadOnlyList<BatchSnapshot>> GetContainingHlcAsync(
    string tenantId,
    string tHlc,
    CancellationToken cancellationToken = default)
{
    // A snapshot "contains" the HLC when it falls inside the snapshot's
    // [range_start_t, range_end_t] interval; HLC strings compare lexically.
    const string sql = """
        SELECT batch_id, tenant_id, range_start_t, range_end_t, head_link,
               job_count, created_at, signed_by, signature
        FROM scheduler.batch_snapshot
        WHERE tenant_id = @tenant_id
        AND range_start_t <= @t_hlc
        AND range_end_t >= @t_hlc
        ORDER BY created_at DESC
        """;

    await using var conn = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
        .ConfigureAwait(false);
    await using var cmd = CreateCommand(sql, conn);

    AddParameter(cmd, "tenant_id", tenantId);
    AddParameter(cmd, "t_hlc", tHlc);

    var results = new List<BatchSnapshot>();
    await using var row = await cmd.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
    while (await row.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        results.Add(MapSnapshot(row));
    }

    return results;
}
|
||||
|
||||
private static BatchSnapshot MapSnapshot(NpgsqlDataReader row)
{
    // Ordinals follow the SELECT column order used by every query in this
    // repository; signed_by/signature are the only nullable columns.
    var signedBy = row.IsDBNull(7) ? null : row.GetString(7);
    var signature = row.IsDBNull(8) ? null : row.GetFieldValue<byte[]>(8);

    return new BatchSnapshot
    {
        BatchId = row.GetGuid(0),
        TenantId = row.GetString(1),
        RangeStartT = row.GetString(2),
        RangeEndT = row.GetString(3),
        HeadLink = row.GetFieldValue<byte[]>(4),
        JobCount = row.GetInt32(5),
        CreatedAt = row.GetFieldValue<DateTimeOffset>(6),
        SignedBy = signedBy,
        Signature = signature,
    };
}
|
||||
}
|
||||
@@ -0,0 +1,111 @@
|
||||
// <copyright file="PostgresChainHeadRepository.cs" company="StellaOps">
|
||||
// Copyright (c) StellaOps. Licensed under AGPL-3.0-or-later.
|
||||
// </copyright>
|
||||
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Npgsql;
|
||||
using StellaOps.Infrastructure.Postgres.Repositories;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Models;
|
||||
|
||||
namespace StellaOps.Scheduler.Persistence.Postgres.Repositories;
|
||||
|
||||
/// <summary>
|
||||
/// PostgreSQL repository for chain head operations.
|
||||
/// </summary>
|
||||
public sealed class PostgresChainHeadRepository : RepositoryBase<SchedulerDataSource>, IChainHeadRepository
{
    /// <summary>
    /// Creates a new chain head repository bound to the given data source.
    /// </summary>
    public PostgresChainHeadRepository(SchedulerDataSource dataSource, ILogger<PostgresChainHeadRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <inheritdoc />
    public async Task<byte[]?> GetLastLinkAsync(
        string tenantId,
        string partitionKey,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT last_link
            FROM scheduler.chain_heads
            WHERE tenant_id = @tenant_id AND partition_key = @partition_key
            """;

        await using var conn = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var cmd = CreateCommand(sql, conn);
        AddParameter(cmd, "tenant_id", tenantId);
        AddParameter(cmd, "partition_key", partitionKey);

        // A missing row and a SQL NULL both surface as null here.
        var scalar = await cmd.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        return scalar as byte[];
    }

    /// <inheritdoc />
    public async Task<ChainHead?> GetAsync(
        string tenantId,
        string partitionKey,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT tenant_id, partition_key, last_link, last_t_hlc, last_job_id, updated_at
            FROM scheduler.chain_heads
            WHERE tenant_id = @tenant_id AND partition_key = @partition_key
            """;

        await using var conn = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var cmd = CreateCommand(sql, conn);
        AddParameter(cmd, "tenant_id", tenantId);
        AddParameter(cmd, "partition_key", partitionKey);

        await using var row = await cmd.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        if (!await row.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            return null;
        }

        return MapChainHead(row);
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<ChainHead>> GetAllForTenantAsync(
        string tenantId,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT tenant_id, partition_key, last_link, last_t_hlc, last_job_id, updated_at
            FROM scheduler.chain_heads
            WHERE tenant_id = @tenant_id
            ORDER BY partition_key
            """;

        await using var conn = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var cmd = CreateCommand(sql, conn);
        AddParameter(cmd, "tenant_id", tenantId);

        var results = new List<ChainHead>();
        await using var row = await cmd.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        while (await row.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            results.Add(MapChainHead(row));
        }

        return results;
    }

    /// <summary>
    /// Maps the current reader row onto a <see cref="ChainHead"/>.
    /// Ordinals follow the SELECT column order used by the queries above.
    /// </summary>
    private static ChainHead MapChainHead(NpgsqlDataReader reader)
    {
        return new ChainHead
        {
            TenantId = reader.GetString(0),
            PartitionKey = reader.GetString(1),
            LastLink = reader.GetFieldValue<byte[]>(2),
            LastTHlc = reader.GetString(3),
            LastJobId = reader.GetGuid(4),
            UpdatedAt = reader.GetFieldValue<DateTimeOffset>(5),
        };
    }
}
|
||||
@@ -0,0 +1,335 @@
|
||||
// <copyright file="PostgresSchedulerLogRepository.cs" company="StellaOps">
|
||||
// Copyright (c) StellaOps. Licensed under AGPL-3.0-or-later.
|
||||
// </copyright>
|
||||
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Npgsql;
|
||||
using StellaOps.Infrastructure.Postgres.Repositories;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Models;
|
||||
|
||||
namespace StellaOps.Scheduler.Persistence.Postgres.Repositories;
|
||||
|
||||
/// <summary>
|
||||
/// PostgreSQL repository for HLC-ordered scheduler log operations.
|
||||
/// </summary>
|
||||
public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDataSource>, ISchedulerLogRepository
{
    /// <summary>
    /// Creates a new scheduler log repository.
    /// </summary>
    public PostgresSchedulerLogRepository(SchedulerDataSource dataSource, ILogger<PostgresSchedulerLogRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <inheritdoc />
    public async Task<long> InsertWithChainUpdateAsync(
        SchedulerLogEntry entry,
        CancellationToken cancellationToken = default)
    {
        // Delegates to a stored function so the log insert and the chain-head
        // advance happen atomically on the server side.
        const string sql = """
            SELECT scheduler.insert_log_with_chain_update(
                @tenant_id,
                @t_hlc,
                @partition_key,
                @job_id,
                @payload_hash,
                @prev_link,
                @link
            )
            """;

        await using var connection = await DataSource.OpenConnectionAsync(entry.TenantId, "writer", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", entry.TenantId);
        AddParameter(command, "t_hlc", entry.THlc);
        AddParameter(command, "partition_key", entry.PartitionKey);
        AddParameter(command, "job_id", entry.JobId);
        AddParameter(command, "payload_hash", entry.PayloadHash);
        // First entry in a chain has no predecessor; send SQL NULL.
        AddParameter(command, "prev_link", entry.PrevLink ?? (object)DBNull.Value);
        AddParameter(command, "link", entry.Link);

        var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        return Convert.ToInt64(result);
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<SchedulerLogEntry>> GetByHlcOrderAsync(
        string tenantId,
        string? partitionKey,
        int limit,
        CancellationToken cancellationToken = default)
    {
        // Authoritative order is t_hlc ascending (sortable HLC strings), not
        // the storage sequence. Null partitionKey means "all partitions".
        var sql = partitionKey is null
            ? """
              SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
                     payload_hash, prev_link, link, created_at
              FROM scheduler.scheduler_log
              WHERE tenant_id = @tenant_id
              ORDER BY t_hlc ASC
              LIMIT @limit
              """
            : """
              SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
                     payload_hash, prev_link, link, created_at
              FROM scheduler.scheduler_log
              WHERE tenant_id = @tenant_id AND partition_key = @partition_key
              ORDER BY t_hlc ASC
              LIMIT @limit
              """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "limit", limit);
        if (partitionKey is not null)
        {
            AddParameter(command, "partition_key", partitionKey);
        }

        var entries = new List<SchedulerLogEntry>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);

        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            entries.Add(MapEntry(reader));
        }

        return entries;
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<SchedulerLogEntry>> GetByHlcRangeAsync(
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        int limit = 0,
        string? partitionKey = null,
        CancellationToken cancellationToken = default)
    {
        // Build the WHERE clause from the optional filters; bounds are inclusive.
        var conditions = new List<string> { "tenant_id = @tenant_id" };
        if (startTHlc is not null)
        {
            conditions.Add("t_hlc >= @start_t_hlc");
        }

        if (endTHlc is not null)
        {
            conditions.Add("t_hlc <= @end_t_hlc");
        }

        if (partitionKey is not null)
        {
            conditions.Add("partition_key = @partition_key");
        }

        // limit <= 0 means "no limit". Bind @limit as a parameter rather than
        // interpolating the value into the SQL text, matching every other
        // query in this repository and keeping the statement shape stable.
        var limitClause = limit > 0 ? "LIMIT @limit" : string.Empty;
        var sql = $"""
            SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
                   payload_hash, prev_link, link, created_at
            FROM scheduler.scheduler_log
            WHERE {string.Join(" AND ", conditions)}
            ORDER BY t_hlc ASC
            {limitClause}
            """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", tenantId);
        if (startTHlc is not null)
        {
            AddParameter(command, "start_t_hlc", startTHlc);
        }

        if (endTHlc is not null)
        {
            AddParameter(command, "end_t_hlc", endTHlc);
        }

        if (partitionKey is not null)
        {
            AddParameter(command, "partition_key", partitionKey);
        }

        if (limit > 0)
        {
            AddParameter(command, "limit", limit);
        }

        var entries = new List<SchedulerLogEntry>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);

        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            entries.Add(MapEntry(reader));
        }

        return entries;
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<SchedulerLogEntry>> GetAfterHlcAsync(
        string tenantId,
        string afterTHlc,
        int limit,
        string? partitionKey = null,
        CancellationToken cancellationToken = default)
    {
        // Cursor-style paging: strictly greater than the supplied HLC.
        var sql = partitionKey is null
            ? """
              SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
                     payload_hash, prev_link, link, created_at
              FROM scheduler.scheduler_log
              WHERE tenant_id = @tenant_id AND t_hlc > @after_t_hlc
              ORDER BY t_hlc ASC
              LIMIT @limit
              """
            : """
              SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
                     payload_hash, prev_link, link, created_at
              FROM scheduler.scheduler_log
              WHERE tenant_id = @tenant_id AND t_hlc > @after_t_hlc AND partition_key = @partition_key
              ORDER BY t_hlc ASC
              LIMIT @limit
              """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "after_t_hlc", afterTHlc);
        AddParameter(command, "limit", limit);
        if (partitionKey is not null)
        {
            AddParameter(command, "partition_key", partitionKey);
        }

        var entries = new List<SchedulerLogEntry>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);

        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            entries.Add(MapEntry(reader));
        }

        return entries;
    }

    /// <inheritdoc />
    public async Task<int> CountByHlcRangeAsync(
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        string? partitionKey = null,
        CancellationToken cancellationToken = default)
    {
        // Same optional inclusive-bound filters as GetByHlcRangeAsync.
        var conditions = new List<string> { "tenant_id = @tenant_id" };
        if (startTHlc is not null)
        {
            conditions.Add("t_hlc >= @start_t_hlc");
        }

        if (endTHlc is not null)
        {
            conditions.Add("t_hlc <= @end_t_hlc");
        }

        if (partitionKey is not null)
        {
            conditions.Add("partition_key = @partition_key");
        }

        var sql = $"""
            SELECT COUNT(*)
            FROM scheduler.scheduler_log
            WHERE {string.Join(" AND ", conditions)}
            """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", tenantId);
        if (startTHlc is not null)
        {
            AddParameter(command, "start_t_hlc", startTHlc);
        }

        if (endTHlc is not null)
        {
            AddParameter(command, "end_t_hlc", endTHlc);
        }

        if (partitionKey is not null)
        {
            AddParameter(command, "partition_key", partitionKey);
        }

        var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        return Convert.ToInt32(result);
    }

    /// <inheritdoc />
    public async Task<SchedulerLogEntry?> GetByJobIdAsync(
        Guid jobId,
        CancellationToken cancellationToken = default)
    {
        // No tenant filter here, so this must run on the system connection
        // (tenant-scoped roles would not see other tenants' rows).
        const string sql = """
            SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
                   payload_hash, prev_link, link, created_at
            FROM scheduler.scheduler_log
            WHERE job_id = @job_id
            """;

        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "job_id", jobId);

        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        return await reader.ReadAsync(cancellationToken).ConfigureAwait(false) ? MapEntry(reader) : null;
    }

    /// <inheritdoc />
    public async Task<bool> ExistsAsync(
        string tenantId,
        Guid jobId,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT EXISTS(
                SELECT 1 FROM scheduler.scheduler_log
                WHERE tenant_id = @tenant_id AND job_id = @job_id
            )
            """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "job_id", jobId);

        var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        return result is true;
    }

    /// <summary>
    /// Maps the current reader row onto a <see cref="SchedulerLogEntry"/>.
    /// Ordinals follow the SELECT column order used by the queries above;
    /// prev_link is the only nullable column (first entry of a chain).
    /// </summary>
    private static SchedulerLogEntry MapEntry(NpgsqlDataReader reader)
    {
        return new SchedulerLogEntry
        {
            SeqBigint = reader.GetInt64(0),
            TenantId = reader.GetString(1),
            THlc = reader.GetString(2),
            PartitionKey = reader.GetString(3),
            JobId = reader.GetGuid(4),
            PayloadHash = reader.GetFieldValue<byte[]>(5),
            PrevLink = reader.IsDBNull(6) ? null : reader.GetFieldValue<byte[]>(6),
            Link = reader.GetFieldValue<byte[]>(7),
            CreatedAt = reader.GetFieldValue<DateTimeOffset>(8)
        };
    }
}
|
||||
@@ -0,0 +1,123 @@
|
||||
// <copyright file="SchedulerChainLinking.cs" company="StellaOps">
|
||||
// Copyright (c) StellaOps. Licensed under AGPL-3.0-or-later.
|
||||
// </copyright>
|
||||
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
using StellaOps.Canonical.Json;
|
||||
using StellaOps.HybridLogicalClock;
|
||||
|
||||
namespace StellaOps.Scheduler.Persistence;
|
||||
|
||||
/// <summary>
|
||||
/// Chain linking utilities for scheduler audit-safe ordering.
|
||||
/// Implements: link_i = Hash(link_{i-1} || job_id || t_hlc || payload_hash)
|
||||
/// </summary>
|
||||
public static class SchedulerChainLinking
{
    /// <summary>
    /// Size of a chain link in bytes (length of a SHA-256 digest).
    /// </summary>
    public const int LinkSizeBytes = 32;

    /// <summary>
    /// Zero link used as prev_link for the first entry in a chain.
    /// NOTE(review): this is a mutable public array; a caller could corrupt
    /// the shared zero buffer. Consider ReadOnlyMemory&lt;byte&gt; — confirm no
    /// caller writes to it.
    /// </summary>
    public static readonly byte[] ZeroLink = new byte[LinkSizeBytes];

    /// <summary>
    /// Compute chain link per advisory specification:
    /// link_i = Hash(link_{i-1} || job_id || t_hlc || payload_hash)
    /// Fields are concatenated without length prefixes, so the encoding is
    /// unambiguous only if t_hlc has a fixed, self-delimiting format —
    /// TODO confirm against HlcTimestamp.ToSortableString().
    /// </summary>
    /// <param name="prevLink">Previous chain link (null or empty for first entry).</param>
    /// <param name="jobId">Job identifier.</param>
    /// <param name="tHlc">HLC timestamp.</param>
    /// <param name="payloadHash">SHA-256 hash of canonical payload.</param>
    /// <returns>The computed chain link (32 bytes).</returns>
    public static byte[] ComputeLink(
        byte[]? prevLink,
        Guid jobId,
        HlcTimestamp tHlc,
        byte[] payloadHash)
    {
        ArgumentNullException.ThrowIfNull(payloadHash);

        using var hasher = IncrementalHash.CreateHash(HashAlgorithmName.SHA256);

        // Previous link, or 32 zero bytes for the first entry. A prevLink
        // whose length is not exactly 32 bytes is silently replaced by
        // ZeroLink rather than rejected.
        hasher.AppendData(prevLink is { Length: LinkSizeBytes } ? prevLink : ZeroLink);

        // Job ID bytes. NOTE(review): Guid.ToByteArray() emits Microsoft's
        // mixed-endian layout (first three fields little-endian), not
        // big-endian as an earlier comment claimed. The layout is stable, so
        // links stay reproducible, but it is not RFC 4122 byte order.
        hasher.AppendData(jobId.ToByteArray());

        // HLC timestamp in its sortable string form, UTF-8 encoded.
        hasher.AppendData(Encoding.UTF8.GetBytes(tHlc.ToSortableString()));

        // SHA-256 of the canonical payload, supplied by the caller.
        hasher.AppendData(payloadHash);

        return hasher.GetHashAndReset();
    }

    /// <summary>
    /// Compute chain link from a string HLC timestamp; parses the string and
    /// delegates to the <see cref="HlcTimestamp"/> overload. Throws whatever
    /// <see cref="HlcTimestamp.Parse"/> throws on malformed input.
    /// </summary>
    public static byte[] ComputeLink(
        byte[]? prevLink,
        Guid jobId,
        string tHlcString,
        byte[] payloadHash)
    {
        var tHlc = HlcTimestamp.Parse(tHlcString);
        return ComputeLink(prevLink, jobId, tHlc, payloadHash);
    }

    /// <summary>
    /// Compute deterministic payload hash from canonical JSON.
    /// Determinism relies on CanonJson.Serialize producing byte-identical
    /// output for equal payloads.
    /// </summary>
    /// <typeparam name="T">Payload type.</typeparam>
    /// <param name="payload">The payload object.</param>
    /// <returns>SHA-256 hash of the canonical JSON representation.</returns>
    public static byte[] ComputePayloadHash<T>(T payload)
    {
        var canonical = CanonJson.Serialize(payload);
        return SHA256.HashData(Encoding.UTF8.GetBytes(canonical));
    }

    /// <summary>
    /// Compute payload hash from raw bytes.
    /// </summary>
    /// <param name="payloadBytes">Raw payload bytes.</param>
    /// <returns>SHA-256 hash of the bytes.</returns>
    public static byte[] ComputePayloadHash(byte[] payloadBytes)
    {
        return SHA256.HashData(payloadBytes);
    }

    /// <summary>
    /// Verify that a stored chain link matches the expected computation.
    /// Uses a constant-time comparison to avoid leaking how many leading
    /// bytes matched.
    /// </summary>
    public static bool VerifyLink(
        byte[] storedLink,
        byte[]? prevLink,
        Guid jobId,
        HlcTimestamp tHlc,
        byte[] payloadHash)
    {
        var computed = ComputeLink(prevLink, jobId, tHlc, payloadHash);
        return CryptographicOperations.FixedTimeEquals(storedLink, computed);
    }

    /// <summary>
    /// Convert link bytes to a lowercase hex string for display; returns the
    /// literal "(null)" for a null or empty link.
    /// </summary>
    public static string ToHex(byte[]? link)
    {
        if (link is null or { Length: 0 })
        {
            return "(null)";
        }

        return Convert.ToHexString(link).ToLowerInvariant();
    }
}
|
||||
@@ -27,6 +27,8 @@
|
||||
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Determinism.Abstractions\StellaOps.Determinism.Abstractions.csproj" />
|
||||
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
|
||||
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.EfCore\StellaOps.Infrastructure.EfCore.csproj" />
|
||||
<ProjectReference Include="..\..\..\__Libraries\StellaOps.HybridLogicalClock\StellaOps.HybridLogicalClock.csproj" />
|
||||
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Canonical.Json\StellaOps.Canonical.Json.csproj" />
|
||||
</ItemGroup>
|
||||
|
||||
<!-- Embed SQL migrations as resources -->
|
||||
|
||||
Reference in New Issue
Block a user