docs consolidation and others
@@ -0,0 +1,171 @@
-- -----------------------------------------------------------------------------
-- 002_hlc_queue_chain.sql
-- Sprint: SPRINT_20260105_002_002_SCHEDULER_hlc_queue_chain
-- Tasks: SQC-002, SQC-003, SQC-004
-- Description: HLC-ordered scheduler queue with cryptographic chain linking
-- -----------------------------------------------------------------------------

-- ============================================================================
-- SQC-002: scheduler.scheduler_log - HLC-ordered, chain-linked jobs
-- ============================================================================

CREATE TABLE IF NOT EXISTS scheduler.scheduler_log (
    -- Storage order (BIGSERIAL gives monotonic insertion order; not authoritative for ordering)
    seq_bigint BIGSERIAL PRIMARY KEY,

    -- Tenant isolation
    tenant_id TEXT NOT NULL,

    -- HLC timestamp, e.g. "1704067200000-scheduler-east-1-000042".
    -- This is the authoritative ordering key.
    t_hlc TEXT NOT NULL,

    -- Optional queue partition for parallel processing
    partition_key TEXT DEFAULT '',

    -- Job identifier (derived deterministically from the payload using UUID v5)
    job_id UUID NOT NULL,

    -- SHA-256 of the canonical JSON payload (32 bytes)
    payload_hash BYTEA NOT NULL CHECK (octet_length(payload_hash) = 32),

    -- Previous chain link (NULL for the first entry in a partition)
    prev_link BYTEA CHECK (prev_link IS NULL OR octet_length(prev_link) = 32),

    -- Current chain link: Hash(prev_link || job_id || t_hlc || payload_hash)
    link BYTEA NOT NULL CHECK (octet_length(link) = 32),

    -- Wall-clock timestamp for operational queries (not authoritative)
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- One entry per tenant/HLC/partition/job combination
    CONSTRAINT uq_scheduler_log_order UNIQUE (tenant_id, t_hlc, partition_key, job_id)
);

-- Primary query: get jobs by HLC order within tenant
CREATE INDEX IF NOT EXISTS idx_scheduler_log_tenant_hlc
    ON scheduler.scheduler_log (tenant_id, t_hlc ASC);

-- Partition-specific queries
CREATE INDEX IF NOT EXISTS idx_scheduler_log_partition
    ON scheduler.scheduler_log (tenant_id, partition_key, t_hlc ASC);

-- Job lookup by ID
CREATE INDEX IF NOT EXISTS idx_scheduler_log_job_id
    ON scheduler.scheduler_log (job_id);

-- Chain verification: find by link hash
CREATE INDEX IF NOT EXISTS idx_scheduler_log_link
    ON scheduler.scheduler_log (link);

-- Range queries for batch snapshots
CREATE INDEX IF NOT EXISTS idx_scheduler_log_created
    ON scheduler.scheduler_log (tenant_id, created_at DESC);

COMMENT ON TABLE scheduler.scheduler_log IS 'HLC-ordered scheduler queue with cryptographic chain linking for audit-safe job ordering';
COMMENT ON COLUMN scheduler.scheduler_log.t_hlc IS 'Hybrid Logical Clock timestamp: authoritative ordering key. Format: physicalTime13-nodeId-counter6';
COMMENT ON COLUMN scheduler.scheduler_log.link IS 'Chain link = SHA256(prev_link || job_id || t_hlc || payload_hash). Creates tamper-evident sequence.';
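
-- Verification sketch (editorial example, not part of the migration): within a
-- tenant/partition, each row's prev_link should equal the previous row's link
-- in HLC order. Rows where prev_mismatch is true indicate tampering or gaps;
-- the tenant value below is hypothetical.
--
--   SELECT t_hlc, job_id,
--          prev_link IS DISTINCT FROM lag(link) OVER (ORDER BY t_hlc) AS prev_mismatch
--   FROM scheduler.scheduler_log
--   WHERE tenant_id = 'tenant-a' AND partition_key = ''
--   ORDER BY t_hlc;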

-- ============================================================================
-- SQC-003: scheduler.batch_snapshot - Audit anchors for job batches
-- ============================================================================

CREATE TABLE IF NOT EXISTS scheduler.batch_snapshot (
    -- Snapshot identifier
    batch_id UUID PRIMARY KEY,

    -- Tenant isolation
    tenant_id TEXT NOT NULL,

    -- HLC range covered by this snapshot (inclusive bounds)
    range_start_t TEXT NOT NULL,
    range_end_t TEXT NOT NULL,

    -- Chain head at snapshot time (last link in range)
    head_link BYTEA NOT NULL CHECK (octet_length(head_link) = 32),

    -- Job count for quick validation
    job_count INT NOT NULL CHECK (job_count >= 0),

    -- Wall-clock timestamp
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Optional DSSE signature fields
    signed_by TEXT,   -- key ID that signed
    signature BYTEA,  -- DSSE signature bytes

    -- Constraint: signature and signer must be set together
    CONSTRAINT chk_signature_requires_signer CHECK (
        (signature IS NULL AND signed_by IS NULL) OR
        (signature IS NOT NULL AND signed_by IS NOT NULL)
    )
);

-- Query snapshots by tenant and time
CREATE INDEX IF NOT EXISTS idx_batch_snapshot_tenant
    ON scheduler.batch_snapshot (tenant_id, created_at DESC);

-- Query snapshots by HLC range
CREATE INDEX IF NOT EXISTS idx_batch_snapshot_range
    ON scheduler.batch_snapshot (tenant_id, range_start_t, range_end_t);

COMMENT ON TABLE scheduler.batch_snapshot IS 'Audit anchors for scheduler job batches. Captures chain head at specific HLC ranges.';
COMMENT ON COLUMN scheduler.batch_snapshot.head_link IS 'Chain head (last link) at snapshot time. Can be verified by replaying chain.';
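
-- Verification sketch (editorial example, not part of the migration): the
-- stored head_link should equal the link of the last scheduler_log row inside
-- the snapshot's HLC range; the tenant value below is hypothetical.
--
--   SELECT s.batch_id, l.link = s.head_link AS head_matches
--   FROM scheduler.batch_snapshot s
--   JOIN LATERAL (
--       SELECT link FROM scheduler.scheduler_log
--       WHERE tenant_id = s.tenant_id
--         AND t_hlc BETWEEN s.range_start_t AND s.range_end_t
--       ORDER BY t_hlc DESC
--       LIMIT 1
--   ) l ON TRUE
--   WHERE s.tenant_id = 'tenant-a';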

-- ============================================================================
-- SQC-004: scheduler.chain_heads - Per-partition chain head tracking
-- ============================================================================

CREATE TABLE IF NOT EXISTS scheduler.chain_heads (
    -- Tenant isolation
    tenant_id TEXT NOT NULL,

    -- Partition (empty string for default partition)
    partition_key TEXT NOT NULL DEFAULT '',

    -- Last chain link in this partition
    last_link BYTEA NOT NULL CHECK (octet_length(last_link) = 32),

    -- Last HLC timestamp in this partition
    last_t_hlc TEXT NOT NULL,

    -- Wall-clock timestamp of last update
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Primary key: one head per tenant/partition
    PRIMARY KEY (tenant_id, partition_key)
);

-- Query chain heads by update time (for monitoring)
CREATE INDEX IF NOT EXISTS idx_chain_heads_updated
    ON scheduler.chain_heads (updated_at DESC);

COMMENT ON TABLE scheduler.chain_heads IS 'Tracks current chain head for each tenant/partition. Updated atomically with scheduler_log inserts.';
COMMENT ON COLUMN scheduler.chain_heads.last_link IS 'Current chain head. Used as prev_link for next enqueue.';

-- ============================================================================
-- Atomic upsert function for chain head updates
-- ============================================================================

CREATE OR REPLACE FUNCTION scheduler.upsert_chain_head(
    p_tenant_id TEXT,
    p_partition_key TEXT,
    p_new_link BYTEA,
    p_new_t_hlc TEXT
)
RETURNS VOID
LANGUAGE plpgsql
AS $$
BEGIN
    INSERT INTO scheduler.chain_heads (tenant_id, partition_key, last_link, last_t_hlc, updated_at)
    VALUES (p_tenant_id, p_partition_key, p_new_link, p_new_t_hlc, NOW())
    ON CONFLICT (tenant_id, partition_key)
    DO UPDATE SET
        last_link = EXCLUDED.last_link,
        last_t_hlc = EXCLUDED.last_t_hlc,
        updated_at = EXCLUDED.updated_at
    WHERE scheduler.chain_heads.last_t_hlc < EXCLUDED.last_t_hlc;
END;
$$;

COMMENT ON FUNCTION scheduler.upsert_chain_head IS 'Atomically updates chain head. Only updates if new HLC > current HLC (monotonicity). Note: HLC strings compare lexicographically, so the fixed-width physicalTime13/counter6 format matters.';
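
-- Usage sketch (editorial example, not part of the migration): enqueue paths
-- call this after inserting the scheduler_log row, ideally in the same
-- transaction. Tenant, link bytes, and timestamp below are hypothetical.
--
--   SELECT scheduler.upsert_chain_head(
--       'tenant-a',
--       '',
--       '\x0000000000000000000000000000000000000000000000000000000000000000'::bytea,
--       '1704067200000-scheduler-east-1-000042');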
@@ -0,0 +1,58 @@
// -----------------------------------------------------------------------------
// BatchSnapshotEntity.cs
// Sprint: SPRINT_20260105_002_002_SCHEDULER_hlc_queue_chain
// Task: SQC-005 - Entity for batch_snapshot table
// -----------------------------------------------------------------------------

namespace StellaOps.Scheduler.Persistence.Postgres.Models;

/// <summary>
/// Entity representing an audit anchor for a batch of scheduler jobs.
/// </summary>
public sealed record BatchSnapshotEntity
{
    /// <summary>
    /// Snapshot identifier.
    /// </summary>
    public required Guid BatchId { get; init; }

    /// <summary>
    /// Tenant identifier for isolation.
    /// </summary>
    public required string TenantId { get; init; }

    /// <summary>
    /// HLC range start (inclusive).
    /// </summary>
    public required string RangeStartT { get; init; }

    /// <summary>
    /// HLC range end (inclusive).
    /// </summary>
    public required string RangeEndT { get; init; }

    /// <summary>
    /// Chain head at snapshot time (last link in range).
    /// </summary>
    public required byte[] HeadLink { get; init; }

    /// <summary>
    /// Number of jobs in the snapshot range.
    /// </summary>
    public required int JobCount { get; init; }

    /// <summary>
    /// Wall-clock timestamp of snapshot creation.
    /// </summary>
    public required DateTimeOffset CreatedAt { get; init; }

    /// <summary>
    /// Key ID that signed the snapshot (null if unsigned).
    /// </summary>
    public string? SignedBy { get; init; }

    /// <summary>
    /// DSSE signature bytes (null if unsigned).
    /// </summary>
    public byte[]? Signature { get; init; }
}
@@ -0,0 +1,38 @@
// -----------------------------------------------------------------------------
// ChainHeadEntity.cs
// Sprint: SPRINT_20260105_002_002_SCHEDULER_hlc_queue_chain
// Task: SQC-005 - Entity for chain_heads table
// -----------------------------------------------------------------------------

namespace StellaOps.Scheduler.Persistence.Postgres.Models;

/// <summary>
/// Entity representing the current chain head for a tenant/partition.
/// </summary>
public sealed record ChainHeadEntity
{
    /// <summary>
    /// Tenant identifier for isolation.
    /// </summary>
    public required string TenantId { get; init; }

    /// <summary>
    /// Partition key (empty string for default partition).
    /// </summary>
    public string PartitionKey { get; init; } = "";

    /// <summary>
    /// Last chain link in this partition.
    /// </summary>
    public required byte[] LastLink { get; init; }

    /// <summary>
    /// Last HLC timestamp in this partition.
    /// </summary>
    public required string LastTHlc { get; init; }

    /// <summary>
    /// Wall-clock timestamp of last update.
    /// </summary>
    public required DateTimeOffset UpdatedAt { get; init; }
}
@@ -0,0 +1,60 @@
// -----------------------------------------------------------------------------
// SchedulerLogEntity.cs
// Sprint: SPRINT_20260105_002_002_SCHEDULER_hlc_queue_chain
// Task: SQC-005 - Entity for scheduler_log table
// -----------------------------------------------------------------------------

namespace StellaOps.Scheduler.Persistence.Postgres.Models;

/// <summary>
/// Entity representing an HLC-ordered, chain-linked scheduler log entry.
/// </summary>
public sealed record SchedulerLogEntity
{
    /// <summary>
    /// Storage sequence number (BIGSERIAL, not authoritative for ordering).
    /// Populated by the database on insert; 0 for new entries before persistence.
    /// </summary>
    public long SeqBigint { get; init; }

    /// <summary>
    /// Tenant identifier for isolation.
    /// </summary>
    public required string TenantId { get; init; }

    /// <summary>
    /// HLC timestamp string, e.g. "1704067200000-scheduler-east-1-000042".
    /// This is the authoritative ordering key.
    /// </summary>
    public required string THlc { get; init; }

    /// <summary>
    /// Optional queue partition for parallel processing.
    /// </summary>
    public string PartitionKey { get; init; } = "";

    /// <summary>
    /// Job identifier (derived deterministically from the payload using UUID v5).
    /// </summary>
    public required Guid JobId { get; init; }

    /// <summary>
    /// SHA-256 of the canonical JSON payload (32 bytes).
    /// </summary>
    public required byte[] PayloadHash { get; init; }

    /// <summary>
    /// Previous chain link (null for first entry in partition).
    /// </summary>
    public byte[]? PrevLink { get; init; }

    /// <summary>
    /// Current chain link: Hash(prev_link || job_id || t_hlc || payload_hash).
    /// </summary>
    public required byte[] Link { get; init; }

    /// <summary>
    /// Wall-clock timestamp for operational queries (not authoritative).
    /// </summary>
    public required DateTimeOffset CreatedAt { get; init; }
}
@@ -0,0 +1,179 @@
// -----------------------------------------------------------------------------
// BatchSnapshotRepository.cs
// Sprint: SPRINT_20260105_002_002_SCHEDULER_hlc_queue_chain
// Task: SQC-013 - Implement BatchSnapshotService
// -----------------------------------------------------------------------------

using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Scheduler.Persistence.Postgres.Models;

namespace StellaOps.Scheduler.Persistence.Postgres.Repositories;

/// <summary>
/// PostgreSQL implementation of the batch snapshot repository.
/// </summary>
public sealed class BatchSnapshotRepository : RepositoryBase<SchedulerDataSource>, IBatchSnapshotRepository
{
    /// <summary>
    /// Creates a new batch snapshot repository.
    /// </summary>
    public BatchSnapshotRepository(
        SchedulerDataSource dataSource,
        ILogger<BatchSnapshotRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <inheritdoc />
    public async Task InsertAsync(BatchSnapshotEntity snapshot, CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(snapshot);

        const string sql = """
            INSERT INTO scheduler.batch_snapshot (
                batch_id, tenant_id, range_start_t, range_end_t,
                head_link, job_count, created_at, signed_by, signature
            ) VALUES (
                @batch_id, @tenant_id, @range_start_t, @range_end_t,
                @head_link, @job_count, @created_at, @signed_by, @signature
            )
            """;

        await using var connection = await DataSource.OpenConnectionAsync(snapshot.TenantId, "writer", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "batch_id", snapshot.BatchId);
        AddParameter(command, "tenant_id", snapshot.TenantId);
        AddParameter(command, "range_start_t", snapshot.RangeStartT);
        AddParameter(command, "range_end_t", snapshot.RangeEndT);
        AddParameter(command, "head_link", snapshot.HeadLink);
        AddParameter(command, "job_count", snapshot.JobCount);
        AddParameter(command, "created_at", snapshot.CreatedAt);
        AddParameter(command, "signed_by", snapshot.SignedBy);
        AddParameter(command, "signature", snapshot.Signature);

        await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<BatchSnapshotEntity?> GetByIdAsync(Guid batchId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT batch_id, tenant_id, range_start_t, range_end_t,
                   head_link, job_count, created_at, signed_by, signature
            FROM scheduler.batch_snapshot
            WHERE batch_id = @batch_id
            """;

        // Batch ID lookup doesn't require tenant context
        return await QuerySingleOrDefaultAsync(
            tenantId: null!,
            sql,
            cmd => AddParameter(cmd, "batch_id", batchId),
            MapBatchSnapshot,
            cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<BatchSnapshotEntity>> GetByTenantAsync(
        string tenantId,
        int limit = 100,
        CancellationToken cancellationToken = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);

        const string sql = """
            SELECT batch_id, tenant_id, range_start_t, range_end_t,
                   head_link, job_count, created_at, signed_by, signature
            FROM scheduler.batch_snapshot
            WHERE tenant_id = @tenant_id
            ORDER BY created_at DESC
            LIMIT @limit
            """;

        return await QueryAsync(
            tenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "tenant_id", tenantId);
                AddParameter(cmd, "limit", limit);
            },
            MapBatchSnapshot,
            cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<BatchSnapshotEntity>> GetContainingHlcAsync(
        string tenantId,
        string tHlc,
        CancellationToken cancellationToken = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);
        ArgumentException.ThrowIfNullOrWhiteSpace(tHlc);

        const string sql = """
            SELECT batch_id, tenant_id, range_start_t, range_end_t,
                   head_link, job_count, created_at, signed_by, signature
            FROM scheduler.batch_snapshot
            WHERE tenant_id = @tenant_id
              AND range_start_t <= @t_hlc
              AND range_end_t >= @t_hlc
            ORDER BY created_at DESC
            """;

        return await QueryAsync(
            tenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "tenant_id", tenantId);
                AddParameter(cmd, "t_hlc", tHlc);
            },
            MapBatchSnapshot,
            cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<BatchSnapshotEntity?> GetLatestAsync(
        string tenantId,
        CancellationToken cancellationToken = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);

        const string sql = """
            SELECT batch_id, tenant_id, range_start_t, range_end_t,
                   head_link, job_count, created_at, signed_by, signature
            FROM scheduler.batch_snapshot
            WHERE tenant_id = @tenant_id
            ORDER BY created_at DESC
            LIMIT 1
            """;

        return await QuerySingleOrDefaultAsync(
            tenantId,
            sql,
            cmd => AddParameter(cmd, "tenant_id", tenantId),
            MapBatchSnapshot,
            cancellationToken).ConfigureAwait(false);
    }

    private static BatchSnapshotEntity MapBatchSnapshot(NpgsqlDataReader reader)
    {
        return new BatchSnapshotEntity
        {
            BatchId = reader.GetGuid(reader.GetOrdinal("batch_id")),
            TenantId = reader.GetString(reader.GetOrdinal("tenant_id")),
            RangeStartT = reader.GetString(reader.GetOrdinal("range_start_t")),
            RangeEndT = reader.GetString(reader.GetOrdinal("range_end_t")),
            HeadLink = reader.GetFieldValue<byte[]>(reader.GetOrdinal("head_link")),
            JobCount = reader.GetInt32(reader.GetOrdinal("job_count")),
            CreatedAt = reader.GetFieldValue<DateTimeOffset>(reader.GetOrdinal("created_at")),
            SignedBy = reader.IsDBNull(reader.GetOrdinal("signed_by"))
                ? null
                : reader.GetString(reader.GetOrdinal("signed_by")),
            Signature = reader.IsDBNull(reader.GetOrdinal("signature"))
                ? null
                : reader.GetFieldValue<byte[]>(reader.GetOrdinal("signature"))
        };
    }
}
@@ -0,0 +1,140 @@
// -----------------------------------------------------------------------------
// ChainHeadRepository.cs
// Sprint: SPRINT_20260105_002_002_SCHEDULER_hlc_queue_chain
// Task: SQC-007 - PostgreSQL implementation for chain_heads repository
// -----------------------------------------------------------------------------

using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Scheduler.Persistence.Postgres.Models;

namespace StellaOps.Scheduler.Persistence.Postgres.Repositories;

/// <summary>
/// PostgreSQL repository for chain head tracking operations.
/// </summary>
public sealed class ChainHeadRepository : RepositoryBase<SchedulerDataSource>, IChainHeadRepository
{
    /// <summary>
    /// Creates a new chain head repository.
    /// </summary>
    public ChainHeadRepository(
        SchedulerDataSource dataSource,
        ILogger<ChainHeadRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <inheritdoc />
    public async Task<ChainHeadEntity?> GetAsync(
        string tenantId,
        string partitionKey,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT tenant_id, partition_key, last_link, last_t_hlc, updated_at
            FROM scheduler.chain_heads
            WHERE tenant_id = @tenant_id AND partition_key = @partition_key
            """;

        return await QuerySingleOrDefaultAsync(
            tenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "tenant_id", tenantId);
                AddParameter(cmd, "partition_key", partitionKey);
            },
            MapChainHeadEntity,
            cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<byte[]?> GetLastLinkAsync(
        string tenantId,
        string partitionKey,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT last_link
            FROM scheduler.chain_heads
            WHERE tenant_id = @tenant_id AND partition_key = @partition_key
            """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "partition_key", partitionKey);

        var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        return result is DBNull or null ? null : (byte[])result;
    }

    /// <inheritdoc />
    public async Task<bool> UpsertAsync(
        string tenantId,
        string partitionKey,
        byte[] newLink,
        string newTHlc,
        CancellationToken cancellationToken = default)
    {
        // Monotonic upsert, inlined rather than calling scheduler.upsert_chain_head:
        // the SQL function returns VOID, while the rows-affected count here tells
        // the caller whether the head actually advanced.
        const string sql = """
            INSERT INTO scheduler.chain_heads (tenant_id, partition_key, last_link, last_t_hlc, updated_at)
            VALUES (@tenant_id, @partition_key, @new_link, @new_t_hlc, NOW())
            ON CONFLICT (tenant_id, partition_key)
            DO UPDATE SET
                last_link = EXCLUDED.last_link,
                last_t_hlc = EXCLUDED.last_t_hlc,
                updated_at = EXCLUDED.updated_at
            WHERE scheduler.chain_heads.last_t_hlc < EXCLUDED.last_t_hlc
            """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "partition_key", partitionKey);
        AddParameter(command, "new_link", newLink);
        AddParameter(command, "new_t_hlc", newTHlc);

        var rowsAffected = await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        return rowsAffected > 0;
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<ChainHeadEntity>> GetAllForTenantAsync(
        string tenantId,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT tenant_id, partition_key, last_link, last_t_hlc, updated_at
            FROM scheduler.chain_heads
            WHERE tenant_id = @tenant_id
            ORDER BY partition_key
            """;

        return await QueryAsync(
            tenantId,
            sql,
            cmd => AddParameter(cmd, "tenant_id", tenantId),
            MapChainHeadEntity,
            cancellationToken).ConfigureAwait(false);
    }

    private static ChainHeadEntity MapChainHeadEntity(NpgsqlDataReader reader)
    {
        return new ChainHeadEntity
        {
            TenantId = reader.GetString(reader.GetOrdinal("tenant_id")),
            PartitionKey = reader.GetString(reader.GetOrdinal("partition_key")),
            LastLink = reader.GetFieldValue<byte[]>(reader.GetOrdinal("last_link")),
            LastTHlc = reader.GetString(reader.GetOrdinal("last_t_hlc")),
            UpdatedAt = reader.GetFieldValue<DateTimeOffset>(reader.GetOrdinal("updated_at"))
        };
    }
}
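
// Usage sketch (editorial, not part of the commit): UpsertAsync returns false
// when the monotonicity guard skips the write, which signals a lost race.
// Names below (repo, tenantId, link, tHlc, ct) are hypothetical.
//
//   var advanced = await repo.UpsertAsync(tenantId, "", link, tHlc, ct);
//   if (!advanced)
//   {
//       // Another writer advanced the head past tHlc; re-read and re-link.
//       var head = await repo.GetAsync(tenantId, "", ct);
//   }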
@@ -0,0 +1,50 @@
// -----------------------------------------------------------------------------
// IBatchSnapshotRepository.cs
// Sprint: SPRINT_20260105_002_002_SCHEDULER_hlc_queue_chain
// Task: SQC-013 - Implement BatchSnapshotService
// -----------------------------------------------------------------------------

using StellaOps.Scheduler.Persistence.Postgres.Models;

namespace StellaOps.Scheduler.Persistence.Postgres.Repositories;

/// <summary>
/// Repository interface for batch snapshot operations.
/// </summary>
public interface IBatchSnapshotRepository
{
    /// <summary>
    /// Inserts a new batch snapshot.
    /// </summary>
    /// <param name="snapshot">The snapshot to insert.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task InsertAsync(BatchSnapshotEntity snapshot, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets a batch snapshot by ID.
    /// </summary>
    Task<BatchSnapshotEntity?> GetByIdAsync(Guid batchId, CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets batch snapshots for a tenant, ordered by creation time descending.
    /// </summary>
    Task<IReadOnlyList<BatchSnapshotEntity>> GetByTenantAsync(
        string tenantId,
        int limit = 100,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets batch snapshots that contain a specific HLC timestamp.
    /// </summary>
    Task<IReadOnlyList<BatchSnapshotEntity>> GetContainingHlcAsync(
        string tenantId,
        string tHlc,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets the latest batch snapshot for a tenant.
    /// </summary>
    Task<BatchSnapshotEntity?> GetLatestAsync(
        string tenantId,
        CancellationToken cancellationToken = default);
}
@@ -0,0 +1,64 @@
// -----------------------------------------------------------------------------
// IChainHeadRepository.cs
// Sprint: SPRINT_20260105_002_002_SCHEDULER_hlc_queue_chain
// Task: SQC-007 - Interface for chain_heads repository
// -----------------------------------------------------------------------------

using StellaOps.Scheduler.Persistence.Postgres.Models;

namespace StellaOps.Scheduler.Persistence.Postgres.Repositories;

/// <summary>
/// Repository interface for chain head tracking operations.
/// </summary>
public interface IChainHeadRepository
{
    /// <summary>
    /// Gets the current chain head for a tenant/partition.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="partitionKey">Partition key (empty string for default).</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Current chain head, or null if no entries exist.</returns>
    Task<ChainHeadEntity?> GetAsync(
        string tenantId,
        string partitionKey,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets the last link hash for a tenant/partition.
    /// Convenience method for chain linking operations.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="partitionKey">Partition key (empty string for default).</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Last link hash, or null if no entries exist.</returns>
    Task<byte[]?> GetLastLinkAsync(
        string tenantId,
        string partitionKey,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Updates the chain head atomically with a monotonicity check.
    /// Only updates if the new HLC is greater than the current HLC.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="partitionKey">Partition key (empty string for default).</param>
    /// <param name="newLink">New chain link.</param>
    /// <param name="newTHlc">New HLC timestamp.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>True if updated, false if skipped due to monotonicity.</returns>
    Task<bool> UpsertAsync(
        string tenantId,
        string partitionKey,
        byte[] newLink,
        string newTHlc,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets all chain heads for a tenant.
    /// </summary>
    Task<IReadOnlyList<ChainHeadEntity>> GetAllForTenantAsync(
        string tenantId,
        CancellationToken cancellationToken = default);
}
@@ -0,0 +1,74 @@
// -----------------------------------------------------------------------------
// ISchedulerLogRepository.cs
// Sprint: SPRINT_20260105_002_002_SCHEDULER_hlc_queue_chain
// Task: SQC-005 - Interface for scheduler_log repository
// -----------------------------------------------------------------------------

using StellaOps.Scheduler.Persistence.Postgres.Models;

namespace StellaOps.Scheduler.Persistence.Postgres.Repositories;

/// <summary>
/// Repository interface for HLC-ordered scheduler log operations.
/// </summary>
public interface ISchedulerLogRepository
{
    /// <summary>
    /// Inserts a new log entry and atomically updates the chain head.
    /// </summary>
    /// <param name="entry">The log entry to insert.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The inserted entry with populated seq_bigint.</returns>
    Task<SchedulerLogEntity> InsertWithChainUpdateAsync(
        SchedulerLogEntity entry,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets log entries by HLC order within a tenant/partition.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="partitionKey">Optional partition key (null for all partitions).</param>
    /// <param name="limit">Maximum entries to return.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task<IReadOnlyList<SchedulerLogEntity>> GetByHlcOrderAsync(
        string tenantId,
        string? partitionKey,
        int limit,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets log entries within an HLC range.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="startTHlc">Start HLC (inclusive, null for no lower bound).</param>
    /// <param name="endTHlc">End HLC (inclusive, null for no upper bound).</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task<IReadOnlyList<SchedulerLogEntity>> GetByHlcRangeAsync(
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets a log entry by job ID.
    /// </summary>
    Task<SchedulerLogEntity?> GetByJobIdAsync(
        Guid jobId,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets a log entry by its chain link hash.
    /// </summary>
    Task<SchedulerLogEntity?> GetByLinkAsync(
        byte[] link,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Counts entries in an HLC range.
    /// </summary>
    Task<int> CountByHlcRangeAsync(
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        CancellationToken cancellationToken = default);
}
@@ -0,0 +1,270 @@
// -----------------------------------------------------------------------------
// SchedulerLogRepository.cs
// Sprint: SPRINT_20260105_002_002_SCHEDULER_hlc_queue_chain
// Task: SQC-006 - PostgreSQL implementation for scheduler_log repository
// -----------------------------------------------------------------------------

using Microsoft.Extensions.Logging;
using Npgsql;
using StellaOps.Infrastructure.Postgres.Repositories;
using StellaOps.Scheduler.Persistence.Postgres.Models;

namespace StellaOps.Scheduler.Persistence.Postgres.Repositories;

/// <summary>
/// PostgreSQL repository for HLC-ordered scheduler log operations.
/// </summary>
public sealed class SchedulerLogRepository : RepositoryBase<SchedulerDataSource>, ISchedulerLogRepository
{
    /// <summary>
    /// Creates a new scheduler log repository.
    /// </summary>
    public SchedulerLogRepository(
        SchedulerDataSource dataSource,
        ILogger<SchedulerLogRepository> logger)
        : base(dataSource, logger)
    {
    }

    /// <inheritdoc />
    public async Task<SchedulerLogEntity> InsertWithChainUpdateAsync(
        SchedulerLogEntity entry,
        CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(entry);

        const string insertSql = """
            INSERT INTO scheduler.scheduler_log (
                tenant_id, t_hlc, partition_key, job_id, payload_hash, prev_link, link
            )
            VALUES (
                @tenant_id, @t_hlc, @partition_key, @job_id, @payload_hash, @prev_link, @link
            )
            RETURNING seq_bigint, tenant_id, t_hlc, partition_key, job_id, payload_hash, prev_link, link, created_at
            """;

        // Monotonic chain head upsert, inlined rather than delegated to
        // IChainHeadRepository.UpsertAsync: the repository call would open its
        // own connection and escape the transaction below, so the log insert
        // and head update could commit independently.
        const string upsertSql = """
            INSERT INTO scheduler.chain_heads (tenant_id, partition_key, last_link, last_t_hlc, updated_at)
            VALUES (@tenant_id, @partition_key, @link, @t_hlc, NOW())
            ON CONFLICT (tenant_id, partition_key)
            DO UPDATE SET
                last_link = EXCLUDED.last_link,
                last_t_hlc = EXCLUDED.last_t_hlc,
                updated_at = EXCLUDED.updated_at
            WHERE scheduler.chain_heads.last_t_hlc < EXCLUDED.last_t_hlc
            """;

        await using var connection = await DataSource.OpenConnectionAsync(entry.TenantId, "writer", cancellationToken)
            .ConfigureAwait(false);

        // Use a transaction for atomicity of log insert + chain head update
        await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false);

        try
        {
            await using var command = CreateCommand(insertSql, connection);
            command.Transaction = transaction;

            AddParameter(command, "tenant_id", entry.TenantId);
            AddParameter(command, "t_hlc", entry.THlc);
            AddParameter(command, "partition_key", entry.PartitionKey);
            AddParameter(command, "job_id", entry.JobId);
            AddParameter(command, "payload_hash", entry.PayloadHash);
            AddParameter(command, "prev_link", entry.PrevLink);
            AddParameter(command, "link", entry.Link);

            SchedulerLogEntity result;
            await using (var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false))
            {
                await reader.ReadAsync(cancellationToken).ConfigureAwait(false);
                result = MapSchedulerLogEntry(reader);
            }

            // Advance the chain head in the same transaction
            await using var upsert = CreateCommand(upsertSql, connection);
            upsert.Transaction = transaction;
            AddParameter(upsert, "tenant_id", entry.TenantId);
            AddParameter(upsert, "partition_key", entry.PartitionKey);
            AddParameter(upsert, "link", entry.Link);
            AddParameter(upsert, "t_hlc", entry.THlc);
            await upsert.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);

            await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);

            return result;
        }
        catch
        {
            await transaction.RollbackAsync(cancellationToken).ConfigureAwait(false);
            throw;
        }
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<SchedulerLogEntity>> GetByHlcOrderAsync(
        string tenantId,
        string? partitionKey,
        int limit,
        CancellationToken cancellationToken = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);

        var sql = partitionKey is not null
            ? """
              SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id, payload_hash, prev_link, link, created_at
              FROM scheduler.scheduler_log
              WHERE tenant_id = @tenant_id AND partition_key = @partition_key
              ORDER BY t_hlc ASC
              LIMIT @limit
              """
            : """
              SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id, payload_hash, prev_link, link, created_at
              FROM scheduler.scheduler_log
              WHERE tenant_id = @tenant_id
              ORDER BY t_hlc ASC
              LIMIT @limit
              """;

        return await QueryAsync(
            tenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "tenant_id", tenantId);
                if (partitionKey is not null)
                {
                    AddParameter(cmd, "partition_key", partitionKey);
                }
                AddParameter(cmd, "limit", limit);
            },
            MapSchedulerLogEntry,
            cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<SchedulerLogEntity>> GetByHlcRangeAsync(
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        CancellationToken cancellationToken = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);

        // Bounds are optional; only fixed parameter placeholders are appended,
        // so the SQL remains fully parameterized.
        var whereClause = "WHERE tenant_id = @tenant_id";
        if (startTHlc is not null)
        {
            whereClause += " AND t_hlc >= @start_t_hlc";
        }
        if (endTHlc is not null)
        {
            whereClause += " AND t_hlc <= @end_t_hlc";
        }

        var sql = $"""
            SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id, payload_hash, prev_link, link, created_at
            FROM scheduler.scheduler_log
            {whereClause}
            ORDER BY t_hlc ASC
            """;

        return await QueryAsync(
            tenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "tenant_id", tenantId);
                if (startTHlc is not null)
                {
                    AddParameter(cmd, "start_t_hlc", startTHlc);
                }
                if (endTHlc is not null)
                {
                    AddParameter(cmd, "end_t_hlc", endTHlc);
                }
            },
            MapSchedulerLogEntry,
            cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<SchedulerLogEntity?> GetByJobIdAsync(
        Guid jobId,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id, payload_hash, prev_link, link, created_at
            FROM scheduler.scheduler_log
            WHERE job_id = @job_id
            """;

        // Job ID lookup doesn't require tenant context
        return await QuerySingleOrDefaultAsync(
            tenantId: null!,
            sql,
            cmd => AddParameter(cmd, "job_id", jobId),
            MapSchedulerLogEntry,
            cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<SchedulerLogEntity?> GetByLinkAsync(
        byte[] link,
        CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(link);

        const string sql = """
            SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id, payload_hash, prev_link, link, created_at
            FROM scheduler.scheduler_log
            WHERE link = @link
            """;

        // Link lookup doesn't require tenant context
        return await QuerySingleOrDefaultAsync(
            tenantId: null!,
            sql,
            cmd => AddParameter(cmd, "link", link),
            MapSchedulerLogEntry,
            cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<int> CountByHlcRangeAsync(
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        CancellationToken cancellationToken = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);

        var whereClause = "WHERE tenant_id = @tenant_id";
        if (startTHlc is not null)
        {
            whereClause += " AND t_hlc >= @start_t_hlc";
        }
        if (endTHlc is not null)
        {
            whereClause += " AND t_hlc <= @end_t_hlc";
        }

        var sql = $"""
            SELECT COUNT(*)::INT
            FROM scheduler.scheduler_log
            {whereClause}
            """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", tenantId);
        if (startTHlc is not null)
        {
            AddParameter(command, "start_t_hlc", startTHlc);
        }
        if (endTHlc is not null)
        {
            AddParameter(command, "end_t_hlc", endTHlc);
        }

        var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        return result is int count ? count : 0;
    }

    private static SchedulerLogEntity MapSchedulerLogEntry(NpgsqlDataReader reader)
    {
        return new SchedulerLogEntity
        {
            SeqBigint = reader.GetInt64(reader.GetOrdinal("seq_bigint")),
            TenantId = reader.GetString(reader.GetOrdinal("tenant_id")),
            THlc = reader.GetString(reader.GetOrdinal("t_hlc")),
            PartitionKey = reader.GetString(reader.GetOrdinal("partition_key")),
            JobId = reader.GetGuid(reader.GetOrdinal("job_id")),
            PayloadHash = reader.GetFieldValue<byte[]>(reader.GetOrdinal("payload_hash")),
            PrevLink = reader.IsDBNull(reader.GetOrdinal("prev_link"))
                ? null
                : reader.GetFieldValue<byte[]>(reader.GetOrdinal("prev_link")),
            Link = reader.GetFieldValue<byte[]>(reader.GetOrdinal("link")),
            CreatedAt = reader.GetFieldValue<DateTimeOffset>(reader.GetOrdinal("created_at"))
        };
    }
}
@@ -0,0 +1,160 @@
// -----------------------------------------------------------------------------
// SchedulerChainLinking.cs
// Sprint: SPRINT_20260105_002_002_SCHEDULER_hlc_queue_chain
// Task: SQC-008 - Implement SchedulerChainLinking static class
// -----------------------------------------------------------------------------

using System.Security.Cryptography;
using System.Text;
using StellaOps.HybridLogicalClock;

namespace StellaOps.Scheduler.Persistence.Postgres;

/// <summary>
/// Static utility class for computing chain links in the scheduler queue.
/// Chain links provide tamper-evident sequence proofs per the advisory specification.
/// </summary>
public static class SchedulerChainLinking
{
    /// <summary>
    /// Number of bytes in a chain link (SHA-256 = 32 bytes).
    /// </summary>
    public const int LinkSizeBytes = 32;

    /// <summary>
    /// Computes a chain link per the advisory specification:
    /// link_i = Hash(link_{i-1} || job_id || t_hlc || payload_hash).
    /// </summary>
    /// <param name="prevLink">Previous chain link, or null for the first entry (uses 32 zero bytes).</param>
    /// <param name="jobId">Job identifier.</param>
    /// <param name="tHlc">HLC timestamp.</param>
    /// <param name="payloadHash">SHA-256 hash of the canonical payload.</param>
    /// <returns>New chain link (32 bytes).</returns>
    public static byte[] ComputeLink(
        byte[]? prevLink,
        Guid jobId,
        HlcTimestamp tHlc,
        byte[] payloadHash)
    {
        ArgumentNullException.ThrowIfNull(payloadHash);
        if (payloadHash.Length != LinkSizeBytes)
        {
            throw new ArgumentException($"Payload hash must be {LinkSizeBytes} bytes", nameof(payloadHash));
        }

        using var hasher = IncrementalHash.CreateHash(HashAlgorithmName.SHA256);

        // Previous link (or 32 zero bytes for the first entry)
        hasher.AppendData(prevLink ?? new byte[LinkSizeBytes]);

        // Job ID as bytes (standard Guid byte layout)
        hasher.AppendData(jobId.ToByteArray());

        // HLC timestamp as UTF-8 bytes
        hasher.AppendData(Encoding.UTF8.GetBytes(tHlc.ToSortableString()));

        // Payload hash
        hasher.AppendData(payloadHash);

        return hasher.GetHashAndReset();
    }

    /// <summary>
    /// Computes a chain link from a string HLC timestamp.
    /// </summary>
    public static byte[] ComputeLink(
        byte[]? prevLink,
        Guid jobId,
        string tHlcString,
        byte[] payloadHash)
    {
        var tHlc = HlcTimestamp.Parse(tHlcString);
        return ComputeLink(prevLink, jobId, tHlc, payloadHash);
    }

    /// <summary>
    /// Computes a deterministic payload hash from canonical JSON.
    /// </summary>
    /// <param name="canonicalJson">RFC 8785 canonical JSON representation of the payload.</param>
    /// <returns>SHA-256 hash (32 bytes).</returns>
    public static byte[] ComputePayloadHash(string canonicalJson)
    {
        ArgumentException.ThrowIfNullOrEmpty(canonicalJson);
        return SHA256.HashData(Encoding.UTF8.GetBytes(canonicalJson));
    }

    /// <summary>
    /// Computes a deterministic payload hash from raw bytes.
    /// </summary>
    /// <param name="payload">Payload bytes.</param>
    /// <returns>SHA-256 hash (32 bytes).</returns>
    public static byte[] ComputePayloadHash(byte[] payload)
    {
        ArgumentNullException.ThrowIfNull(payload);
        return SHA256.HashData(payload);
    }

    /// <summary>
    /// Verifies that a chain link is correctly computed.
    /// </summary>
    /// <param name="expectedLink">The stored link to verify.</param>
    /// <param name="prevLink">Previous chain link.</param>
    /// <param name="jobId">Job identifier.</param>
    /// <param name="tHlc">HLC timestamp.</param>
    /// <param name="payloadHash">Payload hash.</param>
    /// <returns>True if the link is valid.</returns>
    public static bool VerifyLink(
        byte[] expectedLink,
        byte[]? prevLink,
        Guid jobId,
        HlcTimestamp tHlc,
        byte[] payloadHash)
    {
        ArgumentNullException.ThrowIfNull(expectedLink);
        if (expectedLink.Length != LinkSizeBytes)
        {
            return false;
        }

        var computed = ComputeLink(prevLink, jobId, tHlc, payloadHash);
        return CryptographicOperations.FixedTimeEquals(expectedLink, computed);
    }

    /// <summary>
    /// Verifies that a chain link is correctly computed (string HLC version).
    /// </summary>
    public static bool VerifyLink(
        byte[] expectedLink,
        byte[]? prevLink,
        Guid jobId,
        string tHlcString,
        byte[] payloadHash)
    {
        if (!HlcTimestamp.TryParse(tHlcString, out var tHlc))
        {
            return false;
        }
        return VerifyLink(expectedLink, prevLink, jobId, tHlc, payloadHash);
    }

    /// <summary>
    /// Creates the genesis link (first link in a chain).
    /// Uses 32 zero bytes as the previous link.
    /// </summary>
    public static byte[] ComputeGenesisLink(
        Guid jobId,
        HlcTimestamp tHlc,
        byte[] payloadHash)
    {
        return ComputeLink(null, jobId, tHlc, payloadHash);
    }

    /// <summary>
    /// Formats a link as a hexadecimal string for display/logging.
    /// </summary>
    public static string ToHexString(byte[]? link)
    {
        return link is null ? "(null)" : Convert.ToHexString(link).ToLowerInvariant();
    }
}
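
// Usage sketch (editorial, not part of the commit): how an enqueue path might
// combine these helpers with IChainHeadRepository. Names below (payloadJson,
// chainHeads, tenantId, jobId, hlc) are hypothetical.
//
//   var payloadHash = SchedulerChainLinking.ComputePayloadHash(payloadJson);
//   var prevLink = await chainHeads.GetLastLinkAsync(tenantId, partitionKey: "");
//   var link = SchedulerChainLinking.ComputeLink(prevLink, jobId, hlc, payloadHash);
//
//   // Later, an auditor recomputes and compares in constant time:
//   var ok = SchedulerChainLinking.VerifyLink(link, prevLink, jobId, hlc, payloadHash);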
@@ -27,6 +27,7 @@
    <ProjectReference Include="..\..\..\__Libraries\StellaOps.Determinism.Abstractions\StellaOps.Determinism.Abstractions.csproj" />
    <ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
    <ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.EfCore\StellaOps.Infrastructure.EfCore.csproj" />
    <ProjectReference Include="..\..\..\__Libraries\StellaOps.HybridLogicalClock\StellaOps.HybridLogicalClock.csproj" />
  </ItemGroup>

  <!-- Embed SQL migrations as resources -->