Scheduler persistence sprint and audit-log work: rename log/snapshot models to *Entity, add HLC range/after queries, and chain-head upsert

This commit is contained in:
StellaOps Bot
2026-01-07 09:36:16 +02:00
parent 05833e0af2
commit ab364c6032
377 changed files with 64534 additions and 1627 deletions

View File

@@ -1,56 +0,0 @@
// <copyright file="SchedulerLogEntry.cs" company="StellaOps">
// Copyright (c) StellaOps. Licensed under AGPL-3.0-or-later.
// </copyright>
namespace StellaOps.Scheduler.Persistence.Postgres.Models;
/// <summary>
/// Represents an HLC-ordered, chain-linked scheduler log entry.
/// Each entry's <see cref="Link"/> commits to the previous entry via
/// <see cref="PrevLink"/>, forming a tamper-evident hash chain.
/// </summary>
public sealed record SchedulerLogEntry
{
/// <summary>
/// Storage sequence number assigned by the database. Not authoritative for
/// ordering; <see cref="THlc"/> defines the canonical order.
/// </summary>
public long SeqBigint { get; init; }
/// <summary>
/// Tenant identifier.
/// </summary>
public required string TenantId { get; init; }
/// <summary>
/// HLC timestamp in sortable string format (lexicographic order equals HLC order).
/// </summary>
public required string THlc { get; init; }
/// <summary>
/// Optional queue partition key; empty string when the entry is unpartitioned.
/// </summary>
public string PartitionKey { get; init; } = string.Empty;
/// <summary>
/// Job identifier (deterministic from payload).
/// </summary>
public required Guid JobId { get; init; }
/// <summary>
/// SHA-256 hash of the canonical payload JSON.
/// </summary>
public required byte[] PayloadHash { get; init; }
/// <summary>
/// Previous chain link (null for first entry in chain).
/// </summary>
public byte[]? PrevLink { get; init; }
/// <summary>
/// Chain link: Hash(prev_link || job_id || t_hlc || payload_hash).
/// </summary>
public required byte[] Link { get; init; }
/// <summary>
/// Timestamp when the entry was created.
/// </summary>
public DateTimeOffset CreatedAt { get; init; }
}

View File

@@ -49,6 +49,38 @@ public interface ISchedulerLogRepository
string? endTHlc,
CancellationToken cancellationToken = default);
/// <summary>
/// Gets log entries within an HLC range with additional filtering.
/// </summary>
/// <param name="tenantId">Tenant identifier.</param>
/// <param name="startTHlc">Start HLC (inclusive, null for no lower bound).</param>
/// <param name="endTHlc">End HLC (inclusive, null for no upper bound).</param>
/// <param name="limit">Maximum entries to return (0 for no limit).</param>
/// <param name="partitionKey">Optional partition key filter.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Matching entries ordered by HLC ascending.</returns>
Task<IReadOnlyList<SchedulerLogEntity>> GetByHlcRangeAsync(
string tenantId,
string? startTHlc,
string? endTHlc,
int limit,
string? partitionKey,
CancellationToken cancellationToken = default);
/// <summary>
/// Gets log entries after a given HLC timestamp.
/// </summary>
/// <param name="tenantId">Tenant identifier.</param>
/// <param name="afterTHlc">Start after this HLC (exclusive).</param>
/// <param name="limit">Maximum entries to return.</param>
/// <param name="partitionKey">Optional partition key filter.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Entries with an HLC strictly greater than <paramref name="afterTHlc"/>, ordered by HLC ascending.</returns>
Task<IReadOnlyList<SchedulerLogEntity>> GetAfterHlcAsync(
string tenantId,
string afterTHlc,
int limit,
string? partitionKey = null,
CancellationToken cancellationToken = default);
/// <summary>
/// Gets a log entry by job ID.
/// </summary>
@@ -71,4 +103,31 @@ public interface ISchedulerLogRepository
string? startTHlc,
string? endTHlc,
CancellationToken cancellationToken = default);
/// <summary>
/// Counts entries in an HLC range with partition filter.
/// </summary>
/// <param name="tenantId">Tenant identifier.</param>
/// <param name="startTHlc">Start HLC (inclusive, null for no lower bound).</param>
/// <param name="endTHlc">End HLC (inclusive, null for no upper bound).</param>
/// <param name="partitionKey">Optional partition key filter.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Number of entries matching the filters.</returns>
Task<int> CountByHlcRangeAsync(
string tenantId,
string? startTHlc,
string? endTHlc,
string? partitionKey,
CancellationToken cancellationToken = default);
/// <summary>
/// Checks if a job entry already exists for idempotency.
/// </summary>
/// <param name="tenantId">Tenant identifier.</param>
/// <param name="jobId">Job identifier.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>True if job exists.</returns>
Task<bool> ExistsAsync(
string tenantId,
Guid jobId,
CancellationToken cancellationToken = default);
}

View File

@@ -23,7 +23,7 @@ public sealed class PostgresBatchSnapshotRepository : RepositoryBase<SchedulerDa
}
/// <inheritdoc />
public async Task InsertAsync(BatchSnapshot snapshot, CancellationToken cancellationToken = default)
public async Task InsertAsync(BatchSnapshotEntity snapshot, CancellationToken cancellationToken = default)
{
const string sql = """
INSERT INTO scheduler.batch_snapshot (
@@ -53,7 +53,7 @@ public sealed class PostgresBatchSnapshotRepository : RepositoryBase<SchedulerDa
}
/// <inheritdoc />
public async Task<BatchSnapshot?> GetByIdAsync(Guid batchId, CancellationToken cancellationToken = default)
public async Task<BatchSnapshotEntity?> GetByIdAsync(Guid batchId, CancellationToken cancellationToken = default)
{
const string sql = """
SELECT batch_id, tenant_id, range_start_t, range_end_t, head_link,
@@ -72,7 +72,40 @@ public sealed class PostgresBatchSnapshotRepository : RepositoryBase<SchedulerDa
}
/// <inheritdoc />
public async Task<BatchSnapshot?> GetLatestAsync(string tenantId, CancellationToken cancellationToken = default)
public async Task<IReadOnlyList<BatchSnapshotEntity>> GetByTenantAsync(
string tenantId,
int limit = 100,
CancellationToken cancellationToken = default)
{
// Most recent snapshots first; the limit is bound as a parameter rather than
// interpolated into the SQL text.
const string sql = """
SELECT batch_id, tenant_id, range_start_t, range_end_t, head_link,
job_count, created_at, signed_by, signature
FROM scheduler.batch_snapshot
WHERE tenant_id = @tenant_id
ORDER BY created_at DESC
LIMIT @limit
""";
// Read-only query: open the connection under the "reader" role for this tenant.
await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "tenant_id", tenantId);
AddParameter(command, "limit", limit);
var snapshots = new List<BatchSnapshotEntity>();
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
{
snapshots.Add(MapSnapshot(reader));
}
return snapshots;
}
/// <inheritdoc />
public async Task<BatchSnapshotEntity?> GetLatestAsync(string tenantId, CancellationToken cancellationToken = default)
{
const string sql = """
SELECT batch_id, tenant_id, range_start_t, range_end_t, head_link,
@@ -93,46 +126,7 @@ public sealed class PostgresBatchSnapshotRepository : RepositoryBase<SchedulerDa
}
/// <inheritdoc />
public async Task<IReadOnlyList<BatchSnapshot>> GetByTimeRangeAsync(
string tenantId,
DateTimeOffset startTime,
DateTimeOffset endTime,
int limit = 100,
CancellationToken cancellationToken = default)
{
const string sql = """
SELECT batch_id, tenant_id, range_start_t, range_end_t, head_link,
job_count, created_at, signed_by, signature
FROM scheduler.batch_snapshot
WHERE tenant_id = @tenant_id
AND created_at >= @start_time
AND created_at <= @end_time
ORDER BY created_at DESC
LIMIT @limit
""";
await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "tenant_id", tenantId);
AddParameter(command, "start_time", startTime);
AddParameter(command, "end_time", endTime);
AddParameter(command, "limit", limit);
var snapshots = new List<BatchSnapshot>();
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
{
snapshots.Add(MapSnapshot(reader));
}
return snapshots;
}
/// <inheritdoc />
public async Task<IReadOnlyList<BatchSnapshot>> GetContainingHlcAsync(
public async Task<IReadOnlyList<BatchSnapshotEntity>> GetContainingHlcAsync(
string tenantId,
string tHlc,
CancellationToken cancellationToken = default)
@@ -154,7 +148,7 @@ public sealed class PostgresBatchSnapshotRepository : RepositoryBase<SchedulerDa
AddParameter(command, "tenant_id", tenantId);
AddParameter(command, "t_hlc", tHlc);
var snapshots = new List<BatchSnapshot>();
var snapshots = new List<BatchSnapshotEntity>();
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
@@ -165,9 +159,9 @@ public sealed class PostgresBatchSnapshotRepository : RepositoryBase<SchedulerDa
return snapshots;
}
private static BatchSnapshot MapSnapshot(NpgsqlDataReader reader)
private static BatchSnapshotEntity MapSnapshot(NpgsqlDataReader reader)
{
return new BatchSnapshot
return new BatchSnapshotEntity
{
BatchId = reader.GetGuid(0),
TenantId = reader.GetString(1),

View File

@@ -46,13 +46,13 @@ public sealed class PostgresChainHeadRepository : RepositoryBase<SchedulerDataSo
}
/// <inheritdoc />
public async Task<ChainHead?> GetAsync(
public async Task<ChainHeadEntity?> GetAsync(
string tenantId,
string partitionKey,
CancellationToken cancellationToken = default)
{
const string sql = """
SELECT tenant_id, partition_key, last_link, last_t_hlc, last_job_id, updated_at
SELECT tenant_id, partition_key, last_link, last_t_hlc, updated_at
FROM scheduler.chain_heads
WHERE tenant_id = @tenant_id AND partition_key = @partition_key
""";
@@ -69,12 +69,45 @@ public sealed class PostgresChainHeadRepository : RepositoryBase<SchedulerDataSo
}
/// <inheritdoc />
public async Task<IReadOnlyList<ChainHead>> GetAllForTenantAsync(
public async Task<bool> UpsertAsync(
string tenantId,
string partitionKey,
byte[] newLink,
string newTHlc,
CancellationToken cancellationToken = default)
{
// Insert-or-advance of the chain head. The DO UPDATE ... WHERE guard applies only
// on the conflict (update) path, so the head advances monotonically: an existing
// row is updated only when its stored last_t_hlc sorts strictly before the new
// HLC (HLC strings are lexicographically sortable). A stale update affects 0 rows.
const string sql = """
INSERT INTO scheduler.chain_heads (tenant_id, partition_key, last_link, last_t_hlc, updated_at)
VALUES (@tenant_id, @partition_key, @last_link, @last_t_hlc, @updated_at)
ON CONFLICT (tenant_id, partition_key)
DO UPDATE SET
last_link = @last_link,
last_t_hlc = @last_t_hlc,
updated_at = @updated_at
WHERE scheduler.chain_heads.last_t_hlc < @last_t_hlc
""";
await using var connection = await DataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "tenant_id", tenantId);
AddParameter(command, "partition_key", partitionKey);
AddParameter(command, "last_link", newLink);
AddParameter(command, "last_t_hlc", newTHlc);
// NOTE(review): wall-clock timestamp captured here; assumes the DB column is
// informational only (ordering uses last_t_hlc) — confirm.
AddParameter(command, "updated_at", DateTimeOffset.UtcNow);
// 0 rows means the conflict-path WHERE rejected a stale (older-HLC) update.
var rowsAffected = await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
return rowsAffected > 0;
}
/// <inheritdoc />
public async Task<IReadOnlyList<ChainHeadEntity>> GetAllForTenantAsync(
string tenantId,
CancellationToken cancellationToken = default)
{
const string sql = """
SELECT tenant_id, partition_key, last_link, last_t_hlc, last_job_id, updated_at
SELECT tenant_id, partition_key, last_link, last_t_hlc, updated_at
FROM scheduler.chain_heads
WHERE tenant_id = @tenant_id
ORDER BY partition_key
@@ -85,7 +118,7 @@ public sealed class PostgresChainHeadRepository : RepositoryBase<SchedulerDataSo
await using var command = CreateCommand(sql, connection);
AddParameter(command, "tenant_id", tenantId);
var heads = new List<ChainHead>();
var heads = new List<ChainHeadEntity>();
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
@@ -96,16 +129,15 @@ public sealed class PostgresChainHeadRepository : RepositoryBase<SchedulerDataSo
return heads;
}
private static ChainHead MapChainHead(NpgsqlDataReader reader)
private static ChainHeadEntity MapChainHead(NpgsqlDataReader reader)
{
return new ChainHead
return new ChainHeadEntity
{
TenantId = reader.GetString(0),
PartitionKey = reader.GetString(1),
LastLink = reader.GetFieldValue<byte[]>(2),
LastTHlc = reader.GetString(3),
LastJobId = reader.GetGuid(4),
UpdatedAt = reader.GetFieldValue<DateTimeOffset>(5)
UpdatedAt = reader.GetFieldValue<DateTimeOffset>(4)
};
}
}

View File

@@ -23,8 +23,8 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
}
/// <inheritdoc />
public async Task<long> InsertWithChainUpdateAsync(
SchedulerLogEntry entry,
public async Task<SchedulerLogEntity> InsertWithChainUpdateAsync(
SchedulerLogEntity entry,
CancellationToken cancellationToken = default)
{
// Use the stored function for atomic insert + chain head update
@@ -53,11 +53,13 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
AddParameter(command, "link", entry.Link);
var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
return Convert.ToInt64(result);
var seqBigint = Convert.ToInt64(result);
return entry with { SeqBigint = seqBigint };
}
/// <inheritdoc />
public async Task<IReadOnlyList<SchedulerLogEntry>> GetByHlcOrderAsync(
public async Task<IReadOnlyList<SchedulerLogEntity>> GetByHlcOrderAsync(
string tenantId,
string? partitionKey,
int limit,
@@ -92,7 +94,7 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
AddParameter(command, "partition_key", partitionKey);
}
var entries = new List<SchedulerLogEntry>();
var entries = new List<SchedulerLogEntity>();
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
@@ -104,12 +106,10 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
}
/// <inheritdoc />
public async Task<IReadOnlyList<SchedulerLogEntry>> GetByHlcRangeAsync(
public async Task<IReadOnlyList<SchedulerLogEntity>> GetByHlcRangeAsync(
string tenantId,
string? startTHlc,
string? endTHlc,
int limit = 0,
string? partitionKey = null,
CancellationToken cancellationToken = default)
{
var conditions = new List<string> { "tenant_id = @tenant_id" };
@@ -123,19 +123,12 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
conditions.Add("t_hlc <= @end_t_hlc");
}
if (partitionKey is not null)
{
conditions.Add("partition_key = @partition_key");
}
var limitClause = limit > 0 ? $"LIMIT {limit}" : string.Empty;
var sql = $"""
SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
payload_hash, prev_link, link, created_at
FROM scheduler.scheduler_log
WHERE {string.Join(" AND ", conditions)}
ORDER BY t_hlc ASC
{limitClause}
""";
await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
@@ -153,12 +146,7 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
AddParameter(command, "end_t_hlc", endTHlc);
}
if (partitionKey is not null)
{
AddParameter(command, "partition_key", partitionKey);
}
var entries = new List<SchedulerLogEntry>();
var entries = new List<SchedulerLogEntity>();
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
@@ -170,52 +158,45 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
}
/// <inheritdoc />
public async Task<IReadOnlyList<SchedulerLogEntry>> GetAfterHlcAsync(
string tenantId,
string afterTHlc,
int limit,
string? partitionKey = null,
public async Task<SchedulerLogEntity?> GetByJobIdAsync(
Guid jobId,
CancellationToken cancellationToken = default)
{
var sql = partitionKey is null
? """
SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
payload_hash, prev_link, link, created_at
FROM scheduler.scheduler_log
WHERE tenant_id = @tenant_id AND t_hlc > @after_t_hlc
ORDER BY t_hlc ASC
LIMIT @limit
"""
: """
SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
payload_hash, prev_link, link, created_at
FROM scheduler.scheduler_log
WHERE tenant_id = @tenant_id AND t_hlc > @after_t_hlc AND partition_key = @partition_key
ORDER BY t_hlc ASC
LIMIT @limit
""";
const string sql = """
SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
payload_hash, prev_link, link, created_at
FROM scheduler.scheduler_log
WHERE job_id = @job_id
""";
await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "job_id", jobId);
AddParameter(command, "tenant_id", tenantId);
AddParameter(command, "after_t_hlc", afterTHlc);
AddParameter(command, "limit", limit);
if (partitionKey is not null)
{
AddParameter(command, "partition_key", partitionKey);
}
var entries = new List<SchedulerLogEntry>();
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
return await reader.ReadAsync(cancellationToken).ConfigureAwait(false) ? MapEntry(reader) : null;
}
while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
{
entries.Add(MapEntry(reader));
}
/// <inheritdoc />
public async Task<SchedulerLogEntity?> GetByLinkAsync(
byte[] link,
CancellationToken cancellationToken = default)
{
const string sql = """
SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
payload_hash, prev_link, link, created_at
FROM scheduler.scheduler_log
WHERE link = @link
""";
return entries;
await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "link", link);
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
return await reader.ReadAsync(cancellationToken).ConfigureAwait(false) ? MapEntry(reader) : null;
}
/// <inheritdoc />
@@ -223,7 +204,6 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
string tenantId,
string? startTHlc,
string? endTHlc,
string? partitionKey = null,
CancellationToken cancellationToken = default)
{
var conditions = new List<string> { "tenant_id = @tenant_id" };
@@ -237,11 +217,6 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
conditions.Add("t_hlc <= @end_t_hlc");
}
if (partitionKey is not null)
{
conditions.Add("partition_key = @partition_key");
}
var sql = $"""
SELECT COUNT(*)
FROM scheduler.scheduler_log
@@ -263,6 +238,51 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
AddParameter(command, "end_t_hlc", endTHlc);
}
var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
return Convert.ToInt32(result);
}
/// <inheritdoc />
public async Task<int> CountByHlcRangeAsync(
string tenantId,
string? startTHlc,
string? endTHlc,
string? partitionKey,
CancellationToken cancellationToken = default)
{
var conditions = new List<string> { "tenant_id = @tenant_id" };
if (startTHlc is not null)
{
conditions.Add("t_hlc >= @start_t_hlc");
}
if (endTHlc is not null)
{
conditions.Add("t_hlc <= @end_t_hlc");
}
if (partitionKey is not null)
{
conditions.Add("partition_key = @partition_key");
}
var sql = $"""
SELECT COUNT(*)
FROM scheduler.scheduler_log
WHERE {string.Join(" AND ", conditions)}
""";
await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "tenant_id", tenantId);
if (startTHlc is not null)
{
AddParameter(command, "start_t_hlc", startTHlc);
}
if (endTHlc is not null)
{
AddParameter(command, "end_t_hlc", endTHlc);
}
if (partitionKey is not null)
{
AddParameter(command, "partition_key", partitionKey);
@@ -273,24 +293,118 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
}
/// <inheritdoc />
public async Task<SchedulerLogEntry?> GetByJobIdAsync(
Guid jobId,
public async Task<IReadOnlyList<SchedulerLogEntity>> GetByHlcRangeAsync(
string tenantId,
string? startTHlc,
string? endTHlc,
int limit,
string? partitionKey,
CancellationToken cancellationToken = default)
{
const string sql = """
var conditions = new List<string> { "tenant_id = @tenant_id" };
if (startTHlc is not null)
{
conditions.Add("t_hlc >= @start_t_hlc");
}
if (endTHlc is not null)
{
conditions.Add("t_hlc <= @end_t_hlc");
}
if (partitionKey is not null)
{
conditions.Add("partition_key = @partition_key");
}
var sql = $"""
SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
payload_hash, prev_link, link, created_at
FROM scheduler.scheduler_log
WHERE job_id = @job_id
WHERE {string.Join(" AND ", conditions)}
ORDER BY t_hlc ASC
{(limit > 0 ? "LIMIT @limit" : "")}
""";
await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken)
await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "job_id", jobId);
AddParameter(command, "tenant_id", tenantId);
if (startTHlc is not null)
{
AddParameter(command, "start_t_hlc", startTHlc);
}
if (endTHlc is not null)
{
AddParameter(command, "end_t_hlc", endTHlc);
}
if (partitionKey is not null)
{
AddParameter(command, "partition_key", partitionKey);
}
if (limit > 0)
{
AddParameter(command, "limit", limit);
}
var entries = new List<SchedulerLogEntity>();
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
return await reader.ReadAsync(cancellationToken).ConfigureAwait(false) ? MapEntry(reader) : null;
while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
{
entries.Add(MapEntry(reader));
}
return entries;
}
/// <inheritdoc />
public async Task<IReadOnlyList<SchedulerLogEntity>> GetAfterHlcAsync(
string tenantId,
string afterTHlc,
int limit,
string? partitionKey = null,
CancellationToken cancellationToken = default)
{
// Build the WHERE clause from static fragments only; all values are bound as
// parameters below. The lower bound is exclusive (t_hlc > @after_t_hlc).
var conditions = new List<string>
{
"tenant_id = @tenant_id",
"t_hlc > @after_t_hlc"
};
if (partitionKey is not null)
{
conditions.Add("partition_key = @partition_key");
}
var sql = $"""
SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
payload_hash, prev_link, link, created_at
FROM scheduler.scheduler_log
WHERE {string.Join(" AND ", conditions)}
ORDER BY t_hlc ASC
LIMIT @limit
""";
await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "tenant_id", tenantId);
AddParameter(command, "after_t_hlc", afterTHlc);
AddParameter(command, "limit", limit);
if (partitionKey is not null)
{
AddParameter(command, "partition_key", partitionKey);
}
var entries = new List<SchedulerLogEntity>();
await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
{
entries.Add(MapEntry(reader));
}
return entries;
}
/// <inheritdoc />
@@ -314,12 +428,12 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
AddParameter(command, "job_id", jobId);
var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
return result is true;
return result is true or 1 or 1L;
}
private static SchedulerLogEntry MapEntry(NpgsqlDataReader reader)
private static SchedulerLogEntity MapEntry(NpgsqlDataReader reader)
{
return new SchedulerLogEntry
return new SchedulerLogEntity
{
SeqBigint = reader.GetInt64(0),
TenantId = reader.GetString(1),

View File

@@ -250,6 +250,177 @@ public sealed class SchedulerLogRepository : RepositoryBase<SchedulerDataSource>
return result is int count ? count : 0;
}
/// <inheritdoc />
public async Task<int> CountByHlcRangeAsync(
string tenantId,
string? startTHlc,
string? endTHlc,
string? partitionKey,
CancellationToken cancellationToken = default)
{
// Build the WHERE clause from static fragments only; every user-supplied value
// is bound as a parameter below, so no input is concatenated into the SQL text.
var whereClause = "WHERE tenant_id = @tenant_id";
if (startTHlc is not null)
{
whereClause += " AND t_hlc >= @start_t_hlc";
}
if (endTHlc is not null)
{
whereClause += " AND t_hlc <= @end_t_hlc";
}
if (partitionKey is not null)
{
whereClause += " AND partition_key = @partition_key";
}
var sql = $"""
SELECT COUNT(*)::INT
FROM scheduler.scheduler_log
{whereClause}
""";
await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "tenant_id", tenantId);
if (startTHlc is not null)
{
AddParameter(command, "start_t_hlc", startTHlc);
}
if (endTHlc is not null)
{
AddParameter(command, "end_t_hlc", endTHlc);
}
if (partitionKey is not null)
{
AddParameter(command, "partition_key", partitionKey);
}
var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
// COUNT(*)::INT maps to a 32-bit int. Convert.ToInt32 surfaces an unexpected
// scalar type instead of silently reporting 0 (the previous `result is int`
// type-test did), and matches PostgresSchedulerLogRepository's count methods.
return Convert.ToInt32(result);
}
/// <inheritdoc />
public async Task<IReadOnlyList<SchedulerLogEntity>> GetByHlcRangeAsync(
string tenantId,
string? startTHlc,
string? endTHlc,
int limit,
string? partitionKey,
CancellationToken cancellationToken = default)
{
// Build the WHERE clause from static fragments only; values are bound as
// parameters in the configure callback below. Both HLC bounds are inclusive.
var whereClause = "WHERE tenant_id = @tenant_id";
if (startTHlc is not null)
{
whereClause += " AND t_hlc >= @start_t_hlc";
}
if (endTHlc is not null)
{
whereClause += " AND t_hlc <= @end_t_hlc";
}
if (partitionKey is not null)
{
whereClause += " AND partition_key = @partition_key";
}
// limit == 0 means "no limit": the LIMIT clause (and its parameter) are omitted.
var sql = $"""
SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id, payload_hash, prev_link, link, created_at
FROM scheduler.scheduler_log
{whereClause}
ORDER BY t_hlc ASC
{(limit > 0 ? "LIMIT @limit" : "")}
""";
return await QueryAsync(
tenantId,
sql,
cmd =>
{
// Only bind parameters that appear in the generated SQL.
AddParameter(cmd, "tenant_id", tenantId);
if (startTHlc is not null)
{
AddParameter(cmd, "start_t_hlc", startTHlc);
}
if (endTHlc is not null)
{
AddParameter(cmd, "end_t_hlc", endTHlc);
}
if (partitionKey is not null)
{
AddParameter(cmd, "partition_key", partitionKey);
}
if (limit > 0)
{
AddParameter(cmd, "limit", limit);
}
},
MapSchedulerLogEntry,
cancellationToken).ConfigureAwait(false);
}
/// <inheritdoc />
public async Task<IReadOnlyList<SchedulerLogEntity>> GetAfterHlcAsync(
string tenantId,
string afterTHlc,
int limit,
string? partitionKey = null,
CancellationToken cancellationToken = default)
{
// Exclusive lower bound (t_hlc > @after_t_hlc); only static fragments are
// concatenated — all values are bound as parameters in the callback below.
var whereClause = "WHERE tenant_id = @tenant_id AND t_hlc > @after_t_hlc";
if (partitionKey is not null)
{
whereClause += " AND partition_key = @partition_key";
}
var sql = $"""
SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id, payload_hash, prev_link, link, created_at
FROM scheduler.scheduler_log
{whereClause}
ORDER BY t_hlc ASC
LIMIT @limit
""";
return await QueryAsync(
tenantId,
sql,
cmd =>
{
AddParameter(cmd, "tenant_id", tenantId);
AddParameter(cmd, "after_t_hlc", afterTHlc);
AddParameter(cmd, "limit", limit);
if (partitionKey is not null)
{
AddParameter(cmd, "partition_key", partitionKey);
}
},
MapSchedulerLogEntry,
cancellationToken).ConfigureAwait(false);
}
/// <inheritdoc />
public async Task<bool> ExistsAsync(
string tenantId,
Guid jobId,
CancellationToken cancellationToken = default)
{
// Idempotency probe: EXISTS avoids scanning beyond the first matching row.
const string sql = """
SELECT EXISTS(
SELECT 1 FROM scheduler.scheduler_log
WHERE tenant_id = @tenant_id AND job_id = @job_id
)
""";
await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
.ConfigureAwait(false);
await using var command = CreateCommand(sql, connection);
AddParameter(command, "tenant_id", tenantId);
AddParameter(command, "job_id", jobId);
var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
// EXISTS yields a boolean; the 1/1L arms defensively accept integer truthy
// values in case the provider maps the scalar differently.
return result is true or 1 or 1L;
}
private static SchedulerLogEntity MapSchedulerLogEntry(NpgsqlDataReader reader)
{
return new SchedulerLogEntity

View File

@@ -28,6 +28,7 @@
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.EfCore\StellaOps.Infrastructure.EfCore.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.HybridLogicalClock\StellaOps.HybridLogicalClock.csproj" />
<ProjectReference Include="..\..\..\__Libraries\StellaOps.Canonical.Json\StellaOps.Canonical.Json.csproj" />
</ItemGroup>
<!-- Embed SQL migrations as resources -->

View File

@@ -123,7 +123,7 @@ public sealed class BatchSnapshotService : IBatchSnapshotService
}
else
{
var digest = ComputeSnapshotDigest(snapshot, jobs);
var digest = ComputeSnapshotDigest(ToEntity(snapshot), jobs);
var signed = await _signer(digest, cancellationToken).ConfigureAwait(false);
snapshot = snapshot with
{
@@ -133,8 +133,9 @@ public sealed class BatchSnapshotService : IBatchSnapshotService
}
}
// Persist
await _snapshotRepository.InsertAsync(snapshot, cancellationToken).ConfigureAwait(false);
// Convert to entity and persist
var entity = ToEntity(snapshot);
await _snapshotRepository.InsertAsync(entity, cancellationToken).ConfigureAwait(false);
_logger.LogInformation(
"Batch snapshot created. BatchId={BatchId}, TenantId={TenantId}, Range=[{Start}, {End}], JobCount={JobCount}, Signed={Signed}",
@@ -149,20 +150,22 @@ public sealed class BatchSnapshotService : IBatchSnapshotService
}
/// <inheritdoc />
public Task<BatchSnapshot?> GetSnapshotAsync(
public async Task<BatchSnapshot?> GetSnapshotAsync(
Guid batchId,
CancellationToken cancellationToken = default)
{
return _snapshotRepository.GetByIdAsync(batchId, cancellationToken);
var entity = await _snapshotRepository.GetByIdAsync(batchId, cancellationToken).ConfigureAwait(false);
return entity is null ? null : FromEntity(entity);
}
/// <inheritdoc />
public Task<BatchSnapshot?> GetLatestSnapshotAsync(
public async Task<BatchSnapshot?> GetLatestSnapshotAsync(
string tenantId,
CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);
return _snapshotRepository.GetLatestAsync(tenantId, cancellationToken);
var entity = await _snapshotRepository.GetLatestAsync(tenantId, cancellationToken).ConfigureAwait(false);
return entity is null ? null : FromEntity(entity);
}
/// <inheritdoc />
@@ -189,8 +192,6 @@ public sealed class BatchSnapshotService : IBatchSnapshotService
snapshot.TenantId,
snapshot.RangeStartT,
snapshot.RangeEndT,
limit: 0,
partitionKey: null,
cancellationToken).ConfigureAwait(false);
// Verify job count
@@ -271,7 +272,7 @@ public sealed class BatchSnapshotService : IBatchSnapshotService
/// Computes a deterministic digest over the snapshot and its jobs.
/// This is the canonical representation used for both signing and verification.
/// </summary>
internal static byte[] ComputeSnapshotDigest(BatchSnapshot snapshot, IReadOnlyList<SchedulerLogEntry> jobs)
internal static byte[] ComputeSnapshotDigest(BatchSnapshotEntity snapshot, IReadOnlyList<SchedulerLogEntity> jobs)
{
// Create canonical representation for hashing
var digestInput = new
@@ -295,6 +296,38 @@ public sealed class BatchSnapshotService : IBatchSnapshotService
return SHA256.HashData(Encoding.UTF8.GetBytes(canonical));
}
// Maps the domain snapshot to its persistence entity, field-for-field (no transformation).
private static BatchSnapshotEntity ToEntity(BatchSnapshot snapshot)
{
return new BatchSnapshotEntity
{
BatchId = snapshot.BatchId,
TenantId = snapshot.TenantId,
RangeStartT = snapshot.RangeStartT,
RangeEndT = snapshot.RangeEndT,
HeadLink = snapshot.HeadLink,
JobCount = snapshot.JobCount,
CreatedAt = snapshot.CreatedAt,
SignedBy = snapshot.SignedBy,
Signature = snapshot.Signature
};
}
// Maps the persistence entity back to the domain snapshot, field-for-field (inverse of ToEntity).
private static BatchSnapshot FromEntity(BatchSnapshotEntity entity)
{
return new BatchSnapshot
{
BatchId = entity.BatchId,
TenantId = entity.TenantId,
RangeStartT = entity.RangeStartT,
RangeEndT = entity.RangeEndT,
HeadLink = entity.HeadLink,
JobCount = entity.JobCount,
CreatedAt = entity.CreatedAt,
SignedBy = entity.SignedBy,
Signature = entity.Signature
};
}
private static bool ByteArrayEquals(byte[]? a, byte[]? b)
{
if (a is null && b is null)

View File

@@ -154,7 +154,7 @@ public sealed class HlcSchedulerDequeueService : IHlcSchedulerDequeueService
}
/// <inheritdoc />
public async Task<SchedulerLogEntry?> GetByJobIdAsync(
public async Task<SchedulerLogEntity?> GetByJobIdAsync(
string tenantId,
Guid jobId,
CancellationToken cancellationToken = default)

View File

@@ -115,7 +115,7 @@ public sealed class HlcSchedulerEnqueueService : IHlcSchedulerEnqueueService
var link = SchedulerChainLinking.ComputeLink(prevLink, jobId, tHlc, payloadHash);
// 7. Insert log entry (atomic with chain head update)
var entry = new SchedulerLogEntry
var entry = new SchedulerLogEntity
{
TenantId = tenantId,
THlc = tHlc.ToSortableString(),
@@ -123,7 +123,8 @@ public sealed class HlcSchedulerEnqueueService : IHlcSchedulerEnqueueService
JobId = jobId,
PayloadHash = payloadHash,
PrevLink = prevLink,
Link = link
Link = link,
CreatedAt = DateTimeOffset.UtcNow // Database will set actual value
};
await _logRepository.InsertWithChainUpdateAsync(entry, cancellationToken).ConfigureAwait(false);

View File

@@ -5,6 +5,7 @@
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Options;
using StellaOps.Scheduler.Persistence.Postgres.Repositories;
namespace StellaOps.Scheduler.Queue.Hlc;

View File

@@ -70,7 +70,7 @@ public interface IHlcSchedulerDequeueService
/// <param name="jobId">The job identifier.</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The scheduler log entry if found, null otherwise.</returns>
Task<Persistence.Postgres.Models.SchedulerLogEntry?> GetByJobIdAsync(
Task<Persistence.Postgres.Models.SchedulerLogEntity?> GetByJobIdAsync(
string tenantId,
Guid jobId,
CancellationToken cancellationToken = default);

View File

@@ -15,7 +15,7 @@ namespace StellaOps.Scheduler.Queue.Hlc;
/// <param name="RangeStartHlc">The HLC start of the queried range (null if unbounded).</param>
/// <param name="RangeEndHlc">The HLC end of the queried range (null if unbounded).</param>
public readonly record struct SchedulerHlcDequeueResult(
IReadOnlyList<SchedulerLogEntry> Entries,
IReadOnlyList<SchedulerLogEntity> Entries,
int TotalAvailable,
HlcTimestamp? RangeStartHlc,
HlcTimestamp? RangeEndHlc);

View File

@@ -541,7 +541,7 @@ internal abstract class NatsSchedulerQueueBase<TMessage> : ISchedulerQueue<TMess
// Parse HLC timestamp if present
HlcTimestamp? hlcTimestamp = null;
if (headers.TryGetValue(SchedulerQueueFields.HlcTimestamp, out var hlcValues) && hlcValues.Count > 0
if (headers.TryGetValue(SchedulerQueueFields.THlc, out var hlcValues) && hlcValues.Count > 0
&& HlcTimestamp.TryParse(hlcValues[0], out var parsedHlc))
{
hlcTimestamp = parsedHlc;
@@ -592,7 +592,7 @@ internal abstract class NatsSchedulerQueueBase<TMessage> : ISchedulerQueue<TMess
// Include HLC timestamp if available
if (hlcTimestamp.HasValue)
{
headers.Add(SchedulerQueueFields.HlcTimestamp, hlcTimestamp.Value.ToSortableString());
headers.Add(SchedulerQueueFields.THlc, hlcTimestamp.Value.ToSortableString());
}
var scheduleId = _payload.GetScheduleId(message);

View File

@@ -3,7 +3,6 @@ using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
using StellaOps.HybridLogicalClock;
using StackExchange.Redis;
using StellaOps.Scheduler.Models;
@@ -17,7 +16,6 @@ internal sealed class RedisSchedulerPlannerQueue
SchedulerRedisQueueOptions redisOptions,
ILogger<RedisSchedulerPlannerQueue> logger,
TimeProvider timeProvider,
IHybridLogicalClock? hlc = null,
Func<ConfigurationOptions, Task<IConnectionMultiplexer>>? connectionFactory = null)
: base(
queueOptions,
@@ -26,7 +24,6 @@ internal sealed class RedisSchedulerPlannerQueue
PlannerPayload.Instance,
logger,
timeProvider,
hlc,
connectionFactory)
{
}

View File

@@ -4,7 +4,6 @@ using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
using StellaOps.HybridLogicalClock;
using StackExchange.Redis;
using StellaOps.Scheduler.Models;
@@ -18,7 +17,6 @@ internal sealed class RedisSchedulerRunnerQueue
SchedulerRedisQueueOptions redisOptions,
ILogger<RedisSchedulerRunnerQueue> logger,
TimeProvider timeProvider,
IHybridLogicalClock? hlc = null,
Func<ConfigurationOptions, Task<IConnectionMultiplexer>>? connectionFactory = null)
: base(
queueOptions,
@@ -27,7 +25,6 @@ internal sealed class RedisSchedulerRunnerQueue
RunnerPayload.Instance,
logger,
timeProvider,
hlc,
connectionFactory)
{
}

View File

@@ -38,8 +38,7 @@ public static class SchedulerQueueServiceCollectionExtensions
options,
options.Redis,
loggerFactory.CreateLogger<RedisSchedulerPlannerQueue>(),
timeProvider,
hlc),
timeProvider),
SchedulerQueueTransportKind.Nats => new NatsSchedulerPlannerQueue(
options,
options.Nats,
@@ -62,8 +61,7 @@ public static class SchedulerQueueServiceCollectionExtensions
options,
options.Redis,
loggerFactory.CreateLogger<RedisSchedulerRunnerQueue>(),
timeProvider,
hlc),
timeProvider),
SchedulerQueueTransportKind.Nats => new NatsSchedulerRunnerQueue(
options,
options.Nats,

View File

@@ -11,6 +11,7 @@
<PackageReference Include="Microsoft.Extensions.Configuration.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Configuration.Binder" />
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Options.DataAnnotations" />
<PackageReference Include="Microsoft.Extensions.Diagnostics.HealthChecks" />
<PackageReference Include="Microsoft.Extensions.Diagnostics.HealthChecks.Abstractions" />
<PackageReference Include="NATS.Client.Core" />

View File

@@ -44,19 +44,22 @@ public sealed class FailureSignatureIndexer : BackgroundService
private readonly IJobHistoryRepository _historyRepository;
private readonly IOptions<FailureSignatureIndexerOptions> _options;
private readonly ILogger<FailureSignatureIndexer> _logger;
private readonly Func<int, int> _randomIndexSource;
public FailureSignatureIndexer(
IFailureSignatureRepository signatureRepository,
IJobRepository jobRepository,
IJobHistoryRepository historyRepository,
IOptions<FailureSignatureIndexerOptions> options,
ILogger<FailureSignatureIndexer> logger)
ILogger<FailureSignatureIndexer> logger,
Func<int, int>? randomIndexSource = null)
{
_signatureRepository = signatureRepository;
_jobRepository = jobRepository;
_historyRepository = historyRepository;
_options = options;
_logger = logger;
_randomIndexSource = randomIndexSource ?? Random.Shared.Next;
}
protected override async Task ExecuteAsync(CancellationToken stoppingToken)
@@ -135,8 +138,8 @@ public sealed class FailureSignatureIndexer : BackgroundService
private async Task PruneOldSignaturesAsync(CancellationToken ct)
{
// Prune is expensive, only run occasionally
var random = Random.Shared.Next(0, 12);
// Prune is expensive, only run occasionally (1 in 12 chance)
var random = _randomIndexSource(12);
if (random != 0)
{
return;

View File

@@ -13,7 +13,7 @@
<ProjectReference Include="../../../__Libraries/StellaOps.TestKit/StellaOps.TestKit.csproj" />
</ItemGroup>
<ItemGroup>
<None Include="..\..\..\..\docs\events\samples\scheduler.rescan.delta@1.sample.json">
<None Include="..\..\..\..\docs\modules\signals\events\samples\scheduler.rescan.delta@1.sample.json">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</None>
</ItemGroup>

View File

@@ -8,20 +8,21 @@ using System.Threading.Tasks;
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using StackExchange.Redis;
using StellaOps.HybridLogicalClock;
using StellaOps.Scheduler.Models;
using StellaOps.Scheduler.Queue.Redis;
using StellaOps.TestKit;
using Testcontainers.Redis;
using Xunit;
using HybridLogicalClockImpl = StellaOps.HybridLogicalClock.HybridLogicalClock;
namespace StellaOps.Scheduler.Queue.Tests;
/// <summary>
/// Integration tests for HLC (Hybrid Logical Clock) integration with scheduler queues.
/// Integration tests for scheduler queues.
/// </summary>
/// <remarks>
/// HLC integration has been moved to the enqueue/dequeue services layer.
/// These tests verify basic queue functionality.
/// </remarks>
[Trait("Category", TestCategories.Integration)]
public sealed class HlcQueueIntegrationTests : IAsyncLifetime
{
@@ -56,7 +57,7 @@ public sealed class HlcQueueIntegrationTests : IAsyncLifetime
}
[Fact]
public async Task PlannerQueue_WithHlc_LeasedMessageContainsHlcTimestamp()
public async Task PlannerQueue_EnqueueAndLease_Works()
{
if (SkipIfUnavailable())
{
@@ -64,14 +65,12 @@ public sealed class HlcQueueIntegrationTests : IAsyncLifetime
}
var options = CreateOptions();
var hlc = new HybridLogicalClockImpl(TimeProvider.System, "test-node-1", new InMemoryHlcStateStore());
await using var queue = new RedisSchedulerPlannerQueue(
options,
options.Redis,
NullLogger<RedisSchedulerPlannerQueue>.Instance,
TimeProvider.System,
hlc,
connectionFactory: async config => (IConnectionMultiplexer)await ConnectionMultiplexer.ConnectAsync(config).ConfigureAwait(false));
var message = CreatePlannerMessage();
@@ -79,19 +78,17 @@ public sealed class HlcQueueIntegrationTests : IAsyncLifetime
var enqueueResult = await queue.EnqueueAsync(message);
enqueueResult.Deduplicated.Should().BeFalse();
var leases = await queue.LeaseAsync(new SchedulerQueueLeaseRequest("planner-hlc", batchSize: 1, options.DefaultLeaseDuration));
var leases = await queue.LeaseAsync(new SchedulerQueueLeaseRequest("planner-test", batchSize: 1, options.DefaultLeaseDuration));
leases.Should().ContainSingle();
var lease = leases[0];
lease.HlcTimestamp.Should().NotBeNull("HLC timestamp should be present when HLC is configured");
lease.HlcTimestamp!.Value.NodeId.Should().Be("test-node-1");
lease.HlcTimestamp.Value.PhysicalTime.Should().BeGreaterThan(0);
lease.Message.Run.Id.Should().Be(message.Run.Id);
await lease.AcknowledgeAsync();
}
[Fact]
public async Task RunnerQueue_WithHlc_LeasedMessageContainsHlcTimestamp()
public async Task RunnerQueue_EnqueueAndLease_Works()
{
if (SkipIfUnavailable())
{
@@ -99,32 +96,29 @@ public sealed class HlcQueueIntegrationTests : IAsyncLifetime
}
var options = CreateOptions();
var hlc = new HybridLogicalClockImpl(TimeProvider.System, "runner-node-1", new InMemoryHlcStateStore());
await using var queue = new RedisSchedulerRunnerQueue(
options,
options.Redis,
NullLogger<RedisSchedulerRunnerQueue>.Instance,
TimeProvider.System,
hlc,
connectionFactory: async config => (IConnectionMultiplexer)await ConnectionMultiplexer.ConnectAsync(config).ConfigureAwait(false));
var message = CreateRunnerMessage();
await queue.EnqueueAsync(message);
var leases = await queue.LeaseAsync(new SchedulerQueueLeaseRequest("runner-hlc", batchSize: 1, options.DefaultLeaseDuration));
var leases = await queue.LeaseAsync(new SchedulerQueueLeaseRequest("runner-test", batchSize: 1, options.DefaultLeaseDuration));
leases.Should().ContainSingle();
var lease = leases[0];
lease.HlcTimestamp.Should().NotBeNull("HLC timestamp should be present when HLC is configured");
lease.HlcTimestamp!.Value.NodeId.Should().Be("runner-node-1");
lease.Message.SegmentId.Should().Be(message.SegmentId);
await lease.AcknowledgeAsync();
}
[Fact]
public async Task PlannerQueue_WithoutHlc_LeasedMessageHasNullTimestamp()
public async Task PlannerQueue_MultipleMessages_AllLeased()
{
if (SkipIfUnavailable())
{
@@ -133,85 +127,32 @@ public sealed class HlcQueueIntegrationTests : IAsyncLifetime
var options = CreateOptions();
// No HLC provided
await using var queue = new RedisSchedulerPlannerQueue(
options,
options.Redis,
NullLogger<RedisSchedulerPlannerQueue>.Instance,
TimeProvider.System,
hlc: null,
connectionFactory: async config => (IConnectionMultiplexer)await ConnectionMultiplexer.ConnectAsync(config).ConfigureAwait(false));
var message = CreatePlannerMessage();
await queue.EnqueueAsync(message);
var leases = await queue.LeaseAsync(new SchedulerQueueLeaseRequest("planner-no-hlc", batchSize: 1, options.DefaultLeaseDuration));
leases.Should().ContainSingle();
var lease = leases[0];
lease.HlcTimestamp.Should().BeNull("HLC timestamp should be null when HLC is not configured");
await lease.AcknowledgeAsync();
}
[Fact]
public async Task HlcTimestamp_IsMonotonicallyIncreasing_AcrossEnqueues()
{
if (SkipIfUnavailable())
{
return;
}
var options = CreateOptions();
var hlc = new HybridLogicalClockImpl(TimeProvider.System, "monotonic-test", new InMemoryHlcStateStore());
await using var queue = new RedisSchedulerPlannerQueue(
options,
options.Redis,
NullLogger<RedisSchedulerPlannerQueue>.Instance,
TimeProvider.System,
hlc,
connectionFactory: async config => (IConnectionMultiplexer)await ConnectionMultiplexer.ConnectAsync(config).ConfigureAwait(false));
// Enqueue multiple messages
var messages = new List<PlannerQueueMessage>();
for (int i = 0; i < 5; i++)
{
messages.Add(CreatePlannerMessage(suffix: i.ToString()));
}
foreach (var msg in messages)
{
var msg = CreatePlannerMessage(suffix: i.ToString());
await queue.EnqueueAsync(msg);
}
// Lease all messages
var leases = await queue.LeaseAsync(new SchedulerQueueLeaseRequest("monotonic-consumer", batchSize: 10, options.DefaultLeaseDuration));
var leases = await queue.LeaseAsync(new SchedulerQueueLeaseRequest("multi-consumer", batchSize: 10, options.DefaultLeaseDuration));
leases.Should().HaveCount(5);
// Verify HLC timestamps are monotonically increasing
HlcTimestamp? previousHlc = null;
foreach (var lease in leases)
{
lease.HlcTimestamp.Should().NotBeNull();
if (previousHlc.HasValue)
{
var current = lease.HlcTimestamp!.Value;
var prev = previousHlc.Value;
// Current should be greater than previous
(current > prev).Should().BeTrue(
$"HLC {current} should be greater than {prev}");
}
previousHlc = lease.HlcTimestamp;
await lease.AcknowledgeAsync();
}
}
[Fact]
public async Task HlcTimestamp_SortableString_ParsesCorrectly()
public async Task PlannerQueue_Idempotency_DuplicatesAreDetected()
{
if (SkipIfUnavailable())
{
@@ -219,87 +160,66 @@ public sealed class HlcQueueIntegrationTests : IAsyncLifetime
}
var options = CreateOptions();
var hlc = new HybridLogicalClockImpl(TimeProvider.System, "parse-test-node", new InMemoryHlcStateStore());
await using var queue = new RedisSchedulerPlannerQueue(
options,
options.Redis,
NullLogger<RedisSchedulerPlannerQueue>.Instance,
TimeProvider.System,
hlc,
connectionFactory: async config => (IConnectionMultiplexer)await ConnectionMultiplexer.ConnectAsync(config).ConfigureAwait(false));
var message = CreatePlannerMessage();
await queue.EnqueueAsync(message);
var leases = await queue.LeaseAsync(new SchedulerQueueLeaseRequest("parse-consumer", batchSize: 1, options.DefaultLeaseDuration));
// First enqueue
var first = await queue.EnqueueAsync(message);
first.Deduplicated.Should().BeFalse();
// Second enqueue with same message should be deduplicated
var second = await queue.EnqueueAsync(message);
second.Deduplicated.Should().BeTrue();
// Only one message should be leased
var leases = await queue.LeaseAsync(new SchedulerQueueLeaseRequest("dedup-consumer", batchSize: 10, options.DefaultLeaseDuration));
leases.Should().ContainSingle();
var lease = leases[0];
lease.HlcTimestamp.Should().NotBeNull();
// Verify round-trip through sortable string
var hlcValue = lease.HlcTimestamp!.Value;
var sortableString = hlcValue.ToSortableString();
HlcTimestamp.TryParse(sortableString, out var parsed).Should().BeTrue();
parsed.Should().Be(hlcValue);
await lease.AcknowledgeAsync();
await leases[0].AcknowledgeAsync();
}
[Fact]
public async Task HlcTimestamp_DeterministicForSameInput_OnSameNode()
public async Task RunnerQueue_Ordering_PreservedInLeases()
{
if (SkipIfUnavailable())
{
return;
}
// This test verifies that HLC generates consistent timestamps
// by checking that timestamps from the same node use the same node ID
// and that logical counters increment correctly at same physical time
var options = CreateOptions();
var hlc = new HybridLogicalClockImpl(TimeProvider.System, "determinism-node", new InMemoryHlcStateStore());
await using var queue = new RedisSchedulerPlannerQueue(
await using var queue = new RedisSchedulerRunnerQueue(
options,
options.Redis,
NullLogger<RedisSchedulerPlannerQueue>.Instance,
NullLogger<RedisSchedulerRunnerQueue>.Instance,
TimeProvider.System,
hlc,
connectionFactory: async config => (IConnectionMultiplexer)await ConnectionMultiplexer.ConnectAsync(config).ConfigureAwait(false));
// Enqueue rapidly to potentially hit same physical time
var timestamps = new List<HlcTimestamp>();
for (int i = 0; i < 10; i++)
// Enqueue messages with sequential segment IDs
var segmentIds = new List<string>();
for (int i = 0; i < 5; i++)
{
var message = CreatePlannerMessage(suffix: $"determinism-{i}");
await queue.EnqueueAsync(message);
var segmentId = $"segment-order-{i:D3}";
segmentIds.Add(segmentId);
await queue.EnqueueAsync(CreateRunnerMessage(segmentId));
}
var leases = await queue.LeaseAsync(new SchedulerQueueLeaseRequest("determinism-consumer", batchSize: 20, options.DefaultLeaseDuration));
leases.Should().HaveCount(10);
// Lease all messages
var leases = await queue.LeaseAsync(new SchedulerQueueLeaseRequest("order-consumer", batchSize: 10, options.DefaultLeaseDuration));
leases.Should().HaveCount(5);
foreach (var lease in leases)
// Verify ordering is preserved
for (int i = 0; i < leases.Count; i++)
{
lease.HlcTimestamp.Should().NotBeNull();
timestamps.Add(lease.HlcTimestamp!.Value);
await lease.AcknowledgeAsync();
}
// All timestamps should have same node ID
foreach (var ts in timestamps)
{
ts.NodeId.Should().Be("determinism-node");
}
// Verify strict ordering (no duplicates)
for (int i = 1; i < timestamps.Count; i++)
{
(timestamps[i] > timestamps[i - 1]).Should().BeTrue(
$"Timestamp {i} ({timestamps[i]}) should be greater than {i - 1} ({timestamps[i - 1]})");
leases[i].Message.SegmentId.Should().Be(segmentIds[i]);
await leases[i].AcknowledgeAsync();
}
}
@@ -321,18 +241,18 @@ public sealed class HlcQueueIntegrationTests : IAsyncLifetime
InitializationTimeout = TimeSpan.FromSeconds(10),
Planner = new RedisSchedulerStreamOptions
{
Stream = $"scheduler:hlc-test:planner:{unique}",
ConsumerGroup = $"planner-hlc-{unique}",
DeadLetterStream = $"scheduler:hlc-test:planner:{unique}:dead",
IdempotencyKeyPrefix = $"scheduler:hlc-test:planner:{unique}:idemp:",
Stream = $"scheduler:test:planner:{unique}",
ConsumerGroup = $"planner-test-{unique}",
DeadLetterStream = $"scheduler:test:planner:{unique}:dead",
IdempotencyKeyPrefix = $"scheduler:test:planner:{unique}:idemp:",
IdempotencyWindow = TimeSpan.FromMinutes(5)
},
Runner = new RedisSchedulerStreamOptions
{
Stream = $"scheduler:hlc-test:runner:{unique}",
ConsumerGroup = $"runner-hlc-{unique}",
DeadLetterStream = $"scheduler:hlc-test:runner:{unique}:dead",
IdempotencyKeyPrefix = $"scheduler:hlc-test:runner:{unique}:idemp:",
Stream = $"scheduler:test:runner:{unique}",
ConsumerGroup = $"runner-test-{unique}",
DeadLetterStream = $"scheduler:test:runner:{unique}:dead",
IdempotencyKeyPrefix = $"scheduler:test:runner:{unique}:idemp:",
IdempotencyWindow = TimeSpan.FromMinutes(5)
}
}
@@ -361,17 +281,17 @@ public sealed class HlcQueueIntegrationTests : IAsyncLifetime
private static PlannerQueueMessage CreatePlannerMessage(string suffix = "")
{
var id = string.IsNullOrEmpty(suffix) ? "run-hlc-test" : $"run-hlc-test-{suffix}";
var id = string.IsNullOrEmpty(suffix) ? "run-test" : $"run-test-{suffix}";
var schedule = new Schedule(
id: "sch-hlc-test",
tenantId: "tenant-hlc",
name: "HLC Test",
id: "sch-test",
tenantId: "tenant-test",
name: "Test",
enabled: true,
cronExpression: "0 0 * * *",
timezone: "UTC",
mode: ScheduleMode.AnalysisOnly,
selection: new Selector(SelectorScope.AllImages, tenantId: "tenant-hlc"),
selection: new Selector(SelectorScope.AllImages, tenantId: "tenant-test"),
onlyIf: ScheduleOnlyIf.Default,
notify: ScheduleNotify.Default,
limits: ScheduleLimits.Default,
@@ -382,7 +302,7 @@ public sealed class HlcQueueIntegrationTests : IAsyncLifetime
var run = new Run(
id: id,
tenantId: "tenant-hlc",
tenantId: "tenant-test",
trigger: RunTrigger.Manual,
state: RunState.Planning,
stats: RunStats.Empty,
@@ -391,7 +311,7 @@ public sealed class HlcQueueIntegrationTests : IAsyncLifetime
scheduleId: schedule.Id);
var impactSet = new ImpactSet(
selector: new Selector(SelectorScope.AllImages, tenantId: "tenant-hlc"),
selector: new Selector(SelectorScope.AllImages, tenantId: "tenant-test"),
images: new[]
{
new ImpactImage(
@@ -405,23 +325,23 @@ public sealed class HlcQueueIntegrationTests : IAsyncLifetime
generatedAt: DateTimeOffset.UtcNow,
total: 1);
return new PlannerQueueMessage(run, impactSet, schedule, correlationId: $"corr-hlc-{suffix}");
return new PlannerQueueMessage(run, impactSet, schedule, correlationId: $"corr-{suffix}");
}
private static RunnerSegmentQueueMessage CreateRunnerMessage()
private static RunnerSegmentQueueMessage CreateRunnerMessage(string? segmentId = null)
{
return new RunnerSegmentQueueMessage(
segmentId: "segment-hlc-test",
runId: "run-hlc-test",
tenantId: "tenant-hlc",
segmentId: segmentId ?? "segment-test",
runId: "run-test",
tenantId: "tenant-test",
imageDigests: new[]
{
"sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"
},
scheduleId: "sch-hlc-test",
scheduleId: "sch-test",
ratePerSecond: 10,
usageOnly: true,
attributes: new Dictionary<string, string> { ["priority"] = "normal" },
correlationId: "corr-runner-hlc");
correlationId: "corr-runner");
}
}

View File

@@ -62,7 +62,6 @@ public sealed class RedisSchedulerQueueTests : IAsyncLifetime
options.Redis,
NullLogger<RedisSchedulerPlannerQueue>.Instance,
TimeProvider.System,
hlc: null,
connectionFactory: async config => (IConnectionMultiplexer)await ConnectionMultiplexer.ConnectAsync(config).ConfigureAwait(false));
var message = TestData.CreatePlannerMessage();
@@ -102,7 +101,6 @@ public sealed class RedisSchedulerQueueTests : IAsyncLifetime
options.Redis,
NullLogger<RedisSchedulerRunnerQueue>.Instance,
TimeProvider.System,
hlc: null,
connectionFactory: async config => (IConnectionMultiplexer)await ConnectionMultiplexer.ConnectAsync(config).ConfigureAwait(false));
var message = TestData.CreateRunnerMessage();
@@ -138,7 +136,6 @@ public sealed class RedisSchedulerQueueTests : IAsyncLifetime
options.Redis,
NullLogger<RedisSchedulerPlannerQueue>.Instance,
TimeProvider.System,
hlc: null,
connectionFactory: async config => (IConnectionMultiplexer)await ConnectionMultiplexer.ConnectAsync(config).ConfigureAwait(false));
var message = TestData.CreatePlannerMessage();
@@ -173,7 +170,6 @@ public sealed class RedisSchedulerQueueTests : IAsyncLifetime
options.Redis,
NullLogger<RedisSchedulerPlannerQueue>.Instance,
TimeProvider.System,
hlc: null,
connectionFactory: async config => (IConnectionMultiplexer)await ConnectionMultiplexer.ConnectAsync(config).ConfigureAwait(false));
var message = TestData.CreatePlannerMessage();
@@ -212,7 +208,6 @@ public sealed class RedisSchedulerQueueTests : IAsyncLifetime
options.Redis,
NullLogger<RedisSchedulerRunnerQueue>.Instance,
TimeProvider.System,
hlc: null,
connectionFactory: async config => (IConnectionMultiplexer)await ConnectionMultiplexer.ConnectAsync(config).ConfigureAwait(false));
var message = TestData.CreateRunnerMessage();