sprints and audit work
@@ -1,56 +0,0 @@
// <copyright file="SchedulerLogEntry.cs" company="StellaOps">
// Copyright (c) StellaOps. Licensed under AGPL-3.0-or-later.
// </copyright>

namespace StellaOps.Scheduler.Persistence.Postgres.Models;

/// <summary>
/// Represents an HLC-ordered, chain-linked scheduler log entry.
/// </summary>
public sealed record SchedulerLogEntry
{
    /// <summary>
    /// Storage sequence number (not authoritative for ordering).
    /// </summary>
    public long SeqBigint { get; init; }

    /// <summary>
    /// Tenant identifier.
    /// </summary>
    public required string TenantId { get; init; }

    /// <summary>
    /// HLC timestamp in sortable string format.
    /// </summary>
    public required string THlc { get; init; }

    /// <summary>
    /// Optional queue partition key.
    /// </summary>
    public string PartitionKey { get; init; } = string.Empty;

    /// <summary>
    /// Job identifier (deterministic from payload).
    /// </summary>
    public required Guid JobId { get; init; }

    /// <summary>
    /// SHA-256 hash of the canonical payload JSON.
    /// </summary>
    public required byte[] PayloadHash { get; init; }

    /// <summary>
    /// Previous chain link (null for first entry in chain).
    /// </summary>
    public byte[]? PrevLink { get; init; }

    /// <summary>
    /// Chain link: Hash(prev_link || job_id || t_hlc || payload_hash).
    /// </summary>
    public required byte[] Link { get; init; }

    /// <summary>
    /// Timestamp when the entry was created.
    /// </summary>
    public DateTimeOffset CreatedAt { get; init; }
}
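Note: the Link doc-comment above documents the chain formula Hash(prev_link || job_id || t_hlc || payload_hash). A minimal sketch of that computation follows, assuming SHA-256, UTF-8 encoding of t_hlc, and Guid.ToByteArray() ordering for job_id; the actual implementation used further down is SchedulerChainLinking.ComputeLink, whose exact byte layout is not shown in this diff.

// Hypothetical sketch only; not the SchedulerChainLinking implementation.
using System;
using System.Security.Cryptography;
using System.Text;

internal static class ChainLinkSketch
{
    public static byte[] ComputeLink(byte[]? prevLink, Guid jobId, string tHlc, byte[] payloadHash)
    {
        using var hash = IncrementalHash.CreateHash(HashAlgorithmName.SHA256);
        if (prevLink is not null)
        {
            hash.AppendData(prevLink);                      // prev_link (omitted for the first entry)
        }
        hash.AppendData(jobId.ToByteArray());               // job_id
        hash.AppendData(Encoding.UTF8.GetBytes(tHlc));      // t_hlc in sortable string form
        hash.AppendData(payloadHash);                       // SHA-256 of the canonical payload JSON
        return hash.GetHashAndReset();
    }
}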
@@ -49,6 +49,38 @@ public interface ISchedulerLogRepository
        string? endTHlc,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets log entries within an HLC range with additional filtering.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="startTHlc">Start HLC (inclusive, null for no lower bound).</param>
    /// <param name="endTHlc">End HLC (inclusive, null for no upper bound).</param>
    /// <param name="limit">Maximum entries to return (0 for no limit).</param>
    /// <param name="partitionKey">Optional partition key filter.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task<IReadOnlyList<SchedulerLogEntity>> GetByHlcRangeAsync(
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        int limit,
        string? partitionKey,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets log entries after a given HLC timestamp.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="afterTHlc">Start after this HLC (exclusive).</param>
    /// <param name="limit">Maximum entries to return.</param>
    /// <param name="partitionKey">Optional partition key filter.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task<IReadOnlyList<SchedulerLogEntity>> GetAfterHlcAsync(
        string tenantId,
        string afterTHlc,
        int limit,
        string? partitionKey = null,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Gets a log entry by job ID.
    /// </summary>
@@ -71,4 +103,31 @@ public interface ISchedulerLogRepository
        string? startTHlc,
        string? endTHlc,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Counts entries in an HLC range with partition filter.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="startTHlc">Start HLC (inclusive, null for no lower bound).</param>
    /// <param name="endTHlc">End HLC (inclusive, null for no upper bound).</param>
    /// <param name="partitionKey">Optional partition key filter.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    Task<int> CountByHlcRangeAsync(
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        string? partitionKey,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Checks if a job entry already exists for idempotency.
    /// </summary>
    /// <param name="tenantId">Tenant identifier.</param>
    /// <param name="jobId">Job identifier.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>True if job exists.</returns>
    Task<bool> ExistsAsync(
        string tenantId,
        Guid jobId,
        CancellationToken cancellationToken = default);
}

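Note: GetAfterHlcAsync is exclusive of afterTHlc and capped by limit, so it composes naturally into a cursor loop. A minimal consumption sketch follows; the repository instance, the starting cursor value, and the Process handler are hypothetical.

var cursor = startTHlc;                              // hypothetical starting cursor (sortable HLC string)
IReadOnlyList<SchedulerLogEntity> page;
do
{
    page = await repository.GetAfterHlcAsync(tenantId, cursor, limit: 500, cancellationToken: ct);
    foreach (var entry in page)
    {
        Process(entry);                              // hypothetical handler
    }
    if (page.Count > 0)
    {
        cursor = page[^1].THlc;                      // advance the exclusive cursor to the last entry seen
    }
} while (page.Count > 0);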
@@ -23,7 +23,7 @@ public sealed class PostgresBatchSnapshotRepository : RepositoryBase<SchedulerDa
    }

    /// <inheritdoc />
    public async Task InsertAsync(BatchSnapshot snapshot, CancellationToken cancellationToken = default)
    public async Task InsertAsync(BatchSnapshotEntity snapshot, CancellationToken cancellationToken = default)
    {
        const string sql = """
            INSERT INTO scheduler.batch_snapshot (
@@ -53,7 +53,7 @@ public sealed class PostgresBatchSnapshotRepository : RepositoryBase<SchedulerDa
    }

    /// <inheritdoc />
    public async Task<BatchSnapshot?> GetByIdAsync(Guid batchId, CancellationToken cancellationToken = default)
    public async Task<BatchSnapshotEntity?> GetByIdAsync(Guid batchId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT batch_id, tenant_id, range_start_t, range_end_t, head_link,
@@ -72,7 +72,40 @@ public sealed class PostgresBatchSnapshotRepository : RepositoryBase<SchedulerDa
    }

    /// <inheritdoc />
    public async Task<BatchSnapshot?> GetLatestAsync(string tenantId, CancellationToken cancellationToken = default)
    public async Task<IReadOnlyList<BatchSnapshotEntity>> GetByTenantAsync(
        string tenantId,
        int limit = 100,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT batch_id, tenant_id, range_start_t, range_end_t, head_link,
                   job_count, created_at, signed_by, signature
            FROM scheduler.batch_snapshot
            WHERE tenant_id = @tenant_id
            ORDER BY created_at DESC
            LIMIT @limit
            """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "limit", limit);

        var snapshots = new List<BatchSnapshotEntity>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);

        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            snapshots.Add(MapSnapshot(reader));
        }

        return snapshots;
    }

    /// <inheritdoc />
    public async Task<BatchSnapshotEntity?> GetLatestAsync(string tenantId, CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT batch_id, tenant_id, range_start_t, range_end_t, head_link,
@@ -93,46 +126,7 @@ public sealed class PostgresBatchSnapshotRepository : RepositoryBase<SchedulerDa
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<BatchSnapshot>> GetByTimeRangeAsync(
        string tenantId,
        DateTimeOffset startTime,
        DateTimeOffset endTime,
        int limit = 100,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT batch_id, tenant_id, range_start_t, range_end_t, head_link,
                   job_count, created_at, signed_by, signature
            FROM scheduler.batch_snapshot
            WHERE tenant_id = @tenant_id
              AND created_at >= @start_time
              AND created_at <= @end_time
            ORDER BY created_at DESC
            LIMIT @limit
            """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "start_time", startTime);
        AddParameter(command, "end_time", endTime);
        AddParameter(command, "limit", limit);

        var snapshots = new List<BatchSnapshot>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);

        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            snapshots.Add(MapSnapshot(reader));
        }

        return snapshots;
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<BatchSnapshot>> GetContainingHlcAsync(
    public async Task<IReadOnlyList<BatchSnapshotEntity>> GetContainingHlcAsync(
        string tenantId,
        string tHlc,
        CancellationToken cancellationToken = default)
@@ -154,7 +148,7 @@ public sealed class PostgresBatchSnapshotRepository : RepositoryBase<SchedulerDa
        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "t_hlc", tHlc);

        var snapshots = new List<BatchSnapshot>();
        var snapshots = new List<BatchSnapshotEntity>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);

        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
@@ -165,9 +159,9 @@ public sealed class PostgresBatchSnapshotRepository : RepositoryBase<SchedulerDa
        return snapshots;
    }

    private static BatchSnapshot MapSnapshot(NpgsqlDataReader reader)
    private static BatchSnapshotEntity MapSnapshot(NpgsqlDataReader reader)
    {
        return new BatchSnapshot
        return new BatchSnapshotEntity
        {
            BatchId = reader.GetGuid(0),
            TenantId = reader.GetString(1),

@@ -46,13 +46,13 @@ public sealed class PostgresChainHeadRepository : RepositoryBase<SchedulerDataSo
    }

    /// <inheritdoc />
    public async Task<ChainHead?> GetAsync(
    public async Task<ChainHeadEntity?> GetAsync(
        string tenantId,
        string partitionKey,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT tenant_id, partition_key, last_link, last_t_hlc, last_job_id, updated_at
            SELECT tenant_id, partition_key, last_link, last_t_hlc, updated_at
            FROM scheduler.chain_heads
            WHERE tenant_id = @tenant_id AND partition_key = @partition_key
            """;
@@ -69,12 +69,45 @@ public sealed class PostgresChainHeadRepository : RepositoryBase<SchedulerDataSo
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<ChainHead>> GetAllForTenantAsync(
    public async Task<bool> UpsertAsync(
        string tenantId,
        string partitionKey,
        byte[] newLink,
        string newTHlc,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            INSERT INTO scheduler.chain_heads (tenant_id, partition_key, last_link, last_t_hlc, updated_at)
            VALUES (@tenant_id, @partition_key, @last_link, @last_t_hlc, @updated_at)
            ON CONFLICT (tenant_id, partition_key)
            DO UPDATE SET
                last_link = @last_link,
                last_t_hlc = @last_t_hlc,
                updated_at = @updated_at
            WHERE scheduler.chain_heads.last_t_hlc < @last_t_hlc
            """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "writer", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "partition_key", partitionKey);
        AddParameter(command, "last_link", newLink);
        AddParameter(command, "last_t_hlc", newTHlc);
        AddParameter(command, "updated_at", DateTimeOffset.UtcNow);

        var rowsAffected = await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        return rowsAffected > 0;
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<ChainHeadEntity>> GetAllForTenantAsync(
        string tenantId,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT tenant_id, partition_key, last_link, last_t_hlc, last_job_id, updated_at
            SELECT tenant_id, partition_key, last_link, last_t_hlc, updated_at
            FROM scheduler.chain_heads
            WHERE tenant_id = @tenant_id
            ORDER BY partition_key
@@ -85,7 +118,7 @@ public sealed class PostgresChainHeadRepository : RepositoryBase<SchedulerDataSo
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "tenant_id", tenantId);

        var heads = new List<ChainHead>();
        var heads = new List<ChainHeadEntity>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);

        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
@@ -96,16 +129,15 @@ public sealed class PostgresChainHeadRepository : RepositoryBase<SchedulerDataSo
        return heads;
    }

    private static ChainHead MapChainHead(NpgsqlDataReader reader)
    private static ChainHeadEntity MapChainHead(NpgsqlDataReader reader)
    {
        return new ChainHead
        return new ChainHeadEntity
        {
            TenantId = reader.GetString(0),
            PartitionKey = reader.GetString(1),
            LastLink = reader.GetFieldValue<byte[]>(2),
            LastTHlc = reader.GetString(3),
            LastJobId = reader.GetGuid(4),
            UpdatedAt = reader.GetFieldValue<DateTimeOffset>(5)
            UpdatedAt = reader.GetFieldValue<DateTimeOffset>(4)
        };
    }
}

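Note: the conditional DO UPDATE ... WHERE clause in UpsertAsync only lets the chain head advance when the incoming HLC is strictly newer, and the bool return surfaces whether the write took effect. A minimal caller sketch under that reading follows; chainHeadRepository is a hypothetical instance of this repository.

var advanced = await chainHeadRepository.UpsertAsync(tenantId, partitionKey, newLink, newTHlc, ct);
if (!advanced)
{
    // Either a concurrent writer already recorded a newer t_hlc, or this update is stale.
    // Treat as a no-op rather than an error; the chain head is monotonic per (tenant, partition).
}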
@@ -23,8 +23,8 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
    }

    /// <inheritdoc />
    public async Task<long> InsertWithChainUpdateAsync(
        SchedulerLogEntry entry,
    public async Task<SchedulerLogEntity> InsertWithChainUpdateAsync(
        SchedulerLogEntity entry,
        CancellationToken cancellationToken = default)
    {
        // Use the stored function for atomic insert + chain head update
@@ -53,11 +53,13 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
        AddParameter(command, "link", entry.Link);

        var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        return Convert.ToInt64(result);
        var seqBigint = Convert.ToInt64(result);

        return entry with { SeqBigint = seqBigint };
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<SchedulerLogEntry>> GetByHlcOrderAsync(
    public async Task<IReadOnlyList<SchedulerLogEntity>> GetByHlcOrderAsync(
        string tenantId,
        string? partitionKey,
        int limit,
@@ -92,7 +94,7 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
            AddParameter(command, "partition_key", partitionKey);
        }

        var entries = new List<SchedulerLogEntry>();
        var entries = new List<SchedulerLogEntity>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);

        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
@@ -104,12 +106,10 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<SchedulerLogEntry>> GetByHlcRangeAsync(
    public async Task<IReadOnlyList<SchedulerLogEntity>> GetByHlcRangeAsync(
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        int limit = 0,
        string? partitionKey = null,
        CancellationToken cancellationToken = default)
    {
        var conditions = new List<string> { "tenant_id = @tenant_id" };
@@ -123,19 +123,12 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
            conditions.Add("t_hlc <= @end_t_hlc");
        }

        if (partitionKey is not null)
        {
            conditions.Add("partition_key = @partition_key");
        }

        var limitClause = limit > 0 ? $"LIMIT {limit}" : string.Empty;
        var sql = $"""
            SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
                   payload_hash, prev_link, link, created_at
            FROM scheduler.scheduler_log
            WHERE {string.Join(" AND ", conditions)}
            ORDER BY t_hlc ASC
            {limitClause}
            """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
@@ -153,12 +146,7 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
            AddParameter(command, "end_t_hlc", endTHlc);
        }

        if (partitionKey is not null)
        {
            AddParameter(command, "partition_key", partitionKey);
        }

        var entries = new List<SchedulerLogEntry>();
        var entries = new List<SchedulerLogEntity>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);

        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
@@ -170,52 +158,45 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<SchedulerLogEntry>> GetAfterHlcAsync(
        string tenantId,
        string afterTHlc,
        int limit,
        string? partitionKey = null,
    public async Task<SchedulerLogEntity?> GetByJobIdAsync(
        Guid jobId,
        CancellationToken cancellationToken = default)
    {
        var sql = partitionKey is null
            ? """
              SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
                     payload_hash, prev_link, link, created_at
              FROM scheduler.scheduler_log
              WHERE tenant_id = @tenant_id AND t_hlc > @after_t_hlc
              ORDER BY t_hlc ASC
              LIMIT @limit
              """
            : """
              SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
                     payload_hash, prev_link, link, created_at
              FROM scheduler.scheduler_log
              WHERE tenant_id = @tenant_id AND t_hlc > @after_t_hlc AND partition_key = @partition_key
              ORDER BY t_hlc ASC
              LIMIT @limit
              """;
        const string sql = """
            SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
                   payload_hash, prev_link, link, created_at
            FROM scheduler.scheduler_log
            WHERE job_id = @job_id
            """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "job_id", jobId);

        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "after_t_hlc", afterTHlc);
        AddParameter(command, "limit", limit);
        if (partitionKey is not null)
        {
            AddParameter(command, "partition_key", partitionKey);
        }

        var entries = new List<SchedulerLogEntry>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        return await reader.ReadAsync(cancellationToken).ConfigureAwait(false) ? MapEntry(reader) : null;
    }

        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            entries.Add(MapEntry(reader));
        }
    /// <inheritdoc />
    public async Task<SchedulerLogEntity?> GetByLinkAsync(
        byte[] link,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
                   payload_hash, prev_link, link, created_at
            FROM scheduler.scheduler_log
            WHERE link = @link
            """;

        return entries;
        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "link", link);

        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        return await reader.ReadAsync(cancellationToken).ConfigureAwait(false) ? MapEntry(reader) : null;
    }

    /// <inheritdoc />
@@ -223,7 +204,6 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        string? partitionKey = null,
        CancellationToken cancellationToken = default)
    {
        var conditions = new List<string> { "tenant_id = @tenant_id" };
@@ -237,11 +217,6 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
            conditions.Add("t_hlc <= @end_t_hlc");
        }

        if (partitionKey is not null)
        {
            conditions.Add("partition_key = @partition_key");
        }

        var sql = $"""
            SELECT COUNT(*)
            FROM scheduler.scheduler_log
@@ -263,6 +238,51 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
            AddParameter(command, "end_t_hlc", endTHlc);
        }

        var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        return Convert.ToInt32(result);
    }

    /// <inheritdoc />
    public async Task<int> CountByHlcRangeAsync(
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        string? partitionKey,
        CancellationToken cancellationToken = default)
    {
        var conditions = new List<string> { "tenant_id = @tenant_id" };
        if (startTHlc is not null)
        {
            conditions.Add("t_hlc >= @start_t_hlc");
        }
        if (endTHlc is not null)
        {
            conditions.Add("t_hlc <= @end_t_hlc");
        }
        if (partitionKey is not null)
        {
            conditions.Add("partition_key = @partition_key");
        }

        var sql = $"""
            SELECT COUNT(*)
            FROM scheduler.scheduler_log
            WHERE {string.Join(" AND ", conditions)}
            """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", tenantId);
        if (startTHlc is not null)
        {
            AddParameter(command, "start_t_hlc", startTHlc);
        }
        if (endTHlc is not null)
        {
            AddParameter(command, "end_t_hlc", endTHlc);
        }
        if (partitionKey is not null)
        {
            AddParameter(command, "partition_key", partitionKey);
@@ -273,24 +293,118 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
    }

    /// <inheritdoc />
    public async Task<SchedulerLogEntry?> GetByJobIdAsync(
        Guid jobId,
    public async Task<IReadOnlyList<SchedulerLogEntity>> GetByHlcRangeAsync(
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        int limit,
        string? partitionKey,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
        var conditions = new List<string> { "tenant_id = @tenant_id" };
        if (startTHlc is not null)
        {
            conditions.Add("t_hlc >= @start_t_hlc");
        }
        if (endTHlc is not null)
        {
            conditions.Add("t_hlc <= @end_t_hlc");
        }
        if (partitionKey is not null)
        {
            conditions.Add("partition_key = @partition_key");
        }

        var sql = $"""
            SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
                   payload_hash, prev_link, link, created_at
            FROM scheduler.scheduler_log
            WHERE job_id = @job_id
            WHERE {string.Join(" AND ", conditions)}
            ORDER BY t_hlc ASC
            {(limit > 0 ? "LIMIT @limit" : "")}
            """;

        await using var connection = await DataSource.OpenSystemConnectionAsync(cancellationToken)
        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);
        AddParameter(command, "job_id", jobId);

        AddParameter(command, "tenant_id", tenantId);
        if (startTHlc is not null)
        {
            AddParameter(command, "start_t_hlc", startTHlc);
        }
        if (endTHlc is not null)
        {
            AddParameter(command, "end_t_hlc", endTHlc);
        }
        if (partitionKey is not null)
        {
            AddParameter(command, "partition_key", partitionKey);
        }
        if (limit > 0)
        {
            AddParameter(command, "limit", limit);
        }

        var entries = new List<SchedulerLogEntity>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
        return await reader.ReadAsync(cancellationToken).ConfigureAwait(false) ? MapEntry(reader) : null;

        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            entries.Add(MapEntry(reader));
        }

        return entries;
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<SchedulerLogEntity>> GetAfterHlcAsync(
        string tenantId,
        string afterTHlc,
        int limit,
        string? partitionKey = null,
        CancellationToken cancellationToken = default)
    {
        var conditions = new List<string>
        {
            "tenant_id = @tenant_id",
            "t_hlc > @after_t_hlc"
        };
        if (partitionKey is not null)
        {
            conditions.Add("partition_key = @partition_key");
        }

        var sql = $"""
            SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id,
                   payload_hash, prev_link, link, created_at
            FROM scheduler.scheduler_log
            WHERE {string.Join(" AND ", conditions)}
            ORDER BY t_hlc ASC
            LIMIT @limit
            """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "after_t_hlc", afterTHlc);
        AddParameter(command, "limit", limit);
        if (partitionKey is not null)
        {
            AddParameter(command, "partition_key", partitionKey);
        }

        var entries = new List<SchedulerLogEntity>();
        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);

        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            entries.Add(MapEntry(reader));
        }

        return entries;
    }

    /// <inheritdoc />
@@ -314,12 +428,12 @@ public sealed class PostgresSchedulerLogRepository : RepositoryBase<SchedulerDat
        AddParameter(command, "job_id", jobId);

        var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        return result is true;
        return result is true or 1 or 1L;
    }

    private static SchedulerLogEntry MapEntry(NpgsqlDataReader reader)
    private static SchedulerLogEntity MapEntry(NpgsqlDataReader reader)
    {
        return new SchedulerLogEntry
        return new SchedulerLogEntity
        {
            SeqBigint = reader.GetInt64(0),
            TenantId = reader.GetString(1),

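Note: because each row stores both Link and PrevLink, a range read in HLC order can be checked for chain continuity without recomputing hashes. A minimal per-partition verification sketch follows, assuming the queried range starts at the beginning of the chain; logRepository is hypothetical, and a full check would also recompute each Link via SchedulerChainLinking.ComputeLink, whose byte layout is not shown in this diff.

// Sketch: verify prev_link continuity for one tenant/partition over an HLC range.
var entries = await logRepository.GetByHlcRangeAsync(tenantId, startTHlc, endTHlc, limit: 0, partitionKey, ct);
byte[]? expectedPrevLink = null;                     // null only if the range starts at the chain's first entry
foreach (var entry in entries)
{
    var linked = expectedPrevLink is null
        ? entry.PrevLink is null
        : entry.PrevLink is not null && expectedPrevLink.AsSpan().SequenceEqual(entry.PrevLink);
    if (!linked)
    {
        throw new InvalidOperationException($"Chain break at job {entry.JobId} (t_hlc {entry.THlc}).");
    }
    expectedPrevLink = entry.Link;                   // the next entry must point back at this link
}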
@@ -250,6 +250,177 @@ public sealed class SchedulerLogRepository : RepositoryBase<SchedulerDataSource>
        return result is int count ? count : 0;
    }

    /// <inheritdoc />
    public async Task<int> CountByHlcRangeAsync(
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        string? partitionKey,
        CancellationToken cancellationToken = default)
    {
        var whereClause = "WHERE tenant_id = @tenant_id";
        if (startTHlc is not null)
        {
            whereClause += " AND t_hlc >= @start_t_hlc";
        }
        if (endTHlc is not null)
        {
            whereClause += " AND t_hlc <= @end_t_hlc";
        }
        if (partitionKey is not null)
        {
            whereClause += " AND partition_key = @partition_key";
        }

        var sql = $"""
            SELECT COUNT(*)::INT
            FROM scheduler.scheduler_log
            {whereClause}
            """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", tenantId);
        if (startTHlc is not null)
        {
            AddParameter(command, "start_t_hlc", startTHlc);
        }
        if (endTHlc is not null)
        {
            AddParameter(command, "end_t_hlc", endTHlc);
        }
        if (partitionKey is not null)
        {
            AddParameter(command, "partition_key", partitionKey);
        }

        var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        return result is int count ? count : 0;
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<SchedulerLogEntity>> GetByHlcRangeAsync(
        string tenantId,
        string? startTHlc,
        string? endTHlc,
        int limit,
        string? partitionKey,
        CancellationToken cancellationToken = default)
    {
        var whereClause = "WHERE tenant_id = @tenant_id";
        if (startTHlc is not null)
        {
            whereClause += " AND t_hlc >= @start_t_hlc";
        }
        if (endTHlc is not null)
        {
            whereClause += " AND t_hlc <= @end_t_hlc";
        }
        if (partitionKey is not null)
        {
            whereClause += " AND partition_key = @partition_key";
        }

        var sql = $"""
            SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id, payload_hash, prev_link, link, created_at
            FROM scheduler.scheduler_log
            {whereClause}
            ORDER BY t_hlc ASC
            {(limit > 0 ? "LIMIT @limit" : "")}
            """;

        return await QueryAsync(
            tenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "tenant_id", tenantId);
                if (startTHlc is not null)
                {
                    AddParameter(cmd, "start_t_hlc", startTHlc);
                }
                if (endTHlc is not null)
                {
                    AddParameter(cmd, "end_t_hlc", endTHlc);
                }
                if (partitionKey is not null)
                {
                    AddParameter(cmd, "partition_key", partitionKey);
                }
                if (limit > 0)
                {
                    AddParameter(cmd, "limit", limit);
                }
            },
            MapSchedulerLogEntry,
            cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<IReadOnlyList<SchedulerLogEntity>> GetAfterHlcAsync(
        string tenantId,
        string afterTHlc,
        int limit,
        string? partitionKey = null,
        CancellationToken cancellationToken = default)
    {
        var whereClause = "WHERE tenant_id = @tenant_id AND t_hlc > @after_t_hlc";
        if (partitionKey is not null)
        {
            whereClause += " AND partition_key = @partition_key";
        }

        var sql = $"""
            SELECT seq_bigint, tenant_id, t_hlc, partition_key, job_id, payload_hash, prev_link, link, created_at
            FROM scheduler.scheduler_log
            {whereClause}
            ORDER BY t_hlc ASC
            LIMIT @limit
            """;

        return await QueryAsync(
            tenantId,
            sql,
            cmd =>
            {
                AddParameter(cmd, "tenant_id", tenantId);
                AddParameter(cmd, "after_t_hlc", afterTHlc);
                AddParameter(cmd, "limit", limit);
                if (partitionKey is not null)
                {
                    AddParameter(cmd, "partition_key", partitionKey);
                }
            },
            MapSchedulerLogEntry,
            cancellationToken).ConfigureAwait(false);
    }

    /// <inheritdoc />
    public async Task<bool> ExistsAsync(
        string tenantId,
        Guid jobId,
        CancellationToken cancellationToken = default)
    {
        const string sql = """
            SELECT EXISTS(
                SELECT 1 FROM scheduler.scheduler_log
                WHERE tenant_id = @tenant_id AND job_id = @job_id
            )
            """;

        await using var connection = await DataSource.OpenConnectionAsync(tenantId, "reader", cancellationToken)
            .ConfigureAwait(false);
        await using var command = CreateCommand(sql, connection);

        AddParameter(command, "tenant_id", tenantId);
        AddParameter(command, "job_id", jobId);

        var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
        return result is true or 1 or 1L;
    }

    private static SchedulerLogEntity MapSchedulerLogEntry(NpgsqlDataReader reader)
    {
        return new SchedulerLogEntity

@@ -28,6 +28,7 @@
    <ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.Postgres\StellaOps.Infrastructure.Postgres.csproj" />
    <ProjectReference Include="..\..\..\__Libraries\StellaOps.Infrastructure.EfCore\StellaOps.Infrastructure.EfCore.csproj" />
    <ProjectReference Include="..\..\..\__Libraries\StellaOps.HybridLogicalClock\StellaOps.HybridLogicalClock.csproj" />
    <ProjectReference Include="..\..\..\__Libraries\StellaOps.Canonical.Json\StellaOps.Canonical.Json.csproj" />
  </ItemGroup>

  <!-- Embed SQL migrations as resources -->

@@ -123,7 +123,7 @@ public sealed class BatchSnapshotService : IBatchSnapshotService
        }
        else
        {
            var digest = ComputeSnapshotDigest(snapshot, jobs);
            var digest = ComputeSnapshotDigest(ToEntity(snapshot), jobs);
            var signed = await _signer(digest, cancellationToken).ConfigureAwait(false);
            snapshot = snapshot with
            {
@@ -133,8 +133,9 @@ public sealed class BatchSnapshotService : IBatchSnapshotService
            }
        }

        // Persist
        await _snapshotRepository.InsertAsync(snapshot, cancellationToken).ConfigureAwait(false);
        // Convert to entity and persist
        var entity = ToEntity(snapshot);
        await _snapshotRepository.InsertAsync(entity, cancellationToken).ConfigureAwait(false);

        _logger.LogInformation(
            "Batch snapshot created. BatchId={BatchId}, TenantId={TenantId}, Range=[{Start}, {End}], JobCount={JobCount}, Signed={Signed}",
@@ -149,20 +150,22 @@ public sealed class BatchSnapshotService : IBatchSnapshotService
    }

    /// <inheritdoc />
    public Task<BatchSnapshot?> GetSnapshotAsync(
    public async Task<BatchSnapshot?> GetSnapshotAsync(
        Guid batchId,
        CancellationToken cancellationToken = default)
    {
        return _snapshotRepository.GetByIdAsync(batchId, cancellationToken);
        var entity = await _snapshotRepository.GetByIdAsync(batchId, cancellationToken).ConfigureAwait(false);
        return entity is null ? null : FromEntity(entity);
    }

    /// <inheritdoc />
    public Task<BatchSnapshot?> GetLatestSnapshotAsync(
    public async Task<BatchSnapshot?> GetLatestSnapshotAsync(
        string tenantId,
        CancellationToken cancellationToken = default)
    {
        ArgumentException.ThrowIfNullOrWhiteSpace(tenantId);
        return _snapshotRepository.GetLatestAsync(tenantId, cancellationToken);
        var entity = await _snapshotRepository.GetLatestAsync(tenantId, cancellationToken).ConfigureAwait(false);
        return entity is null ? null : FromEntity(entity);
    }

    /// <inheritdoc />
@@ -189,8 +192,6 @@ public sealed class BatchSnapshotService : IBatchSnapshotService
            snapshot.TenantId,
            snapshot.RangeStartT,
            snapshot.RangeEndT,
            limit: 0,
            partitionKey: null,
            cancellationToken).ConfigureAwait(false);

        // Verify job count
@@ -271,7 +272,7 @@ public sealed class BatchSnapshotService : IBatchSnapshotService
    /// Computes a deterministic digest over the snapshot and its jobs.
    /// This is the canonical representation used for both signing and verification.
    /// </summary>
    internal static byte[] ComputeSnapshotDigest(BatchSnapshot snapshot, IReadOnlyList<SchedulerLogEntry> jobs)
    internal static byte[] ComputeSnapshotDigest(BatchSnapshotEntity snapshot, IReadOnlyList<SchedulerLogEntity> jobs)
    {
        // Create canonical representation for hashing
        var digestInput = new
@@ -295,6 +296,38 @@ public sealed class BatchSnapshotService : IBatchSnapshotService
        return SHA256.HashData(Encoding.UTF8.GetBytes(canonical));
    }

    private static BatchSnapshotEntity ToEntity(BatchSnapshot snapshot)
    {
        return new BatchSnapshotEntity
        {
            BatchId = snapshot.BatchId,
            TenantId = snapshot.TenantId,
            RangeStartT = snapshot.RangeStartT,
            RangeEndT = snapshot.RangeEndT,
            HeadLink = snapshot.HeadLink,
            JobCount = snapshot.JobCount,
            CreatedAt = snapshot.CreatedAt,
            SignedBy = snapshot.SignedBy,
            Signature = snapshot.Signature
        };
    }

    private static BatchSnapshot FromEntity(BatchSnapshotEntity entity)
    {
        return new BatchSnapshot
        {
            BatchId = entity.BatchId,
            TenantId = entity.TenantId,
            RangeStartT = entity.RangeStartT,
            RangeEndT = entity.RangeEndT,
            HeadLink = entity.HeadLink,
            JobCount = entity.JobCount,
            CreatedAt = entity.CreatedAt,
            SignedBy = entity.SignedBy,
            Signature = entity.Signature
        };
    }

    private static bool ByteArrayEquals(byte[]? a, byte[]? b)
    {
        if (a is null && b is null)

@@ -154,7 +154,7 @@ public sealed class HlcSchedulerDequeueService : IHlcSchedulerDequeueService
    }

    /// <inheritdoc />
    public async Task<SchedulerLogEntry?> GetByJobIdAsync(
    public async Task<SchedulerLogEntity?> GetByJobIdAsync(
        string tenantId,
        Guid jobId,
        CancellationToken cancellationToken = default)

@@ -115,7 +115,7 @@ public sealed class HlcSchedulerEnqueueService : IHlcSchedulerEnqueueService
        var link = SchedulerChainLinking.ComputeLink(prevLink, jobId, tHlc, payloadHash);

        // 7. Insert log entry (atomic with chain head update)
        var entry = new SchedulerLogEntry
        var entry = new SchedulerLogEntity
        {
            TenantId = tenantId,
            THlc = tHlc.ToSortableString(),
@@ -123,7 +123,8 @@ public sealed class HlcSchedulerEnqueueService : IHlcSchedulerEnqueueService
            JobId = jobId,
            PayloadHash = payloadHash,
            PrevLink = prevLink,
            Link = link
            Link = link,
            CreatedAt = DateTimeOffset.UtcNow // Database will set actual value
        };

        await _logRepository.InsertWithChainUpdateAsync(entry, cancellationToken).ConfigureAwait(false);

@@ -5,6 +5,7 @@
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Options;
using StellaOps.Scheduler.Persistence.Postgres.Repositories;

namespace StellaOps.Scheduler.Queue.Hlc;

@@ -70,7 +70,7 @@ public interface IHlcSchedulerDequeueService
    /// <param name="jobId">The job identifier.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>The scheduler log entry if found, null otherwise.</returns>
    Task<Persistence.Postgres.Models.SchedulerLogEntry?> GetByJobIdAsync(
    Task<Persistence.Postgres.Models.SchedulerLogEntity?> GetByJobIdAsync(
        string tenantId,
        Guid jobId,
        CancellationToken cancellationToken = default);

@@ -15,7 +15,7 @@ namespace StellaOps.Scheduler.Queue.Hlc;
/// <param name="RangeStartHlc">The HLC start of the queried range (null if unbounded).</param>
/// <param name="RangeEndHlc">The HLC end of the queried range (null if unbounded).</param>
public readonly record struct SchedulerHlcDequeueResult(
    IReadOnlyList<SchedulerLogEntry> Entries,
    IReadOnlyList<SchedulerLogEntity> Entries,
    int TotalAvailable,
    HlcTimestamp? RangeStartHlc,
    HlcTimestamp? RangeEndHlc);

@@ -541,7 +541,7 @@ internal abstract class NatsSchedulerQueueBase<TMessage> : ISchedulerQueue<TMess

        // Parse HLC timestamp if present
        HlcTimestamp? hlcTimestamp = null;
        if (headers.TryGetValue(SchedulerQueueFields.HlcTimestamp, out var hlcValues) && hlcValues.Count > 0
        if (headers.TryGetValue(SchedulerQueueFields.THlc, out var hlcValues) && hlcValues.Count > 0
            && HlcTimestamp.TryParse(hlcValues[0], out var parsedHlc))
        {
            hlcTimestamp = parsedHlc;
@@ -592,7 +592,7 @@ internal abstract class NatsSchedulerQueueBase<TMessage> : ISchedulerQueue<TMess
        // Include HLC timestamp if available
        if (hlcTimestamp.HasValue)
        {
            headers.Add(SchedulerQueueFields.HlcTimestamp, hlcTimestamp.Value.ToSortableString());
            headers.Add(SchedulerQueueFields.THlc, hlcTimestamp.Value.ToSortableString());
        }

        var scheduleId = _payload.GetScheduleId(message);

@@ -3,7 +3,6 @@ using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
using StellaOps.HybridLogicalClock;
using StackExchange.Redis;
using StellaOps.Scheduler.Models;

@@ -17,7 +16,6 @@ internal sealed class RedisSchedulerPlannerQueue
        SchedulerRedisQueueOptions redisOptions,
        ILogger<RedisSchedulerPlannerQueue> logger,
        TimeProvider timeProvider,
        IHybridLogicalClock? hlc = null,
        Func<ConfigurationOptions, Task<IConnectionMultiplexer>>? connectionFactory = null)
        : base(
            queueOptions,
@@ -26,7 +24,6 @@ internal sealed class RedisSchedulerPlannerQueue
            PlannerPayload.Instance,
            logger,
            timeProvider,
            hlc,
            connectionFactory)
    {
    }

@@ -4,7 +4,6 @@ using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
using StellaOps.HybridLogicalClock;
using StackExchange.Redis;
using StellaOps.Scheduler.Models;

@@ -18,7 +17,6 @@ internal sealed class RedisSchedulerRunnerQueue
        SchedulerRedisQueueOptions redisOptions,
        ILogger<RedisSchedulerRunnerQueue> logger,
        TimeProvider timeProvider,
        IHybridLogicalClock? hlc = null,
        Func<ConfigurationOptions, Task<IConnectionMultiplexer>>? connectionFactory = null)
        : base(
            queueOptions,
@@ -27,7 +25,6 @@ internal sealed class RedisSchedulerRunnerQueue
            RunnerPayload.Instance,
            logger,
            timeProvider,
            hlc,
            connectionFactory)
    {
    }

@@ -38,8 +38,7 @@ public static class SchedulerQueueServiceCollectionExtensions
                options,
                options.Redis,
                loggerFactory.CreateLogger<RedisSchedulerPlannerQueue>(),
                timeProvider,
                hlc),
                timeProvider),
            SchedulerQueueTransportKind.Nats => new NatsSchedulerPlannerQueue(
                options,
                options.Nats,
@@ -62,8 +61,7 @@ public static class SchedulerQueueServiceCollectionExtensions
                options,
                options.Redis,
                loggerFactory.CreateLogger<RedisSchedulerRunnerQueue>(),
                timeProvider,
                hlc),
                timeProvider),
            SchedulerQueueTransportKind.Nats => new NatsSchedulerRunnerQueue(
                options,
                options.Nats,

@@ -11,6 +11,7 @@
    <PackageReference Include="Microsoft.Extensions.Configuration.Abstractions" />
    <PackageReference Include="Microsoft.Extensions.Configuration.Binder" />
    <PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" />
    <PackageReference Include="Microsoft.Extensions.Options.DataAnnotations" />
    <PackageReference Include="Microsoft.Extensions.Diagnostics.HealthChecks" />
    <PackageReference Include="Microsoft.Extensions.Diagnostics.HealthChecks.Abstractions" />
    <PackageReference Include="NATS.Client.Core" />

@@ -44,19 +44,22 @@ public sealed class FailureSignatureIndexer : BackgroundService
    private readonly IJobHistoryRepository _historyRepository;
    private readonly IOptions<FailureSignatureIndexerOptions> _options;
    private readonly ILogger<FailureSignatureIndexer> _logger;
    private readonly Func<int, int> _randomIndexSource;

    public FailureSignatureIndexer(
        IFailureSignatureRepository signatureRepository,
        IJobRepository jobRepository,
        IJobHistoryRepository historyRepository,
        IOptions<FailureSignatureIndexerOptions> options,
        ILogger<FailureSignatureIndexer> logger)
        ILogger<FailureSignatureIndexer> logger,
        Func<int, int>? randomIndexSource = null)
    {
        _signatureRepository = signatureRepository;
        _jobRepository = jobRepository;
        _historyRepository = historyRepository;
        _options = options;
        _logger = logger;
        _randomIndexSource = randomIndexSource ?? Random.Shared.Next;
    }

    protected override async Task ExecuteAsync(CancellationToken stoppingToken)
@@ -135,8 +138,8 @@ public sealed class FailureSignatureIndexer : BackgroundService

    private async Task PruneOldSignaturesAsync(CancellationToken ct)
    {
        // Prune is expensive, only run occasionally
        var random = Random.Shared.Next(0, 12);
        // Prune is expensive, only run occasionally (1 in 12 chance)
        var random = _randomIndexSource(12);
        if (random != 0)
        {
            return;

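Note: the new Func<int, int> randomIndexSource parameter lets tests pin the prune decision instead of depending on Random.Shared. A construction sketch under that assumption follows; the repository fakes and the default options instance are hypothetical test doubles.

// Assumed usings for the sketch:
// using Microsoft.Extensions.Logging.Abstractions;
// using Microsoft.Extensions.Options;
var indexer = new FailureSignatureIndexer(
    fakeSignatureRepository,                           // hypothetical test double
    fakeJobRepository,                                 // hypothetical test double
    fakeHistoryRepository,                             // hypothetical test double
    Options.Create(new FailureSignatureIndexerOptions()),
    NullLogger<FailureSignatureIndexer>.Instance,
    randomIndexSource: _ => 0);                        // 0 always takes the prune branch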