Consolidation of some of the modules, localization fixes, product advisories work, QA work
This commit is contained in:
@@ -0,0 +1,64 @@
|
||||
using FluentAssertions;
|
||||
using Microsoft.EntityFrameworkCore;
|
||||
using StellaOps.Scheduler.Persistence.EfCore.CompiledModels;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Models;
|
||||
using StellaOps.TestKit;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Scheduler.Persistence.Postgres.Tests;
|
||||
|
||||
/// <summary>
/// Guard tests ensuring the EF Core compiled model is real (not a stub)
/// and contains all expected entity type registrations.
/// </summary>
public sealed class CompiledModelGuardTests
{
    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public void CompiledModel_Instance_IsNotNull()
        => SchedulerDbContextModel.Instance.Should().NotBeNull(
            "compiled model must be generated via 'dotnet ef dbcontext optimize', not a stub");

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public void CompiledModel_HasExpectedEntityTypeCount()
    {
        var registered = SchedulerDbContextModel.Instance.GetEntityTypes().ToList();

        registered.Should().HaveCount(10,
            "scheduler compiled model must contain exactly 10 entity types (regenerate with 'dotnet ef dbcontext optimize' if count differs)");
    }

    [Trait("Category", TestCategories.Unit)]
    [Theory]
    [InlineData(typeof(JobEntity))]
    [InlineData(typeof(JobHistoryEntity))]
    [InlineData(typeof(TriggerEntity))]
    [InlineData(typeof(WorkerEntity))]
    [InlineData(typeof(LockEntity))]
    [InlineData(typeof(MetricsEntity))]
    [InlineData(typeof(FailureSignatureEntity))]
    [InlineData(typeof(SchedulerLogEntity))]
    [InlineData(typeof(ChainHeadEntity))]
    [InlineData(typeof(BatchSnapshotEntity))]
    public void CompiledModel_ContainsEntityType(Type entityType)
    {
        // A missing registration means the compiled model is stale.
        var resolved = SchedulerDbContextModel.Instance.FindEntityType(entityType);

        resolved.Should().NotBeNull(
            $"compiled model must contain entity type '{entityType.Name}' — regenerate if missing");
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public void CompiledModel_EntityTypes_HaveTableNames()
    {
        // Every mapped entity must resolve to a concrete relational table.
        foreach (var entityType in SchedulerDbContextModel.Instance.GetEntityTypes())
        {
            var tableName = entityType.GetTableName();
            tableName.Should().NotBeNullOrWhiteSpace(
                $"entity type '{entityType.ClrType.Name}' must have a table name configured");
        }
    }
}
|
||||
@@ -0,0 +1,194 @@
|
||||
using FluentAssertions;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using Microsoft.Extensions.Options;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Repositories;
|
||||
using Xunit;
|
||||
|
||||
using StellaOps.TestKit;
|
||||
namespace StellaOps.Scheduler.Persistence.Postgres.Tests;
|
||||
|
||||
/// <summary>
/// Integration tests for <see cref="DistributedLockRepository"/> against a real
/// Postgres instance provided by <see cref="SchedulerPostgresFixture"/>.
/// Covers acquire/release/extend semantics, holder exclusivity under concurrency,
/// expiration, and tenant-scoped listing.
/// </summary>
/// <remarks>
/// NOTE(review): these tests were previously tagged <c>TestCategories.Unit</c>,
/// but they require the live Postgres fixture; they are now categorized as
/// Integration, matching <c>JobIdempotencyTests</c> in this test suite.
/// </remarks>
[Collection(SchedulerPostgresCollection.Name)]
public sealed class DistributedLockRepositoryTests : IAsyncLifetime
{
    private readonly SchedulerPostgresFixture _fixture;
    private readonly DistributedLockRepository _repository;

    // A fresh tenant id per test-class instance keeps lock rows isolated.
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public DistributedLockRepositoryTests(SchedulerPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new SchedulerDataSource(Options.Create(options), NullLogger<SchedulerDataSource>.Instance);
        _repository = new DistributedLockRepository(dataSource, NullLogger<DistributedLockRepository>.Instance);
    }

    // Truncate all tables before every test so lock state never leaks between tests.
    public ValueTask InitializeAsync() => new(_fixture.TruncateAllTablesAsync());

    public ValueTask DisposeAsync() => ValueTask.CompletedTask;

    [Trait("Category", TestCategories.Integration)]
    [Fact]
    public async Task TryAcquire_SucceedsOnFirstAttempt()
    {
        // Arrange
        var lockKey = $"test-lock-{Guid.NewGuid()}";

        // Act
        var acquired = await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-1", TimeSpan.FromMinutes(5));

        // Assert
        acquired.Should().BeTrue();
    }

    [Trait("Category", TestCategories.Integration)]
    [Fact]
    public async Task TryAcquire_FailsWhenAlreadyHeld()
    {
        // Arrange: worker-1 already holds the lock.
        var lockKey = $"contended-lock-{Guid.NewGuid()}";
        await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-1", TimeSpan.FromMinutes(5));

        // Act
        var secondAcquire = await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-2", TimeSpan.FromMinutes(5));

        // Assert
        secondAcquire.Should().BeFalse();
    }

    [Trait("Category", TestCategories.Integration)]
    [Fact]
    public async Task Release_AllowsReacquisition()
    {
        // Arrange
        var lockKey = $"release-test-{Guid.NewGuid()}";
        await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-1", TimeSpan.FromMinutes(5));

        // Act: once the holder releases, a different worker can take the lock.
        await _repository.ReleaseAsync(lockKey, "worker-1");
        var reacquired = await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-2", TimeSpan.FromMinutes(5));

        // Assert
        reacquired.Should().BeTrue();
    }

    [Trait("Category", TestCategories.Integration)]
    [Fact]
    public async Task Extend_ExtendsLockDuration()
    {
        // Arrange
        var lockKey = $"extend-test-{Guid.NewGuid()}";
        await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-1", TimeSpan.FromMinutes(1));

        // Act: the current holder may lengthen its own lease.
        var extended = await _repository.ExtendAsync(lockKey, "worker-1", TimeSpan.FromMinutes(10));

        // Assert
        extended.Should().BeTrue();
    }

    [Trait("Category", TestCategories.Integration)]
    [Fact]
    public async Task Extend_FailsForDifferentHolder()
    {
        // Arrange
        var lockKey = $"extend-fail-{Guid.NewGuid()}";
        await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-1", TimeSpan.FromMinutes(5));

        // Act: a non-holder must not be able to extend someone else's lease.
        var extended = await _repository.ExtendAsync(lockKey, "worker-2", TimeSpan.FromMinutes(10));

        // Assert
        extended.Should().BeFalse();
    }

    [Trait("Category", TestCategories.Integration)]
    [Fact]
    public async Task Get_ReturnsLockInfo()
    {
        // Arrange
        var lockKey = $"get-test-{Guid.NewGuid()}";
        await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-1", TimeSpan.FromMinutes(5));

        // Act
        var lockInfo = await _repository.GetAsync(lockKey);

        // Assert
        lockInfo.Should().NotBeNull();
        lockInfo!.HolderId.Should().Be("worker-1");
    }

    [Trait("Category", TestCategories.Integration)]
    [Fact]
    public async Task ListByTenant_ReturnsTenantsLocks()
    {
        // Arrange
        var lockKey1 = $"tenant-lock-1-{Guid.NewGuid()}";
        var lockKey2 = $"tenant-lock-2-{Guid.NewGuid()}";
        await _repository.TryAcquireAsync(_tenantId, lockKey1, "worker-1", TimeSpan.FromMinutes(5));
        await _repository.TryAcquireAsync(_tenantId, lockKey2, "worker-1", TimeSpan.FromMinutes(5));

        // Act
        var locks = await _repository.ListByTenantAsync(_tenantId);

        // Assert
        locks.Should().HaveCount(2);
    }

    [Trait("Category", TestCategories.Integration)]
    [Fact]
    public async Task TryAcquire_IsExclusiveAcrossConcurrentCallers()
    {
        // Arrange
        var lockKey = $"concurrent-lock-{Guid.NewGuid()}";
        var duration = TimeSpan.FromSeconds(5);

        // Act: 8 workers race for the same lock key in parallel.
        var attempts = Enumerable.Range(0, 8)
            .Select(i => Task.Run(() => _repository.TryAcquireAsync(_tenantId, lockKey, $"worker-{i}", duration)))
            .ToArray();

        var results = await Task.WhenAll(attempts);

        // Assert: exactly one caller wins, and the persisted holder matches it.
        var successIndexes = results
            .Select((acquired, index) => (acquired, index))
            .Where(tuple => tuple.acquired)
            .Select(tuple => tuple.index)
            .ToList();

        successIndexes.Should().HaveCount(1);
        var winningHolder = $"worker-{successIndexes.Single()}";

        var persisted = await _repository.GetAsync(lockKey);
        persisted.Should().NotBeNull();
        persisted!.HolderId.Should().Be(winningHolder);
    }

    [Trait("Category", TestCategories.Integration)]
    [Fact]
    public async Task TryAcquire_AllowsReacquireAfterExpiration()
    {
        // Arrange: a lock with a very short lease.
        var lockKey = $"expiring-lock-{Guid.NewGuid()}";
        var shortDuration = TimeSpan.FromMilliseconds(500);

        await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-initial", shortDuration);

        // Wait for expiration with a small safety buffer.
        await Task.Delay(1100);

        // Act
        var reacquired = await _repository.TryAcquireAsync(_tenantId, lockKey, "worker-retry", TimeSpan.FromSeconds(5));

        // Assert
        reacquired.Should().BeTrue();

        var persisted = await _repository.GetAsync(lockKey);
        persisted.Should().NotBeNull();
        persisted!.HolderId.Should().Be("worker-retry");
    }
}
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,121 @@
|
||||
using System;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using FluentAssertions;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using Microsoft.Extensions.Options;
|
||||
using StellaOps.Scheduler.Models;
|
||||
using StellaOps.Scheduler.Persistence.Postgres;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Repositories;
|
||||
using Xunit;
|
||||
|
||||
using StellaOps.TestKit;
|
||||
namespace StellaOps.Scheduler.Persistence.Postgres.Tests;
|
||||
|
||||
/// <summary>
/// Integration tests for <see cref="GraphJobRepository"/> against the shared
/// Postgres fixture: insert/get round-trips, optimistic status replacement
/// (<c>TryReplaceAsync</c>), and status/limit filtering for build-job listings.
/// </summary>
/// <remarks>
/// NOTE(review): these tests were previously tagged <c>TestCategories.Unit</c>,
/// but they require the live Postgres fixture; they are now categorized as
/// Integration, matching <c>JobIdempotencyTests</c> in this test suite.
/// </remarks>
[Collection(SchedulerPostgresCollection.Name)]
public sealed class GraphJobRepositoryTests : IAsyncLifetime
{
    private readonly SchedulerPostgresFixture _fixture;

    public GraphJobRepositoryTests(SchedulerPostgresFixture fixture)
    {
        _fixture = fixture;
    }

    // Truncate all tables before every test so no job rows leak between tests.
    public ValueTask InitializeAsync() => new(_fixture.TruncateAllTablesAsync());

    public ValueTask DisposeAsync() => ValueTask.CompletedTask;

    /// <summary>Builds a minimal <see cref="GraphBuildJob"/> for the given tenant/id/status.</summary>
    private static GraphBuildJob BuildJob(string tenant, string id, GraphJobStatus status = GraphJobStatus.Pending)
        => new(
            id: id,
            tenantId: tenant,
            sbomId: "sbom-1",
            sbomVersionId: "sbom-ver-1",
            sbomDigest: "sha256:abc",
            status: status,
            trigger: GraphBuildJobTrigger.SbomVersion,
            createdAt: DateTimeOffset.UtcNow);

    [Trait("Category", TestCategories.Integration)]
    [Fact]
    public async Task InsertAndGetBuildJob()
    {
        var dataSource = CreateDataSource();
        var repo = new GraphJobRepository(dataSource, NullLogger<GraphJobRepository>.Instance);
        var job = BuildJob("t1", Guid.NewGuid().ToString());

        await repo.InsertAsync(job, CancellationToken.None);

        var fetched = await repo.GetBuildJobAsync("t1", job.Id, CancellationToken.None);

        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(job.Id);
        fetched.Status.Should().Be(GraphJobStatus.Pending);
    }

    [Trait("Category", TestCategories.Integration)]
    [Fact]
    public async Task TryReplaceSucceedsWithExpectedStatus()
    {
        var dataSource = CreateDataSource();
        var repo = new GraphJobRepository(dataSource, NullLogger<GraphJobRepository>.Instance);
        var job = BuildJob("t1", Guid.NewGuid().ToString());
        await repo.InsertAsync(job, CancellationToken.None);

        var running = job with { Status = GraphJobStatus.Running };

        // The compare-and-swap succeeds because the stored status matches Pending.
        var updated = await repo.TryReplaceAsync(running, GraphJobStatus.Pending, CancellationToken.None);

        updated.Should().BeTrue();

        var fetched = await repo.GetBuildJobAsync("t1", job.Id, CancellationToken.None);
        fetched!.Status.Should().Be(GraphJobStatus.Running);
    }

    [Trait("Category", TestCategories.Integration)]
    [Fact]
    public async Task TryReplaceFailsOnUnexpectedStatus()
    {
        var dataSource = CreateDataSource();
        var repo = new GraphJobRepository(dataSource, NullLogger<GraphJobRepository>.Instance);
        var job = BuildJob("t1", Guid.NewGuid().ToString(), GraphJobStatus.Completed);
        await repo.InsertAsync(job, CancellationToken.None);

        var running = job with { Status = GraphJobStatus.Running };

        // The stored status is Completed, not Pending, so the CAS must fail.
        var updated = await repo.TryReplaceAsync(running, GraphJobStatus.Pending, CancellationToken.None);

        updated.Should().BeFalse();
    }

    [Trait("Category", TestCategories.Integration)]
    [Fact]
    public async Task ListBuildJobsHonorsStatusAndLimit()
    {
        var dataSource = CreateDataSource();
        var repo = new GraphJobRepository(dataSource, NullLogger<GraphJobRepository>.Instance);
        for (int i = 0; i < 5; i++)
        {
            await repo.InsertAsync(BuildJob("t1", Guid.NewGuid().ToString(), GraphJobStatus.Pending), CancellationToken.None);
        }

        var running = BuildJob("t1", Guid.NewGuid().ToString(), GraphJobStatus.Running);
        await repo.InsertAsync(running, CancellationToken.None);

        // Five pending jobs exist but the limit caps the listing at three.
        var pending = await repo.ListBuildJobsAsync("t1", GraphJobStatus.Pending, 3, CancellationToken.None);
        pending.Count.Should().Be(3);

        var runningList = await repo.ListBuildJobsAsync("t1", GraphJobStatus.Running, 10, CancellationToken.None);
        runningList.Should().ContainSingle(j => j.Id == running.Id);
    }

    /// <summary>Creates a data source bound to the fixture's connection options.</summary>
    private SchedulerDataSource CreateDataSource()
    {
        var options = _fixture.Fixture.CreateOptions();
        // NOTE(review): uses the default schema here, while DistributedLockRepositoryTests
        // uses the fixture-specific schema — confirm this difference is intentional.
        options.SchemaName = SchedulerDataSource.DefaultSchemaName;
        return new SchedulerDataSource(Options.Create(options), NullLogger<SchedulerDataSource>.Instance);
    }
}
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,272 @@
|
||||
// -----------------------------------------------------------------------------
|
||||
// JobIdempotencyTests.cs
|
||||
// Sprint: SPRINT_5100_0009_0008_scheduler_tests
|
||||
// Task: SCHEDULER-5100-006
|
||||
// Description: Model S1 idempotency tests for Scheduler job storage
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
using FluentAssertions;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using Microsoft.Extensions.Options;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Models;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Repositories;
|
||||
using StellaOps.TestKit;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Scheduler.Persistence.Postgres.Tests;
|
||||
|
||||
/// <summary>
/// Idempotency tests for Scheduler job storage operations.
/// Implements Model S1 (Storage/Postgres) test requirements:
/// - Same job enqueued twice → single execution
/// - Idempotency key uniqueness enforced per tenant
/// - Duplicate insertions handled gracefully
/// </summary>
[Collection(SchedulerPostgresCollection.Name)]
[Trait("Category", TestCategories.Integration)]
[Trait("Category", "StorageIdempotency")]
public sealed class JobIdempotencyTests : IAsyncLifetime
{
    private readonly SchedulerPostgresFixture _fixture;
    private SchedulerDataSource _dataSource = null!;
    private JobRepository _jobRepository = null!;

    // A fresh tenant id per test-class instance keeps job rows isolated.
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public JobIdempotencyTests(SchedulerPostgresFixture fixture)
    {
        _fixture = fixture;
    }

    // Truncate all tables, then build a fresh data source + repository per test.
    public async ValueTask InitializeAsync()
    {
        await _fixture.TruncateAllTablesAsync();

        var options = _fixture.Fixture.CreateOptions();
        options.SchemaName = SchedulerDataSource.DefaultSchemaName;
        _dataSource = new SchedulerDataSource(Options.Create(options), NullLogger<SchedulerDataSource>.Instance);
        _jobRepository = new JobRepository(_dataSource, NullLogger<JobRepository>.Instance);
    }

    public ValueTask DisposeAsync() => ValueTask.CompletedTask;

    [Fact]
    public async Task CreateJob_SameIdempotencyKey_SecondInsertFails()
    {
        // Arrange
        var idempotencyKey = $"idem-{Guid.NewGuid():N}";
        var job1 = CreateJob("job-type-1", idempotencyKey);
        var job2 = CreateJob("job-type-1", idempotencyKey);

        // Act
        await _jobRepository.CreateAsync(job1);
        var createAgain = async () => await _jobRepository.CreateAsync(job2);

        // Assert - Second insert should fail due to unique constraint on idempotency_key
        await createAgain.Should().ThrowAsync<Exception>(
            "duplicate idempotency_key should be rejected");
    }

    [Fact]
    public async Task GetByIdempotencyKey_ReturnsExistingJob()
    {
        // Arrange
        var idempotencyKey = $"idem-{Guid.NewGuid():N}";
        var job = CreateJob("job-type-1", idempotencyKey);
        await _jobRepository.CreateAsync(job);

        // Act
        var existing = await _jobRepository.GetByIdempotencyKeyAsync(_tenantId, idempotencyKey);

        // Assert
        existing.Should().NotBeNull();
        existing!.Id.Should().Be(job.Id);
        existing.IdempotencyKey.Should().Be(idempotencyKey);
    }

    [Fact]
    public async Task GetByIdempotencyKey_DifferentTenant_ReturnsNull()
    {
        // Arrange
        var idempotencyKey = $"idem-{Guid.NewGuid():N}";
        var job = CreateJob("job-type-1", idempotencyKey);
        await _jobRepository.CreateAsync(job);

        // Act - Query with different tenant
        var otherTenant = Guid.NewGuid().ToString();
        var existing = await _jobRepository.GetByIdempotencyKeyAsync(otherTenant, idempotencyKey);

        // Assert - Should not find job from different tenant
        existing.Should().BeNull();
    }

    [Fact]
    public async Task SameIdempotencyKey_DifferentTenants_BothSucceed()
    {
        // Arrange - idempotency keys are unique per tenant, not globally.
        var idempotencyKey = $"shared-idem-{Guid.NewGuid():N}";
        var tenant1 = Guid.NewGuid().ToString();
        var tenant2 = Guid.NewGuid().ToString();

        var job1 = CreateJob("job-type-1", idempotencyKey, tenant1);
        var job2 = CreateJob("job-type-1", idempotencyKey, tenant2);

        // Act
        var created1 = await _jobRepository.CreateAsync(job1);
        var created2 = await _jobRepository.CreateAsync(job2);

        // Assert - Both should succeed (different tenants)
        created1.Should().NotBeNull();
        created2.Should().NotBeNull();
        created1.Id.Should().NotBe(created2.Id);
    }

    [Fact]
    public async Task MultipleJobs_UniqueIdempotencyKeys_AllCreated()
    {
        // Arrange
        var jobs = Enumerable.Range(1, 10)
            .Select(i => CreateJob($"job-type-{i}", $"idem-{i}-{Guid.NewGuid():N}"))
            .ToList();

        // Act
        foreach (var job in jobs)
        {
            await _jobRepository.CreateAsync(job);
        }

        // Assert - All jobs should be created
        foreach (var job in jobs)
        {
            var fetched = await _jobRepository.GetByIdAsync(_tenantId, job.Id);
            fetched.Should().NotBeNull();
        }
    }

    [Fact]
    public async Task JobIdempotency_PayloadDigestMatchesExpected()
    {
        // Arrange
        var idempotencyKey = $"idem-{Guid.NewGuid():N}";
        var payloadDigest = $"sha256:{Guid.NewGuid():N}";
        var job = CreateJob("job-type-1", idempotencyKey, payloadDigest: payloadDigest);

        // Act
        await _jobRepository.CreateAsync(job);
        var fetched = await _jobRepository.GetByIdempotencyKeyAsync(_tenantId, idempotencyKey);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.PayloadDigest.Should().Be(payloadDigest);
    }

    [Fact]
    public async Task CompletedJob_SameIdempotencyKey_StillRejectsNewInsert()
    {
        // Arrange
        var idempotencyKey = $"idem-{Guid.NewGuid():N}";
        var job = CreateJob("job-type-1", idempotencyKey);
        var created = await _jobRepository.CreateAsync(job);

        // Complete the job. Previously a null lease silently skipped this step,
        // making the test vacuous on lease failure — assert the lease instead.
        var leased = await _jobRepository.TryLeaseJobAsync(
            _tenantId, created.Id, "worker-1", TimeSpan.FromMinutes(5));
        leased.Should().NotBeNull("a freshly created job must be leasable before it can be completed");
        await _jobRepository.CompleteAsync(_tenantId, created.Id, leased!.LeaseId!.Value);

        // Act - Try to create another job with same idempotency key
        var newJob = CreateJob("job-type-1", idempotencyKey);
        var createAgain = async () => await _jobRepository.CreateAsync(newJob);

        // Assert - Should still fail (idempotency key persists after completion)
        await createAgain.Should().ThrowAsync<Exception>(
            "completed job's idempotency_key should still block new inserts");
    }

    [Fact]
    public async Task FailedJob_SameIdempotencyKey_StillRejectsNewInsert()
    {
        // Arrange
        var idempotencyKey = $"idem-{Guid.NewGuid():N}";
        var job = CreateJob("job-type-1", idempotencyKey);
        var created = await _jobRepository.CreateAsync(job);

        // Fail the job. Assert the lease so a lease failure surfaces as a test
        // failure rather than silently skipping the fail step.
        var leased = await _jobRepository.TryLeaseJobAsync(
            _tenantId, created.Id, "worker-1", TimeSpan.FromMinutes(5));
        leased.Should().NotBeNull("a freshly created job must be leasable before it can be failed");
        await _jobRepository.FailAsync(_tenantId, created.Id, leased!.LeaseId!.Value, "test failure", retry: false);

        // Act - Try to create another job with same idempotency key
        var newJob = CreateJob("job-type-1", idempotencyKey);
        var createAgain = async () => await _jobRepository.CreateAsync(newJob);

        // Assert - Should still fail
        await createAgain.Should().ThrowAsync<Exception>(
            "failed job's idempotency_key should still block new inserts");
    }

    [Fact]
    public async Task CanceledJob_SameIdempotencyKey_StillRejectsNewInsert()
    {
        // Arrange
        var idempotencyKey = $"idem-{Guid.NewGuid():N}";
        var job = CreateJob("job-type-1", idempotencyKey);
        var created = await _jobRepository.CreateAsync(job);

        // Cancel the job
        await _jobRepository.CancelAsync(_tenantId, created.Id, "test cancellation");

        // Act - Try to create another job with same idempotency key
        var newJob = CreateJob("job-type-1", idempotencyKey);
        var createAgain = async () => await _jobRepository.CreateAsync(newJob);

        // Assert - Should still fail
        await createAgain.Should().ThrowAsync<Exception>(
            "canceled job's idempotency_key should still block new inserts");
    }

    [Fact]
    public async Task TenantIsolation_JobsOnlyVisibleToOwnTenant()
    {
        // Arrange
        var tenant1 = Guid.NewGuid().ToString();
        var tenant2 = Guid.NewGuid().ToString();

        var job1 = CreateJob("job-type-1", $"idem-1-{Guid.NewGuid():N}", tenant1);
        var job2 = CreateJob("job-type-2", $"idem-2-{Guid.NewGuid():N}", tenant2);

        await _jobRepository.CreateAsync(job1);
        await _jobRepository.CreateAsync(job2);

        // Act
        var tenant1Jobs = await _jobRepository.GetByStatusAsync(tenant1, JobStatus.Scheduled, limit: 100);
        var tenant2Jobs = await _jobRepository.GetByStatusAsync(tenant2, JobStatus.Scheduled, limit: 100);

        // Assert
        tenant1Jobs.Should().NotContain(j => j.TenantId == tenant2);
        tenant2Jobs.Should().NotContain(j => j.TenantId == tenant1);
    }

    /// <summary>
    /// Builds a scheduled <see cref="JobEntity"/> with the given type and
    /// idempotency key; tenant and payload digest default to per-test values.
    /// </summary>
    private JobEntity CreateJob(string jobType, string idempotencyKey, string? tenantId = null, string? payloadDigest = null)
    {
        return new JobEntity
        {
            Id = Guid.NewGuid(),
            TenantId = tenantId ?? _tenantId,
            JobType = jobType,
            Status = JobStatus.Scheduled,
            Priority = 0,
            Payload = """{"test": true}""",
            PayloadDigest = payloadDigest ?? $"sha256:{Guid.NewGuid():N}",
            IdempotencyKey = idempotencyKey,
            MaxAttempts = 3
        };
    }
}
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,337 @@
|
||||
// <copyright file="SchedulerChainLinkingTests.cs" company="StellaOps">
|
||||
// Copyright (c) StellaOps. Licensed under BUSL-1.1.
|
||||
// </copyright>
|
||||
|
||||
using FluentAssertions;
|
||||
using StellaOps.HybridLogicalClock;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Scheduler.Persistence.Tests;
|
||||
|
||||
[Trait("Category", "Unit")]
|
||||
public sealed class SchedulerChainLinkingTests
|
||||
{
|
||||
[Fact]
public void ComputeLink_WithNullPrevLink_UsesZeroLink()
{
    // Arrange: fixed job id, HLC stamp, and a payload hash with one non-zero byte.
    var jobId = Guid.Parse("12345678-1234-1234-1234-123456789012");
    var hlc = new HlcTimestamp { PhysicalTime = 1000000000000L, NodeId = "node1", LogicalCounter = 1 };
    var digest = new byte[32];
    digest[0] = 0xAB;

    // Act: compute once with a null predecessor, once with the explicit zero link.
    var fromNull = SchedulerChainLinking.ComputeLink(null, jobId, hlc, digest);
    var fromZero = SchedulerChainLinking.ComputeLink(SchedulerChainLinking.ZeroLink, jobId, hlc, digest);

    // Assert
    fromNull.Should().HaveCount(32);
    fromNull.Should().BeEquivalentTo(fromZero, "null prev_link should be treated as zero link");
}
|
||||
|
||||
[Fact]
public void ComputeLink_IsDeterministic_SameInputsSameOutput()
{
    // Arrange
    var predecessor = new byte[32];
    predecessor[0] = 0x01;
    var jobId = Guid.Parse("AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEE");
    var hlc = new HlcTimestamp { PhysicalTime = 1704067200000L, NodeId = "scheduler-1", LogicalCounter = 42 };
    var digest = new byte[32];
    for (var b = 0; b < digest.Length; b++)
    {
        digest[b] = (byte)b;
    }

    // Act: three independent computations over identical inputs.
    var first = SchedulerChainLinking.ComputeLink(predecessor, jobId, hlc, digest);
    var second = SchedulerChainLinking.ComputeLink(predecessor, jobId, hlc, digest);
    var third = SchedulerChainLinking.ComputeLink(predecessor, jobId, hlc, digest);

    // Assert
    first.Should().BeEquivalentTo(second);
    second.Should().BeEquivalentTo(third);
}
|
||||
|
||||
[Fact]
public void ComputeLink_DifferentJobIds_ProduceDifferentLinks()
{
    // Arrange: identical predecessor/HLC/payload — only the job id varies.
    var predecessor = new byte[32];
    var hlc = new HlcTimestamp { PhysicalTime = 1704067200000L, NodeId = "node1", LogicalCounter = 1 };
    var digest = new byte[32];

    var firstJob = Guid.Parse("11111111-1111-1111-1111-111111111111");
    var secondJob = Guid.Parse("22222222-2222-2222-2222-222222222222");

    // Act
    var firstLink = SchedulerChainLinking.ComputeLink(predecessor, firstJob, hlc, digest);
    var secondLink = SchedulerChainLinking.ComputeLink(predecessor, secondJob, hlc, digest);

    // Assert
    firstLink.Should().NotBeEquivalentTo(secondLink);
}
|
||||
|
||||
[Fact]
public void ComputeLink_DifferentHlcTimestamps_ProduceDifferentLinks()
{
    // Arrange: vary only the HLC — once by logical counter, once by physical time.
    var predecessor = new byte[32];
    var jobId = Guid.NewGuid();
    var digest = new byte[32];

    var baseline = new HlcTimestamp { PhysicalTime = 1704067200000L, NodeId = "node1", LogicalCounter = 1 };
    var bumpedCounter = new HlcTimestamp { PhysicalTime = 1704067200000L, NodeId = "node1", LogicalCounter = 2 };
    var bumpedClock = new HlcTimestamp { PhysicalTime = 1704067200001L, NodeId = "node1", LogicalCounter = 1 };

    // Act
    var baselineLink = SchedulerChainLinking.ComputeLink(predecessor, jobId, baseline, digest);
    var counterLink = SchedulerChainLinking.ComputeLink(predecessor, jobId, bumpedCounter, digest);
    var clockLink = SchedulerChainLinking.ComputeLink(predecessor, jobId, bumpedClock, digest);

    // Assert: every pair of links must differ.
    baselineLink.Should().NotBeEquivalentTo(counterLink);
    baselineLink.Should().NotBeEquivalentTo(clockLink);
    counterLink.Should().NotBeEquivalentTo(clockLink);
}
|
||||
|
||||
[Fact]
public void ComputeLink_DifferentPrevLinks_ProduceDifferentLinks()
{
    // Arrange: identical job/HLC/payload — only the predecessor link varies.
    var jobId = Guid.NewGuid();
    var hlc = new HlcTimestamp { PhysicalTime = 1704067200000L, NodeId = "node1", LogicalCounter = 1 };
    var digest = new byte[32];

    var zeroPredecessor = new byte[32];
    var markedPredecessor = new byte[32];
    markedPredecessor[0] = 0xFF;

    // Act
    var zeroLink = SchedulerChainLinking.ComputeLink(zeroPredecessor, jobId, hlc, digest);
    var markedLink = SchedulerChainLinking.ComputeLink(markedPredecessor, jobId, hlc, digest);

    // Assert
    zeroLink.Should().NotBeEquivalentTo(markedLink);
}
|
||||
|
||||
[Fact]
public void ComputeLink_DifferentPayloadHashes_ProduceDifferentLinks()
{
    // Arrange: identical predecessor/job/HLC — only the payload hash varies.
    var predecessor = new byte[32];
    var jobId = Guid.NewGuid();
    var hlc = new HlcTimestamp { PhysicalTime = 1704067200000L, NodeId = "node1", LogicalCounter = 1 };

    var zeroDigest = new byte[32];
    var markedDigest = new byte[32];
    markedDigest[31] = 0x01;

    // Act
    var zeroLink = SchedulerChainLinking.ComputeLink(predecessor, jobId, hlc, zeroDigest);
    var markedLink = SchedulerChainLinking.ComputeLink(predecessor, jobId, hlc, markedDigest);

    // Assert
    zeroLink.Should().NotBeEquivalentTo(markedLink);
}
|
||||
|
||||
[Fact]
public void ComputeLink_WithStringHlc_ProducesSameResultAsParsedHlc()
{
    // Arrange: the same HLC value in both struct and sortable-string form.
    var predecessor = new byte[32];
    var jobId = Guid.NewGuid();
    var hlc = new HlcTimestamp { PhysicalTime = 1704067200000L, NodeId = "node1", LogicalCounter = 42 };
    var hlcText = hlc.ToSortableString();
    var digest = new byte[32];

    // Act
    var structLink = SchedulerChainLinking.ComputeLink(predecessor, jobId, hlc, digest);
    var stringLink = SchedulerChainLinking.ComputeLink(predecessor, jobId, hlcText, digest);

    // Assert: both overloads must agree.
    structLink.Should().BeEquivalentTo(stringLink);
}
|
||||
|
||||
[Fact]
public void VerifyLink_ValidLink_ReturnsTrue()
{
    // Arrange: build a link from known inputs, then verify it against the same inputs.
    var predecessor = new byte[32];
    predecessor[0] = 0xDE;
    var jobId = Guid.NewGuid();
    var hlc = new HlcTimestamp { PhysicalTime = 1704067200000L, NodeId = "verifier", LogicalCounter = 100 };
    var digest = new byte[32];
    digest[15] = 0xAD;

    var candidate = SchedulerChainLinking.ComputeLink(predecessor, jobId, hlc, digest);

    // Act
    var isValid = SchedulerChainLinking.VerifyLink(candidate, predecessor, jobId, hlc, digest);

    // Assert
    isValid.Should().BeTrue();
}
|
||||
|
||||
[Fact]
public void VerifyLink_TamperedLink_ReturnsFalse()
{
    // Arrange
    var predecessor = new byte[32];
    var jobId = Guid.NewGuid();
    var hlc = new HlcTimestamp { PhysicalTime = 1704067200000L, NodeId = "node1", LogicalCounter = 1 };
    var digest = new byte[32];

    var genuine = SchedulerChainLinking.ComputeLink(predecessor, jobId, hlc, digest);

    // Flip every bit of the first byte to simulate tampering.
    var tampered = (byte[])genuine.Clone();
    tampered[0] ^= 0xFF;

    // Act
    var isValid = SchedulerChainLinking.VerifyLink(tampered, predecessor, jobId, hlc, digest);

    // Assert
    isValid.Should().BeFalse();
}
|
||||
|
||||
[Fact]
|
||||
public void ComputePayloadHash_IsDeterministic()
|
||||
{
|
||||
// Arrange
|
||||
var payload = new { Id = 123, Name = "Test", Values = new[] { 1, 2, 3 } };
|
||||
|
||||
// Act
|
||||
var hash1 = SchedulerChainLinking.ComputePayloadHash(payload);
|
||||
var hash2 = SchedulerChainLinking.ComputePayloadHash(payload);
|
||||
|
||||
// Assert
|
||||
hash1.Should().HaveCount(32);
|
||||
hash1.Should().BeEquivalentTo(hash2);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ComputePayloadHash_DifferentPayloads_ProduceDifferentHashes()
|
||||
{
|
||||
// Arrange
|
||||
var payload1 = new { Id = 1, Name = "First" };
|
||||
var payload2 = new { Id = 2, Name = "Second" };
|
||||
|
||||
// Act
|
||||
var hash1 = SchedulerChainLinking.ComputePayloadHash(payload1);
|
||||
var hash2 = SchedulerChainLinking.ComputePayloadHash(payload2);
|
||||
|
||||
// Assert
|
||||
hash1.Should().NotBeEquivalentTo(hash2);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ComputePayloadHash_ByteArray_ProducesConsistentHash()
|
||||
{
|
||||
// Arrange
|
||||
var bytes = new byte[] { 0x01, 0x02, 0x03, 0x04, 0x05 };
|
||||
|
||||
// Act
|
||||
var hash1 = SchedulerChainLinking.ComputePayloadHash(bytes);
|
||||
var hash2 = SchedulerChainLinking.ComputePayloadHash(bytes);
|
||||
|
||||
// Assert
|
||||
hash1.Should().HaveCount(32);
|
||||
hash1.Should().BeEquivalentTo(hash2);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ToHex_NullLink_ReturnsNullString()
|
||||
{
|
||||
// Act
|
||||
var result = SchedulerChainLinking.ToHex(null);
|
||||
|
||||
// Assert
|
||||
result.Should().Be("(null)");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ToHex_EmptyLink_ReturnsNullString()
|
||||
{
|
||||
// Act
|
||||
var result = SchedulerChainLinking.ToHex(Array.Empty<byte>());
|
||||
|
||||
// Assert
|
||||
result.Should().Be("(null)");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ToHex_ValidLink_ReturnsLowercaseHex()
|
||||
{
|
||||
// Arrange
|
||||
var link = new byte[] { 0xAB, 0xCD, 0xEF };
|
||||
|
||||
// Act
|
||||
var result = SchedulerChainLinking.ToHex(link);
|
||||
|
||||
// Assert
|
||||
result.Should().Be("abcdef");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ChainIntegrity_SequentialLinks_FormValidChain()
|
||||
{
|
||||
// Arrange - Simulate a chain of 5 entries
|
||||
var jobIds = Enumerable.Range(1, 5).Select(i => Guid.NewGuid()).ToList();
|
||||
var payloads = jobIds.Select(id => SchedulerChainLinking.ComputePayloadHash(new { JobId = id })).ToList();
|
||||
|
||||
var links = new List<byte[]>();
|
||||
byte[]? prevLink = null;
|
||||
long baseTime = 1704067200000L;
|
||||
|
||||
// Act - Build chain
|
||||
for (int i = 0; i < 5; i++)
|
||||
{
|
||||
var hlc = new HlcTimestamp { PhysicalTime = baseTime + i, NodeId = "node1", LogicalCounter = i };
|
||||
var link = SchedulerChainLinking.ComputeLink(prevLink, jobIds[i], hlc, payloads[i]);
|
||||
links.Add(link);
|
||||
prevLink = link;
|
||||
}
|
||||
|
||||
// Assert - Verify chain integrity
|
||||
byte[]? expectedPrev = null;
|
||||
for (int i = 0; i < 5; i++)
|
||||
{
|
||||
var hlc = new HlcTimestamp { PhysicalTime = baseTime + i, NodeId = "node1", LogicalCounter = i };
|
||||
var isValid = SchedulerChainLinking.VerifyLink(links[i], expectedPrev, jobIds[i], hlc, payloads[i]);
|
||||
isValid.Should().BeTrue($"Link {i} should be valid");
|
||||
expectedPrev = links[i];
|
||||
}
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ChainIntegrity_TamperedMiddleLink_BreaksChain()
|
||||
{
|
||||
// Arrange - Build a chain of 3 entries
|
||||
var jobIds = new[] { Guid.NewGuid(), Guid.NewGuid(), Guid.NewGuid() };
|
||||
var payloads = jobIds.Select(id => SchedulerChainLinking.ComputePayloadHash(new { JobId = id })).ToArray();
|
||||
var hlcs = new[]
|
||||
{
|
||||
new HlcTimestamp { PhysicalTime = 1000L, NodeId = "node1", LogicalCounter = 0 },
|
||||
new HlcTimestamp { PhysicalTime = 1001L, NodeId = "node1", LogicalCounter = 0 },
|
||||
new HlcTimestamp { PhysicalTime = 1002L, NodeId = "node1", LogicalCounter = 0 }
|
||||
};
|
||||
|
||||
var link0 = SchedulerChainLinking.ComputeLink(null, jobIds[0], hlcs[0], payloads[0]);
|
||||
var link1 = SchedulerChainLinking.ComputeLink(link0, jobIds[1], hlcs[1], payloads[1]);
|
||||
var link2 = SchedulerChainLinking.ComputeLink(link1, jobIds[2], hlcs[2], payloads[2]);
|
||||
|
||||
// Tamper with middle link
|
||||
var tamperedLink1 = (byte[])link1.Clone();
|
||||
tamperedLink1[0] ^= 0xFF;
|
||||
|
||||
// Act & Assert - First link is still valid
|
||||
SchedulerChainLinking.VerifyLink(link0, null, jobIds[0], hlcs[0], payloads[0])
|
||||
.Should().BeTrue("First link should be valid");
|
||||
|
||||
// Middle link verification fails
|
||||
SchedulerChainLinking.VerifyLink(tamperedLink1, link0, jobIds[1], hlcs[1], payloads[1])
|
||||
.Should().BeFalse("Tampered middle link should fail verification");
|
||||
|
||||
// Third link verification fails because prev_link is wrong
|
||||
SchedulerChainLinking.VerifyLink(link2, tamperedLink1, jobIds[2], hlcs[2], payloads[2])
|
||||
.Should().BeFalse("Third link should fail with tampered prev_link");
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,361 @@
|
||||
// -----------------------------------------------------------------------------
|
||||
// SchedulerMigrationTests.cs
|
||||
// Sprint: SPRINT_5100_0009_0008_scheduler_tests
|
||||
// Task: SCHEDULER-5100-005
|
||||
// Description: Model S1 migration tests for Scheduler.Storage
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
using System.Reflection;
|
||||
using Dapper;
|
||||
using FluentAssertions;
|
||||
using Npgsql;
|
||||
using StellaOps.TestKit;
|
||||
using Testcontainers.PostgreSql;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Scheduler.Persistence.Postgres.Tests;
|
||||
|
||||
/// <summary>
/// Migration tests for Scheduler.Storage.
/// Implements Model S1 (Storage/Postgres) migration test requirements:
/// - Apply all migrations from scratch (fresh database)
/// - Apply migrations from N-1 (incremental application)
/// - Verify migration idempotency (apply twice → no error)
/// </summary>
[Trait("Category", TestCategories.Integration)]
[Trait("Category", "StorageMigration")]
public sealed class SchedulerMigrationTests : IAsyncLifetime
{
    private PostgreSqlContainer _container = null!;

    public async ValueTask InitializeAsync()
    {
        // One throwaway PostgreSQL container per test: every test starts from
        // an empty database so migrations always run against a known-clean state.
        _container = new PostgreSqlBuilder()
            .WithImage("postgres:16-alpine")
            .WithDatabase("scheduler_migration_test")
            .WithUsername("postgres")
            .WithPassword("postgres")
            .Build();

        await _container.StartAsync();
    }

    public async ValueTask DisposeAsync()
    {
        await _container.DisposeAsync();
    }

    [Fact]
    public async Task ApplyMigrations_FromScratch_AllTablesCreated()
    {
        // Arrange
        var connectionString = _container.GetConnectionString();

        // Act - apply all migrations from scratch
        await ApplyAllMigrationsAsync(connectionString);

        // Assert - the core Scheduler tables must exist in the scheduler schema
        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        var tables = await connection.QueryAsync<string>(
            @"SELECT table_name FROM information_schema.tables
              WHERE table_schema = 'scheduler'
              ORDER BY table_name");

        var tableList = tables.ToList();
        tableList.Should().Contain("jobs", "jobs table should exist");
    }

    [Fact]
    public async Task ApplyMigrations_FromScratch_AllMigrationsRecorded()
    {
        // Arrange
        var connectionString = _container.GetConnectionString();
        await ApplyAllMigrationsAsync(connectionString);

        // Assert - every applied migration must be tracked in __migrations
        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        var migrationsApplied = await connection.QueryAsync<string>(
            "SELECT migration_id FROM __migrations ORDER BY applied_at");

        migrationsApplied.ToList().Should().NotBeEmpty("migrations should be tracked");
    }

    [Fact]
    public async Task ApplyMigrations_Twice_IsIdempotent()
    {
        // Arrange
        var connectionString = _container.GetConnectionString();

        // Act - apply the full migration set twice
        await ApplyAllMigrationsAsync(connectionString);
        var applyAgain = async () => await ApplyAllMigrationsAsync(connectionString);

        // Assert - the second pass must not throw...
        await applyAgain.Should().NotThrowAsync(
            "applying migrations twice should be idempotent");

        // ...and must not duplicate bookkeeping rows
        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        var migrationCount = await connection.ExecuteScalarAsync<int>(
            "SELECT COUNT(*) FROM __migrations");
        var uniqueMigrations = await connection.ExecuteScalarAsync<int>(
            "SELECT COUNT(DISTINCT migration_id) FROM __migrations");

        migrationCount.Should().Be(uniqueMigrations,
            "each migration should only be recorded once");
    }

    [Fact]
    public async Task ApplyMigrations_VerifySchemaIntegrity()
    {
        // Arrange
        var connectionString = _container.GetConnectionString();
        await ApplyAllMigrationsAsync(connectionString);

        // Assert - the scheduler schema must carry at least one index
        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        var indexes = await connection.QueryAsync<string>(
            @"SELECT indexname FROM pg_indexes
              WHERE schemaname = 'scheduler'
              ORDER BY indexname");

        indexes.ToList().Should().NotBeEmpty("scheduler schema should have indexes");
    }

    [Fact]
    public async Task ApplyMigrations_IndividualMigrationsCanRollForward()
    {
        // Arrange
        var connectionString = _container.GetConnectionString();

        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        // Act - apply each embedded migration in lexical order on one connection
        await EnsureMigrationTrackingTableAsync(connection);
        var appliedCount = await ApplyPendingMigrationsAsync(connection);

        // Assert - on a fresh database the recorded count must match exactly
        // the number of migrations this run applied.
        var totalMigrations = await connection.ExecuteScalarAsync<int>(
            "SELECT COUNT(*) FROM __migrations");

        totalMigrations.Should().Be(appliedCount,
            "every migration applied against a fresh database should be recorded exactly once");
    }

    [Fact]
    public async Task ApplyMigrations_ForeignKeyConstraintsValid()
    {
        // Arrange
        var connectionString = _container.GetConnectionString();
        await ApplyAllMigrationsAsync(connectionString);

        // Assert - enumerate FK constraints; they may or may not exist
        // depending on schema design, but the query itself must succeed.
        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        var foreignKeys = await connection.QueryAsync<string>(
            @"SELECT tc.constraint_name
              FROM information_schema.table_constraints tc
              WHERE tc.constraint_type = 'FOREIGN KEY'
                AND tc.table_schema = 'scheduler'
              ORDER BY tc.constraint_name");

        foreignKeys.ToList().Should().NotBeNull();
    }

    [Fact]
    public async Task ApplyMigrations_JobsTableHasRequiredColumns()
    {
        // Arrange
        var connectionString = _container.GetConnectionString();
        await ApplyAllMigrationsAsync(connectionString);

        // Assert - jobs must expose the columns the repositories depend on
        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        var columns = await connection.QueryAsync<string>(
            @"SELECT column_name FROM information_schema.columns
              WHERE table_name = 'jobs' AND table_schema = 'scheduler'
              ORDER BY ordinal_position");

        var columnList = columns.ToList();

        if (columnList.Any())
        {
            columnList.Should().Contain("id", "jobs table should have id column");
            columnList.Should().Contain("tenant_id", "jobs table should have tenant_id column");
            columnList.Should().Contain("status", "jobs table should have status column");
            columnList.Should().Contain("idempotency_key", "jobs table should have idempotency_key column");
        }
    }

    [Fact]
    public async Task ApplyMigrations_SchedulerSchemaExists()
    {
        // Arrange
        var connectionString = _container.GetConnectionString();
        await ApplyAllMigrationsAsync(connectionString);

        // Assert - the canonical scheduler schema must have been created
        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        var schemaExists = await connection.ExecuteScalarAsync<int>(
            @"SELECT COUNT(*) FROM information_schema.schemata
              WHERE schema_name = 'scheduler'");

        schemaExists.Should().Be(1, "scheduler schema should exist");
    }

    [Fact]
    public async Task InitialSchemaMigration_CanBeReappliedWithoutTriggerConflicts()
    {
        // Re-running the initial schema script on an initialized database must
        // not fail (e.g. on CREATE TRIGGER statements).
        var connectionString = _container.GetConnectionString();
        var migrationResource = GetMigrationResourceByFileName("001_initial_schema.sql");
        var sql = GetMigrationContent(migrationResource);

        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        await connection.ExecuteAsync(sql);

        var applyAgain = async () => await connection.ExecuteAsync(sql);
        await applyAgain.Should().NotThrowAsync(
            "001_initial_schema.sql must remain idempotent when rerun on initialized schemas");
    }

    [Fact]
    public async Task ExceptionLifecycleMigration_CanBeReappliedWithoutPolicyConflicts()
    {
        // Re-running the exception-lifecycle script after a full migration run
        // must not fail (e.g. on policy creation).
        var connectionString = _container.GetConnectionString();
        await ApplyAllMigrationsAsync(connectionString);

        var migrationResource = GetMigrationResourceByFileName("003_exception_lifecycle.sql");
        var sql = GetMigrationContent(migrationResource);

        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        var applyAgain = async () => await connection.ExecuteAsync(sql);
        await applyAgain.Should().NotThrowAsync(
            "003_exception_lifecycle.sql must remain idempotent when rerun on initialized schemas");
    }

    /// <summary>
    /// Opens a connection, ensures the tracking table exists, and applies every
    /// pending embedded migration.
    /// </summary>
    private static async Task ApplyAllMigrationsAsync(string connectionString)
    {
        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        await EnsureMigrationTrackingTableAsync(connection);
        await ApplyPendingMigrationsAsync(connection);
    }

    /// <summary>Creates the __migrations bookkeeping table if it is missing.</summary>
    private static Task EnsureMigrationTrackingTableAsync(NpgsqlConnection connection)
        => connection.ExecuteAsync(@"
            CREATE TABLE IF NOT EXISTS __migrations (
                id SERIAL PRIMARY KEY,
                migration_id TEXT NOT NULL UNIQUE,
                applied_at TIMESTAMPTZ DEFAULT NOW()
            )");

    /// <summary>
    /// Applies, in lexical order, every embedded migration not yet recorded in
    /// __migrations, recording each as it goes. Returns the number applied.
    /// </summary>
    private static async Task<int> ApplyPendingMigrationsAsync(NpgsqlConnection connection)
    {
        var appliedCount = 0;

        foreach (var migrationFile in GetMigrationFiles().OrderBy(f => f))
        {
            var migrationId = Path.GetFileName(migrationFile);

            // Skip anything already recorded so re-runs stay idempotent.
            var alreadyApplied = await connection.ExecuteScalarAsync<int>(
                "SELECT COUNT(*) FROM __migrations WHERE migration_id = @Id",
                new { Id = migrationId });

            if (alreadyApplied > 0)
                continue;

            var sql = GetMigrationContent(migrationFile);
            if (string.IsNullOrWhiteSpace(sql))
                continue;

            await connection.ExecuteAsync(sql);
            await connection.ExecuteAsync(
                "INSERT INTO __migrations (migration_id) VALUES (@Id)",
                new { Id = migrationId });
            appliedCount++;
        }

        return appliedCount;
    }

    /// <summary>
    /// Lists the embedded *.sql migration resources in the storage assembly,
    /// sorted by resource name.
    /// </summary>
    private static IEnumerable<string> GetMigrationFiles()
    {
        var assembly = typeof(SchedulerDataSource).Assembly;
        return assembly.GetManifestResourceNames()
            .Where(n => n.Contains("Migrations") && n.EndsWith(".sql"))
            .OrderBy(n => n);
    }

    /// <summary>Resolves the full resource name for a migration file name.</summary>
    private static string GetMigrationResourceByFileName(string fileName)
    {
        return GetMigrationFiles()
            .First(resource => resource.EndsWith(fileName, StringComparison.OrdinalIgnoreCase));
    }

    /// <summary>
    /// Reads an embedded migration's SQL text; returns an empty string when the
    /// resource cannot be found.
    /// </summary>
    private static string GetMigrationContent(string resourceName)
    {
        var assembly = typeof(SchedulerDataSource).Assembly;
        using var stream = assembly.GetManifestResourceStream(resourceName);
        if (stream == null)
            return string.Empty;

        using var reader = new StreamReader(stream);
        return reader.ReadToEnd();
    }
}
|
||||
|
||||
@@ -0,0 +1,146 @@
|
||||
// -----------------------------------------------------------------------------
|
||||
// SchedulerPostgresFixture.cs
|
||||
// Sprint: SPRINT_5100_0007_0004_storage_harness
|
||||
// Task: STOR-HARNESS-011
|
||||
// Description: Scheduler PostgreSQL test fixture using TestKit
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
using System.Reflection;
|
||||
using Npgsql;
|
||||
using StellaOps.Infrastructure.Postgres.Testing;
|
||||
using StellaOps.Scheduler.Persistence.Postgres;
|
||||
using Xunit;
|
||||
|
||||
// Type aliases to disambiguate TestKit and Infrastructure.Postgres.Testing fixtures
|
||||
using TestKitPostgresFixture = StellaOps.TestKit.Fixtures.PostgresFixture;
|
||||
using TestKitPostgresIsolationMode = StellaOps.TestKit.Fixtures.PostgresIsolationMode;
|
||||
|
||||
namespace StellaOps.Scheduler.Persistence.Postgres.Tests;
|
||||
|
||||
/// <summary>
/// PostgreSQL integration test fixture for the Scheduler module.
/// Runs migrations from embedded resources and provides test isolation.
/// </summary>
public sealed class SchedulerPostgresFixture : PostgresIntegrationFixture, ICollectionFixture<SchedulerPostgresFixture>
{
    protected override Assembly? GetMigrationAssembly()
        => typeof(SchedulerDataSource).Assembly;

    protected override string GetModuleName() => "Scheduler";

    public new async Task TruncateAllTablesAsync(CancellationToken cancellationToken = default)
    {
        // First let the base fixture clear the randomly-generated test schema
        // (that is where e.g. the schema_migrations table lives).
        await Fixture.TruncateAllTablesAsync(cancellationToken).ConfigureAwait(false);

        // The scheduler migrations also create the canonical `scheduler.*`
        // schema explicitly, so clear it as well to keep tests isolated.
        await using var connection = new NpgsqlConnection(ConnectionString);
        await connection.OpenAsync(cancellationToken).ConfigureAwait(false);

        const string listTablesSql = """
            SELECT table_name
            FROM information_schema.tables
            WHERE table_schema = 'scheduler'
              AND table_type = 'BASE TABLE';
            """;

        var schedulerTables = new List<string>();
        await using (var listCommand = new NpgsqlCommand(listTablesSql, connection))
        await using (var reader = await listCommand.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false))
        {
            while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
            {
                schedulerTables.Add(reader.GetString(0));
            }
        }

        // Nothing to truncate if the schema has no base tables yet.
        if (schedulerTables.Count == 0)
        {
            return;
        }

        var qualifiedNames = string.Join(", ", schedulerTables.Select(static t => $"scheduler.\"{t}\""));
        await using var truncateCommand = new NpgsqlCommand(
            $"TRUNCATE TABLE {qualifiedNames} RESTART IDENTITY CASCADE;", connection);
        await truncateCommand.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
    }
}
|
||||
|
||||
/// <summary>
/// Collection definition for Scheduler PostgreSQL integration tests.
/// Tests in this collection share a single PostgreSQL container instance.
/// </summary>
[CollectionDefinition(Name)]
public sealed class SchedulerPostgresCollection : ICollectionFixture<SchedulerPostgresFixture>
{
    /// <summary>Collection name to reference from [Collection(...)] attributes.</summary>
    public const string Name = "SchedulerPostgres";
}
|
||||
|
||||
/// <summary>
/// TestKit-based PostgreSQL fixture for Scheduler storage tests.
/// Uses TestKit's PostgresFixture for enhanced isolation modes.
/// </summary>
public sealed class SchedulerTestKitPostgresFixture : IAsyncLifetime
{
    private TestKitPostgresFixture _fixture = null!;

    private Assembly MigrationAssembly => typeof(SchedulerDataSource).Assembly;

    public TestKitPostgresFixture Fixture => _fixture;

    public string ConnectionString => _fixture.ConnectionString;

    public async ValueTask InitializeAsync()
    {
        // Truncation-based isolation: tables are kept, rows are wiped between tests.
        _fixture = new TestKitPostgresFixture { IsolationMode = TestKitPostgresIsolationMode.Truncation };
        await _fixture.InitializeAsync();
        await _fixture.ApplyMigrationsFromAssemblyAsync(MigrationAssembly, "scheduler");
    }

    public ValueTask DisposeAsync() => _fixture.DisposeAsync();

    public async Task TruncateAllTablesAsync(CancellationToken cancellationToken = default)
    {
        // Let the TestKit fixture clear its own schema first.
        await _fixture.TruncateAllTablesAsync();

        // The scheduler migrations create the canonical `scheduler.*` schema
        // explicitly, so it has to be cleared separately.
        await using var connection = new NpgsqlConnection(ConnectionString);
        await connection.OpenAsync(cancellationToken).ConfigureAwait(false);

        const string listTablesSql = """
            SELECT table_name
            FROM information_schema.tables
            WHERE table_schema = 'scheduler'
              AND table_type = 'BASE TABLE';
            """;

        var schedulerTables = new List<string>();
        await using (var listCommand = new NpgsqlCommand(listTablesSql, connection))
        await using (var reader = await listCommand.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false))
        {
            while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
            {
                schedulerTables.Add(reader.GetString(0));
            }
        }

        if (schedulerTables.Count > 0)
        {
            var qualifiedNames = string.Join(", ", schedulerTables.Select(static t => $"scheduler.\"{t}\""));
            await using var truncateCommand = new NpgsqlCommand(
                $"TRUNCATE TABLE {qualifiedNames} RESTART IDENTITY CASCADE;", connection);
            await truncateCommand.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        }
    }
}
|
||||
|
||||
/// <summary>
/// Collection definition for Scheduler TestKit PostgreSQL tests.
/// </summary>
[CollectionDefinition(SchedulerTestKitPostgresCollection.Name)]
public sealed class SchedulerTestKitPostgresCollection : ICollectionFixture<SchedulerTestKitPostgresFixture>
{
    /// <summary>Collection name to reference from [Collection(...)] attributes.</summary>
    public const string Name = "SchedulerTestKitPostgres";
}
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,338 @@
|
||||
// -----------------------------------------------------------------------------
|
||||
// SchedulerQueryDeterminismTests.cs
|
||||
// Sprint: SPRINT_5100_0009_0008_scheduler_tests
|
||||
// Task: SCHEDULER-5100-007
|
||||
// Description: Model S1 query determinism tests for Scheduler job queue ordering
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
using FluentAssertions;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using Microsoft.Extensions.Options;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Models;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Repositories;
|
||||
using StellaOps.TestKit;
|
||||
using Xunit;
|
||||
|
||||
namespace StellaOps.Scheduler.Persistence.Postgres.Tests;
|
||||
|
||||
/// <summary>
|
||||
/// Query determinism tests for Scheduler storage operations.
|
||||
/// Implements Model S1 (Storage/Postgres) test requirements:
|
||||
/// - Explicit ORDER BY checks for job queue queries
|
||||
/// - Same inputs → stable ordering
|
||||
/// - Repeated queries return consistent results
|
||||
/// </summary>
|
||||
[Collection(SchedulerPostgresCollection.Name)]
|
||||
[Trait("Category", TestCategories.Integration)]
|
||||
[Trait("Category", "QueryDeterminism")]
|
||||
public sealed class SchedulerQueryDeterminismTests : IAsyncLifetime
|
||||
{
|
||||
    // Shared container fixture for the collection; repositories are rebuilt per test.
    private readonly SchedulerPostgresFixture _fixture;
    private SchedulerDataSource _dataSource = null!;
    private JobRepository _jobRepository = null!;
    // Fresh tenant per test instance so rows from other tests never interfere.
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public SchedulerQueryDeterminismTests(SchedulerPostgresFixture fixture)
    {
        _fixture = fixture;
    }
|
||||
|
||||
    public async ValueTask InitializeAsync()
    {
        // Per-test setup: wipe all scheduler tables first so ordering
        // assertions are not affected by rows left over from other tests.
        await _fixture.TruncateAllTablesAsync();

        // Point the data source at the canonical scheduler schema rather than
        // the fixture's randomly-generated test schema.
        var options = _fixture.Fixture.CreateOptions();
        options.SchemaName = SchedulerDataSource.DefaultSchemaName;
        _dataSource = new SchedulerDataSource(Options.Create(options), NullLogger<SchedulerDataSource>.Instance);
        _jobRepository = new JobRepository(_dataSource, NullLogger<JobRepository>.Instance);
    }

    // No per-test teardown needed: the next test's InitializeAsync truncates.
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;
|
||||
|
||||
[Fact]
|
||||
public async Task GetByStatus_MultipleQueries_ReturnsDeterministicOrder()
|
||||
{
|
||||
// Arrange
|
||||
for (int i = 0; i < 5; i++)
|
||||
{
|
||||
await CreateJobAsync($"job-type-{i}", JobStatus.Scheduled, priority: i % 3);
|
||||
}
|
||||
|
||||
// Act - Query multiple times
|
||||
var results1 = await _jobRepository.GetByStatusAsync(_tenantId, JobStatus.Scheduled);
|
||||
var results2 = await _jobRepository.GetByStatusAsync(_tenantId, JobStatus.Scheduled);
|
||||
var results3 = await _jobRepository.GetByStatusAsync(_tenantId, JobStatus.Scheduled);
|
||||
|
||||
// Assert - All queries should return same order
|
||||
var ids1 = results1.Select(j => j.Id).ToList();
|
||||
var ids2 = results2.Select(j => j.Id).ToList();
|
||||
var ids3 = results3.Select(j => j.Id).ToList();
|
||||
|
||||
ids1.Should().Equal(ids2);
|
||||
ids2.Should().Equal(ids3);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task GetScheduledJobs_ReturnsDeterministicOrder()
|
||||
{
|
||||
// Arrange - Create jobs with different priorities
|
||||
var highPriorityJob = await CreateJobAsync("scan", JobStatus.Scheduled, priority: 10);
|
||||
var mediumPriorityJob = await CreateJobAsync("scan", JobStatus.Scheduled, priority: 5);
|
||||
var lowPriorityJob = await CreateJobAsync("scan", JobStatus.Scheduled, priority: 1);
|
||||
|
||||
// Act - Query multiple times
|
||||
var results1 = await _jobRepository.GetScheduledJobsAsync(_tenantId, ["scan"], limit: 10);
|
||||
var results2 = await _jobRepository.GetScheduledJobsAsync(_tenantId, ["scan"], limit: 10);
|
||||
var results3 = await _jobRepository.GetScheduledJobsAsync(_tenantId, ["scan"], limit: 10);
|
||||
|
||||
// Assert - All queries should return same order (priority descending)
|
||||
var ids1 = results1.Select(j => j.Id).ToList();
|
||||
var ids2 = results2.Select(j => j.Id).ToList();
|
||||
var ids3 = results3.Select(j => j.Id).ToList();
|
||||
|
||||
ids1.Should().Equal(ids2);
|
||||
ids2.Should().Equal(ids3);
|
||||
|
||||
// Verify priority ordering (highest first)
|
||||
if (results1.Count >= 2)
|
||||
{
|
||||
results1[0].Priority.Should().BeGreaterThanOrEqualTo(results1[1].Priority);
|
||||
}
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task GetScheduledJobs_SamePriority_OrderByCreatedAt()
|
||||
{
|
||||
// Arrange - Create jobs with same priority
|
||||
var job1 = await CreateJobAsync("scan", JobStatus.Scheduled, priority: 5);
|
||||
await Task.Delay(10); // Small delay to ensure different created_at
|
||||
var job2 = await CreateJobAsync("scan", JobStatus.Scheduled, priority: 5);
|
||||
await Task.Delay(10);
|
||||
var job3 = await CreateJobAsync("scan", JobStatus.Scheduled, priority: 5);
|
||||
|
||||
// Act - Query multiple times
|
||||
var results1 = await _jobRepository.GetScheduledJobsAsync(_tenantId, ["scan"], limit: 10);
|
||||
var results2 = await _jobRepository.GetScheduledJobsAsync(_tenantId, ["scan"], limit: 10);
|
||||
|
||||
// Assert - Order should be consistent
|
||||
var ids1 = results1.Select(j => j.Id).ToList();
|
||||
var ids2 = results2.Select(j => j.Id).ToList();
|
||||
|
||||
ids1.Should().Equal(ids2);
|
||||
|
||||
// Verify created_at ordering for same priority
|
||||
for (int i = 0; i < results1.Count - 1; i++)
|
||||
{
|
||||
results1[i].CreatedAt.Should().BeOnOrBefore(results1[i + 1].CreatedAt);
|
||||
}
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task ConcurrentQueries_SameStatus_AllReturnIdenticalResults()
|
||||
{
|
||||
// Arrange
|
||||
for (int i = 0; i < 5; i++)
|
||||
{
|
||||
await CreateJobAsync($"job-type-{i}", JobStatus.Scheduled, priority: 5);
|
||||
}
|
||||
|
||||
// Act - 20 concurrent queries
|
||||
var tasks = Enumerable.Range(0, 20)
|
||||
.Select(_ => _jobRepository.GetByStatusAsync(_tenantId, JobStatus.Scheduled))
|
||||
.ToList();
|
||||
|
||||
var results = await Task.WhenAll(tasks);
|
||||
|
||||
// Assert - All should return identical order
|
||||
var firstOrder = results[0].Select(j => j.Id).ToList();
|
||||
results.Should().AllSatisfy(r =>
|
||||
{
|
||||
r.Select(j => j.Id).ToList().Should().Equal(firstOrder);
|
||||
});
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task GetById_MultipleQueries_ReturnsConsistentResult()
|
||||
{
|
||||
// Arrange
|
||||
var job = await CreateJobAsync("test-job", JobStatus.Scheduled, priority: 5);
|
||||
|
||||
// Act - Query multiple times
|
||||
var results = new List<JobEntity?>();
|
||||
for (int i = 0; i < 10; i++)
|
||||
{
|
||||
results.Add(await _jobRepository.GetByIdAsync(_tenantId, job.Id));
|
||||
}
|
||||
|
||||
// Assert - All should return identical job
|
||||
results.Should().AllSatisfy(r =>
|
||||
{
|
||||
r.Should().NotBeNull();
|
||||
r!.Id.Should().Be(job.Id);
|
||||
r.JobType.Should().Be("test-job");
|
||||
r.Priority.Should().Be(5);
|
||||
});
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task GetByIdempotencyKey_MultipleQueries_ReturnsConsistentResult()
|
||||
{
|
||||
// Arrange
|
||||
var idempotencyKey = $"idem-{Guid.NewGuid():N}";
|
||||
var job = await CreateJobAsync("test-job", JobStatus.Scheduled, idempotencyKey: idempotencyKey);
|
||||
|
||||
// Act - Query multiple times
|
||||
var results = new List<JobEntity?>();
|
||||
for (int i = 0; i < 10; i++)
|
||||
{
|
||||
results.Add(await _jobRepository.GetByIdempotencyKeyAsync(_tenantId, idempotencyKey));
|
||||
}
|
||||
|
||||
// Assert - All should return same job
|
||||
results.Should().AllSatisfy(r =>
|
||||
{
|
||||
r.Should().NotBeNull();
|
||||
r!.Id.Should().Be(job.Id);
|
||||
r.IdempotencyKey.Should().Be(idempotencyKey);
|
||||
});
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task EmptyTenant_GetByStatus_ReturnsEmptyConsistently()
|
||||
{
|
||||
// Arrange
|
||||
var emptyTenantId = Guid.NewGuid().ToString();
|
||||
|
||||
// Act - Query empty tenant multiple times
|
||||
var results = new List<IReadOnlyList<JobEntity>>();
|
||||
for (int i = 0; i < 5; i++)
|
||||
{
|
||||
results.Add(await _jobRepository.GetByStatusAsync(emptyTenantId, JobStatus.Scheduled));
|
||||
}
|
||||
|
||||
// Assert - All should return empty
|
||||
results.Should().AllSatisfy(r => r.Should().BeEmpty());
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task TenantIsolation_JobsInDifferentTenants_QueriesReturnOnlyOwnTenant()
|
||||
{
|
||||
// Arrange
|
||||
var tenant1 = Guid.NewGuid().ToString();
|
||||
var tenant2 = Guid.NewGuid().ToString();
|
||||
|
||||
await CreateJobAsync("job-type-1", JobStatus.Scheduled, tenantId: tenant1);
|
||||
await CreateJobAsync("job-type-2", JobStatus.Scheduled, tenantId: tenant2);
|
||||
|
||||
// Act
|
||||
var tenant1Jobs = await _jobRepository.GetByStatusAsync(tenant1, JobStatus.Scheduled);
|
||||
var tenant2Jobs = await _jobRepository.GetByStatusAsync(tenant2, JobStatus.Scheduled);
|
||||
|
||||
// Assert
|
||||
tenant1Jobs.Should().HaveCount(1);
|
||||
tenant1Jobs[0].TenantId.Should().Be(tenant1);
|
||||
|
||||
tenant2Jobs.Should().HaveCount(1);
|
||||
tenant2Jobs[0].TenantId.Should().Be(tenant2);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task PaginatedQuery_OffsetAndLimit_DeterministicResults()
|
||||
{
|
||||
// Arrange - Create 10 jobs
|
||||
for (int i = 0; i < 10; i++)
|
||||
{
|
||||
await CreateJobAsync($"job-type-{i}", JobStatus.Scheduled, priority: 10 - i);
|
||||
}
|
||||
|
||||
// Act - Query with different offsets
|
||||
var page1a = await _jobRepository.GetByStatusAsync(_tenantId, JobStatus.Scheduled, limit: 3, offset: 0);
|
||||
var page2a = await _jobRepository.GetByStatusAsync(_tenantId, JobStatus.Scheduled, limit: 3, offset: 3);
|
||||
var page1b = await _jobRepository.GetByStatusAsync(_tenantId, JobStatus.Scheduled, limit: 3, offset: 0);
|
||||
var page2b = await _jobRepository.GetByStatusAsync(_tenantId, JobStatus.Scheduled, limit: 3, offset: 3);
|
||||
|
||||
// Assert - Same pages should return same results
|
||||
page1a.Select(j => j.Id).Should().Equal(page1b.Select(j => j.Id));
|
||||
page2a.Select(j => j.Id).Should().Equal(page2b.Select(j => j.Id));
|
||||
|
||||
// Pages should not overlap
|
||||
var page1Ids = page1a.Select(j => j.Id).ToHashSet();
|
||||
var page2Ids = page2a.Select(j => j.Id).ToHashSet();
|
||||
page1Ids.Intersect(page2Ids).Should().BeEmpty();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task JobStatusTransitions_DoesNotAffectOrderOfOtherJobs()
|
||||
{
|
||||
// Arrange
|
||||
var job1 = await CreateJobAsync("scan", JobStatus.Scheduled, priority: 5);
|
||||
var job2 = await CreateJobAsync("scan", JobStatus.Scheduled, priority: 5);
|
||||
var job3 = await CreateJobAsync("scan", JobStatus.Scheduled, priority: 5);
|
||||
|
||||
var initialOrder = (await _jobRepository.GetByStatusAsync(_tenantId, JobStatus.Scheduled))
|
||||
.Select(j => j.Id).ToList();
|
||||
|
||||
// Act - Lease and complete job2
|
||||
var leased = await _jobRepository.TryLeaseJobAsync(_tenantId, job2.Id, "worker-1", TimeSpan.FromMinutes(5));
|
||||
if (leased != null)
|
||||
{
|
||||
await _jobRepository.CompleteAsync(_tenantId, job2.Id, leased.LeaseId!.Value);
|
||||
}
|
||||
|
||||
// Query remaining scheduled jobs
|
||||
var afterTransition = await _jobRepository.GetByStatusAsync(_tenantId, JobStatus.Scheduled);
|
||||
|
||||
// Assert - Remaining jobs should maintain their relative order
|
||||
var remainingIds = afterTransition.Select(j => j.Id).ToList();
|
||||
var expectedOrder = initialOrder.Where(id => id != job2.Id).ToList();
|
||||
remainingIds.Should().Equal(expectedOrder);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task MultipleJobTypes_FilteringMaintainsOrder()
|
||||
{
|
||||
// Arrange
|
||||
await CreateJobAsync("scan", JobStatus.Scheduled, priority: 10);
|
||||
await CreateJobAsync("build", JobStatus.Scheduled, priority: 8);
|
||||
await CreateJobAsync("scan", JobStatus.Scheduled, priority: 6);
|
||||
await CreateJobAsync("deploy", JobStatus.Scheduled, priority: 4);
|
||||
await CreateJobAsync("scan", JobStatus.Scheduled, priority: 2);
|
||||
|
||||
// Act - Query only scan jobs multiple times
|
||||
var scanJobs1 = await _jobRepository.GetScheduledJobsAsync(_tenantId, ["scan"], limit: 10);
|
||||
var scanJobs2 = await _jobRepository.GetScheduledJobsAsync(_tenantId, ["scan"], limit: 10);
|
||||
|
||||
// Assert
|
||||
scanJobs1.Should().HaveCount(3);
|
||||
scanJobs1.Select(j => j.Id).Should().Equal(scanJobs2.Select(j => j.Id));
|
||||
|
||||
// Verify priority ordering
|
||||
scanJobs1[0].Priority.Should().BeGreaterThanOrEqualTo(scanJobs1[1].Priority);
|
||||
scanJobs1[1].Priority.Should().BeGreaterThanOrEqualTo(scanJobs1[2].Priority);
|
||||
}
|
||||
|
||||
private async Task<JobEntity> CreateJobAsync(
|
||||
string jobType,
|
||||
JobStatus status,
|
||||
int priority = 0,
|
||||
string? tenantId = null,
|
||||
string? idempotencyKey = null)
|
||||
{
|
||||
var job = new JobEntity
|
||||
{
|
||||
Id = Guid.NewGuid(),
|
||||
TenantId = tenantId ?? _tenantId,
|
||||
JobType = jobType,
|
||||
Status = status,
|
||||
Priority = priority,
|
||||
Payload = """{"test": true}""",
|
||||
PayloadDigest = $"sha256:{Guid.NewGuid():N}",
|
||||
IdempotencyKey = idempotencyKey ?? $"idem-{Guid.NewGuid():N}",
|
||||
MaxAttempts = 3
|
||||
};
|
||||
return await _jobRepository.CreateAsync(job);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
<?xml version="1.0" ?>
<Project Sdk="Microsoft.NET.Sdk">

  <!-- Test project for the Scheduler persistence layer (Postgres-backed). -->
  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
    <LangVersion>preview</LangVersion>
    <IsPackable>false</IsPackable>
    <IsTestProject>true</IsTestProject>
    <RootNamespace>StellaOps.Scheduler.Persistence.Tests</RootNamespace>
  </PropertyGroup>

  <!-- Versionless PackageReferences: presumably resolved via central package
       management (Directory.Packages.props) - confirm.
       NOTE(review): no xunit/xunit.runner PackageReference although the tests
       use Xunit - verify it is injected by a shared props/targets file. -->
  <ItemGroup>
    <PackageReference Include="Dapper" />
    <PackageReference Include="FluentAssertions" />
    <PackageReference Include="Moq" />
    <PackageReference Include="Testcontainers.PostgreSql" />
  </ItemGroup>

  <ItemGroup>
    <ProjectReference Include="..\..\StellaOps.Scheduler.__Libraries\StellaOps.Scheduler.Persistence\StellaOps.Scheduler.Persistence.csproj" />
    <ProjectReference Include="..\..\..\__Tests\__Libraries\StellaOps.Infrastructure.Postgres.Testing\StellaOps.Infrastructure.Postgres.Testing.csproj" />
    <ProjectReference Include="..\..\..\__Libraries\StellaOps.TestKit\StellaOps.TestKit.csproj" />
  </ItemGroup>
</Project>
|
||||
@@ -0,0 +1,8 @@
|
||||
# StellaOps.Scheduler.Persistence.Tests Task Board
|
||||
This board mirrors active sprint tasks for this module.
|
||||
Source of truth: `docs/implplan/SPRINT_20260130_002_Tools_csproj_remediation_solid_review.md`.
|
||||
|
||||
| Task ID | Status | Notes |
|
||||
| --- | --- | --- |
|
||||
| REMED-05 | TODO | Remediation checklist: docs/implplan/audits/csproj-standards/remediation/checklists/src/Scheduler/__Tests/StellaOps.Scheduler.Persistence.Tests/StellaOps.Scheduler.Persistence.Tests.md. |
|
||||
| REMED-06 | DONE | SOLID review notes captured for SPRINT_20260130_002. |
|
||||
@@ -0,0 +1,274 @@
|
||||
using FluentAssertions;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using Microsoft.Extensions.Options;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Models;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Repositories;
|
||||
using Xunit;
|
||||
|
||||
using StellaOps.TestKit;
|
||||
namespace StellaOps.Scheduler.Persistence.Postgres.Tests;
|
||||
|
||||
/// <summary>
/// Deterministic <see cref="TimeProvider"/> for tests: always reports one
/// fixed UTC instant, regardless of the wall clock.
/// </summary>
internal sealed class FixedTimeProvider(DateTimeOffset fixedTime) : TimeProvider
{
    /// <summary>Returns the instant the provider was constructed with.</summary>
    public override DateTimeOffset GetUtcNow() => fixedTime;
}
|
||||
|
||||
/// <summary>
/// Integration tests verifying that Scheduler repositories honour the injected
/// <see cref="TimeProvider"/> instead of relying on SQL <c>NOW()</c>.
///
/// Each repository constructor accepts an optional <c>TimeProvider? timeProvider = null</c>
/// parameter. When a <see cref="FixedTimeProvider"/> set to a distinctive past date is
/// injected, every timestamp column that the repository writes via <c>@now</c> must
/// reflect that fixed date, not the database server clock.
/// </summary>
[Collection(SchedulerPostgresCollection.Name)]
public sealed class TimeProviderIntegrationTests : IAsyncLifetime
{
    // Distinctive past instant (2020-06-15 12:00 UTC): far enough from "now"
    // that any NOW()-based write would visibly violate the assertions below.
    private static readonly DateTimeOffset FixedTime =
        new(2020, 6, 15, 12, 0, 0, TimeSpan.Zero);

    // Shared Postgres container fixture for the collection.
    private readonly SchedulerPostgresFixture _fixture;
    // Data source bound to this fixture's schema; shared by every repository under test.
    private readonly SchedulerDataSource _dataSource;

    public TimeProviderIntegrationTests(SchedulerPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        _dataSource = new SchedulerDataSource(Options.Create(options), NullLogger<SchedulerDataSource>.Instance);
    }

    // Reset the shared schema before each test via the fixture's TruncateAllTablesAsync.
    public ValueTask InitializeAsync() => new(_fixture.TruncateAllTablesAsync());
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;

    // -----------------------------------------------------------------------
    // DistributedLockRepository
    // -----------------------------------------------------------------------

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task TryAcquire_UsesTimeProvider_ForExpiresAt()
    {
        // Arrange
        var fixedTimeProvider = new FixedTimeProvider(FixedTime);
        var repository = new DistributedLockRepository(
            _dataSource,
            NullLogger<DistributedLockRepository>.Instance,
            fixedTimeProvider);

        var lockKey = $"tp-lock-{Guid.NewGuid()}";
        var tenantId = Guid.NewGuid().ToString();
        var duration = TimeSpan.FromMinutes(5);

        // Act
        var acquired = await repository.TryAcquireAsync(tenantId, lockKey, "holder-1", duration);

        // Assert – acquisition must succeed
        acquired.Should().BeTrue();

        // Read the lock back (same repository / same fixed TimeProvider so
        // the expires_at > @now check uses the same fixed time).
        var lockInfo = await repository.GetAsync(lockKey);

        lockInfo.Should().NotBeNull("the lock should be readable with the same TimeProvider");

        // expires_at must equal FixedTime + duration (the INSERT sets expires_at = @now + @duration)
        lockInfo!.ExpiresAt.Should().BeCloseTo(FixedTime + duration, TimeSpan.FromSeconds(1));
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task TryAcquire_OnConflict_UsesTimeProvider_ForAcquiredAtAndExpiresAt()
    {
        // Arrange — first acquire with a fixed past time to create the lock with a known expires_at
        var earlyTime = new DateTimeOffset(2020, 6, 15, 10, 0, 0, TimeSpan.Zero);
        var earlyProvider = new FixedTimeProvider(earlyTime);
        var earlyRepo = new DistributedLockRepository(
            _dataSource,
            NullLogger<DistributedLockRepository>.Instance,
            earlyProvider);

        var lockKey = $"tp-conflict-{Guid.NewGuid()}";
        var tenantId = Guid.NewGuid().ToString();
        var shortDuration = TimeSpan.FromMilliseconds(200);

        await earlyRepo.TryAcquireAsync(tenantId, lockKey, "holder-old", shortDuration);

        // The lock's expires_at is earlyTime + 200ms = 2020-06-15T10:00:00.200Z
        // Now re-acquire with a later fixed time that exceeds the expires_at
        // This triggers the ON CONFLICT DO UPDATE where expires_at < @now
        var laterTime = new DateTimeOffset(2020, 6, 15, 12, 0, 0, TimeSpan.Zero);
        var laterProvider = new FixedTimeProvider(laterTime);
        var laterRepo = new DistributedLockRepository(
            _dataSource,
            NullLogger<DistributedLockRepository>.Instance,
            laterProvider);

        var duration = TimeSpan.FromMinutes(10);

        // Act
        var reacquired = await laterRepo.TryAcquireAsync(tenantId, lockKey, "holder-new", duration);

        // Assert
        reacquired.Should().BeTrue("expired lock should be reacquirable");

        var lockInfo = await laterRepo.GetAsync(lockKey);
        lockInfo.Should().NotBeNull();

        // ON CONFLICT path sets acquired_at = @now
        lockInfo!.AcquiredAt.Should().BeCloseTo(laterTime, TimeSpan.FromSeconds(1));

        // ON CONFLICT path sets expires_at = @now + @duration
        lockInfo.ExpiresAt.Should().BeCloseTo(laterTime + duration, TimeSpan.FromSeconds(1));

        lockInfo.HolderId.Should().Be("holder-new");
    }

    // -----------------------------------------------------------------------
    // WorkerRepository
    // -----------------------------------------------------------------------

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task Heartbeat_UsesTimeProvider_ForLastHeartbeatAt()
    {
        // Arrange — insert a worker first (using system time so the row exists)
        var systemRepo = new WorkerRepository(
            _dataSource,
            NullLogger<WorkerRepository>.Instance);

        var worker = new WorkerEntity
        {
            Id = $"tp-worker-hb-{Guid.NewGuid()}",
            Hostname = "test-host",
            Status = WorkerStatus.Active,
            JobTypes = ["scan"],
            MaxConcurrentJobs = 4
        };
        await systemRepo.UpsertAsync(worker);

        // Now heartbeat with a fixed TimeProvider
        var fixedTimeProvider = new FixedTimeProvider(FixedTime);
        var fixedRepo = new WorkerRepository(
            _dataSource,
            NullLogger<WorkerRepository>.Instance,
            fixedTimeProvider);

        // Act
        var updated = await fixedRepo.HeartbeatAsync(worker.Id, 2);

        // Assert
        updated.Should().BeTrue();

        // Read back and verify the timestamp
        var fetched = await fixedRepo.GetByIdAsync(worker.Id);
        fetched.Should().NotBeNull();
        fetched!.LastHeartbeatAt.Should().BeCloseTo(FixedTime, TimeSpan.FromSeconds(1));
        fetched.CurrentJobs.Should().Be(2);
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task Upsert_OnConflict_UsesTimeProvider_ForLastHeartbeatAt()
    {
        // Arrange — insert a worker first so the second upsert triggers ON CONFLICT
        var systemRepo = new WorkerRepository(
            _dataSource,
            NullLogger<WorkerRepository>.Instance);

        var workerId = $"tp-worker-upsert-{Guid.NewGuid()}";
        var worker = new WorkerEntity
        {
            Id = workerId,
            Hostname = "test-host",
            Status = WorkerStatus.Active,
            JobTypes = ["scan"],
            MaxConcurrentJobs = 4
        };
        await systemRepo.UpsertAsync(worker);

        // Second upsert with fixed TimeProvider triggers ON CONFLICT DO UPDATE
        // which sets last_heartbeat_at = @now
        var fixedTimeProvider = new FixedTimeProvider(FixedTime);
        var fixedRepo = new WorkerRepository(
            _dataSource,
            NullLogger<WorkerRepository>.Instance,
            fixedTimeProvider);

        var updatedWorker = new WorkerEntity
        {
            Id = workerId,
            Hostname = "updated-host",
            Status = WorkerStatus.Active,
            JobTypes = ["scan", "sbom"],
            MaxConcurrentJobs = 8
        };

        // Act
        var returned = await fixedRepo.UpsertAsync(updatedWorker);

        // Assert — the returned entity should have last_heartbeat_at equal to our fixed time
        returned.Should().NotBeNull();
        returned.LastHeartbeatAt.Should().BeCloseTo(FixedTime, TimeSpan.FromSeconds(1));
        returned.Hostname.Should().Be("updated-host");
        returned.MaxConcurrentJobs.Should().Be(8);

        // Also verify via a fresh read
        var fetched = await fixedRepo.GetByIdAsync(workerId);
        fetched.Should().NotBeNull();
        fetched!.LastHeartbeatAt.Should().BeCloseTo(FixedTime, TimeSpan.FromSeconds(1));
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task GetStaleWorkers_UsesTimeProvider_ForStaleComparison()
    {
        // Arrange — create a worker with a heartbeat at the fixed (past) time
        var fixedTimeProvider = new FixedTimeProvider(FixedTime);
        var fixedRepo = new WorkerRepository(
            _dataSource,
            NullLogger<WorkerRepository>.Instance,
            fixedTimeProvider);

        // Insert the worker first with system time so it exists
        var systemRepo = new WorkerRepository(
            _dataSource,
            NullLogger<WorkerRepository>.Instance);

        var worker = new WorkerEntity
        {
            Id = $"tp-stale-{Guid.NewGuid()}",
            Hostname = "stale-host",
            Status = WorkerStatus.Active,
            JobTypes = ["scan"],
            MaxConcurrentJobs = 2
        };
        await systemRepo.UpsertAsync(worker);

        // Set heartbeat to fixed time (2020-06-15 12:00:00 UTC)
        await fixedRepo.HeartbeatAsync(worker.Id, 0);

        // Query with a "recent" fixed time — the worker's heartbeat at 2020 should be stale
        // relative to 2020-06-15 12:30:00 with a stale duration of 10 minutes
        var laterTime = new DateTimeOffset(2020, 6, 15, 12, 30, 0, TimeSpan.Zero);
        var laterProvider = new FixedTimeProvider(laterTime);
        var laterRepo = new WorkerRepository(
            _dataSource,
            NullLogger<WorkerRepository>.Instance,
            laterProvider);

        // Act — stale duration of 10 min means heartbeats before 12:20 are stale
        var staleWorkers = await laterRepo.GetStaleWorkersAsync(TimeSpan.FromMinutes(10));

        // Assert
        staleWorkers.Should().ContainSingle(w => w.Id == worker.Id);
    }
}
|
||||
@@ -0,0 +1,263 @@
|
||||
using FluentAssertions;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using Microsoft.Extensions.Options;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Models;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Repositories;
|
||||
using Xunit;
|
||||
|
||||
using StellaOps.TestKit;
|
||||
namespace StellaOps.Scheduler.Persistence.Postgres.Tests;
|
||||
|
||||
/// <summary>
/// Integration tests for <see cref="TriggerRepository"/>: CRUD round-trips,
/// due-trigger selection, fire bookkeeping, enable/disable, and deterministic
/// ordering of equally-due triggers.
/// </summary>
[Collection(SchedulerPostgresCollection.Name)]
public sealed class TriggerRepositoryTests : IAsyncLifetime
{
    private readonly SchedulerPostgresFixture _fixture;
    private readonly SchedulerDataSource _dataSource;
    private readonly TriggerRepository _repository;
    // Unique per test-class instance so rows from parallel classes never collide.
    private readonly string _tenantId = Guid.NewGuid().ToString();

    public TriggerRepositoryTests(SchedulerPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        _dataSource = new SchedulerDataSource(Options.Create(options), NullLogger<SchedulerDataSource>.Instance);
        _repository = new TriggerRepository(_dataSource, NullLogger<TriggerRepository>.Instance);
    }

    // Reset all scheduler tables before each test.
    public ValueTask InitializeAsync() => new(_fixture.TruncateAllTablesAsync());
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task CreateAndGet_RoundTripsTrigger()
    {
        // Arrange
        var trigger = new TriggerEntity
        {
            Id = Guid.NewGuid(),
            TenantId = _tenantId,
            Name = "daily-scan",
            Description = "Daily vulnerability scan",
            JobType = "scan",
            JobPayload = "{\"target\": \"registry.example.com\"}",
            CronExpression = "0 0 * * *",
            Timezone = "UTC",
            Enabled = true,
            NextFireAt = DateTimeOffset.UtcNow.AddDays(1)
        };

        // Act
        await _repository.CreateAsync(trigger);
        var fetched = await _repository.GetByIdAsync(_tenantId, trigger.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(trigger.Id);
        fetched.Name.Should().Be("daily-scan");
        fetched.JobType.Should().Be("scan");
        fetched.CronExpression.Should().Be("0 0 * * *");
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task GetByName_ReturnsCorrectTrigger()
    {
        // Arrange
        var trigger = CreateTrigger("weekly-report", "0 0 * * 0");
        await _repository.CreateAsync(trigger);

        // Act
        var fetched = await _repository.GetByNameAsync(_tenantId, "weekly-report");

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(trigger.Id);
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task List_ReturnsAllTriggersForTenant()
    {
        // Arrange
        var trigger1 = CreateTrigger("trigger1", "0 * * * *");
        var trigger2 = CreateTrigger("trigger2", "0 0 * * *");
        await _repository.CreateAsync(trigger1);
        await _repository.CreateAsync(trigger2);

        // Act
        var triggers = await _repository.ListAsync(_tenantId);

        // Assert
        triggers.Should().HaveCount(2);
        triggers.Select(t => t.Name).Should().Contain(["trigger1", "trigger2"]);
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task GetDueTriggers_ReturnsTriggersReadyToFire()
    {
        // Arrange - one trigger already due, one only due tomorrow.
        // CreateTrigger accepts nextFireAt directly; the previous version
        // rebuilt both entities field-by-field for no benefit.
        var dueTrigger = CreateTrigger("due", "* * * * *",
            nextFireAt: DateTimeOffset.UtcNow.AddMinutes(-1));
        var futureTrigger = CreateTrigger("future", "0 0 * * *",
            nextFireAt: DateTimeOffset.UtcNow.AddDays(1));

        await _repository.CreateAsync(dueTrigger);
        await _repository.CreateAsync(futureTrigger);

        // Act
        var dueTriggers = await _repository.GetDueTriggersAsync();

        // Assert - only the past-due trigger is returned.
        dueTriggers.Should().HaveCount(1);
        dueTriggers[0].Name.Should().Be("due");
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task RecordFire_UpdatesTriggerState()
    {
        // Arrange - the fired job must exist so RecordFire can reference it.
        var trigger = CreateTrigger("fire-test", "* * * * *");
        await _repository.CreateAsync(trigger);
        var jobId = Guid.NewGuid();
        var jobRepository = new JobRepository(_dataSource, NullLogger<JobRepository>.Instance);
        await jobRepository.CreateAsync(new JobEntity
        {
            Id = jobId,
            TenantId = _tenantId,
            JobType = "scan",
            PayloadDigest = "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            IdempotencyKey = $"job-{jobId}",
            CreatedAt = DateTimeOffset.UtcNow,
            Payload = "{}"
        });
        var nextFireAt = DateTimeOffset.UtcNow.AddMinutes(1);

        // Act
        var result = await _repository.RecordFireAsync(_tenantId, trigger.Id, jobId, nextFireAt);
        var fetched = await _repository.GetByIdAsync(_tenantId, trigger.Id);

        // Assert - fire is recorded: last job, next fire time and counter updated.
        result.Should().BeTrue();
        fetched!.LastJobId.Should().Be(jobId);
        fetched.NextFireAt.Should().BeCloseTo(nextFireAt, TimeSpan.FromSeconds(1));
        fetched.FireCount.Should().Be(1);
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task SetEnabled_TogglesEnableState()
    {
        // Arrange
        var trigger = CreateTrigger("toggle-test", "* * * * *");
        await _repository.CreateAsync(trigger);

        // Act - Disable
        await _repository.SetEnabledAsync(_tenantId, trigger.Id, false);
        var disabled = await _repository.GetByIdAsync(_tenantId, trigger.Id);

        // Assert
        disabled!.Enabled.Should().BeFalse();

        // Act - Re-enable
        await _repository.SetEnabledAsync(_tenantId, trigger.Id, true);
        var enabled = await _repository.GetByIdAsync(_tenantId, trigger.Id);

        // Assert
        enabled!.Enabled.Should().BeTrue();
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task Delete_RemovesTrigger()
    {
        // Arrange
        var trigger = CreateTrigger("delete-test", "* * * * *");
        await _repository.CreateAsync(trigger);

        // Act
        await _repository.DeleteAsync(_tenantId, trigger.Id);
        var fetched = await _repository.GetByIdAsync(_tenantId, trigger.Id);

        // Assert
        fetched.Should().BeNull();
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task GetDueTriggers_IsDeterministicForEqualNextFire()
    {
        // Arrange - three triggers sharing one millisecond-truncated due time,
        // spread over two tenants, inserted out of id order on purpose.
        var baseTime = DateTimeOffset.UtcNow.AddMinutes(-5);
        var dueAt = new DateTimeOffset(
            baseTime.Ticks - (baseTime.Ticks % TimeSpan.TicksPerMillisecond),
            baseTime.Offset);

        const string tenantA = "tenant-a";
        const string tenantB = "tenant-b";

        var triggerA = CreateTrigger("deterministic-a", "* * * * *", Guid.Parse("11111111-1111-1111-1111-111111111111"), dueAt, tenantA);
        var triggerB = CreateTrigger("deterministic-b", "* * * * *", Guid.Parse("22222222-2222-2222-2222-222222222222"), dueAt, tenantA);
        var triggerC = CreateTrigger("deterministic-c", "* * * * *", Guid.Parse("33333333-3333-3333-3333-333333333333"), dueAt, tenantB);

        await _repository.CreateAsync(triggerB);
        await _repository.CreateAsync(triggerC);
        await _repository.CreateAsync(triggerA); // Insert out of order on purpose

        var expectedOrder = new[]
        {
            triggerA.Id,
            triggerB.Id,
            triggerC.Id
        };

        // Act
        var first = await _repository.GetDueTriggersAsync(limit: 10);
        var second = await _repository.GetDueTriggersAsync(limit: 10);

        // Assert - both reads return the same, id-ordered sequence.
        first.Select(t => t.Id).Should().Equal(expectedOrder);
        second.Select(t => t.Id).Should().Equal(expectedOrder);
    }

    /// <summary>
    /// Builds an enabled test trigger; id, fire time and tenant default to
    /// fresh/future/test-tenant values unless overridden.
    /// </summary>
    private TriggerEntity CreateTrigger(
        string name,
        string cron,
        Guid? id = null,
        DateTimeOffset? nextFireAt = null,
        string? tenantId = null) => new()
    {
        Id = id ?? Guid.NewGuid(),
        TenantId = tenantId ?? _tenantId,
        Name = name,
        JobType = "test-job",
        CronExpression = cron,
        Enabled = true,
        NextFireAt = nextFireAt ?? DateTimeOffset.UtcNow.AddHours(1)
    };
}
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,165 @@
|
||||
using FluentAssertions;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using Microsoft.Extensions.Options;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Models;
|
||||
using StellaOps.Scheduler.Persistence.Postgres.Repositories;
|
||||
using Xunit;
|
||||
|
||||
using StellaOps.TestKit;
|
||||
namespace StellaOps.Scheduler.Persistence.Postgres.Tests;
|
||||
|
||||
/// <summary>
/// Integration tests for <see cref="WorkerRepository"/>: upsert round-trips,
/// heartbeats, status filtering and deletion against the shared Postgres fixture.
/// </summary>
[Collection(SchedulerPostgresCollection.Name)]
public sealed class WorkerRepositoryTests : IAsyncLifetime
{
    private readonly SchedulerPostgresFixture _fixture;
    private readonly WorkerRepository _repository;

    public WorkerRepositoryTests(SchedulerPostgresFixture fixture)
    {
        _fixture = fixture;

        var options = fixture.Fixture.CreateOptions();
        options.SchemaName = fixture.SchemaName;
        var dataSource = new SchedulerDataSource(Options.Create(options), NullLogger<SchedulerDataSource>.Instance);
        _repository = new WorkerRepository(dataSource, NullLogger<WorkerRepository>.Instance);
    }

    // Reset all scheduler tables before each test.
    public ValueTask InitializeAsync() => new(_fixture.TruncateAllTablesAsync());
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task UpsertAndGet_RoundTripsWorker()
    {
        // Arrange
        var worker = new WorkerEntity
        {
            Id = $"worker-{Guid.NewGuid()}",
            Hostname = "node-01.cluster.local",
            Status = WorkerStatus.Active,
            JobTypes = ["scan", "sbom"],
            MaxConcurrentJobs = 4
        };

        // Act
        await _repository.UpsertAsync(worker);
        var fetched = await _repository.GetByIdAsync(worker.Id);

        // Assert
        fetched.Should().NotBeNull();
        fetched!.Id.Should().Be(worker.Id);
        fetched.Hostname.Should().Be("node-01.cluster.local");
        fetched.JobTypes.Should().BeEquivalentTo(["scan", "sbom"]);
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task Heartbeat_UpdatesLastHeartbeat()
    {
        // Arrange
        var worker = CreateWorker();
        await _repository.UpsertAsync(worker);

        // Act — no artificial delay needed: the assertion tolerates a generous
        // 5-second window around "now", so the old Task.Delay(100) only slowed
        // the suite without strengthening the check.
        await _repository.HeartbeatAsync(worker.Id, 2);
        var fetched = await _repository.GetByIdAsync(worker.Id);

        // Assert
        fetched!.LastHeartbeatAt.Should().BeCloseTo(DateTimeOffset.UtcNow, TimeSpan.FromSeconds(5));
        fetched.CurrentJobs.Should().Be(2);
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task ListByStatus_ReturnsWorkersWithStatus()
    {
        // Arrange - one active and one draining worker.
        var activeWorker = CreateWorker();
        var drainingWorker = new WorkerEntity
        {
            Id = $"draining-{Guid.NewGuid()}",
            Hostname = "node-02",
            Status = WorkerStatus.Draining,
            JobTypes = ["scan"],
            MaxConcurrentJobs = 4
        };
        await _repository.UpsertAsync(activeWorker);
        await _repository.UpsertAsync(drainingWorker);

        // Act
        var activeWorkers = await _repository.ListByStatusAsync(WorkerStatus.Active);

        // Assert - only the active worker is returned.
        activeWorkers.Should().HaveCount(1);
        activeWorkers[0].Id.Should().Be(activeWorker.Id);
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task SetStatus_ChangesWorkerStatus()
    {
        // Arrange
        var worker = CreateWorker();
        await _repository.UpsertAsync(worker);

        // Act
        await _repository.SetStatusAsync(worker.Id, WorkerStatus.Draining);
        var fetched = await _repository.GetByIdAsync(worker.Id);

        // Assert
        fetched!.Status.Should().Be(WorkerStatus.Draining);
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task Delete_RemovesWorker()
    {
        // Arrange
        var worker = CreateWorker();
        await _repository.UpsertAsync(worker);

        // Act
        await _repository.DeleteAsync(worker.Id);
        var fetched = await _repository.GetByIdAsync(worker.Id);

        // Assert
        fetched.Should().BeNull();
    }

    [Trait("Category", TestCategories.Unit)]
    [Fact]
    public async Task List_ReturnsAllWorkers()
    {
        // Arrange
        var worker1 = CreateWorker();
        var worker2 = new WorkerEntity
        {
            Id = $"worker2-{Guid.NewGuid()}",
            Hostname = "node-02",
            Status = WorkerStatus.Active,
            JobTypes = ["scan"],
            MaxConcurrentJobs = 2
        };
        await _repository.UpsertAsync(worker1);
        await _repository.UpsertAsync(worker2);

        // Act
        var workers = await _repository.ListAsync();

        // Assert
        workers.Should().HaveCount(2);
    }

    /// <summary>Builds a default active scan worker with a unique id.</summary>
    private WorkerEntity CreateWorker() => new()
    {
        Id = $"worker-{Guid.NewGuid()}",
        Hostname = "test-host",
        Status = WorkerStatus.Active,
        JobTypes = ["scan"],
        MaxConcurrentJobs = 4
    };
}
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user