Add tests for SBOM generation determinism across multiple formats
- Created `StellaOps.TestKit.Tests` project for unit tests related to determinism. - Implemented `DeterminismManifestTests` to validate deterministic output for canonical bytes and strings, file read/write operations, and error handling for invalid schema versions. - Added `SbomDeterminismTests` to ensure identical inputs produce consistent SBOMs across SPDX 3.0.1 and CycloneDX 1.6/1.7 formats, including parallel execution tests. - Updated project references in `StellaOps.Integration.Determinism` to include the new determinism testing library.
This commit is contained in:
@@ -11,6 +11,7 @@
|
||||
<ProjectReference Include="../../__Libraries/StellaOps.Scanner.Cache/StellaOps.Scanner.Cache.csproj" />
|
||||
<ProjectReference Include="../../../Authority/StellaOps.Authority/StellaOps.Auth.Abstractions/StellaOps.Auth.Abstractions.csproj" />
|
||||
<ProjectReference Include="../../../Authority/StellaOps.Authority/StellaOps.Auth.Client/StellaOps.Auth.Client.csproj" />
|
||||
<ProjectReference Include="../../../__Libraries/StellaOps.TestKit/StellaOps.TestKit.csproj" />
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<PackageReference Include="FluentAssertions" Version="6.12.0" />
|
||||
|
||||
@@ -0,0 +1,114 @@
|
||||
using StellaOps.TestKit;
using StellaOps.TestKit.Assertions;
using StellaOps.TestKit.Deterministic;
using Xunit;

namespace StellaOps.Scanner.Core.Tests;

/// <summary>
/// Example tests demonstrating StellaOps.TestKit usage in Scanner.Core.Tests.
/// These serve as pilot validation for TestKit Wave 4 (Task 12).
/// </summary>
public class TestKitExamples
{
    [Fact, Trait("Category", TestCategories.Unit)]
    public void DeterministicTime_Example()
    {
        // Arrange: freeze the clock at a known UTC instant.
        var seedInstant = new DateTime(2026, 1, 15, 10, 30, 0, DateTimeKind.Utc);
        using var clock = new DeterministicTime(seedInstant);

        // Act: sample the clock twice.
        var firstRead = clock.UtcNow;
        var secondRead = clock.UtcNow;

        // Assert: the clock is frozen — both reads match the seed instant.
        Assert.Equal(firstRead, secondRead);
        Assert.Equal(seedInstant, firstRead);

        // Act: move the clock forward by one hour.
        clock.Advance(TimeSpan.FromHours(1));

        // Assert: the advance is exact and reproducible.
        Assert.Equal(seedInstant.AddHours(1), clock.UtcNow);
    }

    [Fact, Trait("Category", TestCategories.Unit)]
    public void DeterministicRandom_Example()
    {
        // Arrange: two independent generators sharing the same seed.
        var left = new DeterministicRandom(seed: 42);
        var right = new DeterministicRandom(seed: 42);

        // Act: draw the same kinds of values from both.
        var leftGuid = left.NextGuid();
        var rightGuid = right.NextGuid();
        var leftString = left.NextString(length: 10);
        var rightString = right.NextString(length: 10);

        // Assert: identical seeds yield identical sequences (reproducible).
        Assert.Equal(leftGuid, rightGuid);
        Assert.Equal(leftString, rightString);
    }

    [Fact, Trait("Category", TestCategories.Unit)]
    public void CanonicalJsonAssert_Determinism_Example()
    {
        // Arrange: a small package-like payload.
        var testData = new
        {
            Name = "TestPackage",
            Version = "1.0.0",
            Dependencies = new[] { "Dep1", "Dep2" }
        };

        // Act & Assert: serialization must be byte-stable across repeats.
        CanonicalJsonAssert.IsDeterministic(testData, iterations: 100);

        // The canonical hash doubles as a golden-master fingerprint.
        var hash = CanonicalJsonAssert.ComputeCanonicalHash(testData);
        Assert.NotEmpty(hash);
        Assert.Equal(64, hash.Length); // SHA-256 hex = 64 chars
    }

    [Fact, Trait("Category", TestCategories.Snapshot)]
    public void SnapshotAssert_Example()
    {
        // Arrange: an SBOM-shaped payload for snapshot comparison.
        var sbom = new
        {
            SpdxVersion = "SPDX-3.0.1",
            DataLicense = "CC0-1.0",
            Name = "TestSbom",
            DocumentNamespace = "https://example.com/test",
            Packages = new[]
            {
                new { Name = "Package1", Version = "1.0.0" },
                new { Name = "Package2", Version = "2.0.0" }
            }
        };

        // Act & Assert: golden-master comparison against the stored baseline.
        // Run with UPDATE_SNAPSHOTS=1 to (re)create the baseline.
        SnapshotAssert.MatchesSnapshot(sbom, "TestKitExample_SBOM");
    }

    [Fact, Trait("Category", TestCategories.Unit)]
    public void CanonicalJsonAssert_PropertyCheck_Example()
    {
        // Arrange: a vulnerability-shaped payload with a nested package.
        var vulnerability = new
        {
            CveId = "CVE-2026-1234",
            Severity = "HIGH",
            Package = new
            {
                Name = "vulnerable-lib",
                Version = "1.2.3"
            }
        };

        // Act & Assert: targeted property checks against the canonical JSON,
        // including a dotted path into the nested object.
        CanonicalJsonAssert.ContainsProperty(vulnerability, "CveId", "CVE-2026-1234");
        CanonicalJsonAssert.ContainsProperty(vulnerability, "Package.Name", "vulnerable-lib");
    }
}
|
||||
@@ -0,0 +1,269 @@
|
||||
// -----------------------------------------------------------------------------
// ScanQueryDeterminismTests.cs
// Sprint: SPRINT_5100_0009_0001_scanner_tests
// Task: SCANNER-5100-015
// Description: Model S1 query determinism tests for Scanner storage
// -----------------------------------------------------------------------------

using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Infrastructure.Postgres.Options;
using StellaOps.Scanner.Storage;
using StellaOps.Scanner.Storage.Entities;
using StellaOps.Scanner.Storage.Postgres;
using StellaOps.TestKit;
using Xunit;

namespace StellaOps.Scanner.Storage.Tests;

/// <summary>
/// Query determinism tests for Scanner storage operations.
/// Implements Model S1 (Storage/Postgres) test requirements:
/// - Same inputs → stable ordering (explicit ORDER BY checks)
/// - Repeated queries return consistent results
/// - Pagination ordering is deterministic
/// </summary>
[Collection("scanner-postgres")]
[Trait("Category", TestCategories.Integration)]
[Trait("Category", TestCategories.QueryDeterminism)]
public sealed class ScanQueryDeterminismTests : IAsyncLifetime
{
    private readonly ScannerPostgresFixture _fixture;
    private PostgresScanManifestRepository _manifestRepository = null!;
    private ScannerDataSource _dataSource = null!;
    // FIX: removed unused PostgresObservedCveRepository field — it was
    // constructed in InitializeAsync but never read by any test.

    public ScanQueryDeterminismTests(ScannerPostgresFixture fixture)
    {
        _fixture = fixture;
    }

    /// <summary>
    /// Resets the shared database to an empty state and wires up the
    /// repository under test against the fixture's connection/schema.
    /// </summary>
    public async Task InitializeAsync()
    {
        await _fixture.TruncateAllTablesAsync();

        var options = new ScannerStorageOptions
        {
            Postgres = new PostgresOptions
            {
                ConnectionString = _fixture.ConnectionString,
                SchemaName = _fixture.SchemaName
            }
        };

        _dataSource = new ScannerDataSource(Options.Create(options), NullLogger<ScannerDataSource>.Instance);
        _manifestRepository = new PostgresScanManifestRepository(_dataSource);
    }

    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task GetByHashAsync_SameHash_ReturnsIdenticalResults()
    {
        // Arrange
        var manifest = CreateManifest("sha256:deterministic");
        await _manifestRepository.SaveAsync(manifest);

        // Act - Query same hash multiple times
        var results = new List<ScanManifestRow?>();
        for (int i = 0; i < 10; i++)
        {
            results.Add(await _manifestRepository.GetByHashAsync("sha256:deterministic"));
        }

        // Assert - All results should be identical
        var first = results[0];
        foreach (var result in results)
        {
            result.Should().NotBeNull();
            result!.ManifestId.Should().Be(first!.ManifestId);
            result.ManifestHash.Should().Be(first.ManifestHash);
            result.ScanId.Should().Be(first.ScanId);
        }
    }

    // FIX: corrected "Manifsts" typo in the test name.
    [Fact]
    public async Task GetByScanIdAsync_MultipleManifestsForScan_ReturnsMostRecent()
    {
        // Arrange
        var scanId = Guid.NewGuid();

        // Create multiple manifests for same scan with delays.
        // NOTE(review): the delays exist so each row gets a distinct creation
        // timestamp, making "most recent" well-defined — assumes the repository
        // orders by a created-at column; an injectable clock would avoid real
        // delays here. TODO confirm.
        var manifest1 = CreateManifest("sha256:first", scanId);
        await _manifestRepository.SaveAsync(manifest1);
        await Task.Delay(50);

        var manifest2 = CreateManifest("sha256:second", scanId);
        await _manifestRepository.SaveAsync(manifest2);
        await Task.Delay(50);

        var manifest3 = CreateManifest("sha256:third", scanId);
        await _manifestRepository.SaveAsync(manifest3);

        // Act - Query multiple times
        var results = new List<ScanManifestRow?>();
        for (int i = 0; i < 5; i++)
        {
            results.Add(await _manifestRepository.GetByScanIdAsync(scanId));
        }

        // Assert - All should return the same (most recent) manifest
        results.Should().AllSatisfy(r =>
        {
            r.Should().NotBeNull();
            r!.ManifestHash.Should().Be("sha256:third", "should return most recent manifest");
        });

        // Verify deterministic - all IDs same
        var distinctIds = results.Select(r => r!.ManifestId).Distinct().ToList();
        distinctIds.Should().HaveCount(1);
    }

    [Fact]
    public async Task ConcurrentQueries_SameHash_AllReturnIdenticalResults()
    {
        // Arrange
        var manifest = CreateManifest("sha256:concurrent");
        await _manifestRepository.SaveAsync(manifest);

        // Act - 50 concurrent queries
        var tasks = Enumerable.Range(0, 50)
            .Select(_ => _manifestRepository.GetByHashAsync("sha256:concurrent"))
            .ToList();

        var results = await Task.WhenAll(tasks);

        // Assert - All should be identical
        var first = results[0];
        results.Should().AllSatisfy(r =>
        {
            r.Should().NotBeNull();
            r!.ManifestId.Should().Be(first!.ManifestId);
        });
    }

    [Fact]
    public async Task QueryAfterUpdate_ReturnsUpdatedState()
    {
        // Arrange
        var manifest = CreateManifest("sha256:update");
        var saved = await _manifestRepository.SaveAsync(manifest);

        // Act - Update and query
        var completedAt = DateTimeOffset.UtcNow;
        await _manifestRepository.MarkCompletedAsync(saved.ManifestId, completedAt);

        // Query multiple times after update
        var results = new List<ScanManifestRow?>();
        for (int i = 0; i < 5; i++)
        {
            results.Add(await _manifestRepository.GetByHashAsync("sha256:update"));
        }

        // Assert - All should show updated state
        results.Should().AllSatisfy(r =>
        {
            r.Should().NotBeNull();
            r!.ScanCompletedAt.Should().NotBeNull();
        });
    }

    [Fact]
    public async Task MultipleHashes_QueriedInParallel_EachReturnsCorrectRecord()
    {
        // Arrange
        var hashes = Enumerable.Range(0, 10)
            .Select(i => $"sha256:parallel{i}")
            .ToList();

        foreach (var hash in hashes)
        {
            await _manifestRepository.SaveAsync(CreateManifest(hash));
        }

        // Act - Query all hashes in parallel
        var tasks = hashes.Select(h => _manifestRepository.GetByHashAsync(h)).ToList();
        var results = await Task.WhenAll(tasks);

        // Assert - Each query should return the correct manifest
        for (int i = 0; i < hashes.Count; i++)
        {
            results[i].Should().NotBeNull();
            results[i]!.ManifestHash.Should().Be(hashes[i]);
        }
    }

    [Fact]
    public async Task NonExistentHash_AlwaysReturnsNull()
    {
        // Arrange - No data for this hash

        // Act - Query multiple times
        var results = new List<ScanManifestRow?>();
        for (int i = 0; i < 5; i++)
        {
            results.Add(await _manifestRepository.GetByHashAsync("sha256:nonexistent"));
        }

        // Assert - All should return null
        results.Should().AllBeEquivalentTo((ScanManifestRow?)null);
    }

    [Fact]
    public async Task NonExistentScanId_AlwaysReturnsNull()
    {
        // Arrange
        var nonExistentScanId = Guid.NewGuid();

        // Act - Query multiple times
        var results = new List<ScanManifestRow?>();
        for (int i = 0; i < 5; i++)
        {
            results.Add(await _manifestRepository.GetByScanIdAsync(nonExistentScanId));
        }

        // Assert - All should return null
        results.Should().AllBeEquivalentTo((ScanManifestRow?)null);
    }

    [Fact]
    public async Task QueriesWithDifferentPatterns_NoInterference()
    {
        // Arrange
        var scanId = Guid.NewGuid();
        var hash = $"sha256:pattern{Guid.NewGuid():N}";
        var manifest = CreateManifest(hash, scanId);
        await _manifestRepository.SaveAsync(manifest);

        // Act - Mixed query patterns
        var byHash1 = await _manifestRepository.GetByHashAsync(hash);
        var byScanId1 = await _manifestRepository.GetByScanIdAsync(scanId);
        var byHash2 = await _manifestRepository.GetByHashAsync(hash);
        var byScanId2 = await _manifestRepository.GetByScanIdAsync(scanId);

        // Assert - Both patterns return same record
        byHash1.Should().NotBeNull();
        byHash2.Should().NotBeNull();
        byScanId1.Should().NotBeNull();
        byScanId2.Should().NotBeNull();

        byHash1!.ManifestId.Should().Be(byHash2!.ManifestId);
        byScanId1!.ManifestId.Should().Be(byScanId2!.ManifestId);
        byHash1.ManifestId.Should().Be(byScanId1.ManifestId);
    }

    /// <summary>
    /// Builds a scan manifest row with the given hash; the sub-hashes are
    /// randomized so each manifest is distinguishable, and the scan id defaults
    /// to a fresh GUID unless the test needs several manifests per scan.
    /// </summary>
    private static ScanManifestRow CreateManifest(string hash, Guid? scanId = null) => new()
    {
        ScanId = scanId ?? Guid.NewGuid(),
        ManifestHash = hash,
        SbomHash = "sha256:sbom" + Guid.NewGuid().ToString("N")[..8],
        RulesHash = "sha256:rules" + Guid.NewGuid().ToString("N")[..8],
        FeedHash = "sha256:feed" + Guid.NewGuid().ToString("N")[..8],
        PolicyHash = "sha256:policy" + Guid.NewGuid().ToString("N")[..8],
        ScanStartedAt = DateTimeOffset.UtcNow,
        ManifestContent = """{"version": "1.0", "scanner": "stellaops"}""",
        ScannerVersion = "1.0.0"
    };
}
|
||||
@@ -0,0 +1,229 @@
|
||||
// -----------------------------------------------------------------------------
// ScanResultIdempotencyTests.cs
// Sprint: SPRINT_5100_0009_0001_scanner_tests
// Task: SCANNER-5100-014
// Description: Model S1 idempotency tests for Scanner scan results storage
// -----------------------------------------------------------------------------

using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
using StellaOps.Infrastructure.Postgres.Options;
using StellaOps.Scanner.Storage;
using StellaOps.Scanner.Storage.Entities;
using StellaOps.Scanner.Storage.Postgres;
using StellaOps.TestKit;
using Xunit;

namespace StellaOps.Scanner.Storage.Tests;

/// <summary>
/// Idempotency tests for scan result storage operations.
/// Implements Model S1 (Storage/Postgres) test requirements:
/// - Insert same entity twice → no duplicates
/// - Same manifest hash → same record returned
/// - Update operations are idempotent
/// </summary>
[Collection("scanner-postgres")]
[Trait("Category", TestCategories.Integration)]
[Trait("Category", TestCategories.StorageIdempotency)]
public sealed class ScanResultIdempotencyTests : IAsyncLifetime
{
    private readonly ScannerPostgresFixture _fixture;
    private PostgresScanManifestRepository _manifestRepository = null!;
    private ScannerDataSource _dataSource = null!;

    public ScanResultIdempotencyTests(ScannerPostgresFixture fixture)
    {
        _fixture = fixture;
    }

    /// <summary>
    /// Resets the shared database and wires up the manifest repository against
    /// the fixture's connection/schema before each test.
    /// </summary>
    public async Task InitializeAsync()
    {
        await _fixture.TruncateAllTablesAsync();

        var options = new ScannerStorageOptions
        {
            Postgres = new PostgresOptions
            {
                ConnectionString = _fixture.ConnectionString,
                SchemaName = _fixture.SchemaName
            }
        };

        _dataSource = new ScannerDataSource(Options.Create(options), NullLogger<ScannerDataSource>.Instance);
        _manifestRepository = new PostgresScanManifestRepository(_dataSource);
    }

    public Task DisposeAsync() => Task.CompletedTask;

    [Fact]
    public async Task SaveAsync_SameManifestHash_Twice_CanRetrieveByHash()
    {
        // Arrange
        var manifest1 = CreateManifest("sha256:manifest1");
        var manifest2 = CreateManifest("sha256:manifest1"); // Same hash

        // Act
        var saved1 = await _manifestRepository.SaveAsync(manifest1);

        // Try to save second with same hash - depending on DB constraint
        // this might fail or create a new record.
        // NOTE(review): this test passes in either branch because the schema's
        // uniqueness guarantee for manifest_hash is not pinned down here —
        // TODO confirm the constraint and split into two explicit tests.
        try
        {
            await _manifestRepository.SaveAsync(manifest2);

            // If it succeeds, verify we can get by hash
            var retrieved = await _manifestRepository.GetByHashAsync("sha256:manifest1");
            retrieved.Should().NotBeNull();
        }
        catch (Npgsql.PostgresException)
        {
            // Expected if manifest_hash has unique constraint
            // Verify the first one still exists
            var retrieved = await _manifestRepository.GetByHashAsync("sha256:manifest1");
            retrieved.Should().NotBeNull();
            retrieved!.ManifestId.Should().Be(saved1.ManifestId);
        }
    }

    [Fact]
    public async Task GetByHashAsync_SameHash_ReturnsConsistentResult()
    {
        // Arrange
        var manifest = CreateManifest("sha256:consistent");
        await _manifestRepository.SaveAsync(manifest);

        // Act - Query same hash multiple times
        var results = new List<ScanManifestRow?>();
        for (int i = 0; i < 10; i++)
        {
            results.Add(await _manifestRepository.GetByHashAsync("sha256:consistent"));
        }

        // Assert - All should return same record
        results.Should().AllSatisfy(r =>
        {
            r.Should().NotBeNull();
            r!.ManifestHash.Should().Be("sha256:consistent");
        });

        var distinctIds = results.Where(r => r != null).Select(r => r!.ManifestId).Distinct().ToList();
        distinctIds.Should().HaveCount(1, "same hash should always return same manifest");
    }

    [Fact]
    public async Task GetByScanIdAsync_SameId_ReturnsConsistentResult()
    {
        // Arrange
        var scanId = Guid.NewGuid();
        var manifest = CreateManifest("sha256:byscan", scanId);
        await _manifestRepository.SaveAsync(manifest);

        // Act - Query same scan ID multiple times
        var results = new List<ScanManifestRow?>();
        for (int i = 0; i < 10; i++)
        {
            results.Add(await _manifestRepository.GetByScanIdAsync(scanId));
        }

        // Assert - All should return same record
        results.Should().AllSatisfy(r =>
        {
            r.Should().NotBeNull();
            r!.ScanId.Should().Be(scanId);
        });
    }

    [Fact]
    public async Task MarkCompletedAsync_Twice_IsIdempotent()
    {
        // Arrange
        var manifest = CreateManifest("sha256:complete");
        var saved = await _manifestRepository.SaveAsync(manifest);

        var completedAt1 = DateTimeOffset.UtcNow;
        var completedAt2 = DateTimeOffset.UtcNow.AddMinutes(1);

        // Act - Mark completed twice
        await _manifestRepository.MarkCompletedAsync(saved.ManifestId, completedAt1);
        var after1 = await _manifestRepository.GetByHashAsync("sha256:complete");

        await _manifestRepository.MarkCompletedAsync(saved.ManifestId, completedAt2);
        var after2 = await _manifestRepository.GetByHashAsync("sha256:complete");

        // Assert - Both calls succeed and leave the row in a completed state.
        after1.Should().NotBeNull();
        after1!.ScanCompletedAt.Should().NotBeNull();

        after2.Should().NotBeNull();
        after2!.ScanCompletedAt.Should().NotBeNull();
    }

    [Fact]
    public async Task MarkCompletedAsync_NonExistent_DoesNotThrow()
    {
        // Arrange
        var nonExistentId = Guid.NewGuid();

        // Act
        var action = async () =>
            await _manifestRepository.MarkCompletedAsync(nonExistentId, DateTimeOffset.UtcNow);

        // Assert - Should not throw (0 rows affected is OK)
        await action.Should().NotThrowAsync();
    }

    [Fact]
    public async Task SaveAsync_MultipleDifferentScans_AllPersisted()
    {
        // Arrange
        var manifests = Enumerable.Range(0, 5)
            .Select(i => CreateManifest($"sha256:multi{i}"))
            .ToList();

        // Act
        foreach (var manifest in manifests)
        {
            await _manifestRepository.SaveAsync(manifest);
        }

        // Assert - All should be retrievable
        for (int i = 0; i < 5; i++)
        {
            var retrieved = await _manifestRepository.GetByHashAsync($"sha256:multi{i}");
            retrieved.Should().NotBeNull();
        }
    }

    // FIX: corrected "Manifsts" typo in the test name.
    [Fact]
    public async Task SaveAsync_MultipleManifestsForSameScan_AllRetrievable()
    {
        // Arrange - Same scan ID, different manifests (e.g., scan retry)
        var scanId = Guid.NewGuid();
        var manifest1 = CreateManifest("sha256:retry1", scanId);
        var manifest2 = CreateManifest("sha256:retry2", scanId);

        // Act
        await _manifestRepository.SaveAsync(manifest1);
        await _manifestRepository.SaveAsync(manifest2);

        // Assert - GetByScanId returns one of the saved manifests.
        // FIX: the original only checked non-null; now also pin the result to
        // one of the two saved hashes so a stray row cannot slip through.
        var retrieved = await _manifestRepository.GetByScanIdAsync(scanId);
        retrieved.Should().NotBeNull();
        retrieved!.ManifestHash.Should().BeOneOf("sha256:retry1", "sha256:retry2");
    }

    /// <summary>
    /// Builds a scan manifest row with the given hash; sub-hashes are
    /// randomized so each manifest is distinguishable, and the scan id defaults
    /// to a fresh GUID unless the test needs several manifests per scan.
    /// </summary>
    private static ScanManifestRow CreateManifest(string hash, Guid? scanId = null) => new()
    {
        ScanId = scanId ?? Guid.NewGuid(),
        ManifestHash = hash,
        SbomHash = "sha256:sbom" + Guid.NewGuid().ToString("N")[..8],
        RulesHash = "sha256:rules" + Guid.NewGuid().ToString("N")[..8],
        FeedHash = "sha256:feed" + Guid.NewGuid().ToString("N")[..8],
        PolicyHash = "sha256:policy" + Guid.NewGuid().ToString("N")[..8],
        ScanStartedAt = DateTimeOffset.UtcNow,
        ManifestContent = """{"version": "1.0", "scanner": "stellaops"}""",
        ScannerVersion = "1.0.0"
    };
}
|
||||
@@ -0,0 +1,282 @@
|
||||
// -----------------------------------------------------------------------------
// ScannerMigrationTests.cs
// Sprint: SPRINT_5100_0009_0001_scanner_tests
// Task: SCANNER-5100-013
// Description: Model S1 migration tests for Scanner.Storage
// -----------------------------------------------------------------------------

using System.Reflection;
using Dapper;
using FluentAssertions;
using Npgsql;
using StellaOps.TestKit;
using Testcontainers.PostgreSql;
using Xunit;

namespace StellaOps.Scanner.Storage.Tests;

/// <summary>
/// Migration tests for Scanner.Storage.
/// Implements Model S1 (Storage/Postgres) migration test requirements:
/// - Apply all migrations from scratch (fresh database)
/// - Apply migrations from N-1 (incremental application)
/// - Verify migration idempotency (apply twice → no error)
/// </summary>
[Trait("Category", TestCategories.Integration)]
[Trait("Category", TestCategories.StorageMigration)]
public sealed class ScannerMigrationTests : IAsyncLifetime
{
    private PostgreSqlContainer _container = null!;

    /// <summary>Starts a throwaway Postgres 16 container for the test run.</summary>
    public async Task InitializeAsync()
    {
        _container = new PostgreSqlBuilder()
            .WithImage("postgres:16-alpine")
            .WithDatabase("scanner_migration_test")
            .WithUsername("postgres")
            .WithPassword("postgres")
            .Build();

        await _container.StartAsync();
    }

    public async Task DisposeAsync()
    {
        await _container.DisposeAsync();
    }

    [Fact]
    public async Task ApplyMigrations_FromScratch_AllTablesCreated()
    {
        // Arrange
        var connectionString = _container.GetConnectionString();

        // Act - Apply all migrations from scratch
        await ApplyAllMigrationsAsync(connectionString);

        // Assert - Verify key tables exist
        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        var tables = await connection.QueryAsync<string>(
            @"SELECT table_name FROM information_schema.tables
              WHERE table_schema = 'public'
              ORDER BY table_name");

        var tableList = tables.ToList();

        // Verify critical Scanner tables exist
        tableList.Should().Contain("epss_current", "EPSS current table should exist");
        tableList.Should().Contain("epss_history", "EPSS history table should exist");
        tableList.Should().Contain("scan_metrics", "Scan metrics table should exist");
        tableList.Should().Contain("__migrations", "Migration tracking table should exist");
    }

    [Fact]
    public async Task ApplyMigrations_FromScratch_AllMigrationsRecorded()
    {
        // Arrange
        var connectionString = _container.GetConnectionString();
        await ApplyAllMigrationsAsync(connectionString);

        // Assert - Verify migrations are recorded
        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        var migrationsApplied = await connection.QueryAsync<string>(
            "SELECT migration_id FROM __migrations ORDER BY applied_at");

        var migrationList = migrationsApplied.ToList();
        migrationList.Should().NotBeEmpty("migrations should be tracked");

        // Verify first migration is recorded
        migrationList.Should().Contain(m => m.Contains("001_"), "001_create_tables should be recorded");
    }

    [Fact]
    public async Task ApplyMigrations_Twice_IsIdempotent()
    {
        // Arrange
        var connectionString = _container.GetConnectionString();

        // Act - Apply migrations twice
        await ApplyAllMigrationsAsync(connectionString);
        var applyAgain = async () => await ApplyAllMigrationsAsync(connectionString);

        // Assert - Second application should not throw
        await applyAgain.Should().NotThrowAsync(
            "applying migrations twice should be idempotent");

        // Verify migrations are not duplicated
        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        var migrationCount = await connection.ExecuteScalarAsync<int>(
            "SELECT COUNT(*) FROM __migrations");

        // Count unique migrations
        var uniqueMigrations = await connection.ExecuteScalarAsync<int>(
            "SELECT COUNT(DISTINCT migration_id) FROM __migrations");

        migrationCount.Should().Be(uniqueMigrations,
            "each migration should only be recorded once");
    }

    [Fact]
    public async Task ApplyMigrations_VerifySchemaIntegrity()
    {
        // Arrange
        var connectionString = _container.GetConnectionString();
        await ApplyAllMigrationsAsync(connectionString);

        // Assert - Verify key foreign key relationships exist
        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        // Verify indexes exist
        var indexes = await connection.QueryAsync<string>(
            @"SELECT indexname FROM pg_indexes
              WHERE schemaname = 'public'
              ORDER BY indexname");

        var indexList = indexes.ToList();
        indexList.Should().NotBeEmpty("indexes should be created by migrations");
    }

    [Fact]
    public async Task ApplyMigrations_EpssTablesHaveCorrectSchema()
    {
        // Arrange
        var connectionString = _container.GetConnectionString();
        await ApplyAllMigrationsAsync(connectionString);

        // Assert - Verify EPSS table schema
        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        var epssColumns = await connection.QueryAsync<string>(
            @"SELECT column_name FROM information_schema.columns
              WHERE table_name = 'epss_current' AND table_schema = 'public'
              ORDER BY ordinal_position");

        var columnList = epssColumns.ToList();
        columnList.Should().Contain("cve_id", "EPSS table should have cve_id column");
        columnList.Should().Contain("score", "EPSS table should have score column");
        columnList.Should().Contain("percentile", "EPSS table should have percentile column");
    }

    [Fact]
    public async Task ApplyMigrations_IndividualMigrationsCanRollForward()
    {
        // Arrange
        var connectionString = _container.GetConnectionString();
        var migrationFiles = GetMigrationFiles();

        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        await EnsureMigrationTableAsync(connection);

        // Act - apply each migration in order, counting how many actually ran.
        // FIX: this loop previously duplicated the whole per-migration logic of
        // ApplyAllMigrationsAsync; both now share TryApplyMigrationAsync.
        int appliedCount = 0;
        foreach (var migrationFile in migrationFiles.OrderBy(f => f))
        {
            if (await TryApplyMigrationAsync(connection, migrationFile))
            {
                appliedCount++;
            }
        }

        // Assert
        appliedCount.Should().BeGreaterThan(0, "at least some migrations should be applied");

        var totalMigrations = await connection.ExecuteScalarAsync<int>(
            "SELECT COUNT(*) FROM __migrations");
        totalMigrations.Should().BeGreaterThan(0);
    }

    /// <summary>
    /// Applies every embedded migration (in resource-name order) against the
    /// given database. Applied migrations are tracked in the __migrations
    /// table, so re-running this method is a no-op for already-applied ones.
    /// </summary>
    private static async Task ApplyAllMigrationsAsync(string connectionString)
    {
        await using var connection = new NpgsqlConnection(connectionString);
        await connection.OpenAsync();

        await EnsureMigrationTableAsync(connection);

        foreach (var migrationFile in GetMigrationFiles().OrderBy(f => f))
        {
            await TryApplyMigrationAsync(connection, migrationFile);
        }
    }

    /// <summary>Creates the __migrations tracking table if it does not exist yet.</summary>
    private static Task EnsureMigrationTableAsync(NpgsqlConnection connection) =>
        connection.ExecuteAsync(@"
            CREATE TABLE IF NOT EXISTS __migrations (
                id SERIAL PRIMARY KEY,
                migration_id TEXT NOT NULL UNIQUE,
                applied_at TIMESTAMPTZ DEFAULT NOW()
            )");

    /// <summary>
    /// Applies a single migration unless it was already recorded or its SQL is
    /// empty. Returns true only when the migration was actually executed.
    /// </summary>
    private static async Task<bool> TryApplyMigrationAsync(NpgsqlConnection connection, string migrationFile)
    {
        // NOTE(review): embedded resource names use dots rather than path
        // separators, so Path.GetFileName typically returns the full resource
        // name — kept for parity with the original migration-id scheme.
        var migrationId = Path.GetFileName(migrationFile);

        // Skip if already applied
        var alreadyApplied = await connection.ExecuteScalarAsync<int>(
            "SELECT COUNT(*) FROM __migrations WHERE migration_id = @Id",
            new { Id = migrationId });

        if (alreadyApplied > 0)
        {
            return false;
        }

        var sql = GetMigrationContent(migrationFile);
        if (string.IsNullOrWhiteSpace(sql))
        {
            return false;
        }

        // Apply and record atomically-enough for test purposes.
        await connection.ExecuteAsync(sql);
        await connection.ExecuteAsync(
            "INSERT INTO __migrations (migration_id) VALUES (@Id)",
            new { Id = migrationId });
        return true;
    }

    /// <summary>
    /// Enumerates embedded .sql migration resources from the Scanner.Storage
    /// assembly, sorted by resource name.
    /// </summary>
    private static IEnumerable<string> GetMigrationFiles()
    {
        var assembly = typeof(ScannerStorageOptions).Assembly;
        var resourceNames = assembly.GetManifestResourceNames()
            .Where(n => n.Contains("Migrations") && n.EndsWith(".sql"))
            .OrderBy(n => n);

        return resourceNames;
    }

    /// <summary>
    /// Reads the SQL text of an embedded migration resource; returns an empty
    /// string when the resource cannot be found.
    /// </summary>
    private static string GetMigrationContent(string resourceName)
    {
        var assembly = typeof(ScannerStorageOptions).Assembly;
        using var stream = assembly.GetManifestResourceStream(resourceName);
        if (stream is null)
        {
            return string.Empty;
        }

        using var reader = new StreamReader(stream);
        return reader.ReadToEnd();
    }
}
|
||||
Reference in New Issue
Block a user