5100* tests strengthening work

This commit is contained in:
StellaOps Bot
2025-12-24 12:38:34 +02:00
parent 9a08d10b89
commit 02772c7a27
117 changed files with 29941 additions and 66 deletions

View File

@@ -0,0 +1,756 @@
// -----------------------------------------------------------------------------
// RoutingDecisionPropertyTests.cs
// Sprint: SPRINT_5100_0007_0001_testing_strategy_2026
// Task: TEST-STRAT-5100-004 - Property-based tests for routing/decision logic
// Description: FsCheck property tests for DefaultRoutingPlugin routing invariants
// -----------------------------------------------------------------------------
using FluentAssertions;
using FsCheck;
using FsCheck.Xunit;
using Microsoft.Extensions.Options;
using StellaOps.Router.Common.Enums;
using StellaOps.Router.Common.Models;
using StellaOps.Router.Gateway.Configuration;
using StellaOps.Router.Gateway.Routing;
using Xunit;
namespace StellaOps.Router.Gateway.Tests.Properties;
/// <summary>
/// Tests for the routing decision logic of <c>DefaultRoutingPlugin</c>.
/// All current invariants are deterministic, so they are expressed as plain
/// xUnit facts; the FsCheck generators below are retained for genuinely
/// randomized property tests (parameterized [Property] methods).
/// </summary>
public sealed class RoutingDecisionPropertyTests
{
    #region Generators

    /// <summary>
    /// Generates a random <c>ConnectionState</c>. The optional arguments pin
    /// the region, health status, or version so callers can constrain the
    /// generated search space.
    /// </summary>
    private static Gen<ConnectionState> GenerateConnection(
        string? forcedRegion = null,
        InstanceHealthStatus? forcedStatus = null,
        string? forcedVersion = null)
    {
        return from connectionId in Gen.Elements("conn-1", "conn-2", "conn-3", "conn-4", "conn-5")
               from serviceName in Gen.Constant("test-service")
               from version in forcedVersion != null
                   ? Gen.Constant(forcedVersion)
                   : Gen.Elements("1.0.0", "1.1.0", "2.0.0")
               from region in forcedRegion != null
                   ? Gen.Constant(forcedRegion)
                   : Gen.Elements("eu1", "eu2", "us1", "us2", "ap1")
               from status in forcedStatus.HasValue
                   ? Gen.Constant(forcedStatus.Value)
                   : Gen.Elements(InstanceHealthStatus.Healthy, InstanceHealthStatus.Degraded, InstanceHealthStatus.Unhealthy)
               from pingMs in Gen.Choose(1, 500)
               select new ConnectionState
               {
                   ConnectionId = $"{connectionId}-{region}",
                   Instance = new ServiceInstance
                   {
                       InstanceId = $"{connectionId}-{region}",
                       ServiceName = serviceName,
                       Version = version,
                       Region = region
                   },
                   Status = status,
                   AveragePingMs = pingMs,
                   // Heartbeat slightly in the past; -pingMs % 60 is in (-60, 0].
                   LastHeartbeatUtc = DateTimeOffset.UtcNow.AddSeconds(-pingMs % 60)
               };
    }

    /// <summary>
    /// Generates a list of connection candidates, deduplicated by connection id.
    /// </summary>
    private static Gen<List<ConnectionState>> GenerateCandidates(
        int minCount = 1,
        int maxCount = 10,
        string? forcedRegion = null,
        InstanceHealthStatus? forcedStatus = null)
    {
        return from count in Gen.Choose(minCount, maxCount)
               from connections in Gen.ListOf(count, GenerateConnection(forcedRegion, forcedStatus))
               select connections.DistinctBy(c => c.ConnectionId).ToList();
    }

    /// <summary>
    /// Generates RoutingOptions spanning all flag / tie-breaker combinations.
    /// </summary>
    private static Gen<RoutingOptions> GenerateRoutingOptions()
    {
        return from preferLocal in Arb.Generate<bool>()
               from allowDegraded in Arb.Generate<bool>()
               from strictVersion in Arb.Generate<bool>()
               from tieBreaker in Gen.Elements(TieBreakerMode.Random, TieBreakerMode.RoundRobin, TieBreakerMode.LowestLatency)
               select new RoutingOptions
               {
                   PreferLocalRegion = preferLocal,
                   AllowDegradedInstances = allowDegraded,
                   StrictVersionMatching = strictVersion,
                   TieBreaker = tieBreaker,
                   RoutingTimeoutMs = 5000,
                   DefaultVersion = null
               };
    }

    #endregion

    #region Tests - Determinism

    [Fact]
    public void SameInputs_ProduceDeterministicDecisions()
    {
        // Arrange
        var plugin = CreatePlugin("eu1", MakeOptions(preferLocal: true, allowDegraded: true, strictVersion: true));
        var candidates = CreateFixedCandidates();

        // Act - run the identical routing decision repeatedly
        var decisions = new List<string?>();
        for (int i = 0; i < 10; i++)
        {
            var decision = plugin.ChooseInstanceAsync(
                CreateContext("1.0.0", candidates),
                CancellationToken.None).GetAwaiter().GetResult();
            decisions.Add(decision?.Connection?.ConnectionId);
        }

        // Assert - all decisions should be identical
        decisions.All(d => d == decisions[0]).Should().BeTrue(
            "same inputs with deterministic tie-breaker should produce same routing decision");
    }

    [Fact]
    public void EmptyCandidates_AlwaysReturnsNull()
    {
        // Exhaustively cover every option combination deterministically instead
        // of sampling one random RoutingOptions per run (the previous Gen.Sample
        // call made this test nondeterministic).
        foreach (var preferLocal in new[] { false, true })
        foreach (var allowDegraded in new[] { false, true })
        foreach (var strictVersion in new[] { false, true })
        foreach (var tieBreaker in new[] { TieBreakerMode.Random, TieBreakerMode.RoundRobin, TieBreakerMode.LowestLatency })
        {
            var plugin = CreatePlugin("eu1", MakeOptions(preferLocal, allowDegraded, strictVersion, tieBreaker));

            var decision = plugin.ChooseInstanceAsync(
                CreateContext("1.0.0", []),
                CancellationToken.None).GetAwaiter().GetResult();

            decision.Should().BeNull("empty candidates should always return null");
        }
    }

    #endregion

    #region Tests - Health Preference

    [Fact]
    public void HealthyPreferred_WhenHealthyExists_NeverChoosesDegraded()
    {
        // Arrange - the degraded instance has the better latency; health must still win.
        var plugin = CreatePlugin("eu1", MakeOptions(preferLocal: false, allowDegraded: true, strictVersion: false));
        var candidates = new List<ConnectionState>
        {
            Conn("degraded-1", "eu1", InstanceHealthStatus.Degraded, 1),
            Conn("healthy-1", "eu1", InstanceHealthStatus.Healthy, 100)
        };

        // Act
        var decision = plugin.ChooseInstanceAsync(
            CreateContext(null, candidates),
            CancellationToken.None).GetAwaiter().GetResult();

        // Assert
        decision.Should().NotBeNull();
        decision!.Connection.Status.Should().Be(InstanceHealthStatus.Healthy,
            "healthy instances should always be preferred over degraded");
    }

    [Fact]
    public void WhenOnlyDegraded_AndAllowDegradedTrue_SelectsDegraded()
    {
        // Arrange
        var plugin = CreatePlugin("eu1", MakeOptions(preferLocal: false, allowDegraded: true, strictVersion: false));
        var candidates = new List<ConnectionState>
        {
            Conn("degraded-1", "eu1", InstanceHealthStatus.Degraded, 10),
            Conn("degraded-2", "eu1", InstanceHealthStatus.Degraded, 20)
        };

        // Act
        var decision = plugin.ChooseInstanceAsync(
            CreateContext(null, candidates),
            CancellationToken.None).GetAwaiter().GetResult();

        // Assert
        decision.Should().NotBeNull("degraded instances should be selected when no healthy available and AllowDegradedInstances=true");
        decision!.Connection.Status.Should().Be(InstanceHealthStatus.Degraded);
    }

    [Fact]
    public void WhenOnlyDegraded_AndAllowDegradedFalse_ReturnsNull()
    {
        // Arrange
        var plugin = CreatePlugin("eu1", MakeOptions(preferLocal: false, allowDegraded: false, strictVersion: false));
        var candidates = new List<ConnectionState>
        {
            Conn("degraded-1", "eu1", InstanceHealthStatus.Degraded, 10)
        };

        // Act
        var decision = plugin.ChooseInstanceAsync(
            CreateContext(null, candidates),
            CancellationToken.None).GetAwaiter().GetResult();

        // Assert
        decision.Should().BeNull("degraded instances should not be selected when AllowDegradedInstances=false");
    }

    #endregion

    #region Tests - Region Tier Preference

    [Fact]
    public void LocalRegion_AlwaysPreferred_WhenAvailable()
    {
        // Arrange - the remote instance has the better latency; locality must still win.
        var gatewayRegion = "eu1";
        var plugin = CreatePlugin(gatewayRegion, MakeOptions(preferLocal: true, allowDegraded: false, strictVersion: false));
        var candidates = new List<ConnectionState>
        {
            Conn("remote-1", "us1", InstanceHealthStatus.Healthy, 1),
            Conn("local-1", "eu1", InstanceHealthStatus.Healthy, 100)
        };

        // Act
        var decision = plugin.ChooseInstanceAsync(
            CreateContext(null, candidates, gatewayRegion),
            CancellationToken.None).GetAwaiter().GetResult();

        // Assert
        decision.Should().NotBeNull();
        decision!.Connection.Instance.Region.Should().Be(gatewayRegion,
            "local region should always be preferred when PreferLocalRegion=true");
    }

    [Fact]
    public void WhenNoLocalRegion_FallsBackToRemote()
    {
        // Arrange
        var gatewayRegion = "eu1";
        var plugin = CreatePlugin(gatewayRegion, MakeOptions(preferLocal: true, allowDegraded: false, strictVersion: false));
        var candidates = new List<ConnectionState>
        {
            Conn("remote-1", "us1", InstanceHealthStatus.Healthy, 10)
        };

        // Act
        var decision = plugin.ChooseInstanceAsync(
            CreateContext(null, candidates, gatewayRegion),
            CancellationToken.None).GetAwaiter().GetResult();

        // Assert
        decision.Should().NotBeNull("should fallback to remote region when no local available");
        decision!.Connection.Instance.Region.Should().Be("us1");
    }

    #endregion

    #region Tests - Version Matching

    [Fact]
    public void StrictVersionMatching_RejectsNonMatchingVersions()
    {
        // Arrange
        var plugin = CreatePlugin("eu1", MakeOptions(preferLocal: false, allowDegraded: true, strictVersion: true));
        var candidates = new List<ConnectionState>
        {
            Conn("v1-1", "eu1", InstanceHealthStatus.Healthy, 10),
            Conn("v2-1", "eu1", InstanceHealthStatus.Healthy, 10, version: "2.0.0")
        };

        // Act
        var decision = plugin.ChooseInstanceAsync(
            CreateContext("2.0.0", candidates),
            CancellationToken.None).GetAwaiter().GetResult();

        // Assert
        decision.Should().NotBeNull();
        decision!.Connection.Instance.Version.Should().Be("2.0.0",
            "strict version matching should only select matching version");
    }

    [Fact]
    public void RequestedVersion_NotAvailable_ReturnsNull()
    {
        // Arrange
        var plugin = CreatePlugin("eu1", MakeOptions(preferLocal: false, allowDegraded: true, strictVersion: true));
        var candidates = new List<ConnectionState>
        {
            Conn("v1-1", "eu1", InstanceHealthStatus.Healthy, 10)
        };

        // Act
        var decision = plugin.ChooseInstanceAsync(
            CreateContext("3.0.0", candidates),
            CancellationToken.None).GetAwaiter().GetResult();

        // Assert
        decision.Should().BeNull("requested version not available should return null");
    }

    #endregion

    #region Tests - Tie-Breaker Behavior

    [Fact]
    public void LowestLatency_TieBreaker_SelectsLowestPing()
    {
        // Arrange
        var plugin = CreatePlugin("eu1", MakeOptions(preferLocal: false, allowDegraded: false, strictVersion: false));
        var candidates = new List<ConnectionState>
        {
            Conn("high-1", "eu1", InstanceHealthStatus.Healthy, 100),
            Conn("low-1", "eu1", InstanceHealthStatus.Healthy, 10)
        };

        // Act
        var decision = plugin.ChooseInstanceAsync(
            CreateContext(null, candidates),
            CancellationToken.None).GetAwaiter().GetResult();

        // Assert
        decision.Should().NotBeNull();
        decision!.Connection.ConnectionId.Should().Be("low-1",
            "lowest latency tie-breaker should select instance with lowest ping");
    }

    #endregion

    #region Tests - Invariants

    [Fact]
    public void DecisionAlwaysIncludesEndpoint()
    {
        // Arrange
        var plugin = CreatePlugin("eu1", MakeOptions(preferLocal: false, allowDegraded: true, strictVersion: false));
        var candidates = CreateFixedCandidates();

        // Act
        var decision = plugin.ChooseInstanceAsync(
            CreateContext(null, candidates),
            CancellationToken.None).GetAwaiter().GetResult();

        // Assert
        decision.Should().NotBeNull();
        decision!.Endpoint.Should().NotBeNull("decision should always include endpoint");
        decision.Connection.Should().NotBeNull("decision should always include connection");
    }

    [Fact]
    public void UnhealthyInstances_NeverSelected()
    {
        // Arrange - unhealthy instance, even with the lowest possible latency.
        var plugin = CreatePlugin("eu1", MakeOptions(preferLocal: false, allowDegraded: true, strictVersion: false));
        var candidates = new List<ConnectionState>
        {
            Conn("unhealthy-1", "eu1", InstanceHealthStatus.Unhealthy, 1)
        };

        // Act
        var decision = plugin.ChooseInstanceAsync(
            CreateContext(null, candidates),
            CancellationToken.None).GetAwaiter().GetResult();

        // Assert
        decision.Should().BeNull("unhealthy instances should never be selected");
    }

    #endregion

    #region Helpers

    /// <summary>
    /// Builds a test-service connection; the instance id mirrors the connection id.
    /// </summary>
    private static ConnectionState Conn(
        string id,
        string region,
        InstanceHealthStatus status,
        int pingMs,
        string version = "1.0.0")
    {
        return new ConnectionState
        {
            ConnectionId = id,
            Instance = new ServiceInstance
            {
                InstanceId = id,
                ServiceName = "test-service",
                Version = version,
                Region = region
            },
            Status = status,
            AveragePingMs = pingMs
        };
    }

    /// <summary>
    /// Builds RoutingOptions with the given flags and a 5s routing timeout.
    /// </summary>
    private static RoutingOptions MakeOptions(
        bool preferLocal,
        bool allowDegraded,
        bool strictVersion,
        TieBreakerMode tieBreaker = TieBreakerMode.LowestLatency)
    {
        return new RoutingOptions
        {
            PreferLocalRegion = preferLocal,
            AllowDegradedInstances = allowDegraded,
            StrictVersionMatching = strictVersion,
            TieBreaker = tieBreaker,
            RoutingTimeoutMs = 5000
        };
    }

    /// <summary>
    /// Creates the plugin under test for a gateway in the given region.
    /// </summary>
    private static DefaultRoutingPlugin CreatePlugin(string gatewayRegion, RoutingOptions? options = null)
    {
        options ??= MakeOptions(preferLocal: true, allowDegraded: true, strictVersion: false);
        var gatewayConfig = new RouterNodeConfig
        {
            Region = gatewayRegion,
            NeighborRegions = ["eu2", "eu3"]
        };
        return new DefaultRoutingPlugin(
            Options.Create(options),
            Options.Create(gatewayConfig));
    }

    /// <summary>
    /// Creates a GET /test routing context over the given candidates.
    /// </summary>
    private static RoutingContext CreateContext(
        string? requestedVersion,
        List<ConnectionState> candidates,
        string gatewayRegion = "eu1")
    {
        return new RoutingContext
        {
            Method = "GET",
            Path = "/test",
            Headers = new Dictionary<string, string>(),
            Endpoint = new EndpointDescriptor
            {
                ServiceName = "test-service",
                Version = "1.0.0",
                Method = "GET",
                Path = "/test"
            },
            AvailableConnections = candidates,
            GatewayRegion = gatewayRegion,
            RequestedVersion = requestedVersion,
            CancellationToken = CancellationToken.None
        };
    }

    /// <summary>
    /// Two healthy eu1 candidates with distinct latencies (conn-1 is faster).
    /// </summary>
    private static List<ConnectionState> CreateFixedCandidates()
    {
        return
        [
            Conn("conn-1", "eu1", InstanceHealthStatus.Healthy, 10),
            Conn("conn-2", "eu1", InstanceHealthStatus.Healthy, 20)
        ];
    }

    #endregion
}
/// <summary>
/// Custom Arbitrary that supplies FsCheck with ConnectionState values drawn
/// uniformly from a fixed pool of four canned connections.
/// </summary>
public class ConnectionArbitrary
{
    /// <summary>Arbitrary over the canned connection pool.</summary>
    public static Arbitrary<ConnectionState> ConnectionState()
    {
        var pool = new[]
        {
            CreateConn("c1", "eu1", InstanceHealthStatus.Healthy, 10),
            CreateConn("c2", "eu1", InstanceHealthStatus.Healthy, 20),
            CreateConn("c3", "eu2", InstanceHealthStatus.Healthy, 30),
            CreateConn("c4", "us1", InstanceHealthStatus.Degraded, 5)
        };
        return Arb.From(Gen.Elements(pool));
    }

    // Builds one canned connection; the instance id mirrors the connection id.
    private static ConnectionState CreateConn(string id, string region, InstanceHealthStatus status, int pingMs)
    {
        var instance = new ServiceInstance
        {
            InstanceId = id,
            ServiceName = "test-service",
            Version = "1.0.0",
            Region = region
        };
        return new ConnectionState
        {
            ConnectionId = id,
            Instance = instance,
            Status = status,
            AveragePingMs = pingMs
        };
    }
}

View File

@@ -14,6 +14,8 @@
<ItemGroup>
<PackageReference Include="FluentAssertions" Version="6.12.0" />
<PackageReference Include="FsCheck" Version="2.16.6" />
<PackageReference Include="FsCheck.Xunit" Version="2.16.6" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.12.0" />
<PackageReference Include="Testcontainers" Version="4.4.0" />

View File

@@ -0,0 +1,232 @@
// -----------------------------------------------------------------------------
// ErrorModeComparisonLogic.cs
// Sprint: SPRINT_5100_0008_0001_competitor_parity
// Task: PARITY-5100-007 - Implement error mode comparison
// Description: Logic for comparing failure behavior between scanners
// -----------------------------------------------------------------------------
namespace StellaOps.Parity.Tests;
/// <summary>
/// Compares error handling behavior between scanners.
/// </summary>
public sealed class ErrorModeComparisonLogic
{
    /// <summary>
    /// Compares error behavior from multiple scanner runs with edge case inputs.
    /// Scenarios and outputs are paired positionally; whichever list is shorter
    /// bounds the number of evaluated pairs.
    /// </summary>
    public ErrorModeComparisonResult Compare(
        List<ErrorTestScenario> scenarios,
        Dictionary<string, List<ScannerOutput>> resultsByTool)
    {
        var result = new ErrorModeComparisonResult
        {
            TestedScenarios = scenarios.Count,
            ToolResults = new Dictionary<string, ToolErrorBehavior>()
        };

        foreach (var entry in resultsByTool)
        {
            // Pair scenario[i] with output[i]; Zip stops at the shorter sequence.
            var scenarioResults = scenarios
                .Zip(entry.Value, (scenario, output) => new ScenarioResult
                {
                    ScenarioName = scenario.Name,
                    ExpectedBehavior = scenario.ExpectedBehavior,
                    ActualExitCode = output.ExitCode,
                    ActualSuccess = output.Success,
                    ErrorMessage = output.Error,
                    DurationMs = output.DurationMs,
                    BehaviorMatched = EvaluateBehavior(scenario, output)
                })
                .ToList();

            result.ToolResults[entry.Key] = new ToolErrorBehavior
            {
                ToolName = entry.Key,
                ScenarioResults = scenarioResults,
                PassRate = scenarioResults.Count > 0
                    ? (double)scenarioResults.Count(r => r.BehaviorMatched) / scenarioResults.Count * 100
                    : 0
            };
        }

        result.Success = true;
        return result;
    }

    /// <summary>
    /// Gets predefined error test scenarios.
    /// </summary>
    public static List<ErrorTestScenario> GetErrorTestScenarios()
    {
        return
        [
            new ErrorTestScenario
            {
                Name = "malformed-image-ref",
                ImageRef = ":::invalid:::",
                Description = "Completely invalid image reference format",
                ExpectedBehavior = ExpectedErrorBehavior.GracefulError,
                ExpectedExitCode = 1
            },
            new ErrorTestScenario
            {
                Name = "nonexistent-image",
                ImageRef = "nonexistent/image:doesnotexist12345",
                Description = "Image that does not exist in any registry",
                ExpectedBehavior = ExpectedErrorBehavior.GracefulError,
                ExpectedExitCode = 1
            },
            new ErrorTestScenario
            {
                Name = "network-timeout",
                ImageRef = "10.255.255.1/timeout:latest",
                Description = "Image from unreachable network (simulates timeout)",
                ExpectedBehavior = ExpectedErrorBehavior.TimeoutOrError,
                ExpectedExitCode = 1,
                TimeoutSeconds = 30
            },
            new ErrorTestScenario
            {
                Name = "empty-image",
                ImageRef = "busybox:latest",
                Description = "Minimal image with almost no packages",
                ExpectedBehavior = ExpectedErrorBehavior.SuccessEmpty,
                ExpectedExitCode = 0
            },
            new ErrorTestScenario
            {
                Name = "scratch-image",
                ImageRef = "scratch",
                Description = "Empty scratch image",
                ExpectedBehavior = ExpectedErrorBehavior.SuccessOrError,
                ExpectedExitCode = null // Either success or graceful error
            },
            new ErrorTestScenario
            {
                Name = "large-image",
                ImageRef = "nvidia/cuda:12.3.1-devel-ubuntu22.04",
                Description = "Large image (~5GB) to test memory handling",
                ExpectedBehavior = ExpectedErrorBehavior.SuccessOrSkip,
                ExpectedExitCode = 0,
                TimeoutSeconds = 600
            },
            new ErrorTestScenario
            {
                Name = "corrupted-layers",
                ImageRef = "corrupted/test:v1",
                Description = "Image with corrupted layer data (if available)",
                ExpectedBehavior = ExpectedErrorBehavior.GracefulError,
                ExpectedExitCode = 1
            }
        ];
    }

    // Maps an expected-behavior category to a predicate over the actual output.
    private static bool EvaluateBehavior(ErrorTestScenario scenario, ScannerOutput output)
    {
        return scenario.ExpectedBehavior switch
        {
            ExpectedErrorBehavior.GracefulError =>
                !output.Success && output.ExitCode != 0 && !string.IsNullOrEmpty(output.Error),
            ExpectedErrorBehavior.SuccessEmpty =>
                output.Success && output.ExitCode == 0,
            ExpectedErrorBehavior.SuccessOrError =>
                output.ExitCode == 0 || (!output.Success && !string.IsNullOrEmpty(output.Error)),
            ExpectedErrorBehavior.SuccessOrSkip =>
                output.ExitCode == 0 || output.Error?.Contains("skip", StringComparison.OrdinalIgnoreCase) == true,
            ExpectedErrorBehavior.TimeoutOrError =>
                !output.Success,
            ExpectedErrorBehavior.Crash =>
                output.ExitCode < 0 || output.ExitCode > 128,
            _ => false
        };
    }
}
/// <summary>
/// Result of error mode comparison.
/// </summary>
public sealed class ErrorModeComparisonResult
{
    /// <summary>True when the comparison itself completed (not when every scenario passed).</summary>
    public bool Success { get; set; }
    /// <summary>Error message if the comparison could not be performed.</summary>
    public string? Error { get; set; }
    /// <summary>Number of scenarios supplied to the comparison.</summary>
    public int TestedScenarios { get; set; }
    /// <summary>Per-tool error behavior, keyed by tool name.</summary>
    public Dictionary<string, ToolErrorBehavior> ToolResults { get; set; } = new();
}
/// <summary>
/// Error handling behavior for a single tool.
/// </summary>
public sealed class ToolErrorBehavior
{
    /// <summary>Name of the scanner tool these results belong to.</summary>
    public required string ToolName { get; init; }
    /// <summary>One entry per scenario/output pair that was evaluated.</summary>
    public List<ScenarioResult> ScenarioResults { get; set; } = [];
    /// <summary>Percentage (0-100) of evaluated scenarios whose behavior matched expectations.</summary>
    public double PassRate { get; set; }
}
/// <summary>
/// Result for a single error scenario.
/// </summary>
public sealed class ScenarioResult
{
    /// <summary>Name of the scenario that was exercised.</summary>
    public required string ScenarioName { get; init; }
    /// <summary>Behavior category the scenario expected.</summary>
    public ExpectedErrorBehavior ExpectedBehavior { get; set; }
    /// <summary>Exit code the tool actually returned.</summary>
    public int ActualExitCode { get; set; }
    /// <summary>Whether the tool run was reported as successful.</summary>
    public bool ActualSuccess { get; set; }
    /// <summary>Error text captured from the run, if any.</summary>
    public string? ErrorMessage { get; set; }
    /// <summary>Wall-clock duration of the run in milliseconds.</summary>
    public long DurationMs { get; set; }
    /// <summary>True when the actual behavior satisfied the expected category.</summary>
    public bool BehaviorMatched { get; set; }
}
/// <summary>
/// Defines an error test scenario.
/// </summary>
public sealed class ErrorTestScenario
{
    /// <summary>Short unique name used in reports.</summary>
    public required string Name { get; init; }
    /// <summary>Container image reference fed to the scanner.</summary>
    public required string ImageRef { get; init; }
    /// <summary>Human-readable description of what the scenario exercises.</summary>
    public required string Description { get; init; }
    /// <summary>Behavior category the scanner is expected to exhibit.</summary>
    public ExpectedErrorBehavior ExpectedBehavior { get; init; }
    /// <summary>Expected exit code; null when multiple outcomes are acceptable.</summary>
    public int? ExpectedExitCode { get; init; }
    /// <summary>Per-scenario timeout; defaults to 60 seconds.</summary>
    public int TimeoutSeconds { get; init; } = 60;
}
/// <summary>
/// Expected error behavior categories, evaluated by
/// <see cref="ErrorModeComparisonLogic"/> against actual scanner output.
/// </summary>
public enum ExpectedErrorBehavior
{
    /// <summary>Tool should exit with non-zero and meaningful error message.</summary>
    GracefulError,
    /// <summary>Tool should succeed but produce empty/minimal output.</summary>
    SuccessEmpty,
    /// <summary>Either success or graceful error is acceptable.</summary>
    SuccessOrError,
    /// <summary>Either success or skip message is acceptable.</summary>
    SuccessOrSkip,
    /// <summary>Timeout or error is expected (network unreachable).</summary>
    TimeoutOrError,
    /// <summary>Tool is expected to crash (for negative testing).</summary>
    Crash
}

View File

@@ -0,0 +1,169 @@
// -----------------------------------------------------------------------------
// LatencyComparisonLogic.cs
// Sprint: SPRINT_5100_0008_0001_competitor_parity
// Task: PARITY-5100-006 - Implement latency comparison
// Description: Logic for comparing scan latency between scanners
// -----------------------------------------------------------------------------
namespace StellaOps.Parity.Tests;
/// <summary>
/// Compares latency metrics between scanner runs.
/// </summary>
public sealed class LatencyComparisonLogic
{
    /// <summary>
    /// Compares latency from multiple scan runs. Failed runs are excluded;
    /// when either side has no successful run an error result is returned
    /// with <c>Success = false</c>.
    /// </summary>
    public LatencyComparisonResult Compare(
        IEnumerable<ScannerOutput> baselineRuns,
        IEnumerable<ScannerOutput> candidateRuns)
    {
        var baselineList = baselineRuns.Where(r => r.Success).ToList();
        var candidateList = candidateRuns.Where(r => r.Success).ToList();
        if (baselineList.Count == 0 || candidateList.Count == 0)
        {
            return new LatencyComparisonResult
            {
                BaselineTool = baselineList.FirstOrDefault()?.ToolName ?? "unknown",
                CandidateTool = candidateList.FirstOrDefault()?.ToolName ?? "unknown",
                Error = "Insufficient successful runs for comparison"
            };
        }

        var baselineMs = baselineList.Select(r => r.DurationMs).OrderBy(d => d).ToList();
        var candidateMs = candidateList.Select(r => r.DurationMs).OrderBy(d => d).ToList();

        // Compute each shared statistic once; the ratio block reuses them
        // (the previous implementation recomputed every percentile and mean).
        var baselineP50 = CalculatePercentile(baselineMs, 50);
        var baselineP95 = CalculatePercentile(baselineMs, 95);
        var baselineMean = baselineMs.Average();
        var candidateP50 = CalculatePercentile(candidateMs, 50);
        var candidateP95 = CalculatePercentile(candidateMs, 95);
        var candidateMean = candidateMs.Average();

        return new LatencyComparisonResult
        {
            BaselineTool = baselineList[0].ToolName,
            CandidateTool = candidateList[0].ToolName,
            Success = true,
            // Baseline stats (lists are sorted, so min/max are the ends)
            BaselineP50 = baselineP50,
            BaselineP95 = baselineP95,
            BaselineP99 = CalculatePercentile(baselineMs, 99),
            BaselineMin = baselineMs[0],
            BaselineMax = baselineMs[^1],
            BaselineMean = baselineMean,
            BaselineStdDev = CalculateStdDev(baselineMs),
            BaselineSampleCount = baselineMs.Count,
            // Candidate stats
            CandidateP50 = candidateP50,
            CandidateP95 = candidateP95,
            CandidateP99 = CalculatePercentile(candidateMs, 99),
            CandidateMin = candidateMs[0],
            CandidateMax = candidateMs[^1],
            CandidateMean = candidateMean,
            CandidateStdDev = CalculateStdDev(candidateMs),
            CandidateSampleCount = candidateMs.Count,
            // Comparison ratios (candidate / baseline; <1 means candidate is faster).
            // Math.Max(1, x) clamps the denominator to avoid division by ~0 for sub-ms runs.
            P50Ratio = candidateP50 / Math.Max(1, baselineP50),
            P95Ratio = candidateP95 / Math.Max(1, baselineP95),
            MeanRatio = candidateMean / Math.Max(1, baselineMean)
        };
    }

    /// <summary>
    /// Calculates time-to-first-signal (TTFS) if available in scan output.
    /// </summary>
    public TimeToFirstSignalResult CalculateTtfs(ScannerOutput output)
    {
        return new TimeToFirstSignalResult
        {
            ToolName = output.ToolName,
            TotalDurationMs = output.DurationMs,
            // TTFS would require streaming output parsing, which most tools don't support
            // For now, we approximate as total duration
            TtfsMs = output.DurationMs,
            TtfsAvailable = false
        };
    }

    /// <summary>
    /// Linear-interpolated percentile over an ascending-sorted list.
    /// Returns 0 for an empty list.
    /// </summary>
    private static double CalculatePercentile(List<long> sortedValues, int percentile)
    {
        if (sortedValues.Count == 0)
            return 0;
        var index = (percentile / 100.0) * (sortedValues.Count - 1);
        var lower = (int)Math.Floor(index);
        var upper = (int)Math.Ceiling(index);
        if (lower == upper)
            return sortedValues[lower];
        // Interpolate between the two surrounding samples.
        var fraction = index - lower;
        return sortedValues[lower] * (1 - fraction) + sortedValues[upper] * fraction;
    }

    /// <summary>
    /// Sample standard deviation (n-1 denominator); 0 for fewer than two values.
    /// </summary>
    private static double CalculateStdDev(List<long> values)
    {
        if (values.Count < 2)
            return 0;
        var mean = values.Average();
        var sumSquares = values.Sum(v => Math.Pow(v - mean, 2));
        return Math.Sqrt(sumSquares / (values.Count - 1));
    }
}
/// <summary>
/// Result of latency comparison between two scanners.
/// </summary>
public sealed class LatencyComparisonResult
{
    /// <summary>Tool name of the baseline runs ("unknown" when none succeeded).</summary>
    public required string BaselineTool { get; init; }
    /// <summary>Tool name of the candidate runs ("unknown" when none succeeded).</summary>
    public required string CandidateTool { get; init; }
    /// <summary>True when both sides had successful runs and stats were computed.</summary>
    public bool Success { get; set; }
    /// <summary>Reason the comparison could not be performed, if any.</summary>
    public string? Error { get; set; }
    // Baseline latency stats (milliseconds)
    public double BaselineP50 { get; set; }
    public double BaselineP95 { get; set; }
    public double BaselineP99 { get; set; }
    public long BaselineMin { get; set; }
    public long BaselineMax { get; set; }
    public double BaselineMean { get; set; }
    public double BaselineStdDev { get; set; }
    public int BaselineSampleCount { get; set; }
    // Candidate latency stats (milliseconds)
    public double CandidateP50 { get; set; }
    public double CandidateP95 { get; set; }
    public double CandidateP99 { get; set; }
    public long CandidateMin { get; set; }
    public long CandidateMax { get; set; }
    public double CandidateMean { get; set; }
    public double CandidateStdDev { get; set; }
    public int CandidateSampleCount { get; set; }
    // Comparison ratios (candidate / baseline, <1 means candidate is faster)
    public double P50Ratio { get; set; }
    public double P95Ratio { get; set; }
    public double MeanRatio { get; set; }
    /// <summary>
    /// Returns true if candidate is faster at P95 (ratio &lt; 1).
    /// </summary>
    public bool CandidateIsFaster => P95Ratio < 1.0;
    /// <summary>
    /// Returns the percentage improvement at P95 (positive = candidate faster).
    /// </summary>
    public double ImprovementPercent => (1 - P95Ratio) * 100;
}
/// <summary>
/// Time-to-first-signal measurement result.
/// </summary>
public sealed class TimeToFirstSignalResult
{
    /// <summary>Scanner tool the measurement belongs to.</summary>
    public required string ToolName { get; init; }
    /// <summary>Total wall-clock duration of the scan in milliseconds.</summary>
    public long TotalDurationMs { get; set; }
    /// <summary>Time to first signal; currently approximated by total duration.</summary>
    public long TtfsMs { get; set; }
    /// <summary>False while TTFS is only approximated (no streaming output parsing).</summary>
    public bool TtfsAvailable { get; set; }
}

View File

@@ -0,0 +1,341 @@
// -----------------------------------------------------------------------------
// ParityHarness.cs
// Sprint: SPRINT_5100_0008_0001_competitor_parity
// Task: PARITY-5100-003 - Implement parity harness
// Description: Harness for running StellaOps and competitors on same fixtures
// -----------------------------------------------------------------------------
using System.Diagnostics;
using System.Text.Json;
using CliWrap;
using CliWrap.Buffered;
namespace StellaOps.Parity.Tests;
/// <summary>
/// Parity test harness that runs multiple scanners on the same container image
/// and collects their outputs for comparison.
/// </summary>
public sealed class ParityHarness : IAsyncDisposable
{
private readonly string _workDir;
private readonly Dictionary<string, ToolVersion> _toolVersions = new();
/// <summary>
/// Pinned tool versions for reproducible testing. Bump deliberately:
/// parity results are only comparable across runs using the same versions.
/// </summary>
public static class PinnedVersions
{
    /// <summary>Syft SBOM generator CLI version.</summary>
    public const string Syft = "1.9.0";
    /// <summary>Grype vulnerability scanner CLI version.</summary>
    public const string Grype = "0.79.3";
    /// <summary>Trivy scanner CLI version.</summary>
    public const string Trivy = "0.54.1";
}
/// <summary>
/// Creates a harness rooted at <paramref name="workDir"/>, or at a unique
/// directory under the system temp path when none is supplied. The root
/// directory is created immediately.
/// </summary>
public ParityHarness(string? workDir = null)
{
    var root = workDir ?? Path.Combine(Path.GetTempPath(), $"parity-{Guid.NewGuid():N}");
    Directory.CreateDirectory(root);
    _workDir = root;
}
/// <summary>
/// Runs all configured scanners on the specified image and returns collected results.
/// </summary>
/// <param name="fixture">Image fixture describing the image to scan.</param>
/// <param name="cancellationToken">Cancels the in-flight scanner runs.</param>
/// <returns>Aggregated result with one output per scanner and start/end timestamps.</returns>
public async Task<ParityRunResult> RunAllAsync(
    ParityImageFixture fixture,
    CancellationToken cancellationToken = default)
{
    var result = new ParityRunResult
    {
        Fixture = fixture,
        StartedAtUtc = DateTimeOffset.UtcNow
    };
    // Run each scanner in parallel; task order fixes the output order below.
    var tasks = new List<Task<ScannerOutput>>
    {
        RunSyftAsync(fixture.Image, cancellationToken),
        RunGrypeAsync(fixture.Image, cancellationToken),
        RunTrivyAsync(fixture.Image, cancellationToken)
    };
    try
    {
        var outputs = await Task.WhenAll(tasks);
        result.SyftOutput = outputs[0];
        result.GrypeOutput = outputs[1];
        result.TrivyOutput = outputs[2];
    }
    catch (Exception ex)
    {
        // The runners catch their own tool failures (see RunSyftAsync), so this
        // covers unexpected faults surfacing from WhenAll (e.g. cancellation).
        // NOTE(review): when this triggers, all three outputs are discarded
        // even if some runs completed — confirm that is intended.
        result.Error = ex.Message;
    }
    result.CompletedAtUtc = DateTimeOffset.UtcNow;
    return result;
}
/// <summary>
/// Runs Syft SBOM generator on the specified image.
/// </summary>
public async Task<ScannerOutput> RunSyftAsync(
string image,
CancellationToken cancellationToken = default)
{
var output = new ScannerOutput
{
ToolName = "syft",
ToolVersion = PinnedVersions.Syft,
Image = image,
StartedAtUtc = DateTimeOffset.UtcNow
};
var outputPath = Path.Combine(_workDir, $"syft-{Guid.NewGuid():N}.json");
try
{
var sw = Stopwatch.StartNew();
// syft <image> -o spdx-json=<output>
var result = await Cli.Wrap("syft")
.WithArguments([$"{image}", "-o", $"spdx-json={outputPath}"])
.WithValidation(CommandResultValidation.None)
.ExecuteBufferedAsync(cancellationToken);
sw.Stop();
output.DurationMs = sw.ElapsedMilliseconds;
output.ExitCode = result.ExitCode;
output.Stderr = result.StandardError;
if (result.ExitCode == 0 && File.Exists(outputPath))
{
output.RawOutput = await File.ReadAllTextAsync(outputPath, cancellationToken);
output.SbomJson = JsonDocument.Parse(output.RawOutput);
output.Success = true;
}
else
{
output.Error = result.StandardError;
}
}
catch (Exception ex)
{
output.Error = ex.Message;
}
output.CompletedAtUtc = DateTimeOffset.UtcNow;
return output;
}
/// <summary>
/// Runs Grype vulnerability scanner on the specified image.
/// </summary>
public async Task<ScannerOutput> RunGrypeAsync(
string image,
CancellationToken cancellationToken = default)
{
var output = new ScannerOutput
{
ToolName = "grype",
ToolVersion = PinnedVersions.Grype,
Image = image,
StartedAtUtc = DateTimeOffset.UtcNow
};
var outputPath = Path.Combine(_workDir, $"grype-{Guid.NewGuid():N}.json");
try
{
var sw = Stopwatch.StartNew();
// grype <image> -o json --file <output>
var result = await Cli.Wrap("grype")
.WithArguments([$"{image}", "-o", "json", "--file", outputPath])
.WithValidation(CommandResultValidation.None)
.ExecuteBufferedAsync(cancellationToken);
sw.Stop();
output.DurationMs = sw.ElapsedMilliseconds;
output.ExitCode = result.ExitCode;
output.Stderr = result.StandardError;
if (result.ExitCode == 0 && File.Exists(outputPath))
{
output.RawOutput = await File.ReadAllTextAsync(outputPath, cancellationToken);
output.FindingsJson = JsonDocument.Parse(output.RawOutput);
output.Success = true;
}
else
{
output.Error = result.StandardError;
}
}
catch (Exception ex)
{
output.Error = ex.Message;
}
output.CompletedAtUtc = DateTimeOffset.UtcNow;
return output;
}
/// <summary>
/// Runs Trivy vulnerability scanner on the specified image.
/// </summary>
public async Task<ScannerOutput> RunTrivyAsync(
string image,
CancellationToken cancellationToken = default)
{
var output = new ScannerOutput
{
ToolName = "trivy",
ToolVersion = PinnedVersions.Trivy,
Image = image,
StartedAtUtc = DateTimeOffset.UtcNow
};
var outputPath = Path.Combine(_workDir, $"trivy-{Guid.NewGuid():N}.json");
try
{
var sw = Stopwatch.StartNew();
// trivy image <image> -f json -o <output>
var result = await Cli.Wrap("trivy")
.WithArguments(["image", image, "-f", "json", "-o", outputPath])
.WithValidation(CommandResultValidation.None)
.ExecuteBufferedAsync(cancellationToken);
sw.Stop();
output.DurationMs = sw.ElapsedMilliseconds;
output.ExitCode = result.ExitCode;
output.Stderr = result.StandardError;
if (result.ExitCode == 0 && File.Exists(outputPath))
{
output.RawOutput = await File.ReadAllTextAsync(outputPath, cancellationToken);
output.FindingsJson = JsonDocument.Parse(output.RawOutput);
output.Success = true;
}
else
{
output.Error = result.StandardError;
}
}
catch (Exception ex)
{
output.Error = ex.Message;
}
output.CompletedAtUtc = DateTimeOffset.UtcNow;
return output;
}
/// <summary>
/// Checks if required tools are available on the system.
/// </summary>
public async Task<ToolAvailability> CheckToolsAsync(CancellationToken cancellationToken = default)
{
var availability = new ToolAvailability();
availability.SyftAvailable = await CheckToolAsync("syft", "--version", cancellationToken);
availability.GrypeAvailable = await CheckToolAsync("grype", "--version", cancellationToken);
availability.TrivyAvailable = await CheckToolAsync("trivy", "--version", cancellationToken);
return availability;
}
private static async Task<bool> CheckToolAsync(string tool, string versionArg, CancellationToken cancellationToken)
{
try
{
var result = await Cli.Wrap(tool)
.WithArguments([versionArg])
.WithValidation(CommandResultValidation.None)
.ExecuteBufferedAsync(cancellationToken);
return result.ExitCode == 0;
}
catch
{
return false;
}
}
public ValueTask DisposeAsync()
{
if (Directory.Exists(_workDir))
{
try
{
Directory.Delete(_workDir, recursive: true);
}
catch
{
// Ignore cleanup errors
}
}
return ValueTask.CompletedTask;
}
}
/// <summary>
/// Result of running all scanners on a fixture.
/// </summary>
public sealed class ParityRunResult
{
    /// <summary>The image fixture the scanners were run against.</summary>
    public required ParityImageFixture Fixture { get; init; }

    /// <summary>UTC time the combined run started.</summary>
    public DateTimeOffset StartedAtUtc { get; set; }

    /// <summary>UTC time the combined run completed (set even on failure).</summary>
    public DateTimeOffset CompletedAtUtc { get; set; }

    /// <summary>Syft output; null when the run failed before collection.</summary>
    public ScannerOutput? SyftOutput { get; set; }

    /// <summary>Grype output; null when the run failed before collection.</summary>
    public ScannerOutput? GrypeOutput { get; set; }

    /// <summary>Trivy output; null when the run failed before collection.</summary>
    public ScannerOutput? TrivyOutput { get; set; }

    /// <summary>Error message when the combined run itself failed.</summary>
    public string? Error { get; set; }

    /// <summary>Total wall-clock duration of the combined run.</summary>
    public TimeSpan Duration => CompletedAtUtc - StartedAtUtc;
}
/// <summary>
/// Output from a single scanner run.
/// </summary>
public sealed class ScannerOutput
{
    /// <summary>Scanner name ("syft", "grype", or "trivy").</summary>
    public required string ToolName { get; init; }

    /// <summary>Pinned version of the scanner binary.</summary>
    public required string ToolVersion { get; init; }

    /// <summary>Container image reference that was scanned.</summary>
    public required string Image { get; init; }

    /// <summary>UTC time this scanner run started.</summary>
    public DateTimeOffset StartedAtUtc { get; set; }

    /// <summary>UTC time this scanner run completed (set even on failure).</summary>
    public DateTimeOffset CompletedAtUtc { get; set; }

    /// <summary>Scanner process wall-clock duration, in milliseconds.</summary>
    public long DurationMs { get; set; }

    /// <summary>Scanner process exit code.</summary>
    public int ExitCode { get; set; }

    /// <summary>Raw JSON text written by the scanner, on success.</summary>
    public string? RawOutput { get; set; }

    /// <summary>Captured standard error of the scanner process.</summary>
    public string? Stderr { get; set; }

    /// <summary>Error detail: stderr on non-zero exit, or the exception message.</summary>
    public string? Error { get; set; }

    /// <summary>True when the scanner exited 0 and produced a parseable output file.</summary>
    public bool Success { get; set; }

    /// <summary>SBOM JSON document (for Syft).</summary>
    public JsonDocument? SbomJson { get; set; }

    /// <summary>Vulnerability findings JSON document (for Grype/Trivy).</summary>
    public JsonDocument? FindingsJson { get; set; }
}
/// <summary>
/// Availability flags for the external scanner CLIs used by the parity harness.
/// </summary>
public sealed class ToolAvailability
{
    /// <summary>True when the syft binary was detected.</summary>
    public bool SyftAvailable { get; set; }

    /// <summary>True when the grype binary was detected.</summary>
    public bool GrypeAvailable { get; set; }

    /// <summary>True when the trivy binary was detected.</summary>
    public bool TrivyAvailable { get; set; }

    /// <summary>All three scanners are present.</summary>
    public bool AllAvailable
        => (SyftAvailable, GrypeAvailable, TrivyAvailable) is (true, true, true);

    /// <summary>At least one scanner is present.</summary>
    public bool AnyAvailable
        => (SyftAvailable, GrypeAvailable, TrivyAvailable) is not (false, false, false);
}
/// <summary>
/// Cached tool version information.
/// NOTE(review): nothing in this file populates this type — it appears reserved
/// for future version detection; confirm before relying on it.
/// </summary>
public sealed class ToolVersion
{
    /// <summary>Tool name (e.g. "syft").</summary>
    public required string ToolName { get; init; }

    /// <summary>Detected version string.</summary>
    public required string Version { get; init; }

    /// <summary>UTC time the version was detected.</summary>
    public DateTimeOffset DetectedAtUtc { get; init; }
}

View File

@@ -0,0 +1,242 @@
// -----------------------------------------------------------------------------
// ParityTestFixtureSet.cs
// Sprint: SPRINT_5100_0008_0001_competitor_parity
// Task: PARITY-5100-002 - Define parity test fixture set
// Description: Container image fixtures for parity testing against competitors
// -----------------------------------------------------------------------------
namespace StellaOps.Parity.Tests;
/// <summary>
/// Defines the standard fixture set for competitor parity testing.
/// Each fixture represents a container image with known vulnerabilities
/// that is used to compare StellaOps against Syft, Grype, and Trivy.
/// </summary>
public static class ParityTestFixtureSet
{
    /// <summary>
    /// Gets the list of container image fixtures for parity testing.
    /// These images are chosen to cover:
    /// - Different base OS distributions (Alpine, Debian, RHEL, Ubuntu)
    /// - Different package managers (apk, apt, rpm, npm, pip, maven)
    /// - Known vulnerabilities for validation
    /// - Multi-language applications
    /// NOTE(review): image tags are pinned for reproducibility; bumping a tag
    /// likely shifts the expected package/CVE counts below — verify when updating.
    /// </summary>
    public static IReadOnlyList<ParityImageFixture> Fixtures { get; } =
    [
        // Alpine-based images
        new ParityImageFixture
        {
            Name = "alpine-base",
            Image = "alpine:3.19.0",
            Description = "Alpine Linux base image with minimal packages",
            PackageManagers = ["apk"],
            ExpectedMinPackages = 10,
            Category = ImageCategory.BaseOS
        },
        new ParityImageFixture
        {
            Name = "alpine-python",
            Image = "python:3.12-alpine",
            Description = "Python on Alpine with pip packages",
            PackageManagers = ["apk", "pip"],
            ExpectedMinPackages = 50,
            Category = ImageCategory.LanguageRuntime
        },
        // Debian-based images
        new ParityImageFixture
        {
            Name = "debian-base",
            Image = "debian:bookworm-slim",
            Description = "Debian bookworm slim base image",
            PackageManagers = ["apt"],
            ExpectedMinPackages = 50,
            Category = ImageCategory.BaseOS
        },
        new ParityImageFixture
        {
            Name = "node-debian",
            Image = "node:20-bookworm-slim",
            Description = "Node.js on Debian with npm packages",
            PackageManagers = ["apt", "npm"],
            ExpectedMinPackages = 100,
            Category = ImageCategory.LanguageRuntime
        },
        // Ubuntu-based images
        new ParityImageFixture
        {
            Name = "ubuntu-base",
            Image = "ubuntu:22.04",
            Description = "Ubuntu 22.04 LTS base image",
            PackageManagers = ["apt"],
            ExpectedMinPackages = 80,
            Category = ImageCategory.BaseOS
        },
        // RHEL/CentOS-based images
        new ParityImageFixture
        {
            Name = "rhel-base",
            Image = "rockylinux:9-minimal",
            Description = "Rocky Linux 9 minimal (RHEL compatible)",
            PackageManagers = ["rpm"],
            ExpectedMinPackages = 30,
            Category = ImageCategory.BaseOS
        },
        // Multi-language application images
        new ParityImageFixture
        {
            Name = "go-app",
            Image = "golang:1.22-bookworm",
            Description = "Go application with standard library",
            PackageManagers = ["apt", "go"],
            ExpectedMinPackages = 150,
            Category = ImageCategory.LanguageRuntime
        },
        new ParityImageFixture
        {
            Name = "java-app",
            Image = "eclipse-temurin:21-jdk-jammy",
            Description = "Java 21 with Maven dependencies",
            PackageManagers = ["apt", "maven"],
            ExpectedMinPackages = 100,
            Category = ImageCategory.LanguageRuntime
        },
        new ParityImageFixture
        {
            Name = "rust-app",
            Image = "rust:1.75-bookworm",
            Description = "Rust with cargo dependencies",
            PackageManagers = ["apt", "cargo"],
            ExpectedMinPackages = 100,
            Category = ImageCategory.LanguageRuntime
        },
        new ParityImageFixture
        {
            Name = "dotnet-app",
            Image = "mcr.microsoft.com/dotnet/aspnet:8.0-bookworm-slim",
            Description = ".NET 8 ASP.NET runtime",
            PackageManagers = ["apt", "nuget"],
            ExpectedMinPackages = 80,
            Category = ImageCategory.LanguageRuntime
        },
        // Images with known CVEs (for vulnerability comparison)
        new ParityImageFixture
        {
            Name = "vuln-nginx",
            Image = "nginx:1.24",
            Description = "nginx with known vulnerabilities",
            PackageManagers = ["apt"],
            ExpectedMinPackages = 100,
            Category = ImageCategory.KnownVulnerable,
            ExpectedMinCVEs = 5
        },
        new ParityImageFixture
        {
            Name = "vuln-postgres",
            Image = "postgres:14",
            Description = "PostgreSQL with known vulnerabilities",
            PackageManagers = ["apt"],
            ExpectedMinPackages = 100,
            Category = ImageCategory.KnownVulnerable,
            ExpectedMinCVEs = 3
        },
        // Complex multi-layer images
        new ParityImageFixture
        {
            Name = "complex-wordpress",
            Image = "wordpress:6.4-php8.2-apache",
            Description = "WordPress with PHP and Apache (complex layers)",
            PackageManagers = ["apt", "composer"],
            ExpectedMinPackages = 200,
            Category = ImageCategory.ComplexApp
        },
        new ParityImageFixture
        {
            Name = "complex-redis",
            Image = "redis:7.2-bookworm",
            Description = "Redis server with multiple dependencies",
            PackageManagers = ["apt"],
            ExpectedMinPackages = 50,
            Category = ImageCategory.ComplexApp
        }
    ];

    /// <summary>
    /// Gets fixtures filtered by category.
    /// </summary>
    public static IEnumerable<ParityImageFixture> GetByCategory(ImageCategory category)
        => Fixtures.Where(f => f.Category == category);

    /// <summary>
    /// Gets fixtures that have expected CVEs (for vulnerability comparison).
    /// </summary>
    public static IEnumerable<ParityImageFixture> GetVulnerableFixtures()
        => Fixtures.Where(f => f.ExpectedMinCVEs > 0);
}
/// <summary>
/// Represents a container image fixture for parity testing.
/// </summary>
public sealed class ParityImageFixture
{
    /// <summary>
    /// Unique name for this fixture.
    /// </summary>
    public required string Name { get; init; }

    /// <summary>
    /// Docker image reference (e.g., "alpine:3.19.0").
    /// </summary>
    public required string Image { get; init; }

    /// <summary>
    /// Human-readable description of the fixture.
    /// </summary>
    public required string Description { get; init; }

    /// <summary>
    /// Package managers expected in this image.
    /// </summary>
    public required string[] PackageManagers { get; init; }

    /// <summary>
    /// Minimum expected package count (for validation).
    /// </summary>
    public int ExpectedMinPackages { get; init; }

    /// <summary>
    /// Category of this fixture.
    /// </summary>
    public ImageCategory Category { get; init; }

    /// <summary>
    /// Minimum expected CVE count (for vulnerable images); 0 when not applicable.
    /// </summary>
    public int ExpectedMinCVEs { get; init; }

    /// <summary>Renders as "Name (Image)" for readable test output.</summary>
    public override string ToString() => $"{Name} ({Image})";
}
/// <summary>
/// Categories for fixture images, used to filter the parity fixture set.
/// </summary>
public enum ImageCategory
{
    /// <summary>Base OS image (Alpine, Debian, Ubuntu, RHEL).</summary>
    BaseOS,

    /// <summary>Language runtime image (Python, Node, Go, Java, Rust, .NET).</summary>
    LanguageRuntime,

    /// <summary>Image with known vulnerabilities for CVE comparison.</summary>
    KnownVulnerable,

    /// <summary>Complex multi-layer application image.</summary>
    ComplexApp
}

View File

@@ -0,0 +1,273 @@
// -----------------------------------------------------------------------------
// SbomComparisonLogic.cs
// Sprint: SPRINT_5100_0008_0001_competitor_parity
// Task: PARITY-5100-004 - Implement SBOM comparison logic
// Description: Logic for comparing SBOM outputs between scanners
// -----------------------------------------------------------------------------
using System.Text.Json;
namespace StellaOps.Parity.Tests;
/// <summary>
/// Compares SBOM outputs between different scanners.
/// </summary>
public sealed class SbomComparisonLogic
{
    /// <summary>
    /// Compares two SBOM outputs and returns a comparison result with package
    /// counts, match rates, and PURL/license/CPE completeness metrics.
    /// Never throws for malformed SBOMs; failures are reported via
    /// <see cref="SbomComparisonResult.Error"/>.
    /// </summary>
    public SbomComparisonResult Compare(ScannerOutput baseline, ScannerOutput candidate)
    {
        ArgumentNullException.ThrowIfNull(baseline);
        ArgumentNullException.ThrowIfNull(candidate);

        var result = new SbomComparisonResult
        {
            BaselineTool = baseline.ToolName,
            CandidateTool = candidate.ToolName,
            Image = baseline.Image
        };

        if (baseline.SbomJson is null || candidate.SbomJson is null)
        {
            result.Error = "One or both SBOM outputs are null";
            return result;
        }

        try
        {
            var baselinePackages = ExtractPackages(baseline.SbomJson);
            var candidatePackages = ExtractPackages(candidate.SbomJson);

            result.BaselinePackageCount = baselinePackages.Count;
            result.CandidatePackageCount = candidatePackages.Count;

            // Find packages in baseline but not in candidate
            result.OnlyInBaseline = baselinePackages
                .Where(bp => !candidatePackages.Any(cp => MatchesPackage(bp, cp)))
                .ToList();
            // Find packages in candidate but not in baseline
            result.OnlyInCandidate = candidatePackages
                .Where(cp => !baselinePackages.Any(bp => MatchesPackage(bp, cp)))
                .ToList();
            // A baseline package matched iff it is not baseline-only, so derive the
            // matching set from the partition above instead of a third O(n*m) scan.
            result.MatchingPackages = baselinePackages
                .Except(result.OnlyInBaseline)
                .ToList();

            // Calculate metrics
            result.PackageCountDiff = result.CandidatePackageCount - result.BaselinePackageCount;
            result.PackageCountDiffPercent = result.BaselinePackageCount > 0
                ? (double)result.PackageCountDiff / result.BaselinePackageCount * 100
                : 0;
            result.MatchRate = result.BaselinePackageCount > 0
                ? (double)result.MatchingPackages.Count / result.BaselinePackageCount * 100
                : 0;

            // PURL completeness
            result.BaselinePurlCount = baselinePackages.Count(p => !string.IsNullOrEmpty(p.Purl));
            result.CandidatePurlCount = candidatePackages.Count(p => !string.IsNullOrEmpty(p.Purl));
            result.PurlCompletenessBaseline = result.BaselinePackageCount > 0
                ? (double)result.BaselinePurlCount / result.BaselinePackageCount * 100
                : 0;
            result.PurlCompletenessCandidate = result.CandidatePackageCount > 0
                ? (double)result.CandidatePurlCount / result.CandidatePackageCount * 100
                : 0;

            // License detection
            result.BaselineLicenseCount = baselinePackages.Count(p => !string.IsNullOrEmpty(p.License));
            result.CandidateLicenseCount = candidatePackages.Count(p => !string.IsNullOrEmpty(p.License));

            // CPE mapping
            result.BaselineCpeCount = baselinePackages.Count(p => !string.IsNullOrEmpty(p.Cpe));
            result.CandidateCpeCount = candidatePackages.Count(p => !string.IsNullOrEmpty(p.Cpe));

            result.Success = true;
        }
        catch (Exception ex)
        {
            result.Error = ex.Message;
        }

        return result;
    }

    /// <summary>
    /// Extracts packages from an SBOM document, detecting SPDX ("packages")
    /// versus CycloneDX ("components") by top-level property name.
    /// Returns an empty list when neither property is present.
    /// </summary>
    private static List<ExtractedPackage> ExtractPackages(JsonDocument sbomJson)
    {
        var packages = new List<ExtractedPackage>();
        var root = sbomJson.RootElement;

        // Try SPDX format first
        if (root.TryGetProperty("packages", out var spdxPackages))
        {
            foreach (var pkg in spdxPackages.EnumerateArray())
            {
                packages.Add(ExtractSpdxPackage(pkg));
            }
        }
        // Try CycloneDX format
        else if (root.TryGetProperty("components", out var cdxComponents))
        {
            foreach (var component in cdxComponents.EnumerateArray())
            {
                packages.Add(ExtractCycloneDxPackage(component));
            }
        }

        return packages;
    }

    /// <summary>
    /// Extracts name/version, PURL (from externalRefs) and concluded license
    /// from one SPDX package element.
    /// </summary>
    private static ExtractedPackage ExtractSpdxPackage(JsonElement pkg)
    {
        var extracted = new ExtractedPackage
        {
            Name = pkg.TryGetProperty("name", out var name) ? name.GetString() ?? "" : "",
            Version = pkg.TryGetProperty("versionInfo", out var version) ? version.GetString() ?? "" : ""
        };

        // Extract PURL from external refs
        if (pkg.TryGetProperty("externalRefs", out var refs))
        {
            foreach (var refItem in refs.EnumerateArray())
            {
                if (refItem.TryGetProperty("referenceType", out var refType) &&
                    refType.GetString()?.Equals("purl", StringComparison.OrdinalIgnoreCase) == true &&
                    refItem.TryGetProperty("referenceLocator", out var locator))
                {
                    extracted.Purl = locator.GetString();
                    break;
                }
            }
        }

        // Extract license
        if (pkg.TryGetProperty("licenseConcluded", out var license))
        {
            extracted.License = license.GetString();
        }

        return extracted;
    }

    /// <summary>
    /// Extracts name/version/PURL, license list (joined with ", ") and CPE
    /// from one CycloneDX component element.
    /// </summary>
    private static ExtractedPackage ExtractCycloneDxPackage(JsonElement component)
    {
        var extracted = new ExtractedPackage
        {
            Name = component.TryGetProperty("name", out var name) ? name.GetString() ?? "" : "",
            Version = component.TryGetProperty("version", out var version) ? version.GetString() ?? "" : "",
            Purl = component.TryGetProperty("purl", out var purl) ? purl.GetString() : null
        };

        // Extract license
        if (component.TryGetProperty("licenses", out var licenses))
        {
            var licenseList = new List<string>();
            foreach (var lic in licenses.EnumerateArray())
            {
                if (lic.TryGetProperty("license", out var licObj))
                {
                    if (licObj.TryGetProperty("id", out var licId))
                        licenseList.Add(licId.GetString() ?? "");
                    else if (licObj.TryGetProperty("name", out var licName))
                        licenseList.Add(licName.GetString() ?? "");
                }
            }
            extracted.License = string.Join(", ", licenseList);
        }

        // Extract CPE
        if (component.TryGetProperty("cpe", out var cpe))
        {
            extracted.Cpe = cpe.GetString();
        }

        return extracted;
    }

    /// <summary>
    /// Two packages match when their normalized PURLs agree; when either side
    /// lacks a PURL, falls back to case-insensitive name + normalized version.
    /// </summary>
    private static bool MatchesPackage(ExtractedPackage a, ExtractedPackage b)
    {
        // Match by PURL first (most reliable)
        if (!string.IsNullOrEmpty(a.Purl) && !string.IsNullOrEmpty(b.Purl))
        {
            return NormalizePurl(a.Purl) == NormalizePurl(b.Purl);
        }

        // Fall back to name + version match
        return string.Equals(a.Name, b.Name, StringComparison.OrdinalIgnoreCase) &&
               string.Equals(NormalizeVersion(a.Version), NormalizeVersion(b.Version), StringComparison.OrdinalIgnoreCase);
    }

    /// <summary>
    /// Strips qualifiers ("?...") and subpath ("#...") and lower-cases, so PURLs
    /// that differ only in qualifiers still compare equal.
    /// </summary>
    private static string NormalizePurl(string purl)
    {
        var idx = purl.IndexOf('?');
        if (idx > 0)
            purl = purl[..idx];
        idx = purl.IndexOf('#');
        if (idx > 0)
            purl = purl[..idx];
        return purl.ToLowerInvariant();
    }

    /// <summary>
    /// Strips a leading "v"/"V" prefix and lower-cases the version string.
    /// </summary>
    private static string NormalizeVersion(string version)
    {
        version = version.TrimStart('v', 'V');
        return version.ToLowerInvariant();
    }
}
/// <summary>
/// Result of comparing two SBOM outputs.
/// </summary>
public sealed class SbomComparisonResult
{
    /// <summary>Tool whose output is treated as the reference.</summary>
    public required string BaselineTool { get; init; }

    /// <summary>Tool being compared against the baseline.</summary>
    public required string CandidateTool { get; init; }

    /// <summary>Image both tools scanned.</summary>
    public required string Image { get; init; }

    /// <summary>True when the comparison completed without error.</summary>
    public bool Success { get; set; }

    /// <summary>Error message when the comparison failed.</summary>
    public string? Error { get; set; }

    // Package counts
    public int BaselinePackageCount { get; set; }
    public int CandidatePackageCount { get; set; }
    /// <summary>Candidate count minus baseline count (positive = candidate found more).</summary>
    public int PackageCountDiff { get; set; }
    /// <summary>Count difference as a percentage of the baseline count.</summary>
    public double PackageCountDiffPercent { get; set; }

    // Package matching
    public List<ExtractedPackage> OnlyInBaseline { get; set; } = [];
    public List<ExtractedPackage> OnlyInCandidate { get; set; } = [];
    public List<ExtractedPackage> MatchingPackages { get; set; } = [];
    /// <summary>Percentage of baseline packages matched in the candidate output.</summary>
    public double MatchRate { get; set; }

    // PURL completeness (counts and percentages of packages carrying a PURL)
    public int BaselinePurlCount { get; set; }
    public int CandidatePurlCount { get; set; }
    public double PurlCompletenessBaseline { get; set; }
    public double PurlCompletenessCandidate { get; set; }

    // License detection (packages with a non-empty license)
    public int BaselineLicenseCount { get; set; }
    public int CandidateLicenseCount { get; set; }

    // CPE mapping (packages with a non-empty CPE)
    public int BaselineCpeCount { get; set; }
    public int CandidateCpeCount { get; set; }
}
/// <summary>
/// Package information extracted from an SBOM document (SPDX or CycloneDX).
/// </summary>
public sealed class ExtractedPackage
{
    /// <summary>Package name as reported by the SBOM.</summary>
    public required string Name { get; init; }

    /// <summary>Package version string as reported by the SBOM.</summary>
    public required string Version { get; init; }

    /// <summary>Package URL identifier, when the SBOM supplied one.</summary>
    public string? Purl { get; set; }

    /// <summary>Detected license expression, when available.</summary>
    public string? License { get; set; }

    /// <summary>CPE identifier, when available.</summary>
    public string? Cpe { get; set; }

    /// <summary>Package ecosystem; not set by the extractors in this file — presumably filled by callers (TODO confirm).</summary>
    public string? Ecosystem { get; set; }

    /// <summary>Prefers the PURL as the display form, falling back to "name@version".</summary>
    public override string ToString()
        => string.IsNullOrEmpty(Purl) ? $"{Name}@{Version}" : Purl;
}

View File

@@ -0,0 +1,58 @@
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <LangVersion>preview</LangVersion>
    <Nullable>enable</Nullable>
    <ImplicitUsings>enable</ImplicitUsings>
    <IsPackable>false</IsPackable>
    <IsTestProject>true</IsTestProject>
    <RootNamespace>StellaOps.Parity.Tests</RootNamespace>
  </PropertyGroup>
  <!--
    Sprint: SPRINT_5100_0008_0001_competitor_parity
    Task: PARITY-5100-001 - Create parity test project
    Description: Competitor parity test harness for comparing StellaOps with Syft, Grype, Trivy
  -->
  <ItemGroup>
    <!-- Test framework packages -->
    <PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.12.0" />
    <PackageReference Include="xunit" Version="2.9.2" />
    <PackageReference Include="xunit.runner.visualstudio" Version="2.8.2">
      <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
      <PrivateAssets>all</PrivateAssets>
    </PackageReference>
    <PackageReference Include="FluentAssertions" Version="6.12.0" />
    <PackageReference Include="coverlet.collector" Version="6.0.4">
      <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
      <PrivateAssets>all</PrivateAssets>
    </PackageReference>
    <!-- Container and process support: Testcontainers manages images,
         CliWrap drives the external syft/grype/trivy binaries. -->
    <PackageReference Include="Testcontainers" Version="4.4.0" />
    <PackageReference Include="CliWrap" Version="3.7.0" />
    <!-- JSON and SBOM processing -->
    <PackageReference Include="System.Text.Json" Version="10.0.0" />
  </ItemGroup>
  <ItemGroup>
    <!-- TestKit for deterministic helpers -->
    <ProjectReference Include="..\..\src\__Libraries\StellaOps.TestKit\StellaOps.TestKit.csproj" />
    <!-- Scanner libraries for SBOM comparison -->
    <ProjectReference Include="..\..\src\Scanner\__Libraries\StellaOps.Scanner.Core\StellaOps.Scanner.Core.csproj" />
    <ProjectReference Include="..\..\src\Scanner\__Libraries\StellaOps.Scanner.Emit\StellaOps.Scanner.Emit.csproj" />
  </ItemGroup>
  <ItemGroup>
    <!-- Fixture images and persisted results copied next to the test binaries -->
    <Content Include="Fixtures\**\*">
      <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
    </Content>
    <Content Include="Results\**\*">
      <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
    </Content>
  </ItemGroup>
</Project>

View File

@@ -0,0 +1,340 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// SPDX-FileCopyrightText: 2025 StellaOps Contributors
namespace StellaOps.Parity.Tests.Storage;
/// <summary>
/// Detects drift in parity metrics when StellaOps falls behind competitors.
/// Triggers alerts when drift exceeds configured thresholds for a sustained period.
/// </summary>
public sealed class ParityDriftDetector
{
    private readonly ParityResultStore _store;
    private readonly DriftThresholds _thresholds;
    private readonly int _requiredTrendDays;

    /// <summary>
    /// Creates a detector over <paramref name="store"/>.
    /// </summary>
    /// <param name="store">Source of persisted parity results.</param>
    /// <param name="thresholds">Drift thresholds; defaults to <see cref="DriftThresholds.Default"/>.</param>
    /// <param name="requiredTrendDays">Days of history used for trend analysis; must be positive.</param>
    /// <exception cref="ArgumentNullException"><paramref name="store"/> is null.</exception>
    /// <exception cref="ArgumentOutOfRangeException"><paramref name="requiredTrendDays"/> is not positive.</exception>
    public ParityDriftDetector(
        ParityResultStore store,
        DriftThresholds? thresholds = null,
        int requiredTrendDays = 3)
    {
        ArgumentOutOfRangeException.ThrowIfNegativeOrZero(requiredTrendDays);
        _store = store ?? throw new ArgumentNullException(nameof(store));
        _thresholds = thresholds ?? DriftThresholds.Default;
        _requiredTrendDays = requiredTrendDays;
    }

    /// <summary>
    /// Analyzes recent results and returns any drift alerts.
    /// </summary>
    public async Task<DriftAnalysisResult> AnalyzeAsync(CancellationToken ct = default)
    {
        // Load last N days of results (one extra day so the baseline half has data).
        var startTime = DateTime.UtcNow.AddDays(-_requiredTrendDays - 1);
        var results = await _store.LoadResultsAsync(startTime: startTime, ct: ct);

        if (results.Count == 0)
        {
            return new DriftAnalysisResult
            {
                AnalyzedAt = DateTime.UtcNow,
                ResultCount = 0,
                Alerts = [],
                Summary = "No parity results available for analysis."
            };
        }

        var alerts = new List<DriftAlert>();

        // Every metric follows the same recipe: project the time series, compute
        // drift, and raise an alert when it exceeds the metric's threshold.
        AddAlertIfDrifting(
            alerts,
            results.Select(r => (r.Timestamp, r.SbomMetrics.PackageCompletenessRatio)).ToList(),
            "SBOM Package Completeness",
            DriftType.Declining,
            _thresholds.SbomCompletenessThreshold,
            "Review SBOM extraction logic; compare against Syft package detection.");

        AddAlertIfDrifting(
            alerts,
            results.Select(r => (r.Timestamp, r.VulnMetrics.Recall)).ToList(),
            "Vulnerability Recall",
            DriftType.Declining,
            _thresholds.VulnRecallThreshold,
            "Check feed freshness; verify matcher logic for new CVE patterns.");

        // Higher is worse for the latency ratio, so this metric alerts on increases.
        AddAlertIfDrifting(
            alerts,
            results.Select(r => (r.Timestamp, r.LatencyMetrics.StellaOpsVsGrypeRatio)).ToList(),
            "Latency vs Grype (P95)",
            DriftType.Increasing,
            _thresholds.LatencyRatioThreshold,
            "Profile scanner hot paths; check for new allocations or I/O bottlenecks.");

        AddAlertIfDrifting(
            alerts,
            results.Select(r => (r.Timestamp, r.SbomMetrics.PurlCompletenessRatio)).ToList(),
            "PURL Completeness",
            DriftType.Declining,
            _thresholds.PurlCompletenessThreshold,
            "Verify PURL generation for all package types; check ecosystem-specific extractors.");

        AddAlertIfDrifting(
            alerts,
            results.Select(r => (r.Timestamp, r.VulnMetrics.F1Score)).ToList(),
            "Vulnerability F1 Score",
            DriftType.Declining,
            _thresholds.F1ScoreThreshold,
            "Balance precision/recall; check for false positive patterns.");

        var summary = alerts.Count switch
        {
            0 => $"No drift detected across {results.Count} results over {_requiredTrendDays}+ days.",
            1 => $"1 drift alert detected. Investigate {alerts[0].MetricName}.",
            _ => $"{alerts.Count} drift alerts detected. Review SBOM and vulnerability parity."
        };

        return new DriftAnalysisResult
        {
            AnalyzedAt = DateTime.UtcNow,
            ResultCount = results.Count,
            Alerts = alerts,
            Summary = summary
        };
    }

    /// <summary>
    /// Computes drift for one metric series and appends an alert when the drift
    /// amount exceeds <paramref name="threshold"/>.
    /// </summary>
    private void AddAlertIfDrifting(
        List<DriftAlert> alerts,
        IReadOnlyList<(DateTime Timestamp, double Value)> series,
        string metricName,
        DriftType driftType,
        double threshold,
        string recommendation)
    {
        // Declining metrics (quality ratios) drift when they fall;
        // increasing metrics (latency) drift when they rise.
        var drift = CalculateDrift(series, invert: driftType == DriftType.Increasing);
        if (!drift.HasDrift || drift.DriftAmount <= threshold)
        {
            return;
        }

        alerts.Add(new DriftAlert
        {
            MetricName = metricName,
            DriftType = driftType,
            BaselineValue = drift.BaselineValue,
            CurrentValue = drift.CurrentValue,
            DriftPercent = drift.DriftAmount * 100,
            ThresholdPercent = threshold * 100,
            TrendDays = _requiredTrendDays,
            Severity = GetAlertSeverity(drift.DriftAmount, threshold),
            Recommendation = recommendation
        });
    }

    /// <summary>
    /// Splits the series into a baseline half (first) and current half (second)
    /// and computes the relative change between their averages. With
    /// <paramref name="invert"/> false, positive drift means the metric declined
    /// (bad for quality ratios); with it true, positive drift means the metric
    /// increased (bad for latency). Returns no drift when there are fewer than
    /// two samples or the baseline average is non-positive.
    /// </summary>
    private static DriftCalculation CalculateDrift(
        IReadOnlyList<(DateTime Timestamp, double Value)> series,
        bool invert)
    {
        if (series.Count < 2)
        {
            return new DriftCalculation { HasDrift = false };
        }

        // Split into first half (baseline) and second half (current).
        var midpoint = series.Count / 2;
        var baselineAvg = series.Take(midpoint).Average(x => x.Value);
        var currentAvg = series.Skip(midpoint).Average(x => x.Value);

        // A non-positive baseline makes the relative change meaningless.
        if (baselineAvg <= 0)
        {
            return new DriftCalculation { HasDrift = false };
        }

        var drift = invert
            ? (currentAvg - baselineAvg) / baselineAvg
            : (baselineAvg - currentAvg) / baselineAvg;

        return new DriftCalculation
        {
            HasDrift = drift > 0, // Only flag movement in the "bad" direction.
            BaselineValue = baselineAvg,
            CurrentValue = currentAvg,
            DriftAmount = Math.Max(0, drift)
        };
    }

    /// <summary>
    /// Maps drift magnitude (relative to its threshold) onto an alert severity.
    /// </summary>
    private static AlertSeverity GetAlertSeverity(double drift, double threshold)
    {
        var ratio = drift / threshold;
        return ratio switch
        {
            >= 3.0 => AlertSeverity.Critical,
            >= 2.0 => AlertSeverity.High,
            >= 1.5 => AlertSeverity.Medium,
            _ => AlertSeverity.Low
        };
    }

    /// <summary>Intermediate drift computation for one metric series.</summary>
    private sealed record DriftCalculation
    {
        public bool HasDrift { get; init; }
        public double BaselineValue { get; init; }
        public double CurrentValue { get; init; }
        public double DriftAmount { get; init; }
    }
}
/// <summary>
/// Thresholds for triggering drift alerts.
/// Values are relative (e.g., 0.05 = 5% drift).
/// </summary>
public sealed record DriftThresholds
{
    /// <summary>
    /// Default thresholds: 5% drift on key metrics, 10% for latency.
    /// Cached in a get-only property: the record is immutable, so sharing one
    /// instance is safe and avoids allocating a new record on every access.
    /// </summary>
    public static DriftThresholds Default { get; } = new()
    {
        SbomCompletenessThreshold = 0.05,
        PurlCompletenessThreshold = 0.05,
        VulnRecallThreshold = 0.05,
        F1ScoreThreshold = 0.05,
        LatencyRatioThreshold = 0.10 // 10% for latency
    };

    /// <summary>
    /// SBOM package completeness drift threshold (default: 5%).
    /// </summary>
    public double SbomCompletenessThreshold { get; init; }

    /// <summary>
    /// PURL completeness drift threshold (default: 5%).
    /// </summary>
    public double PurlCompletenessThreshold { get; init; }

    /// <summary>
    /// Vulnerability recall drift threshold (default: 5%).
    /// </summary>
    public double VulnRecallThreshold { get; init; }

    /// <summary>
    /// Vulnerability F1 score drift threshold (default: 5%).
    /// </summary>
    public double F1ScoreThreshold { get; init; }

    /// <summary>
    /// Latency ratio drift threshold (default: 10%).
    /// </summary>
    public double LatencyRatioThreshold { get; init; }
}
/// <summary>
/// Outcome of one drift analysis pass over a window of historical results.
/// </summary>
public sealed record DriftAnalysisResult
{
    /// <summary>When the analysis ran (clock source not visible here — presumably UTC; confirm at the producer).</summary>
    public required DateTime AnalyzedAt { get; init; }
    /// <summary>Number of historical results included in the analysis window.</summary>
    public required int ResultCount { get; init; }
    /// <summary>Alerts raised for metrics whose drift exceeded their threshold.</summary>
    public required IReadOnlyList<DriftAlert> Alerts { get; init; }
    /// <summary>Human-readable summary of the analysis.</summary>
    public required string Summary { get; init; }
    /// <summary>True when at least one alert was raised.</summary>
    public bool HasAlerts => Alerts.Count > 0;
    /// <summary>True when any raised alert has Critical severity.</summary>
    public bool HasCriticalAlerts => Alerts.Any(a => a.Severity == AlertSeverity.Critical);
}
/// <summary>
/// A single metric whose drift exceeded its configured threshold.
/// </summary>
public sealed record DriftAlert
{
    /// <summary>Name of the drifting metric.</summary>
    public required string MetricName { get; init; }
    /// <summary>Direction of the drift (declining vs increasing).</summary>
    public required DriftType DriftType { get; init; }
    /// <summary>Metric value over the baseline (older) window.</summary>
    public required double BaselineValue { get; init; }
    /// <summary>Metric value over the current (newer) window.</summary>
    public required double CurrentValue { get; init; }
    // NOTE(review): the name says "Percent" but whether values are stored as
    // fractions (0.05) or percents (5) is not visible from this type — confirm
    // against the producer before formatting for display.
    /// <summary>Observed drift magnitude.</summary>
    public required double DriftPercent { get; init; }
    /// <summary>Threshold the drift was compared against (same unit as DriftPercent).</summary>
    public required double ThresholdPercent { get; init; }
    /// <summary>Number of days covered by the analyzed trend window.</summary>
    public required int TrendDays { get; init; }
    /// <summary>Severity derived from the drift-to-threshold ratio.</summary>
    public required AlertSeverity Severity { get; init; }
    /// <summary>Suggested remediation action for this alert.</summary>
    public required string Recommendation { get; init; }
}
/// <summary>
/// Direction of a detected metric drift.
/// </summary>
public enum DriftType
{
    /// <summary>Metric is trending downward (bad for quality metrics such as recall or completeness).</summary>
    Declining,
    /// <summary>Metric is trending upward (bad for cost metrics such as latency).</summary>
    Increasing
}
/// <summary>
/// Severity of a drift alert, derived from how many times the observed drift
/// exceeds its threshold (>=3x Critical, >=2x High, >=1.5x Medium, else Low).
/// </summary>
public enum AlertSeverity
{
    /// <summary>Drift above threshold but below 1.5x.</summary>
    Low,
    /// <summary>Drift at least 1.5x the threshold.</summary>
    Medium,
    /// <summary>Drift at least 2x the threshold.</summary>
    High,
    /// <summary>Drift at least 3x the threshold.</summary>
    Critical
}

View File

@@ -0,0 +1,296 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// SPDX-FileCopyrightText: 2025 StellaOps Contributors
using System.Text.Json;
using System.Text.Json.Serialization;
namespace StellaOps.Parity.Tests.Storage;
/// <summary>
/// Time-series storage for parity test results.
/// Emits results as JSON artifacts for historical tracking and trend analysis.
/// </summary>
/// <remarks>
/// Results are stored one file per run using the pattern
/// <c>parity-{RunId}-{Timestamp:yyyyMMddTHHmmssZ}.json</c>. Note the trailing
/// 'Z' in that custom format string is a literal character, not a UTC/offset
/// conversion specifier ('K'), so the timestamp is written as-is — presumably
/// run timestamps are already UTC; confirm at the producer.
/// </remarks>
public sealed class ParityResultStore
{
    // Root directory for result artifacts; created lazily on first store.
    private readonly string _storagePath;
    // Shared serializer settings: indented camelCase JSON, enums as strings,
    // null properties omitted.
    private readonly JsonSerializerOptions _jsonOptions;
    /// <summary>
    /// Creates a store rooted at <paramref name="storagePath"/>.
    /// </summary>
    /// <param name="storagePath">Directory that holds the JSON artifacts.</param>
    /// <exception cref="ArgumentNullException">When <paramref name="storagePath"/> is null.</exception>
    public ParityResultStore(string storagePath)
    {
        _storagePath = storagePath ?? throw new ArgumentNullException(nameof(storagePath));
        _jsonOptions = new JsonSerializerOptions
        {
            WriteIndented = true,
            PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
            DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull,
            Converters = { new JsonStringEnumConverter() }
        };
    }
    /// <summary>
    /// Stores a parity run result with timestamp-based filename.
    /// </summary>
    /// <param name="summary">Run summary to persist; must not be null.</param>
    /// <param name="ct">Cancellation token for the file write.</param>
    public async Task StoreResultAsync(ParityRunSummary summary, CancellationToken ct = default)
    {
        ArgumentNullException.ThrowIfNull(summary);
        Directory.CreateDirectory(_storagePath);
        // Filename must stay in sync with PruneOldResultsAsync, which rebuilds
        // the same string from RunId + Timestamp to locate files to delete.
        var filename = $"parity-{summary.RunId}-{summary.Timestamp:yyyyMMddTHHmmssZ}.json";
        var filePath = Path.Combine(_storagePath, filename);
        var json = JsonSerializer.Serialize(summary, _jsonOptions);
        await File.WriteAllTextAsync(filePath, json, ct);
    }
    /// <summary>
    /// Loads all parity results within the specified time range.
    /// </summary>
    /// <param name="startTime">Inclusive lower bound on Timestamp; null = unbounded.</param>
    /// <param name="endTime">Inclusive upper bound on Timestamp; null = unbounded.</param>
    /// <param name="ct">Cancellation token for the file reads.</param>
    /// <returns>Matching summaries ordered by ascending Timestamp.</returns>
    public async Task<IReadOnlyList<ParityRunSummary>> LoadResultsAsync(
        DateTime? startTime = null,
        DateTime? endTime = null,
        CancellationToken ct = default)
    {
        if (!Directory.Exists(_storagePath))
        {
            return [];
        }
        var results = new List<ParityRunSummary>();
        var files = Directory.GetFiles(_storagePath, "parity-*.json");
        foreach (var file in files)
        {
            try
            {
                var json = await File.ReadAllTextAsync(file, ct);
                var summary = JsonSerializer.Deserialize<ParityRunSummary>(json, _jsonOptions);
                if (summary is null)
                    continue;
                // Filter by time range
                if (startTime.HasValue && summary.Timestamp < startTime.Value)
                    continue;
                if (endTime.HasValue && summary.Timestamp > endTime.Value)
                    continue;
                results.Add(summary);
            }
            catch (JsonException)
            {
                // Skip malformed files — loading is deliberately best-effort so
                // one corrupt artifact does not block trend analysis.
            }
        }
        return results
            .OrderBy(r => r.Timestamp)
            .ToList();
    }
    /// <summary>
    /// Loads the latest N results.
    /// </summary>
    /// <param name="count">Maximum number of results to return.</param>
    /// <param name="ct">Cancellation token for the file reads.</param>
    /// <returns>Up to <paramref name="count"/> newest summaries, re-sorted into chronological order.</returns>
    public async Task<IReadOnlyList<ParityRunSummary>> LoadLatestResultsAsync(
        int count,
        CancellationToken ct = default)
    {
        var allResults = await LoadResultsAsync(ct: ct);
        // Take the newest `count`, then flip back to ascending Timestamp.
        return allResults
            .OrderByDescending(r => r.Timestamp)
            .Take(count)
            .OrderBy(r => r.Timestamp)
            .ToList();
    }
    /// <summary>
    /// Prunes results older than the specified retention period.
    /// </summary>
    /// <param name="retention">Age (relative to DateTime.UtcNow) beyond which artifacts are deleted.</param>
    /// <param name="ct">Cancellation token for the file reads.</param>
    /// <returns>Number of files deleted.</returns>
    public async Task<int> PruneOldResultsAsync(TimeSpan retention, CancellationToken ct = default)
    {
        var cutoff = DateTime.UtcNow - retention;
        // Reuse the loader's endTime filter to find runs at or before the cutoff.
        var results = await LoadResultsAsync(endTime: cutoff, ct: ct);
        var pruned = 0;
        foreach (var result in results)
        {
            // Rebuild the exact filename StoreResultAsync produced for this run.
            var filename = $"parity-{result.RunId}-{result.Timestamp:yyyyMMddTHHmmssZ}.json";
            var filePath = Path.Combine(_storagePath, filename);
            if (File.Exists(filePath))
            {
                File.Delete(filePath);
                pruned++;
            }
        }
        return pruned;
    }
    /// <summary>
    /// Exports results to Prometheus-compatible metrics format (text).
    /// </summary>
    /// <param name="summary">Run summary to export; must not be null.</param>
    /// <returns>Prometheus text exposition with HELP/TYPE headers per metric.</returns>
    public string ExportPrometheusMetrics(ParityRunSummary summary)
    {
        ArgumentNullException.ThrowIfNull(summary);
        // NOTE(review): RunId is interpolated into label values unescaped; a
        // RunId containing '"' or '\' would break the exposition format —
        // confirm RunId is restricted to safe characters upstream.
        var lines = new List<string>
        {
            "# HELP stellaops_parity_sbom_completeness_ratio SBOM package completeness ratio vs competitors.",
            "# TYPE stellaops_parity_sbom_completeness_ratio gauge",
            $"stellaops_parity_sbom_completeness_ratio{{run_id=\"{summary.RunId}\"}} {summary.SbomMetrics.PackageCompletenessRatio:F4}",
            "",
            "# HELP stellaops_parity_sbom_match_count Number of matched packages across scanners.",
            "# TYPE stellaops_parity_sbom_match_count gauge",
            $"stellaops_parity_sbom_match_count{{run_id=\"{summary.RunId}\"}} {summary.SbomMetrics.MatchedPackageCount}",
            "",
            "# HELP stellaops_parity_vuln_recall Vulnerability detection recall vs competitors.",
            "# TYPE stellaops_parity_vuln_recall gauge",
            $"stellaops_parity_vuln_recall{{run_id=\"{summary.RunId}\"}} {summary.VulnMetrics.Recall:F4}",
            "",
            "# HELP stellaops_parity_vuln_precision Vulnerability detection precision vs competitors.",
            "# TYPE stellaops_parity_vuln_precision gauge",
            $"stellaops_parity_vuln_precision{{run_id=\"{summary.RunId}\"}} {summary.VulnMetrics.Precision:F4}",
            "",
            "# HELP stellaops_parity_vuln_f1 Vulnerability detection F1 score.",
            "# TYPE stellaops_parity_vuln_f1 gauge",
            $"stellaops_parity_vuln_f1{{run_id=\"{summary.RunId}\"}} {summary.VulnMetrics.F1Score:F4}",
            "",
            "# HELP stellaops_parity_latency_p50_ms Scan latency P50 in milliseconds.",
            "# TYPE stellaops_parity_latency_p50_ms gauge",
            $"stellaops_parity_latency_p50_ms{{scanner=\"stellaops\",run_id=\"{summary.RunId}\"}} {summary.LatencyMetrics.StellaOpsP50Ms:F2}",
            $"stellaops_parity_latency_p50_ms{{scanner=\"grype\",run_id=\"{summary.RunId}\"}} {summary.LatencyMetrics.GrypeP50Ms:F2}",
            $"stellaops_parity_latency_p50_ms{{scanner=\"trivy\",run_id=\"{summary.RunId}\"}} {summary.LatencyMetrics.TrivyP50Ms:F2}",
            "",
            "# HELP stellaops_parity_latency_p95_ms Scan latency P95 in milliseconds.",
            "# TYPE stellaops_parity_latency_p95_ms gauge",
            $"stellaops_parity_latency_p95_ms{{scanner=\"stellaops\",run_id=\"{summary.RunId}\"}} {summary.LatencyMetrics.StellaOpsP95Ms:F2}",
            $"stellaops_parity_latency_p95_ms{{scanner=\"grype\",run_id=\"{summary.RunId}\"}} {summary.LatencyMetrics.GrypeP95Ms:F2}",
            $"stellaops_parity_latency_p95_ms{{scanner=\"trivy\",run_id=\"{summary.RunId}\"}} {summary.LatencyMetrics.TrivyP95Ms:F2}",
            "",
            "# HELP stellaops_parity_error_scenarios_passed Number of error scenarios passed.",
            "# TYPE stellaops_parity_error_scenarios_passed gauge",
            $"stellaops_parity_error_scenarios_passed{{run_id=\"{summary.RunId}\"}} {summary.ErrorMetrics.ScenariosPassed}",
            "",
            "# HELP stellaops_parity_error_scenarios_total Total number of error scenarios.",
            "# TYPE stellaops_parity_error_scenarios_total gauge",
            $"stellaops_parity_error_scenarios_total{{run_id=\"{summary.RunId}\"}} {summary.ErrorMetrics.ScenariosTotal}"
        };
        return string.Join("\n", lines);
    }
    /// <summary>
    /// Exports results to InfluxDB line protocol format.
    /// </summary>
    /// <param name="summary">Run summary to export; must not be null.</param>
    /// <returns>One line-protocol record per measurement, newline-joined.</returns>
    public string ExportInfluxLineProtocol(ParityRunSummary summary)
    {
        ArgumentNullException.ThrowIfNull(summary);
        // NOTE(review): run_id is used as a tag value without line-protocol
        // escaping; spaces or commas in RunId would corrupt the record —
        // confirm RunId is restricted upstream.
        var timestamp = new DateTimeOffset(summary.Timestamp).ToUnixTimeMilliseconds() * 1_000_000; // nanoseconds
        var lines = new List<string>
        {
            $"parity_sbom,run_id={summary.RunId} completeness_ratio={summary.SbomMetrics.PackageCompletenessRatio:F4},matched_count={summary.SbomMetrics.MatchedPackageCount}i {timestamp}",
            $"parity_vuln,run_id={summary.RunId} recall={summary.VulnMetrics.Recall:F4},precision={summary.VulnMetrics.Precision:F4},f1={summary.VulnMetrics.F1Score:F4} {timestamp}",
            $"parity_latency,run_id={summary.RunId},scanner=stellaops p50={summary.LatencyMetrics.StellaOpsP50Ms:F2},p95={summary.LatencyMetrics.StellaOpsP95Ms:F2},p99={summary.LatencyMetrics.StellaOpsP99Ms:F2} {timestamp}",
            $"parity_latency,run_id={summary.RunId},scanner=grype p50={summary.LatencyMetrics.GrypeP50Ms:F2},p95={summary.LatencyMetrics.GrypeP95Ms:F2},p99={summary.LatencyMetrics.GrypeP99Ms:F2} {timestamp}",
            $"parity_latency,run_id={summary.RunId},scanner=trivy p50={summary.LatencyMetrics.TrivyP50Ms:F2},p95={summary.LatencyMetrics.TrivyP95Ms:F2},p99={summary.LatencyMetrics.TrivyP99Ms:F2} {timestamp}",
            $"parity_errors,run_id={summary.RunId} passed={summary.ErrorMetrics.ScenariosPassed}i,total={summary.ErrorMetrics.ScenariosTotal}i {timestamp}"
        };
        return string.Join("\n", lines);
    }
}
/// <summary>
/// Summary of a single parity test run.
/// </summary>
public sealed record ParityRunSummary
{
    /// <summary>Unique identifier of the run; also embedded in the artifact filename.</summary>
    public required string RunId { get; init; }
    /// <summary>When the run executed (presumably UTC — confirm at the producer).</summary>
    public required DateTime Timestamp { get; init; }
    /// <summary>StellaOps scanner version under test.</summary>
    public required string StellaOpsVersion { get; init; }
    /// <summary>Versions of the competitor tools used for comparison.</summary>
    public required CompetitorVersions CompetitorVersions { get; init; }
    /// <summary>SBOM completeness metrics vs competitors.</summary>
    public required SbomParityMetrics SbomMetrics { get; init; }
    /// <summary>Vulnerability detection accuracy metrics vs competitors.</summary>
    public required VulnParityMetrics VulnMetrics { get; init; }
    /// <summary>Scan latency percentiles per scanner.</summary>
    public required LatencyParityMetrics LatencyMetrics { get; init; }
    /// <summary>Error-scenario handling results.</summary>
    public required ErrorParityMetrics ErrorMetrics { get; init; }
    /// <summary>Identifiers of the fixtures exercised by this run.</summary>
    public required IReadOnlyList<string> FixturesUsed { get; init; }
    /// <summary>Optional free-form notes about the run.</summary>
    public string? Notes { get; init; }
}
/// <summary>
/// Versions of the competitor tools a parity run compared against.
/// </summary>
public sealed record CompetitorVersions
{
    /// <summary>Syft version used for SBOM comparison.</summary>
    public required string SyftVersion { get; init; }
    /// <summary>Grype version used for vulnerability comparison.</summary>
    public required string GrypeVersion { get; init; }
    /// <summary>Trivy version used for vulnerability comparison.</summary>
    public required string TrivyVersion { get; init; }
}
/// <summary>
/// SBOM completeness metrics comparing StellaOps output against Syft.
/// </summary>
public sealed record SbomParityMetrics
{
    /// <summary>Packages reported by StellaOps.</summary>
    public required int StellaOpsPackageCount { get; init; }
    /// <summary>Packages reported by Syft.</summary>
    public required int SyftPackageCount { get; init; }
    /// <summary>Packages reported by both tools.</summary>
    public required int MatchedPackageCount { get; init; }
    /// <summary>Package completeness ratio vs the competitor baseline.</summary>
    public required double PackageCompletenessRatio { get; init; }
    /// <summary>PURL completeness ratio.</summary>
    public required double PurlCompletenessRatio { get; init; }
    /// <summary>License detection ratio.</summary>
    public required double LicenseDetectionRatio { get; init; }
    /// <summary>CPE detection ratio.</summary>
    public required double CpeDetectionRatio { get; init; }
}
/// <summary>
/// Vulnerability detection accuracy metrics across StellaOps, Grype and Trivy.
/// </summary>
public sealed record VulnParityMetrics
{
    /// <summary>CVEs reported by StellaOps.</summary>
    public required int StellaOpsCveCount { get; init; }
    /// <summary>CVEs reported by Grype.</summary>
    public required int GrypeCveCount { get; init; }
    /// <summary>CVEs reported by Trivy.</summary>
    public required int TrivyCveCount { get; init; }
    /// <summary>Size of the union of all scanners' CVE sets.</summary>
    public required int UnionCveCount { get; init; }
    /// <summary>Size of the intersection of all scanners' CVE sets.</summary>
    public required int IntersectionCveCount { get; init; }
    /// <summary>Detection recall relative to the competitor baseline (not ground truth).</summary>
    public required double Recall { get; init; }
    /// <summary>Detection precision relative to the competitor baseline (not ground truth).</summary>
    public required double Precision { get; init; }
    /// <summary>Harmonic mean of precision and recall.</summary>
    public required double F1Score { get; init; }
    /// <summary>Severity distribution of StellaOps findings.</summary>
    public required SeverityBreakdown StellaOpsSeverity { get; init; }
    /// <summary>Severity distribution of Grype findings.</summary>
    public required SeverityBreakdown GrypeSeverity { get; init; }
    /// <summary>Severity distribution of Trivy findings.</summary>
    public required SeverityBreakdown TrivySeverity { get; init; }
}
/// <summary>
/// Count of findings per severity bucket for one scanner.
/// </summary>
public sealed record SeverityBreakdown
{
    /// <summary>Number of Critical findings.</summary>
    public int Critical { get; init; }
    /// <summary>Number of High findings.</summary>
    public int High { get; init; }
    /// <summary>Number of Medium findings.</summary>
    public int Medium { get; init; }
    /// <summary>Number of Low findings.</summary>
    public int Low { get; init; }
    /// <summary>Number of findings with unrecognized or missing severity.</summary>
    public int Unknown { get; init; }
}
/// <summary>
/// Scan latency percentiles (milliseconds) per scanner, plus relative ratios.
/// </summary>
public sealed record LatencyParityMetrics
{
    /// <summary>StellaOps P50 latency in milliseconds.</summary>
    public required double StellaOpsP50Ms { get; init; }
    /// <summary>StellaOps P95 latency in milliseconds.</summary>
    public required double StellaOpsP95Ms { get; init; }
    /// <summary>StellaOps P99 latency in milliseconds.</summary>
    public required double StellaOpsP99Ms { get; init; }
    /// <summary>Grype P50 latency in milliseconds.</summary>
    public required double GrypeP50Ms { get; init; }
    /// <summary>Grype P95 latency in milliseconds.</summary>
    public required double GrypeP95Ms { get; init; }
    /// <summary>Grype P99 latency in milliseconds.</summary>
    public required double GrypeP99Ms { get; init; }
    /// <summary>Trivy P50 latency in milliseconds.</summary>
    public required double TrivyP50Ms { get; init; }
    /// <summary>Trivy P95 latency in milliseconds.</summary>
    public required double TrivyP95Ms { get; init; }
    /// <summary>Trivy P99 latency in milliseconds.</summary>
    public required double TrivyP99Ms { get; init; }
    /// <summary>StellaOps latency relative to Grype (percentile basis not visible here — confirm at the producer).</summary>
    public required double StellaOpsVsGrypeRatio { get; init; }
    /// <summary>StellaOps latency relative to Trivy (percentile basis not visible here — confirm at the producer).</summary>
    public required double StellaOpsVsTrivyRatio { get; init; }
}
/// <summary>
/// Aggregate results of error-handling scenario comparisons.
/// </summary>
public sealed record ErrorParityMetrics
{
    /// <summary>Number of error scenarios that passed.</summary>
    public required int ScenariosPassed { get; init; }
    /// <summary>Total number of error scenarios executed.</summary>
    public required int ScenariosTotal { get; init; }
    /// <summary>Per-scenario outcomes.</summary>
    public required IReadOnlyList<ErrorScenarioResult> ScenarioResults { get; init; }
}
/// <summary>
/// Outcome of one error-handling scenario across the compared scanners.
/// </summary>
public sealed record ErrorScenarioResult
{
    /// <summary>Name of the scenario.</summary>
    public required string ScenarioName { get; init; }
    /// <summary>Whether StellaOps handled the scenario as expected.</summary>
    public required bool StellaOpsPassed { get; init; }
    /// <summary>Whether Grype handled the scenario as expected.</summary>
    public required bool GrypePassed { get; init; }
    /// <summary>Whether Trivy handled the scenario as expected.</summary>
    public required bool TrivyPassed { get; init; }
    /// <summary>Whether the overall outcome matched the expected result.</summary>
    public required bool MatchesExpected { get; init; }
    /// <summary>Optional free-form notes about the scenario.</summary>
    public string? Notes { get; init; }
}

View File

@@ -0,0 +1,287 @@
// -----------------------------------------------------------------------------
// VulnerabilityComparisonLogic.cs
// Sprint: SPRINT_5100_0008_0001_competitor_parity
// Task: PARITY-5100-005 - Implement vulnerability finding comparison logic
// Description: Logic for comparing vulnerability findings between scanners
// -----------------------------------------------------------------------------
using System.Text.Json;
namespace StellaOps.Parity.Tests;
/// <summary>
/// Compares vulnerability findings between different scanners.
/// </summary>
public sealed class VulnerabilityComparisonLogic
{
    /// <summary>
    /// Compares vulnerability findings from two scanner outputs.
    /// </summary>
    /// <remarks>
    /// Findings are matched by exact (ordinal) CVE identifier only; package
    /// name/version differences between matched findings are ignored.
    /// Recall/precision are computed relative to the baseline tool, not
    /// ground truth.
    /// </remarks>
    /// <param name="baseline">Scanner output treated as the reference set.</param>
    /// <param name="candidate">Scanner output evaluated against the baseline.</param>
    /// <returns>Comparison result; <c>Success</c> is false and <c>Error</c> set on failure.</returns>
    public VulnerabilityComparisonResult Compare(ScannerOutput baseline, ScannerOutput candidate)
    {
        var result = new VulnerabilityComparisonResult
        {
            BaselineTool = baseline.ToolName,
            CandidateTool = candidate.ToolName,
            Image = baseline.Image
        };
        if (baseline.FindingsJson is null || candidate.FindingsJson is null)
        {
            result.Error = "One or both findings outputs are null";
            return result;
        }
        try
        {
            var baselineFindings = ExtractFindings(baseline.FindingsJson, baseline.ToolName);
            var candidateFindings = ExtractFindings(candidate.FindingsJson, candidate.ToolName);
            result.BaselineCveCount = baselineFindings.Count;
            result.CandidateCveCount = candidateFindings.Count;
            // Index CVE ids once so the three overlap queries below run in O(n)
            // instead of the O(n^2) nested Any() scans; ordinal matching
            // preserves the original `==` string-comparison semantics.
            var baselineIds = baselineFindings.Select(f => f.CveId).ToHashSet(StringComparer.Ordinal);
            var candidateIds = candidateFindings.Select(f => f.CveId).ToHashSet(StringComparer.Ordinal);
            // Find CVEs in baseline but not in candidate (false negatives for candidate)
            result.OnlyInBaseline = baselineFindings
                .Where(bf => !candidateIds.Contains(bf.CveId))
                .ToList();
            // Find CVEs in candidate but not in baseline (potential false positives)
            result.OnlyInCandidate = candidateFindings
                .Where(cf => !baselineIds.Contains(cf.CveId))
                .ToList();
            // Find matching CVEs (baseline-side findings the candidate also reported)
            result.MatchingCves = baselineFindings
                .Where(bf => candidateIds.Contains(bf.CveId))
                .ToList();
            // Calculate severity distribution
            result.BaselineSeverityDistribution = CalculateSeverityDistribution(baselineFindings);
            result.CandidateSeverityDistribution = CalculateSeverityDistribution(candidateFindings);
            // Calculate recall (what percentage of baseline CVEs were found)
            result.Recall = result.BaselineCveCount > 0
                ? (double)result.MatchingCves.Count / result.BaselineCveCount * 100
                : 100;
            // Calculate precision (what percentage of candidate CVEs are true positives)
            // Note: This is relative to baseline, not ground truth
            result.Precision = result.CandidateCveCount > 0
                ? (double)result.MatchingCves.Count / result.CandidateCveCount * 100
                : 100;
            // F1 score (harmonic mean of precision and recall)
            if (result.Precision + result.Recall > 0)
            {
                result.F1Score = 2 * (result.Precision * result.Recall) / (result.Precision + result.Recall);
            }
            // Calculate false positive rate (CVEs only in candidate)
            result.FalsePositiveRate = result.CandidateCveCount > 0
                ? (double)result.OnlyInCandidate.Count / result.CandidateCveCount * 100
                : 0;
            // Calculate false negative rate (CVEs only in baseline, missed by candidate)
            result.FalseNegativeRate = result.BaselineCveCount > 0
                ? (double)result.OnlyInBaseline.Count / result.BaselineCveCount * 100
                : 0;
            result.Success = true;
        }
        catch (Exception ex)
        {
            result.Error = ex.Message;
        }
        return result;
    }

    /// <summary>
    /// Dispatches finding extraction by tool name (case-insensitive).
    /// Unrecognized tools yield an empty list — NOTE(review): there is no
    /// extractor for stellaops output here; confirm whether that path is
    /// handled elsewhere.
    /// </summary>
    private static List<ExtractedFinding> ExtractFindings(JsonDocument findingsJson, string toolName)
    {
        var findings = new List<ExtractedFinding>();
        var root = findingsJson.RootElement;
        if (toolName.Equals("grype", StringComparison.OrdinalIgnoreCase))
        {
            findings.AddRange(ExtractGrypeFindings(root));
        }
        else if (toolName.Equals("trivy", StringComparison.OrdinalIgnoreCase))
        {
            findings.AddRange(ExtractTrivyFindings(root));
        }
        return findings;
    }

    /// <summary>
    /// Extracts findings from Grype JSON ("matches" array; vulnerability id,
    /// severity, artifact name/version, fix versions).
    /// </summary>
    private static IEnumerable<ExtractedFinding> ExtractGrypeFindings(JsonElement root)
    {
        if (!root.TryGetProperty("matches", out var matches))
            yield break;
        foreach (var match in matches.EnumerateArray())
        {
            if (!match.TryGetProperty("vulnerability", out var vuln))
                continue;
            var cveId = vuln.TryGetProperty("id", out var id) ? id.GetString() : null;
            // Findings without an id cannot be matched; drop them.
            if (string.IsNullOrEmpty(cveId))
                continue;
            var severity = vuln.TryGetProperty("severity", out var sev)
                ? ParseSeverity(sev.GetString())
                : VulnerabilitySeverity.Unknown;
            var packageName = "";
            var packageVersion = "";
            if (match.TryGetProperty("artifact", out var artifact))
            {
                packageName = artifact.TryGetProperty("name", out var name) ? name.GetString() ?? "" : "";
                packageVersion = artifact.TryGetProperty("version", out var version) ? version.GetString() ?? "" : "";
            }
            yield return new ExtractedFinding
            {
                CveId = cveId,
                Severity = severity,
                PackageName = packageName,
                PackageVersion = packageVersion,
                // Grype reports fixes as an array of versions; join for display.
                FixedVersion = vuln.TryGetProperty("fix", out var fix) && fix.TryGetProperty("versions", out var fixVer)
                    ? string.Join(", ", fixVer.EnumerateArray().Select(v => v.GetString()))
                    : null
            };
        }
    }

    /// <summary>
    /// Extracts findings from Trivy JSON ("Results[].Vulnerabilities" arrays).
    /// </summary>
    private static IEnumerable<ExtractedFinding> ExtractTrivyFindings(JsonElement root)
    {
        if (!root.TryGetProperty("Results", out var results))
            yield break;
        foreach (var result in results.EnumerateArray())
        {
            if (!result.TryGetProperty("Vulnerabilities", out var vulnerabilities))
                continue;
            foreach (var vuln in vulnerabilities.EnumerateArray())
            {
                var cveId = vuln.TryGetProperty("VulnerabilityID", out var id) ? id.GetString() : null;
                // Findings without an id cannot be matched; drop them.
                if (string.IsNullOrEmpty(cveId))
                    continue;
                var severity = vuln.TryGetProperty("Severity", out var sev)
                    ? ParseSeverity(sev.GetString())
                    : VulnerabilitySeverity.Unknown;
                yield return new ExtractedFinding
                {
                    CveId = cveId,
                    Severity = severity,
                    PackageName = vuln.TryGetProperty("PkgName", out var name) ? name.GetString() ?? "" : "",
                    PackageVersion = vuln.TryGetProperty("InstalledVersion", out var version) ? version.GetString() ?? "" : "",
                    FixedVersion = vuln.TryGetProperty("FixedVersion", out var fix) ? fix.GetString() : null
                };
            }
        }
    }

    /// <summary>
    /// Maps a scanner severity string (case-insensitive) onto the shared
    /// <see cref="VulnerabilitySeverity"/> scale; unrecognized values become Unknown.
    /// </summary>
    private static VulnerabilitySeverity ParseSeverity(string? severity)
    {
        return severity?.ToUpperInvariant() switch
        {
            "CRITICAL" => VulnerabilitySeverity.Critical,
            "HIGH" => VulnerabilitySeverity.High,
            "MEDIUM" => VulnerabilitySeverity.Medium,
            "LOW" => VulnerabilitySeverity.Low,
            "NEGLIGIBLE" or "NONE" => VulnerabilitySeverity.None,
            _ => VulnerabilitySeverity.Unknown
        };
    }

    /// <summary>
    /// Counts findings per severity bucket.
    /// </summary>
    private static SeverityDistribution CalculateSeverityDistribution(List<ExtractedFinding> findings)
    {
        return new SeverityDistribution
        {
            Critical = findings.Count(f => f.Severity == VulnerabilitySeverity.Critical),
            High = findings.Count(f => f.Severity == VulnerabilitySeverity.High),
            Medium = findings.Count(f => f.Severity == VulnerabilitySeverity.Medium),
            Low = findings.Count(f => f.Severity == VulnerabilitySeverity.Low),
            None = findings.Count(f => f.Severity == VulnerabilitySeverity.None),
            Unknown = findings.Count(f => f.Severity == VulnerabilitySeverity.Unknown)
        };
    }
}
/// <summary>
/// Result of comparing vulnerability findings.
/// </summary>
/// <remarks>
/// Accuracy metrics are expressed as percentages (0-100) and are relative to
/// the baseline tool, not ground truth.
/// </remarks>
public sealed class VulnerabilityComparisonResult
{
    /// <summary>Tool treated as the reference set.</summary>
    public required string BaselineTool { get; init; }
    /// <summary>Tool evaluated against the baseline.</summary>
    public required string CandidateTool { get; init; }
    /// <summary>Image the findings were produced for.</summary>
    public required string Image { get; init; }
    /// <summary>True when extraction and comparison completed without error.</summary>
    public bool Success { get; set; }
    /// <summary>Error message when <see cref="Success"/> is false.</summary>
    public string? Error { get; set; }
    // CVE counts
    public int BaselineCveCount { get; set; }
    public int CandidateCveCount { get; set; }
    // Matching (by CVE id): baseline-only = candidate misses; candidate-only =
    // potential false positives; MatchingCves holds the baseline-side findings.
    public List<ExtractedFinding> OnlyInBaseline { get; set; } = [];
    public List<ExtractedFinding> OnlyInCandidate { get; set; } = [];
    public List<ExtractedFinding> MatchingCves { get; set; } = [];
    // Accuracy metrics (relative to baseline)
    public double Recall { get; set; }
    public double Precision { get; set; }
    public double F1Score { get; set; }
    public double FalsePositiveRate { get; set; }
    public double FalseNegativeRate { get; set; }
    // Severity distribution
    public SeverityDistribution BaselineSeverityDistribution { get; set; } = new();
    public SeverityDistribution CandidateSeverityDistribution { get; set; } = new();
}
/// <summary>
/// Extracted vulnerability finding.
/// </summary>
public sealed class ExtractedFinding
{
    /// <summary>Vulnerability identifier (e.g. CVE id) used for cross-scanner matching.</summary>
    public required string CveId { get; init; }
    /// <summary>Normalized severity of the finding.</summary>
    public required VulnerabilitySeverity Severity { get; init; }
    /// <summary>Name of the affected package (empty when the scanner did not report it).</summary>
    public required string PackageName { get; init; }
    /// <summary>Installed version of the affected package (empty when not reported).</summary>
    public required string PackageVersion { get; init; }
    /// <summary>Version(s) that fix the vulnerability, when the scanner reports one.</summary>
    public string? FixedVersion { get; set; }
    /// <summary>CVSS score, when available.</summary>
    public double? CvssScore { get; set; }
    /// <summary>Human-readable one-line description for logs and reports.</summary>
    public override string ToString() => $"{CveId} ({Severity}) in {PackageName}@{PackageVersion}";
}
/// <summary>
/// Severity distribution counts.
/// </summary>
public sealed class SeverityDistribution
{
    /// <summary>Number of Critical findings.</summary>
    public int Critical { get; set; }
    /// <summary>Number of High findings.</summary>
    public int High { get; set; }
    /// <summary>Number of Medium findings.</summary>
    public int Medium { get; set; }
    /// <summary>Number of Low findings.</summary>
    public int Low { get; set; }
    /// <summary>Number of findings marked negligible/none.</summary>
    public int None { get; set; }
    /// <summary>Number of findings with unrecognized or missing severity.</summary>
    public int Unknown { get; set; }
    /// <summary>Sum of all severity buckets.</summary>
    public int Total => Critical + High + Medium + Low + None + Unknown;
}
/// <summary>
/// Vulnerability severity levels.
/// </summary>
/// <remarks>
/// Numeric values are ordered so that higher = more severe, allowing
/// comparisons such as <c>severity >= VulnerabilitySeverity.High</c>.
/// </remarks>
public enum VulnerabilitySeverity
{
    /// <summary>Severity missing or not recognized.</summary>
    Unknown = 0,
    /// <summary>Negligible / no impact.</summary>
    None = 1,
    /// <summary>Low severity.</summary>
    Low = 2,
    /// <summary>Medium severity.</summary>
    Medium = 3,
    /// <summary>High severity.</summary>
    High = 4,
    /// <summary>Critical severity.</summary>
    Critical = 5
}