sprints work

This commit is contained in:
StellaOps Bot
2025-12-25 12:19:12 +02:00
parent 223843f1d1
commit 2a06f780cf
224 changed files with 41796 additions and 1515 deletions

View File

@@ -0,0 +1,118 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Options;
namespace StellaOps.Signals.EvidenceWeightedScore;
/// <summary>
/// Extension methods for registering Evidence-Weighted Scoring services.
/// </summary>
public static class EvidenceWeightedScoringExtensions
{
/// <summary>
/// Adds Evidence-Weighted Scoring services to the service collection.
/// </summary>
/// <param name="services">The service collection.</param>
/// <returns>The service collection for chaining.</returns>
public static IServiceCollection AddEvidenceWeightedScoring(this IServiceCollection services)
{
return services.AddEvidenceWeightedScoring(_ => { });
}
/// <summary>
/// Adds Evidence-Weighted Scoring services to the service collection with configuration.
/// </summary>
/// <param name="services">The service collection.</param>
/// <param name="configure">Configuration action for options.</param>
/// <returns>The service collection for chaining.</returns>
public static IServiceCollection AddEvidenceWeightedScoring(
this IServiceCollection services,
Action<EvidenceWeightPolicyOptions> configure)
{
ArgumentNullException.ThrowIfNull(services);
ArgumentNullException.ThrowIfNull(configure);
// Register options with hot-reload support
services.AddOptions<EvidenceWeightPolicyOptions>()
.Configure(configure);
// Register calculator as singleton (stateless, thread-safe)
services.TryAddSingleton<IEvidenceWeightedScoreCalculator, EvidenceWeightedScoreCalculator>();
// Register policy provider
services.TryAddSingleton<IEvidenceWeightPolicyProvider>(sp =>
{
var optionsMonitor = sp.GetRequiredService<IOptionsMonitor<EvidenceWeightPolicyOptions>>();
return new OptionsEvidenceWeightPolicyProvider(optionsMonitor);
});
// Register TimeProvider if not already registered
services.TryAddSingleton(TimeProvider.System);
return services;
}
/// <summary>
/// Adds Evidence-Weighted Scoring services with a custom policy provider.
/// </summary>
/// <typeparam name="TProvider">The policy provider type.</typeparam>
/// <param name="services">The service collection.</param>
/// <returns>The service collection for chaining.</returns>
public static IServiceCollection AddEvidenceWeightedScoring<TProvider>(this IServiceCollection services)
where TProvider : class, IEvidenceWeightPolicyProvider
{
ArgumentNullException.ThrowIfNull(services);
// Register calculator as singleton
services.TryAddSingleton<IEvidenceWeightedScoreCalculator, EvidenceWeightedScoreCalculator>();
// Register custom policy provider
services.TryAddSingleton<IEvidenceWeightPolicyProvider, TProvider>();
// Register TimeProvider if not already registered
services.TryAddSingleton(TimeProvider.System);
return services;
}
/// <summary>
/// Adds Evidence-Weighted Scoring services with an in-memory policy.
/// Useful for testing or simple deployments.
/// </summary>
/// <param name="services">The service collection.</param>
/// <param name="policy">The policy to use.</param>
/// <returns>The service collection for chaining.</returns>
public static IServiceCollection AddEvidenceWeightedScoringWithPolicy(
this IServiceCollection services,
EvidenceWeightPolicy policy)
{
ArgumentNullException.ThrowIfNull(services);
ArgumentNullException.ThrowIfNull(policy);
// Register calculator as singleton
services.TryAddSingleton<IEvidenceWeightedScoreCalculator, EvidenceWeightedScoreCalculator>();
// Register in-memory provider with the given policy
var provider = new InMemoryEvidenceWeightPolicyProvider();
provider.SetPolicy(policy);
services.TryAddSingleton<IEvidenceWeightPolicyProvider>(provider);
// Register TimeProvider if not already registered
services.TryAddSingleton(TimeProvider.System);
return services;
}
/// <summary>
/// Adds Evidence-Weighted Scoring services with default production policy.
/// </summary>
/// <param name="services">The service collection.</param>
/// <returns>The service collection for chaining.</returns>
public static IServiceCollection AddEvidenceWeightedScoringWithDefaults(this IServiceCollection services)
{
return services.AddEvidenceWeightedScoringWithPolicy(EvidenceWeightPolicy.DefaultProduction);
}
}
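A host-wiring sketch (illustrative only, not part of this diff): the callback overload plugs into a standard generic host; the values set inside the callback are placeholders, since the members of EvidenceWeightPolicyOptions are not shown here.

using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using StellaOps.Signals.EvidenceWeightedScore;

var builder = Host.CreateApplicationBuilder(args);

// Registers the calculator, the options-backed policy provider, and TimeProvider.
// The callback feeds IOptionsMonitor<EvidenceWeightPolicyOptions>, so later option changes are observed.
builder.Services.AddEvidenceWeightedScoring(options =>
{
    // adjust EvidenceWeightPolicyOptions here (member names omitted on purpose)
});

using var host = builder.Build();
var calculator = host.Services.GetRequiredService<IEvidenceWeightedScoreCalculator>();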

View File

@@ -0,0 +1,189 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using Microsoft.Extensions.Options;
namespace StellaOps.Signals.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Normalizes backport evidence to a [0, 1] BKP score.
/// Higher scores indicate stronger evidence that a vulnerability has been fixed.
/// </summary>
/// <remarks>
/// Evidence tiers (from weakest to strongest):
/// - None: No backport evidence (0.00)
/// - Heuristic: Changelog mention, commit patterns (0.45-0.60)
/// - PatchSignature: Patch-graph signature match (0.70-0.85)
/// - BinaryDiff: Binary-level diff confirmation (0.80-0.92)
/// - VendorVex: Vendor-issued VEX statement (0.85-0.95)
/// - SignedProof: Cryptographically signed proof (0.90-1.00)
///
/// Multiple evidence tiers provide a combination bonus (up to 0.05).
/// </remarks>
public sealed class BackportEvidenceNormalizer : IEvidenceNormalizer<BackportInput>
{
private readonly BackportNormalizerOptions _options;
/// <summary>
/// Initializes a new instance of <see cref="BackportEvidenceNormalizer"/>.
/// </summary>
public BackportEvidenceNormalizer(IOptionsMonitor<NormalizerOptions> options)
{
ArgumentNullException.ThrowIfNull(options);
_options = options.CurrentValue.Backport;
}
/// <summary>
/// Initializes a new instance with explicit options (for testing).
/// </summary>
internal BackportEvidenceNormalizer(BackportNormalizerOptions options)
{
ArgumentNullException.ThrowIfNull(options);
_options = options;
}
/// <inheritdoc />
public string Dimension => "BKP";
/// <inheritdoc />
public double Normalize(BackportInput input)
{
ArgumentNullException.ThrowIfNull(input);
return CalculateScore(input);
}
/// <inheritdoc />
public NormalizationResult NormalizeWithDetails(BackportInput input)
{
ArgumentNullException.ThrowIfNull(input);
var score = CalculateScore(input);
var explanation = GenerateExplanation(input, score);
var components = BuildComponents(input);
return NormalizationResult.WithComponents(score, Dimension, explanation, components);
}
private double CalculateScore(BackportInput input)
{
// Status handling: Fixed or NotAffected = high confidence
if (input.Status == BackportStatus.NotAffected)
{
return CalculateNotAffectedScore(input);
}
if (input.Status == BackportStatus.Fixed)
{
return CalculateFixedScore(input);
}
if (input.Status == BackportStatus.Affected || input.Status == BackportStatus.UnderInvestigation)
{
// Affected = no backport protection; use base score from evidence tier
return CalculateTierBaseScore(input.EvidenceTier, input.Confidence);
}
// Unknown status - rely on evidence tier and confidence
return CalculateTierBaseScore(input.EvidenceTier, input.Confidence);
}
private double CalculateNotAffectedScore(BackportInput input)
{
// NotAffected with high-tier evidence = very high score
var baseScore = GetTierRange(input.EvidenceTier).Min;
var tierBonus = (GetTierRange(input.EvidenceTier).Max - baseScore) * input.Confidence;
var statusBonus = 0.10; // Bonus for NotAffected status
return Math.Min(1.0, baseScore + tierBonus + statusBonus);
}
private double CalculateFixedScore(BackportInput input)
{
// Fixed status = confirmed backport; score based on evidence tier
var (min, max) = GetTierRange(input.EvidenceTier);
var baseScore = min;
var tierBonus = (max - min) * input.Confidence;
return Math.Min(1.0, baseScore + tierBonus);
}
private double CalculateTierBaseScore(BackportEvidenceTier tier, double confidence)
{
if (tier == BackportEvidenceTier.None)
return 0.0;
var (min, max) = GetTierRange(tier);
return min + (max - min) * confidence;
}
private (double Min, double Max) GetTierRange(BackportEvidenceTier tier)
{
return tier switch
{
BackportEvidenceTier.None => _options.Tier0Range, // (0.00, 0.10)
BackportEvidenceTier.Heuristic => _options.Tier1Range, // (0.45, 0.60)
BackportEvidenceTier.PatchSignature => _options.Tier2Range, // (0.70, 0.85)
BackportEvidenceTier.BinaryDiff => _options.Tier3Range, // (0.80, 0.92)
BackportEvidenceTier.VendorVex => _options.Tier4Range, // (0.85, 0.95)
BackportEvidenceTier.SignedProof => _options.Tier5Range, // (0.90, 1.00)
_ => _options.Tier0Range
};
}
private string GenerateExplanation(BackportInput input, double score)
{
if (input.EvidenceTier == BackportEvidenceTier.None)
return "No backport evidence available.";
var statusDesc = input.Status switch
{
BackportStatus.Fixed => "Fixed",
BackportStatus.NotAffected => "Not affected",
BackportStatus.Affected => "Affected",
BackportStatus.UnderInvestigation => "Under investigation",
_ => "Unknown status"
};
var tierDesc = input.EvidenceTier switch
{
BackportEvidenceTier.Heuristic => "heuristic detection (changelog/commit patterns)",
BackportEvidenceTier.PatchSignature => "patch signature match",
BackportEvidenceTier.BinaryDiff => "binary diff confirmation",
BackportEvidenceTier.VendorVex => "vendor VEX statement",
BackportEvidenceTier.SignedProof => "cryptographically signed proof",
_ => "unknown evidence"
};
var confidenceDesc = input.Confidence switch
{
>= 0.9 => "very high",
>= 0.7 => "high",
>= 0.5 => "moderate",
>= 0.3 => "low",
_ => "very low"
};
var proofInfo = !string.IsNullOrEmpty(input.ProofId)
? $" (proof: {input.ProofId})"
: "";
return $"{statusDesc} via {tierDesc} with {confidenceDesc} confidence ({input.Confidence:P0}){proofInfo}. BKP = {score:F2}.";
}
private Dictionary<string, double> BuildComponents(BackportInput input)
{
var components = new Dictionary<string, double>
{
["tier_base"] = GetTierRange(input.EvidenceTier).Min,
["confidence"] = input.Confidence,
["tier_ordinal"] = (int)input.EvidenceTier
};
if (input.Status == BackportStatus.NotAffected)
{
components["status_bonus"] = 0.10;
}
return components;
}
}
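Worked example with the default tier ranges noted above: a Fixed finding backed by a PatchSignature match at 0.8 confidence uses the Tier 2 range (0.70, 0.85), giving 0.70 + (0.85 - 0.70) * 0.8 = 0.82; the same evidence with a NotAffected status adds the 0.10 status bonus for BKP = 0.92, always capped at 1.0.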

View File

@@ -0,0 +1,105 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
namespace StellaOps.Signals.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Extension methods for registering evidence normalizer services.
/// </summary>
public static class EvidenceNormalizersServiceCollectionExtensions
{
/// <summary>
/// Adds all evidence normalizer services to the DI container.
/// </summary>
/// <param name="services">The service collection.</param>
/// <returns>The service collection for chaining.</returns>
public static IServiceCollection AddEvidenceNormalizers(this IServiceCollection services)
{
return services.AddEvidenceNormalizers(_ => { });
}
/// <summary>
/// Adds all evidence normalizer services to the DI container with custom options configuration.
/// </summary>
/// <param name="services">The service collection.</param>
/// <param name="configure">Action to configure normalizer options.</param>
/// <returns>The service collection for chaining.</returns>
public static IServiceCollection AddEvidenceNormalizers(
this IServiceCollection services,
Action<NormalizerOptions> configure)
{
ArgumentNullException.ThrowIfNull(services);
ArgumentNullException.ThrowIfNull(configure);
// Register options with default values and apply configuration
services.AddOptions<NormalizerOptions>()
.Configure(configure);
// Register individual normalizers
services.TryAddSingleton<IEvidenceNormalizer<ReachabilityInput>, ReachabilityNormalizer>();
services.TryAddSingleton<IEvidenceNormalizer<RuntimeInput>, RuntimeSignalNormalizer>();
services.TryAddSingleton<IEvidenceNormalizer<BackportInput>, BackportEvidenceNormalizer>();
services.TryAddSingleton<IEvidenceNormalizer<ExploitInput>, ExploitLikelihoodNormalizer>();
services.TryAddSingleton<IEvidenceNormalizer<SourceTrustInput>, SourceTrustNormalizer>();
services.TryAddSingleton<IEvidenceNormalizer<MitigationInput>, MitigationNormalizer>();
// Register the aggregator
services.TryAddSingleton<INormalizerAggregator, NormalizerAggregator>();
return services;
}
/// <summary>
/// Adds all evidence normalizer services with configuration binding from appsettings.
/// </summary>
/// <param name="services">The service collection.</param>
/// <param name="configuration">The configuration root.</param>
/// <param name="sectionName">The configuration section name (default: "EvidenceNormalizers").</param>
/// <returns>The service collection for chaining.</returns>
public static IServiceCollection AddEvidenceNormalizers(
this IServiceCollection services,
IConfiguration configuration,
string sectionName = "EvidenceNormalizers")
{
ArgumentNullException.ThrowIfNull(services);
ArgumentNullException.ThrowIfNull(configuration);
// Bind options from configuration
var section = configuration.GetSection(sectionName);
services.AddOptions<NormalizerOptions>()
.Bind(section)
.ValidateOnStart();
// Register individual normalizers
services.TryAddSingleton<IEvidenceNormalizer<ReachabilityInput>, ReachabilityNormalizer>();
services.TryAddSingleton<IEvidenceNormalizer<RuntimeInput>, RuntimeSignalNormalizer>();
services.TryAddSingleton<IEvidenceNormalizer<BackportInput>, BackportEvidenceNormalizer>();
services.TryAddSingleton<IEvidenceNormalizer<ExploitInput>, ExploitLikelihoodNormalizer>();
services.TryAddSingleton<IEvidenceNormalizer<SourceTrustInput>, SourceTrustNormalizer>();
services.TryAddSingleton<IEvidenceNormalizer<MitigationInput>, MitigationNormalizer>();
// Register the aggregator
services.TryAddSingleton<INormalizerAggregator, NormalizerAggregator>();
return services;
}
/// <summary>
/// Adds the evidence normalizer aggregator only.
/// Use this when individual normalizers are already registered.
/// </summary>
/// <param name="services">The service collection.</param>
/// <returns>The service collection for chaining.</returns>
public static IServiceCollection AddNormalizerAggregator(this IServiceCollection services)
{
ArgumentNullException.ThrowIfNull(services);
services.TryAddSingleton<INormalizerAggregator, NormalizerAggregator>();
return services;
}
}
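A registration sketch (illustrative only, assuming services and configuration variables are in scope): the configuration-bound overload pulls NormalizerOptions from an appsettings section, while the callback overload overrides individual values in code; the override shown is an arbitrary example.

// Bind from configuration (section "EvidenceNormalizers" unless another name is passed).
services.AddEvidenceNormalizers(configuration);

// Or configure programmatically, e.g. raising the KEV floor used by the exploit normalizer.
services.AddEvidenceNormalizers(options =>
{
    options.Exploit.KevFloor = 0.50;
});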

View File

@@ -0,0 +1,189 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using Microsoft.Extensions.Options;
namespace StellaOps.Signals.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Normalizes exploit likelihood evidence to a [0, 1] XPL score.
/// Combines EPSS (Exploit Prediction Scoring System) with KEV (Known Exploited Vulnerabilities) status.
/// </summary>
/// <remarks>
/// Scoring logic:
/// - KEV presence establishes a floor (default 0.40) - actively exploited vulnerabilities are high risk
/// - EPSS percentile maps to score bands:
/// - Top 1% (≥99th percentile): 0.90-1.00
/// - Top 5% (≥95th percentile): 0.70-0.89
/// - Top 25% (≥75th percentile): 0.40-0.69
/// - Below 75th percentile: 0.20-0.39
/// - Missing EPSS data: neutral score (default 0.30)
/// - Public exploit availability adds a bonus
/// - Final score is max(KEV floor, EPSS-based score) plus any exploit bonus, capped at 1.0
/// </remarks>
public sealed class ExploitLikelihoodNormalizer : IEvidenceNormalizer<ExploitInput>
{
private readonly ExploitNormalizerOptions _options;
/// <summary>
/// Initializes a new instance of <see cref="ExploitLikelihoodNormalizer"/>.
/// </summary>
public ExploitLikelihoodNormalizer(IOptionsMonitor<NormalizerOptions> options)
{
ArgumentNullException.ThrowIfNull(options);
_options = options.CurrentValue.Exploit;
}
/// <summary>
/// Initializes a new instance with explicit options (for testing).
/// </summary>
internal ExploitLikelihoodNormalizer(ExploitNormalizerOptions options)
{
ArgumentNullException.ThrowIfNull(options);
_options = options;
}
/// <inheritdoc />
public string Dimension => "XPL";
/// <inheritdoc />
public double Normalize(ExploitInput input)
{
ArgumentNullException.ThrowIfNull(input);
return CalculateScore(input);
}
/// <inheritdoc />
public NormalizationResult NormalizeWithDetails(ExploitInput input)
{
ArgumentNullException.ThrowIfNull(input);
var score = CalculateScore(input);
var explanation = GenerateExplanation(input, score);
var components = BuildComponents(input);
return NormalizationResult.WithComponents(score, Dimension, explanation, components);
}
private double CalculateScore(ExploitInput input)
{
var epssScore = CalculateEpssScore(input);
var kevFloor = GetKevFloor(input);
var exploitBonus = input.PublicExploitAvailable ? 0.10 : 0.0;
// Final score is max of KEV floor and EPSS score, plus exploit availability bonus
return Math.Min(1.0, Math.Max(kevFloor, epssScore) + exploitBonus);
}
private double CalculateEpssScore(ExploitInput input)
{
// EPSS percentile is in range [0, 100]
var percentile = input.EpssPercentile;
// Convert percentile (0-100) to fraction (0-1) for threshold comparison
var percentileFraction = percentile / 100.0;
if (percentileFraction >= _options.Top1PercentThreshold)
{
// Top 1%: highest risk band
return InterpolateInRange(percentileFraction, _options.Top1PercentThreshold, 1.0, _options.Top1PercentRange);
}
if (percentileFraction >= _options.Top5PercentThreshold)
{
// Top 5%: high risk band
return InterpolateInRange(percentileFraction, _options.Top5PercentThreshold, _options.Top1PercentThreshold, _options.Top5PercentRange);
}
if (percentileFraction >= _options.Top25PercentThreshold)
{
// Top 25%: moderate risk band
return InterpolateInRange(percentileFraction, _options.Top25PercentThreshold, _options.Top5PercentThreshold, _options.Top25PercentRange);
}
// Below 75th percentile: lower risk
return InterpolateInRange(percentileFraction, 0.0, _options.Top25PercentThreshold, _options.LowerPercentRange);
}
private static double InterpolateInRange(double value, double rangeMin, double rangeMax, (double Low, double High) scoreRange)
{
if (rangeMax <= rangeMin)
return scoreRange.Low;
var normalizedPosition = (value - rangeMin) / (rangeMax - rangeMin);
return scoreRange.Low + (scoreRange.High - scoreRange.Low) * normalizedPosition;
}
private double GetKevFloor(ExploitInput input)
{
return input.KevStatus switch
{
KevStatus.InKev => _options.KevFloor,
KevStatus.RemovedFromKev => _options.KevFloor * 0.5, // Reduced but still elevated
KevStatus.NotInKev => 0.0,
_ => 0.0
};
}
private string GenerateExplanation(ExploitInput input, double score)
{
var parts = new List<string>();
// EPSS description
var epssDesc = input.EpssPercentile switch
{
>= 99.0 => $"Very high EPSS ({input.EpssScore:P1}, top 1%)",
>= 95.0 => $"High EPSS ({input.EpssScore:P1}, top 5%)",
>= 75.0 => $"Moderate EPSS ({input.EpssScore:P1}, top 25%)",
>= 50.0 => $"Low EPSS ({input.EpssScore:P1})",
_ => $"Very low EPSS ({input.EpssScore:P1})"
};
parts.Add(epssDesc);
// KEV status
if (input.KevStatus == KevStatus.InKev)
{
var kevInfo = "actively exploited (KEV)";
if (input.KevAddedDate.HasValue)
kevInfo += $", added {input.KevAddedDate.Value:yyyy-MM-dd}";
if (input.KevDueDate.HasValue)
kevInfo += $", due {input.KevDueDate.Value:yyyy-MM-dd}";
parts.Add(kevInfo);
}
else if (input.KevStatus == KevStatus.RemovedFromKev)
{
parts.Add("previously in KEV (removed)");
}
// Public exploit
if (input.PublicExploitAvailable)
{
var maturityInfo = !string.IsNullOrEmpty(input.ExploitMaturity)
? $" ({input.ExploitMaturity})"
: "";
parts.Add($"public exploit available{maturityInfo}");
}
var explanation = string.Join("; ", parts);
return $"{explanation}. XPL = {score:F2}.";
}
private Dictionary<string, double> BuildComponents(ExploitInput input)
{
var components = new Dictionary<string, double>
{
["epss_score"] = input.EpssScore,
["epss_percentile"] = input.EpssPercentile,
["epss_based_score"] = CalculateEpssScore(input),
["kev_floor"] = GetKevFloor(input),
["kev_status"] = (int)input.KevStatus
};
if (input.PublicExploitAvailable)
{
components["exploit_bonus"] = 0.10;
}
return components;
}
}
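Worked example with the default thresholds: an EPSS percentile of 97 falls in the top-5% band (0.70, 0.89) at position (0.97 - 0.95) / (0.99 - 0.95) = 0.5, giving an EPSS-based score of 0.795; with no KEV listing the floor is 0.0, and an available public exploit adds 0.10, so XPL = min(1.0, 0.795 + 0.10) ≈ 0.90.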

View File

@@ -0,0 +1,91 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
namespace StellaOps.Signals.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Result of a normalization operation with detailed breakdown.
/// </summary>
/// <param name="Score">Normalized score [0, 1].</param>
/// <param name="Dimension">Dimension name (e.g., "Reachability", "Runtime").</param>
/// <param name="Explanation">Human-readable explanation of the normalization.</param>
/// <param name="Components">Breakdown of individual contributing factors.</param>
public sealed record NormalizationResult(
double Score,
string Dimension,
string Explanation,
IReadOnlyDictionary<string, double> Components)
{
/// <summary>
/// Creates a simple result with no component breakdown.
/// </summary>
public static NormalizationResult Simple(double score, string dimension, string explanation) =>
new(score, dimension, explanation, new Dictionary<string, double>());
/// <summary>
/// Creates a result with component breakdown.
/// </summary>
public static NormalizationResult WithComponents(
double score,
string dimension,
string explanation,
Dictionary<string, double> components) =>
new(score, dimension, explanation, new Dictionary<string, double>(components));
}
/// <summary>
/// Normalizes raw evidence to a [0, 1] score for evidence-weighted scoring.
/// Each implementation bridges a specific data source to the unified scoring model.
/// </summary>
/// <typeparam name="TInput">The raw evidence input type.</typeparam>
public interface IEvidenceNormalizer<in TInput>
{
/// <summary>
/// Gets the dimension name this normalizer produces (e.g., "RCH", "RTS", "BKP").
/// </summary>
string Dimension { get; }
/// <summary>
/// Normalizes raw evidence to a [0, 1] score.
/// </summary>
/// <param name="input">The raw evidence to normalize.</param>
/// <returns>A score in range [0, 1] where higher = stronger evidence.</returns>
double Normalize(TInput input);
/// <summary>
/// Normalizes raw evidence with detailed breakdown.
/// </summary>
/// <param name="input">The raw evidence to normalize.</param>
/// <returns>Detailed normalization result including explanation and components.</returns>
NormalizationResult NormalizeWithDetails(TInput input);
}
/// <summary>
/// Extension methods for normalizers.
/// </summary>
public static class NormalizerExtensions
{
/// <summary>
/// Normalizes input and clamps result to [0, 1].
/// </summary>
public static double NormalizeClamped<TInput>(this IEvidenceNormalizer<TInput> normalizer, TInput input) =>
Math.Clamp(normalizer.Normalize(input), 0.0, 1.0);
/// <summary>
/// Normalizes multiple inputs and returns average.
/// </summary>
public static double NormalizeAverage<TInput>(this IEvidenceNormalizer<TInput> normalizer, IEnumerable<TInput> inputs)
{
var scores = inputs.Select(normalizer.NormalizeClamped).ToList();
return scores.Count == 0 ? 0.0 : scores.Average();
}
/// <summary>
/// Normalizes multiple inputs and returns maximum.
/// </summary>
public static double NormalizeMax<TInput>(this IEvidenceNormalizer<TInput> normalizer, IEnumerable<TInput> inputs)
{
var scores = inputs.Select(normalizer.NormalizeClamped).ToList();
return scores.Count == 0 ? 0.0 : scores.Max();
}
}
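A minimal contract sketch (illustrative only, not part of the commit): the smallest possible normalizer over a raw double, which also shows the clamping extension helpers in use.

public sealed class PassThroughNormalizer : IEvidenceNormalizer<double>
{
    public string Dimension => "DEMO";

    public double Normalize(double input) => Math.Clamp(input, 0.0, 1.0);

    public NormalizationResult NormalizeWithDetails(double input) =>
        NormalizationResult.Simple(Normalize(input), Dimension, $"Pass-through of {input:F2}.");
}

// Usage: new PassThroughNormalizer().NormalizeMax(new[] { 0.2, 0.7, 1.4 }) clamps 1.4 to 1.0
// and returns 1.0; NormalizeAverage(new[] { 0.2, 0.6 }) returns 0.40.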

View File

@@ -0,0 +1,96 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
namespace StellaOps.Signals.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Aggregated evidence from all sources for a single finding.
/// Used as input to the normalizer aggregator.
/// Maps to existing detailed input types from EvidenceWeightedScoreInput.
/// </summary>
public sealed record FindingEvidence
{
/// <summary>Finding identifier (CVE@PURL format).</summary>
public required string FindingId { get; init; }
/// <summary>Reachability evidence (maps to ReachabilityInput).</summary>
public ReachabilityInput? Reachability { get; init; }
/// <summary>Runtime signal evidence (maps to RuntimeInput).</summary>
public RuntimeInput? Runtime { get; init; }
/// <summary>Backport/patch evidence (maps to BackportInput).</summary>
public BackportInput? Backport { get; init; }
/// <summary>Exploit likelihood evidence (maps to ExploitInput).</summary>
public ExploitInput? Exploit { get; init; }
/// <summary>Source trust evidence (maps to SourceTrustInput).</summary>
public SourceTrustInput? SourceTrust { get; init; }
/// <summary>Active mitigations evidence (maps to MitigationInput).</summary>
public MitigationInput? Mitigations { get; init; }
/// <summary>
/// Creates FindingEvidence from an existing EvidenceWeightedScoreInput.
/// Extracts the detailed input records if present.
/// </summary>
public static FindingEvidence FromScoreInput(EvidenceWeightedScoreInput input) =>
new()
{
FindingId = input.FindingId,
Reachability = input.ReachabilityDetails,
Runtime = input.RuntimeDetails,
Backport = input.BackportDetails,
Exploit = input.ExploitDetails,
SourceTrust = input.SourceTrustDetails,
Mitigations = input.MitigationDetails
};
}
/// <summary>
/// Aggregates all normalizers to produce unified evidence-weighted score input.
/// </summary>
public interface INormalizerAggregator
{
/// <summary>
/// Aggregates all evidence for a finding into normalized input.
/// Retrieves evidence data asynchronously from configured sources.
/// </summary>
/// <param name="findingId">The finding identifier (CVE@PURL format).</param>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>Fully populated evidence-weighted score input.</returns>
Task<EvidenceWeightedScoreInput> AggregateAsync(
string findingId,
CancellationToken cancellationToken = default);
/// <summary>
/// Aggregates pre-loaded evidence into normalized input.
/// Use when evidence has already been retrieved.
/// </summary>
/// <param name="evidence">Pre-loaded evidence for the finding.</param>
/// <returns>Fully populated evidence-weighted score input.</returns>
EvidenceWeightedScoreInput Aggregate(FindingEvidence evidence);
/// <summary>
/// Aggregates with detailed breakdown for all dimensions.
/// </summary>
/// <param name="evidence">Pre-loaded evidence for the finding.</param>
/// <returns>Input with detailed normalization results.</returns>
AggregationResult AggregateWithDetails(FindingEvidence evidence);
}
/// <summary>
/// Detailed aggregation result including all normalization breakdowns.
/// </summary>
public sealed record AggregationResult
{
/// <summary>The normalized input values.</summary>
public required EvidenceWeightedScoreInput Input { get; init; }
/// <summary>Detailed normalization results per dimension.</summary>
public required IReadOnlyDictionary<string, NormalizationResult> Details { get; init; }
/// <summary>Any warnings or issues during normalization.</summary>
public IReadOnlyList<string> Warnings { get; init; } = [];
}
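A consumption sketch (illustrative only, assuming an INormalizerAggregator named aggregator has already been obtained, e.g. from DI): with no detail records attached, every dimension falls back to its configured default.

var evidence = new FindingEvidence
{
    FindingId = "CVE-2025-12345@pkg:npm/example@1.2.3"  // hypothetical finding id
    // detail inputs (Reachability, Runtime, Backport, ...) deliberately left null
};
EvidenceWeightedScoreInput input = aggregator.Aggregate(evidence);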

View File

@@ -0,0 +1,192 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using Microsoft.Extensions.Options;
namespace StellaOps.Signals.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Normalizes mitigation evidence to a [0, 1] MIT score.
/// Higher scores indicate stronger mitigations that reduce exploitability.
/// </summary>
/// <remarks>
/// Mitigation types and typical effectiveness:
/// - FeatureFlag: Code disabled (0.20-0.40)
/// - AuthRequired: Authentication requirement (0.10-0.20)
/// - AdminOnly: Admin-only access (0.15-0.25)
/// - NonDefaultConfig: Non-default configuration (0.15-0.30)
/// - SecurityPolicy: Seccomp/AppArmor/SELinux (0.10-0.25)
/// - Isolation: Container/sandbox isolation (0.10-0.20)
/// - NetworkControl: Network-level controls (0.05-0.15)
/// - InputValidation: Rate limiting/validation (0.05-0.10)
/// - VirtualPatch: IDS/IPS rules (0.10-0.20)
/// - ComponentRemoval: Vulnerable component removed (0.80-1.00)
///
/// Multiple mitigations are summed, capped at 1.0.
/// Verified mitigations receive a confidence bonus.
/// </remarks>
public sealed class MitigationNormalizer : IEvidenceNormalizer<MitigationInput>
{
private readonly MitigationNormalizerOptions _options;
/// <summary>
/// Initializes a new instance of <see cref="MitigationNormalizer"/>.
/// </summary>
public MitigationNormalizer(IOptionsMonitor<NormalizerOptions> options)
{
ArgumentNullException.ThrowIfNull(options);
_options = options.CurrentValue.Mitigation;
}
/// <summary>
/// Initializes a new instance with explicit options (for testing).
/// </summary>
internal MitigationNormalizer(MitigationNormalizerOptions options)
{
ArgumentNullException.ThrowIfNull(options);
_options = options;
}
/// <inheritdoc />
public string Dimension => "MIT";
/// <inheritdoc />
public double Normalize(MitigationInput input)
{
ArgumentNullException.ThrowIfNull(input);
return CalculateScore(input);
}
/// <inheritdoc />
public NormalizationResult NormalizeWithDetails(MitigationInput input)
{
ArgumentNullException.ThrowIfNull(input);
var score = CalculateScore(input);
var explanation = GenerateExplanation(input, score);
var components = BuildComponents(input);
return NormalizationResult.WithComponents(score, Dimension, explanation, components);
}
private double CalculateScore(MitigationInput input)
{
var runtimeBonus = input.RuntimeVerified ? _options.VerificationBonus : 0.0;
// If pre-computed combined effectiveness is provided, validate and use it
if (input.CombinedEffectiveness > 0.0)
{
var validatedEffectiveness = Math.Min(input.CombinedEffectiveness, _options.MaxTotalMitigation);
return Math.Min(1.0, validatedEffectiveness + runtimeBonus);
}
// Calculate from active mitigations
if (input.ActiveMitigations.Count == 0)
return 0.0;
var totalEffectiveness = CalculateTotalEffectiveness(input.ActiveMitigations);
return Math.Min(1.0, totalEffectiveness + runtimeBonus);
}
private double CalculateTotalEffectiveness(IReadOnlyList<ActiveMitigation> mitigations)
{
var total = 0.0;
foreach (var mitigation in mitigations)
{
var effectiveness = mitigation.Effectiveness;
// Apply verification bonus at individual mitigation level
if (mitigation.Verified)
{
effectiveness += _options.VerificationBonus * 0.5; // Half bonus at individual level
}
total += effectiveness;
}
// Cap at max total mitigation
return Math.Min(total, _options.MaxTotalMitigation);
}
private (double Low, double High) GetEffectivenessRange(MitigationType type)
{
return type switch
{
MitigationType.FeatureFlag => _options.FeatureFlagEffectiveness,
MitigationType.AuthRequired => _options.AuthRequiredEffectiveness,
MitigationType.SecurityPolicy => _options.SeccompEffectiveness, // SELinux/AppArmor/seccomp
MitigationType.Isolation => _options.NetworkIsolationEffectiveness, // Reuse range
MitigationType.InputValidation => _options.ReadOnlyFsEffectiveness, // Reuse range
MitigationType.NetworkControl => _options.NetworkIsolationEffectiveness,
MitigationType.VirtualPatch => _options.AuthRequiredEffectiveness, // Similar range
MitigationType.ComponentRemoval => (0.80, 1.00), // Complete removal is very effective
MitigationType.Unknown => (0.0, 0.10),
_ => (0.0, 0.10)
};
}
private string GenerateExplanation(MitigationInput input, double score)
{
if (input.ActiveMitigations.Count == 0 && input.CombinedEffectiveness <= 0.0)
{
return "No active mitigations identified.";
}
var parts = new List<string>();
if (input.ActiveMitigations.Count > 0)
{
var mitigationDescriptions = input.ActiveMitigations
.Select(m => FormatMitigation(m))
.ToList();
parts.Add($"{input.ActiveMitigations.Count} mitigation(s): {string.Join(", ", mitigationDescriptions)}");
}
else if (input.CombinedEffectiveness > 0.0)
{
parts.Add($"Combined effectiveness: {input.CombinedEffectiveness:P0}");
}
if (input.RuntimeVerified)
{
parts.Add("runtime verified");
}
if (!string.IsNullOrEmpty(input.AssessmentSource))
{
parts.Add($"source: {input.AssessmentSource}");
}
var description = string.Join("; ", parts);
return $"{description}. MIT = {score:F2}.";
}
private static string FormatMitigation(ActiveMitigation mitigation)
{
var name = !string.IsNullOrEmpty(mitigation.Name) ? mitigation.Name : mitigation.Type.ToString();
var verified = mitigation.Verified ? " ✓" : "";
return $"{name} ({mitigation.Effectiveness:P0}{verified})";
}
private Dictionary<string, double> BuildComponents(MitigationInput input)
{
var components = new Dictionary<string, double>
{
["mitigation_count"] = input.ActiveMitigations.Count,
["combined_effectiveness"] = input.CombinedEffectiveness,
["runtime_verified"] = input.RuntimeVerified ? 1.0 : 0.0
};
// Add individual mitigation contributions
for (int i = 0; i < Math.Min(input.ActiveMitigations.Count, 5); i++)
{
var m = input.ActiveMitigations[i];
components[$"mitigation_{i}_type"] = (int)m.Type;
components[$"mitigation_{i}_effectiveness"] = m.Effectiveness;
}
return components;
}
}
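Worked example: a feature flag reported at 0.30 effectiveness and an auth requirement at 0.15 sum to 0.45 (each individually verified mitigation would add half of the 0.05 verification bonus on top); if the assessment as a whole is RuntimeVerified, the full 0.05 bonus is added for MIT = 0.50, with the pre-bonus sum capped at MaxTotalMitigation.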

View File

@@ -0,0 +1,348 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using Microsoft.Extensions.Options;
namespace StellaOps.Signals.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Aggregates all evidence normalizers to produce unified evidence-weighted score input.
/// Orchestrates the normalization of all dimensions for a finding.
/// </summary>
public sealed class NormalizerAggregator : INormalizerAggregator
{
private readonly IEvidenceNormalizer<ReachabilityInput> _reachabilityNormalizer;
private readonly IEvidenceNormalizer<RuntimeInput> _runtimeNormalizer;
private readonly IEvidenceNormalizer<BackportInput> _backportNormalizer;
private readonly IEvidenceNormalizer<ExploitInput> _exploitNormalizer;
private readonly IEvidenceNormalizer<SourceTrustInput> _sourceTrustNormalizer;
private readonly IEvidenceNormalizer<MitigationInput> _mitigationNormalizer;
private readonly NormalizerOptions _options;
/// <summary>
/// Create an aggregator with default normalizers and options.
/// </summary>
public NormalizerAggregator()
: this(new NormalizerOptions())
{
}
/// <summary>
/// Create an aggregator with specific options.
/// </summary>
public NormalizerAggregator(NormalizerOptions options)
{
_options = options ?? throw new ArgumentNullException(nameof(options));
_reachabilityNormalizer = new ReachabilityNormalizer(_options.Reachability);
_runtimeNormalizer = new RuntimeSignalNormalizer(_options.Runtime);
_backportNormalizer = new BackportEvidenceNormalizer(_options.Backport);
_exploitNormalizer = new ExploitLikelihoodNormalizer(_options.Exploit);
_sourceTrustNormalizer = new SourceTrustNormalizer(_options.SourceTrust);
_mitigationNormalizer = new MitigationNormalizer(_options.Mitigation);
}
/// <summary>
/// Create an aggregator with custom normalizers.
/// </summary>
public NormalizerAggregator(
IEvidenceNormalizer<ReachabilityInput> reachabilityNormalizer,
IEvidenceNormalizer<RuntimeInput> runtimeNormalizer,
IEvidenceNormalizer<BackportInput> backportNormalizer,
IEvidenceNormalizer<ExploitInput> exploitNormalizer,
IEvidenceNormalizer<SourceTrustInput> sourceTrustNormalizer,
IEvidenceNormalizer<MitigationInput> mitigationNormalizer,
NormalizerOptions options)
{
_reachabilityNormalizer = reachabilityNormalizer ?? throw new ArgumentNullException(nameof(reachabilityNormalizer));
_runtimeNormalizer = runtimeNormalizer ?? throw new ArgumentNullException(nameof(runtimeNormalizer));
_backportNormalizer = backportNormalizer ?? throw new ArgumentNullException(nameof(backportNormalizer));
_exploitNormalizer = exploitNormalizer ?? throw new ArgumentNullException(nameof(exploitNormalizer));
_sourceTrustNormalizer = sourceTrustNormalizer ?? throw new ArgumentNullException(nameof(sourceTrustNormalizer));
_mitigationNormalizer = mitigationNormalizer ?? throw new ArgumentNullException(nameof(mitigationNormalizer));
_options = options ?? throw new ArgumentNullException(nameof(options));
}
/// <summary>
/// Create an aggregator with DI-provided options.
/// </summary>
public NormalizerAggregator(IOptionsMonitor<NormalizerOptions> optionsMonitor)
: this(optionsMonitor?.CurrentValue ?? new NormalizerOptions())
{
}
/// <inheritdoc />
public Task<EvidenceWeightedScoreInput> AggregateAsync(
string findingId,
CancellationToken cancellationToken = default)
{
ArgumentException.ThrowIfNullOrEmpty(findingId);
// In a real implementation, this would fetch evidence from various sources
// For now, return a default input with neutral values
// The actual evidence retrieval should be implemented in a higher-level service
var defaultEvidence = new FindingEvidence
{
FindingId = findingId,
// All evidence is null - will use defaults
};
var result = Aggregate(defaultEvidence);
return Task.FromResult(result);
}
/// <inheritdoc />
public EvidenceWeightedScoreInput Aggregate(FindingEvidence evidence)
{
ArgumentNullException.ThrowIfNull(evidence);
var reachability = NormalizeReachability(evidence.Reachability);
var runtime = NormalizeRuntime(evidence.Runtime);
var backport = NormalizeBackport(evidence.Backport);
var exploit = NormalizeExploit(evidence.Exploit);
var sourceTrust = NormalizeSourceTrust(evidence.SourceTrust);
var mitigation = NormalizeMitigation(evidence.Mitigations);
return new EvidenceWeightedScoreInput
{
FindingId = evidence.FindingId,
Rch = reachability,
Rts = runtime,
Bkp = backport,
Xpl = exploit,
Src = sourceTrust,
Mit = mitigation,
ReachabilityDetails = evidence.Reachability,
RuntimeDetails = evidence.Runtime,
BackportDetails = evidence.Backport,
ExploitDetails = evidence.Exploit,
SourceTrustDetails = evidence.SourceTrust,
MitigationDetails = evidence.Mitigations
};
}
/// <inheritdoc />
public AggregationResult AggregateWithDetails(FindingEvidence evidence)
{
ArgumentNullException.ThrowIfNull(evidence);
var warnings = new List<string>();
var details = new Dictionary<string, NormalizationResult>();
// Normalize each dimension with details
var (reachability, reachabilityDetails) = NormalizeReachabilityWithDetails(evidence.Reachability, warnings);
var (runtime, runtimeDetails) = NormalizeRuntimeWithDetails(evidence.Runtime, warnings);
var (backport, backportDetails) = NormalizeBackportWithDetails(evidence.Backport, warnings);
var (exploit, exploitDetails) = NormalizeExploitWithDetails(evidence.Exploit, warnings);
var (sourceTrust, sourceTrustDetails) = NormalizeSourceTrustWithDetails(evidence.SourceTrust, warnings);
var (mitigation, mitigationDetails) = NormalizeMitigationWithDetails(evidence.Mitigations, warnings);
// Collect all details
if (reachabilityDetails != null)
details["RCH"] = reachabilityDetails;
if (runtimeDetails != null)
details["RTS"] = runtimeDetails;
if (backportDetails != null)
details["BKP"] = backportDetails;
if (exploitDetails != null)
details["XPL"] = exploitDetails;
if (sourceTrustDetails != null)
details["SRC"] = sourceTrustDetails;
if (mitigationDetails != null)
details["MIT"] = mitigationDetails;
var input = new EvidenceWeightedScoreInput
{
FindingId = evidence.FindingId,
Rch = reachability,
Rts = runtime,
Bkp = backport,
Xpl = exploit,
Src = sourceTrust,
Mit = mitigation,
ReachabilityDetails = evidence.Reachability,
RuntimeDetails = evidence.Runtime,
BackportDetails = evidence.Backport,
ExploitDetails = evidence.Exploit,
SourceTrustDetails = evidence.SourceTrust,
MitigationDetails = evidence.Mitigations
};
return new AggregationResult
{
Input = input,
Details = details,
Warnings = warnings
};
}
#region Simple Normalization Methods
private double NormalizeReachability(ReachabilityInput? input)
{
if (input == null)
return _options.Reachability.UnknownScore; // Default for unknown
return _reachabilityNormalizer.Normalize(input);
}
private double NormalizeRuntime(RuntimeInput? input)
{
if (input == null)
return _options.Runtime.UnknownScore; // Default for no runtime data
return _runtimeNormalizer.Normalize(input);
}
private double NormalizeBackport(BackportInput? input)
{
if (input == null)
return _options.Backport.Tier0Range.Min; // Default for no backport evidence
return _backportNormalizer.Normalize(input);
}
private double NormalizeExploit(ExploitInput? input)
{
if (input == null)
return _options.Exploit.NoEpssScore; // Default for no EPSS data
return _exploitNormalizer.Normalize(input);
}
private double NormalizeSourceTrust(SourceTrustInput? input)
{
if (input == null)
return 0.50; // Neutral trust for unknown sources
return _sourceTrustNormalizer.Normalize(input);
}
private double NormalizeMitigation(MitigationInput? input)
{
if (input == null)
return 0.0; // No mitigation by default
return _mitigationNormalizer.Normalize(input);
}
#endregion
#region Detailed Normalization Methods
private (double Score, NormalizationResult? Details) NormalizeReachabilityWithDetails(
ReachabilityInput? input, List<string> warnings)
{
if (input == null)
{
warnings.Add("No reachability evidence provided; using neutral score.");
return (_options.Reachability.UnknownScore, null);
}
var validationErrors = input.Validate();
if (validationErrors.Count > 0)
{
warnings.AddRange(validationErrors.Select(e => $"RCH validation: {e}"));
}
var details = _reachabilityNormalizer.NormalizeWithDetails(input);
return (details.Score, details);
}
private (double Score, NormalizationResult? Details) NormalizeRuntimeWithDetails(
RuntimeInput? input, List<string> warnings)
{
if (input == null)
{
warnings.Add("No runtime evidence provided; using zero score.");
return (_options.Runtime.UnknownScore, null);
}
var validationErrors = input.Validate();
if (validationErrors.Count > 0)
{
warnings.AddRange(validationErrors.Select(e => $"RTS validation: {e}"));
}
var details = _runtimeNormalizer.NormalizeWithDetails(input);
return (details.Score, details);
}
private (double Score, NormalizationResult? Details) NormalizeBackportWithDetails(
BackportInput? input, List<string> warnings)
{
if (input == null)
{
warnings.Add("No backport evidence provided; using minimal score.");
return (_options.Backport.Tier0Range.Min, null);
}
var validationErrors = input.Validate();
if (validationErrors.Count > 0)
{
warnings.AddRange(validationErrors.Select(e => $"BKP validation: {e}"));
}
var details = _backportNormalizer.NormalizeWithDetails(input);
return (details.Score, details);
}
private (double Score, NormalizationResult? Details) NormalizeExploitWithDetails(
ExploitInput? input, List<string> warnings)
{
if (input == null)
{
warnings.Add("No exploit likelihood evidence provided; using neutral score.");
return (_options.Exploit.NoEpssScore, null);
}
var validationErrors = input.Validate();
if (validationErrors.Count > 0)
{
warnings.AddRange(validationErrors.Select(e => $"XPL validation: {e}"));
}
var details = _exploitNormalizer.NormalizeWithDetails(input);
return (details.Score, details);
}
private (double Score, NormalizationResult? Details) NormalizeSourceTrustWithDetails(
SourceTrustInput? input, List<string> warnings)
{
if (input == null)
{
warnings.Add("No source trust evidence provided; using neutral score.");
return (0.50, null);
}
var validationErrors = input.Validate();
if (validationErrors.Count > 0)
{
warnings.AddRange(validationErrors.Select(e => $"SRC validation: {e}"));
}
var details = _sourceTrustNormalizer.NormalizeWithDetails(input);
return (details.Score, details);
}
private (double Score, NormalizationResult? Details) NormalizeMitigationWithDetails(
MitigationInput? input, List<string> warnings)
{
if (input == null)
{
warnings.Add("No mitigation evidence provided; using zero score.");
return (0.0, null);
}
var validationErrors = input.Validate();
if (validationErrors.Count > 0)
{
warnings.AddRange(validationErrors.Select(e => $"MIT validation: {e}"));
}
var details = _mitigationNormalizer.NormalizeWithDetails(input);
return (details.Score, details);
}
#endregion
}
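An end-to-end sketch (illustrative only): aggregating with the default options and no pre-loaded evidence, then inspecting the fallback defaults and warnings; the finding id is hypothetical.

var aggregator = new NormalizerAggregator();            // default NormalizerOptions
var evidence = new FindingEvidence
{
    FindingId = "CVE-2025-0001@pkg:pypi/example@2.0.0"   // hypothetical finding id
};

AggregationResult result = aggregator.AggregateWithDetails(evidence);

// With no evidence attached, Input carries the neutral defaults
// (e.g., Rch = 0.50, Xpl = 0.30, Mit = 0.0) and Warnings lists one entry per missing dimension.
foreach (var warning in result.Warnings)
    Console.WriteLine(warning);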

View File

@@ -0,0 +1,265 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
namespace StellaOps.Signals.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Configuration options for evidence normalization.
/// </summary>
public sealed class NormalizerOptions
{
/// <summary>Configuration section name.</summary>
public const string SectionName = "EvidenceNormalization";
/// <summary>Reachability normalization options.</summary>
public ReachabilityNormalizerOptions Reachability { get; set; } = new();
/// <summary>Runtime signal normalization options.</summary>
public RuntimeNormalizerOptions Runtime { get; set; } = new();
/// <summary>Backport evidence normalization options.</summary>
public BackportNormalizerOptions Backport { get; set; } = new();
/// <summary>Exploit likelihood normalization options.</summary>
public ExploitNormalizerOptions Exploit { get; set; } = new();
/// <summary>Source trust normalization options.</summary>
public SourceTrustNormalizerOptions SourceTrust { get; set; } = new();
/// <summary>Mitigation normalization options.</summary>
public MitigationNormalizerOptions Mitigation { get; set; } = new();
/// <summary>Default values for missing evidence.</summary>
public DefaultValuesOptions Defaults { get; set; } = new();
}
/// <summary>
/// Reachability normalization configuration.
/// </summary>
public sealed class ReachabilityNormalizerOptions
{
/// <summary>Score for ConfirmedReachable state.</summary>
public double ConfirmedReachableBase { get; set; } = 0.95;
/// <summary>Maximum bonus for confidence on ConfirmedReachable.</summary>
public double ConfirmedReachableBonus { get; set; } = 0.05;
/// <summary>Base score for StaticReachable state.</summary>
public double StaticReachableBase { get; set; } = 0.40;
/// <summary>Maximum bonus range for StaticReachable confidence.</summary>
public double StaticReachableRange { get; set; } = 0.50;
/// <summary>Score for Unknown state.</summary>
public double UnknownScore { get; set; } = 0.50;
/// <summary>Base score for StaticUnreachable state.</summary>
public double StaticUnreachableBase { get; set; } = 0.25;
/// <summary>Maximum reduction for StaticUnreachable confidence.</summary>
public double StaticUnreachableRange { get; set; } = 0.20;
/// <summary>Base score for ConfirmedUnreachable state.</summary>
public double ConfirmedUnreachableBase { get; set; } = 0.05;
/// <summary>Maximum reduction for ConfirmedUnreachable confidence.</summary>
public double ConfirmedUnreachableRange { get; set; } = 0.05;
}
/// <summary>
/// Runtime signal normalization configuration.
/// </summary>
public sealed class RuntimeNormalizerOptions
{
/// <summary>Threshold for high observation count.</summary>
public int HighObservationThreshold { get; set; } = 10;
/// <summary>Threshold for medium observation count.</summary>
public int MediumObservationThreshold { get; set; } = 5;
/// <summary>Base score for high observations.</summary>
public double HighObservationScore { get; set; } = 0.90;
/// <summary>Base score for medium observations.</summary>
public double MediumObservationScore { get; set; } = 0.75;
/// <summary>Base score for low observations.</summary>
public double LowObservationScore { get; set; } = 0.60;
/// <summary>Base score for minimal observations.</summary>
public double MinimalObservationScore { get; set; } = 0.50;
/// <summary>Bonus for very recent observations (< 1 hour).</summary>
public double VeryRecentBonus { get; set; } = 0.10;
/// <summary>Bonus for recent observations (< 6 hours).</summary>
public double RecentBonus { get; set; } = 0.05;
/// <summary>Hours threshold for very recent.</summary>
public double VeryRecentHours { get; set; } = 1.0;
/// <summary>Hours threshold for recent.</summary>
public double RecentHours { get; set; } = 6.0;
/// <summary>Score for Unknown posture (no runtime data).</summary>
public double UnknownScore { get; set; } = 0.0;
/// <summary>Score for Contradicts posture.</summary>
public double ContradictsScore { get; set; } = 0.10;
}
/// <summary>
/// Backport evidence normalization configuration.
/// </summary>
public sealed class BackportNormalizerOptions
{
/// <summary>Score range for Tier 0 (None): [min, max].</summary>
public (double Min, double Max) Tier0Range { get; set; } = (0.00, 0.10);
/// <summary>Score range for Tier 1 (Heuristic): [min, max].</summary>
public (double Min, double Max) Tier1Range { get; set; } = (0.45, 0.60);
/// <summary>Score range for Tier 2 (PatchSignature): [min, max].</summary>
public (double Min, double Max) Tier2Range { get; set; } = (0.70, 0.85);
/// <summary>Score range for Tier 3 (BinaryDiff): [min, max].</summary>
public (double Min, double Max) Tier3Range { get; set; } = (0.80, 0.92);
/// <summary>Score range for Tier 4 (VendorVex): [min, max].</summary>
public (double Min, double Max) Tier4Range { get; set; } = (0.85, 0.95);
/// <summary>Score range for Tier 5 (SignedProof): [min, max].</summary>
public (double Min, double Max) Tier5Range { get; set; } = (0.90, 1.00);
/// <summary>Bonus when multiple evidence tiers are present.</summary>
public double CombinationBonus { get; set; } = 0.05;
/// <summary>Score for no evidence.</summary>
public double NoEvidenceScore { get; set; } = 0.0;
}
/// <summary>
/// Exploit likelihood normalization configuration.
/// </summary>
public sealed class ExploitNormalizerOptions
{
/// <summary>Floor score when CVE is in KEV catalog.</summary>
public double KevFloor { get; set; } = 0.40;
/// <summary>EPSS percentile threshold for top 1%.</summary>
public double Top1PercentThreshold { get; set; } = 0.99;
/// <summary>EPSS percentile threshold for top 5%.</summary>
public double Top5PercentThreshold { get; set; } = 0.95;
/// <summary>EPSS percentile threshold for top 25%.</summary>
public double Top25PercentThreshold { get; set; } = 0.75;
/// <summary>Score range for top 1% percentile.</summary>
public (double Low, double High) Top1PercentRange { get; set; } = (0.90, 1.00);
/// <summary>Score range for top 5% percentile.</summary>
public (double Low, double High) Top5PercentRange { get; set; } = (0.70, 0.89);
/// <summary>Score range for top 25% percentile.</summary>
public (double Low, double High) Top25PercentRange { get; set; } = (0.40, 0.69);
/// <summary>Score range for below top 25% percentile.</summary>
public (double Low, double High) LowerPercentRange { get; set; } = (0.20, 0.39);
/// <summary>Score when no EPSS data available.</summary>
public double NoEpssScore { get; set; } = 0.30;
}
/// <summary>
/// Source trust normalization configuration.
/// </summary>
public sealed class SourceTrustNormalizerOptions
{
/// <summary>Multiplier for Vendor issuer type.</summary>
public double VendorMultiplier { get; set; } = 1.0;
/// <summary>Multiplier for Distribution issuer type.</summary>
public double DistributionMultiplier { get; set; } = 0.85;
/// <summary>Multiplier for TrustedThirdParty issuer type.</summary>
public double TrustedThirdPartyMultiplier { get; set; } = 0.80;
/// <summary>Multiplier for Community issuer type.</summary>
public double CommunityMultiplier { get; set; } = 0.60;
/// <summary>Multiplier for Unknown issuer type.</summary>
public double UnknownMultiplier { get; set; } = 0.30;
/// <summary>Bonus multiplier for signed sources.</summary>
public double SignedBonus { get; set; } = 0.10;
/// <summary>Weight for provenance in trust calculation.</summary>
public double ProvenanceWeight { get; set; } = 0.40;
/// <summary>Weight for coverage in trust calculation.</summary>
public double CoverageWeight { get; set; } = 0.35;
/// <summary>Weight for replayability in trust calculation.</summary>
public double ReplayabilityWeight { get; set; } = 0.25;
}
/// <summary>
/// Mitigation normalization configuration.
/// </summary>
public sealed class MitigationNormalizerOptions
{
/// <summary>Effectiveness for FeatureFlag mitigation.</summary>
public (double Low, double High) FeatureFlagEffectiveness { get; set; } = (0.20, 0.40);
/// <summary>Effectiveness for AuthRequired mitigation.</summary>
public (double Low, double High) AuthRequiredEffectiveness { get; set; } = (0.10, 0.20);
/// <summary>Effectiveness for AdminOnly mitigation.</summary>
public (double Low, double High) AdminOnlyEffectiveness { get; set; } = (0.15, 0.25);
/// <summary>Effectiveness for NonDefaultConfig mitigation.</summary>
public (double Low, double High) NonDefaultConfigEffectiveness { get; set; } = (0.15, 0.30);
/// <summary>Effectiveness for SeccompProfile mitigation.</summary>
public (double Low, double High) SeccompEffectiveness { get; set; } = (0.10, 0.25);
/// <summary>Effectiveness for MandatoryAccessControl mitigation.</summary>
public (double Low, double High) MacEffectiveness { get; set; } = (0.10, 0.20);
/// <summary>Effectiveness for NetworkIsolation mitigation.</summary>
public (double Low, double High) NetworkIsolationEffectiveness { get; set; } = (0.05, 0.15);
/// <summary>Effectiveness for ReadOnlyFilesystem mitigation.</summary>
public (double Low, double High) ReadOnlyFsEffectiveness { get; set; } = (0.05, 0.10);
/// <summary>Maximum total mitigation score (cap).</summary>
public double MaxTotalMitigation { get; set; } = 1.0;
/// <summary>Bonus for runtime-verified mitigations.</summary>
public double VerificationBonus { get; set; } = 0.05;
}
/// <summary>
/// Default values for missing evidence.
/// </summary>
public sealed class DefaultValuesOptions
{
/// <summary>Default RCH when no reachability evidence.</summary>
public double Rch { get; set; } = 0.50;
/// <summary>Default RTS when no runtime evidence.</summary>
public double Rts { get; set; } = 0.0;
/// <summary>Default BKP when no backport evidence.</summary>
public double Bkp { get; set; } = 0.0;
/// <summary>Default XPL when no exploit evidence.</summary>
public double Xpl { get; set; } = 0.30;
/// <summary>Default SRC when no source trust evidence.</summary>
public double Src { get; set; } = 0.30;
/// <summary>Default MIT when no mitigation evidence.</summary>
public double Mit { get; set; } = 0.0;
}
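A tuning sketch (illustrative only), overriding two of the defaults above before constructing an aggregator directly; the chosen values are arbitrary.

var options = new NormalizerOptions();
options.Runtime.HighObservationThreshold = 20;     // demand more observations for the top runtime band
options.Backport.Tier1Range = (0.40, 0.55);        // be more conservative about heuristic backport evidence
var aggregator = new NormalizerAggregator(options);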

View File

@@ -0,0 +1,217 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using System.Text;
using Microsoft.Extensions.Options;
namespace StellaOps.Signals.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Normalizes reachability evidence to a [0, 1] RCH score.
/// Higher scores indicate greater reachability risk.
/// </summary>
/// <remarks>
/// Maps ReachabilityState + confidence to normalized scores:
/// - LiveExploitPath: 0.95-1.00 (highest risk)
/// - DynamicReachable: 0.90-0.98 (confirmed reachable via runtime)
/// - StaticReachable: 0.40-0.90 (depends on confidence)
/// - PotentiallyReachable: 0.30-0.60 (conservative analysis)
/// - Unknown: 0.50 (neutral)
/// - NotReachable: 0.00-0.15 (depends on confidence)
/// </remarks>
public sealed class ReachabilityNormalizer : IEvidenceNormalizer<ReachabilityInput>
{
private readonly ReachabilityNormalizerOptions _options;
/// <summary>
/// Create a normalizer with default options.
/// </summary>
public ReachabilityNormalizer()
: this(new ReachabilityNormalizerOptions())
{
}
/// <summary>
/// Create a normalizer with specific options.
/// </summary>
public ReachabilityNormalizer(ReachabilityNormalizerOptions options)
{
_options = options ?? throw new ArgumentNullException(nameof(options));
}
/// <summary>
/// Create a normalizer with DI-provided options.
/// </summary>
public ReachabilityNormalizer(IOptionsMonitor<NormalizerOptions> optionsMonitor)
: this(optionsMonitor?.CurrentValue?.Reachability ?? new ReachabilityNormalizerOptions())
{
}
/// <inheritdoc />
public string Dimension => "RCH";
/// <inheritdoc />
public double Normalize(ReachabilityInput input)
{
ArgumentNullException.ThrowIfNull(input);
return CalculateScore(input);
}
/// <inheritdoc />
public NormalizationResult NormalizeWithDetails(ReachabilityInput input)
{
ArgumentNullException.ThrowIfNull(input);
var score = CalculateScore(input);
var explanation = GenerateExplanation(input, score);
var components = BuildComponents(input);
return NormalizationResult.WithComponents(score, Dimension, explanation, components);
}
private double CalculateScore(ReachabilityInput input)
{
var baseScore = GetBaseScore(input.State);
var confidenceModifier = CalculateConfidenceModifier(input.State, input.Confidence);
var analysisBonus = CalculateAnalysisBonus(input);
var hopPenalty = CalculateHopPenalty(input.HopCount, input.State);
var rawScore = baseScore + confidenceModifier + analysisBonus - hopPenalty;
return Math.Clamp(rawScore, 0.0, 1.0);
}
private double GetBaseScore(ReachabilityState state)
{
return state switch
{
ReachabilityState.LiveExploitPath => _options.ConfirmedReachableBase,
ReachabilityState.DynamicReachable => _options.ConfirmedReachableBase - 0.05, // 0.90
ReachabilityState.StaticReachable => _options.StaticReachableBase,
ReachabilityState.PotentiallyReachable => 0.35, // Conservative base
ReachabilityState.Unknown => _options.UnknownScore,
ReachabilityState.NotReachable => _options.ConfirmedUnreachableBase,
_ => _options.UnknownScore
};
}
private double CalculateConfidenceModifier(ReachabilityState state, double confidence)
{
return state switch
{
// For reachable states: higher confidence = higher risk
ReachabilityState.LiveExploitPath => confidence * _options.ConfirmedReachableBonus,
ReachabilityState.DynamicReachable => confidence * 0.08, // Up to 0.98
ReachabilityState.StaticReachable => confidence * _options.StaticReachableRange,
ReachabilityState.PotentiallyReachable => confidence * 0.25, // Up to 0.60
// For unreachable states: higher confidence = lower risk (subtract more)
ReachabilityState.NotReachable => -(confidence * _options.ConfirmedUnreachableRange),
// Unknown: no confidence modifier
ReachabilityState.Unknown => 0.0,
_ => 0.0
};
}
private double CalculateAnalysisBonus(ReachabilityInput input)
{
// Better analysis methods get a small bonus (more trustworthy results)
var bonus = 0.0;
if (input.HasInterproceduralFlow)
bonus += 0.02;
if (input.HasTaintTracking)
bonus += 0.02;
if (input.HasDataFlowSensitivity)
bonus += 0.01;
// Only apply bonus for positive reachability findings
return input.State is ReachabilityState.StaticReachable
or ReachabilityState.DynamicReachable
or ReachabilityState.LiveExploitPath
? bonus
: 0.0;
}
private double CalculateHopPenalty(int hopCount, ReachabilityState state)
{
// Only penalize high hop counts for static analysis
if (state != ReachabilityState.StaticReachable)
return 0.0;
// More hops = less confident in reachability
// 0 hops = 0 penalty, 10+ hops = max 0.10 penalty
return hopCount switch
{
0 => 0.0,
1 => 0.01,
2 => 0.02,
3 => 0.03,
<= 5 => 0.05,
<= 10 => 0.08,
_ => 0.10
};
}
private Dictionary<string, double> BuildComponents(ReachabilityInput input)
{
var components = new Dictionary<string, double>
{
["state"] = (double)input.State,
["confidence"] = input.Confidence,
["hop_count"] = input.HopCount,
["base_score"] = GetBaseScore(input.State),
["confidence_modifier"] = CalculateConfidenceModifier(input.State, input.Confidence),
["analysis_bonus"] = CalculateAnalysisBonus(input),
["hop_penalty"] = CalculateHopPenalty(input.HopCount, input.State),
["interprocedural_flow"] = input.HasInterproceduralFlow ? 1.0 : 0.0,
["taint_tracking"] = input.HasTaintTracking ? 1.0 : 0.0,
["data_flow_sensitivity"] = input.HasDataFlowSensitivity ? 1.0 : 0.0
};
return components;
}
private string GenerateExplanation(ReachabilityInput input, double score)
{
var sb = new StringBuilder();
var stateDesc = input.State switch
{
ReachabilityState.LiveExploitPath => "Live exploit path observed",
ReachabilityState.DynamicReachable => "Dynamically confirmed reachable",
ReachabilityState.StaticReachable => "Statically determined reachable",
ReachabilityState.PotentiallyReachable => "Potentially reachable (conservative)",
ReachabilityState.Unknown => "Reachability unknown",
ReachabilityState.NotReachable => "Confirmed not reachable",
_ => $"Unknown state ({input.State})"
};
sb.Append($"{stateDesc} with {input.Confidence:P0} confidence");
if (input.HopCount > 0)
sb.Append($", {input.HopCount} hop(s) from entry point");
var analysisFlags = new List<string>();
if (input.HasInterproceduralFlow) analysisFlags.Add("interprocedural");
if (input.HasTaintTracking) analysisFlags.Add("taint-tracked");
if (input.HasDataFlowSensitivity) analysisFlags.Add("data-flow");
if (analysisFlags.Count > 0)
sb.Append($" ({string.Join(", ", analysisFlags)} analysis)");
if (!string.IsNullOrEmpty(input.AnalysisMethod))
sb.Append($" via {input.AnalysisMethod}");
if (!string.IsNullOrEmpty(input.EvidenceSource))
sb.Append($" from {input.EvidenceSource}");
sb.Append($" → RCH={score:F2}");
return sb.ToString();
}
}
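A minimal usage sketch, for review only and not part of the commit: property names are read from the normalizer code above, and it assumes ReachabilityInput is settable via object initializer like the other input types in this commit; the string values are hypothetical.
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;

var normalizer = new ReachabilityNormalizer();        // default options
var input = new ReachabilityInput
{
    State = ReachabilityState.StaticReachable,
    Confidence = 0.8,
    HopCount = 3,
    HasInterproceduralFlow = true,
    AnalysisMethod = "call-graph",                     // hypothetical label
    EvidenceSource = "scanner-x"                       // hypothetical source
};
double rch = normalizer.Normalize(input);              // clamped to [0, 1]
var details = normalizer.NormalizeWithDetails(input);  // adds explanation and per-component breakdown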

View File

@@ -0,0 +1,222 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using System.Text;
using Microsoft.Extensions.Options;
namespace StellaOps.Signals.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Normalizes runtime signal evidence to a [0, 1] RTS score.
/// Higher scores indicate stronger runtime evidence that the code path is exercised.
/// </summary>
/// <remarks>
/// Maps RuntimePosture + observation count + recency to normalized scores:
/// - FullInstrumentation with high observations: 0.90-1.00
/// - EbpfDeep with medium observations: 0.75-0.90
/// - ActiveTracing with some observations: 0.60-0.75
/// - Passive with minimal observations: 0.50-0.60
/// - None/Unknown: 0.00
/// </remarks>
public sealed class RuntimeSignalNormalizer : IEvidenceNormalizer<RuntimeInput>
{
private readonly RuntimeNormalizerOptions _options;
/// <summary>
/// Create a normalizer with default options.
/// </summary>
public RuntimeSignalNormalizer()
: this(new RuntimeNormalizerOptions())
{
}
/// <summary>
/// Create a normalizer with specific options.
/// </summary>
public RuntimeSignalNormalizer(RuntimeNormalizerOptions options)
{
_options = options ?? throw new ArgumentNullException(nameof(options));
}
/// <summary>
/// Create a normalizer with DI-provided options.
/// </summary>
public RuntimeSignalNormalizer(IOptionsMonitor<NormalizerOptions> optionsMonitor)
: this(optionsMonitor?.CurrentValue?.Runtime ?? new RuntimeNormalizerOptions())
{
}
/// <inheritdoc />
public string Dimension => "RTS";
/// <inheritdoc />
public double Normalize(RuntimeInput input)
{
ArgumentNullException.ThrowIfNull(input);
return CalculateScore(input);
}
/// <inheritdoc />
public NormalizationResult NormalizeWithDetails(RuntimeInput input)
{
ArgumentNullException.ThrowIfNull(input);
var score = CalculateScore(input);
var explanation = GenerateExplanation(input, score);
var components = BuildComponents(input);
return NormalizationResult.WithComponents(score, Dimension, explanation, components);
}
private double CalculateScore(RuntimeInput input)
{
// No runtime observation = no evidence
if (input.Posture == RuntimePosture.None || input.ObservationCount == 0)
return _options.UnknownScore;
var observationScore = CalculateObservationScore(input.ObservationCount);
var postureMultiplier = GetPostureMultiplier(input.Posture);
var recencyBonus = CalculateRecencyBonus(input);
var qualityBonus = CalculateQualityBonus(input);
var rawScore = observationScore * postureMultiplier + recencyBonus + qualityBonus;
return Math.Clamp(rawScore, 0.0, 1.0);
}
private double CalculateObservationScore(int observationCount)
{
return observationCount switch
{
_ when observationCount >= _options.HighObservationThreshold => _options.HighObservationScore,
_ when observationCount >= _options.MediumObservationThreshold => _options.MediumObservationScore,
>= 1 => _options.LowObservationScore,
_ => _options.MinimalObservationScore
};
}
private double GetPostureMultiplier(RuntimePosture posture)
{
// Higher quality observation methods get a multiplier
return posture switch
{
RuntimePosture.FullInstrumentation => 1.10, // Best quality, 10% bonus
RuntimePosture.EbpfDeep => 1.05, // eBPF = excellent
RuntimePosture.ActiveTracing => 1.00, // Baseline
RuntimePosture.Passive => 0.90, // Passive = less confidence
RuntimePosture.None => 0.0,
_ => 0.90
};
}
private double CalculateRecencyBonus(RuntimeInput input)
{
// Use RecencyFactor directly if available
if (input.RecencyFactor > 0.0)
{
// High recency factor (close to 1.0) = recent observations
return input.RecencyFactor switch
{
>= 0.9 => _options.VeryRecentBonus, // Very recent
>= 0.5 => _options.RecentBonus, // Moderately recent
_ => 0.0 // Old observations
};
}
// Fall back to LastObservation timestamp if available
if (input.LastObservation.HasValue)
{
var hoursSince = (DateTimeOffset.UtcNow - input.LastObservation.Value).TotalHours;
return hoursSince switch
{
_ when hoursSince < _options.VeryRecentHours => _options.VeryRecentBonus,
_ when hoursSince < _options.RecentHours => _options.RecentBonus,
_ => 0.0
};
}
return 0.0;
}
private double CalculateQualityBonus(RuntimeInput input)
{
var bonus = 0.0;
// Direct path observation is strong evidence
if (input.DirectPathObserved)
bonus += 0.05;
// Production traffic is more meaningful
if (input.IsProductionTraffic)
bonus += 0.03;
return bonus;
}
private Dictionary<string, double> BuildComponents(RuntimeInput input)
{
var components = new Dictionary<string, double>
{
["posture"] = (double)input.Posture,
["observation_count"] = input.ObservationCount,
["recency_factor"] = input.RecencyFactor,
["observation_score"] = CalculateObservationScore(input.ObservationCount),
["posture_multiplier"] = GetPostureMultiplier(input.Posture),
["recency_bonus"] = CalculateRecencyBonus(input),
["quality_bonus"] = CalculateQualityBonus(input),
["direct_path_observed"] = input.DirectPathObserved ? 1.0 : 0.0,
["is_production_traffic"] = input.IsProductionTraffic ? 1.0 : 0.0
};
if (input.SessionDigests?.Count > 0)
components["session_count"] = input.SessionDigests.Count;
return components;
}
private string GenerateExplanation(RuntimeInput input, double score)
{
var sb = new StringBuilder();
if (input.Posture == RuntimePosture.None || input.ObservationCount == 0)
{
sb.Append("No runtime observations available");
sb.Append($" → RTS={score:F2}");
return sb.ToString();
}
var postureDesc = input.Posture switch
{
RuntimePosture.FullInstrumentation => "full instrumentation",
RuntimePosture.EbpfDeep => "eBPF deep observation",
RuntimePosture.ActiveTracing => "active tracing",
RuntimePosture.Passive => "passive monitoring",
_ => $"unknown posture ({input.Posture})"
};
sb.Append($"{input.ObservationCount} observation(s) via {postureDesc}");
if (input.DirectPathObserved)
sb.Append(", vulnerable path directly observed");
if (input.IsProductionTraffic)
sb.Append(" in production traffic");
// Recency description
var recencyDesc = input.RecencyFactor switch
{
>= 0.9 => " (very recent)",
>= 0.5 => " (moderately recent)",
> 0 => " (aging)",
_ => ""
};
sb.Append(recencyDesc);
if (!string.IsNullOrEmpty(input.EvidenceSource))
sb.Append($" from {input.EvidenceSource}");
sb.Append($" → RTS={score:F2}");
return sb.ToString();
}
}
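Another review-only sketch, assuming RuntimeInput is init-settable; the figures in the trailing comment are the defaults visible in the code above.
var normalizer = new RuntimeSignalNormalizer();
var input = new RuntimeInput
{
    Posture = RuntimePosture.EbpfDeep,
    ObservationCount = 12,
    RecencyFactor = 0.95,
    DirectPathObserved = true,
    IsProductionTraffic = true
};
double rts = normalizer.Normalize(input);
// observation score scaled by 1.05 (EbpfDeep multiplier), plus the very-recent bonus and a 0.08 quality bonus, clamped to [0, 1]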

View File

@@ -0,0 +1,225 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using System.Text;
using Microsoft.Extensions.Options;
namespace StellaOps.Signals.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Normalizes source trust evidence to a [0, 1] SRC score.
/// Higher scores indicate higher trust in the advisory/VEX source.
/// </summary>
/// <remarks>
/// Combines issuer type multiplier with trust vector components:
/// - GovernmentAgency/CNA: highest multiplier
/// - Vendor: high trust (1.0)
/// - Distribution: good trust (0.85)
/// - Upstream: good trust (0.82)
/// - SecurityResearcher: moderate trust (0.75)
/// - Community: lower trust (0.60)
/// - Unknown: minimal trust (0.30)
///
/// Trust vector weighted: provenance (40%) + coverage (35%) + replayability (25%)
/// Bonuses for cryptographic attestation and corroborating sources
/// </remarks>
public sealed class SourceTrustNormalizer : IEvidenceNormalizer<SourceTrustInput>
{
private readonly SourceTrustNormalizerOptions _options;
/// <summary>
/// Create a normalizer with default options.
/// </summary>
public SourceTrustNormalizer()
: this(new SourceTrustNormalizerOptions())
{
}
/// <summary>
/// Create a normalizer with specific options.
/// </summary>
public SourceTrustNormalizer(SourceTrustNormalizerOptions options)
{
_options = options ?? throw new ArgumentNullException(nameof(options));
}
/// <summary>
/// Create a normalizer with DI-provided options.
/// </summary>
public SourceTrustNormalizer(IOptionsMonitor<NormalizerOptions> optionsMonitor)
: this(optionsMonitor?.CurrentValue?.SourceTrust ?? new SourceTrustNormalizerOptions())
{
}
/// <inheritdoc />
public string Dimension => "SRC";
/// <inheritdoc />
public double Normalize(SourceTrustInput input)
{
ArgumentNullException.ThrowIfNull(input);
return CalculateScore(input);
}
/// <inheritdoc />
public NormalizationResult NormalizeWithDetails(SourceTrustInput input)
{
ArgumentNullException.ThrowIfNull(input);
var score = CalculateScore(input);
var explanation = GenerateExplanation(input, score);
var components = BuildComponents(input);
return NormalizationResult.WithComponents(score, Dimension, explanation, components);
}
private double CalculateScore(SourceTrustInput input)
{
var issuerMultiplier = GetIssuerMultiplier(input.IssuerType);
var trustVectorScore = CalculateTrustVectorScore(input);
var attestationBonus = CalculateAttestationBonus(input);
var corroborationBonus = CalculateCorroborationBonus(input);
var historicalBonus = CalculateHistoricalBonus(input);
var rawScore = trustVectorScore * issuerMultiplier + attestationBonus + corroborationBonus + historicalBonus;
return Math.Clamp(rawScore, 0.0, 1.0);
}
private double GetIssuerMultiplier(IssuerType issuerType)
{
return issuerType switch
{
IssuerType.GovernmentAgency => 1.05, // CISA, etc.
IssuerType.Cna => 1.02, // CVE Numbering Authority
IssuerType.Vendor => _options.VendorMultiplier,
IssuerType.Distribution => _options.DistributionMultiplier,
IssuerType.Upstream => 0.82, // Upstream maintainers
IssuerType.SecurityResearcher => 0.75,
IssuerType.Community => _options.CommunityMultiplier,
IssuerType.Unknown => _options.UnknownMultiplier,
_ => _options.UnknownMultiplier
};
}
private double CalculateTrustVectorScore(SourceTrustInput input)
{
// Weighted combination of trust vector components
return _options.ProvenanceWeight * input.ProvenanceTrust +
_options.CoverageWeight * input.CoverageCompleteness +
_options.ReplayabilityWeight * input.Replayability;
}
private double CalculateAttestationBonus(SourceTrustInput input)
{
var bonus = 0.0;
// Cryptographic attestation (DSSE/in-toto) is a strong signal
if (input.IsCryptographicallyAttested)
bonus += _options.SignedBonus;
// Independent verification adds confidence
if (input.IndependentlyVerified)
bonus += 0.05;
return bonus;
}
private double CalculateCorroborationBonus(SourceTrustInput input)
{
// Multiple independent sources increase trust
return input.CorroboratingSourceCount switch
{
0 => 0.0,
1 => 0.02,
2 => 0.04,
>= 3 => 0.06,
_ => 0.0
};
}
private double CalculateHistoricalBonus(SourceTrustInput input)
{
// Good track record earns a small bonus
if (!input.HistoricalAccuracy.HasValue)
return 0.0;
return input.HistoricalAccuracy.Value switch
{
>= 0.95 => 0.05, // Excellent track record
>= 0.85 => 0.03, // Good track record
>= 0.70 => 0.01, // Acceptable track record
_ => -0.02 // Poor track record = penalty
};
}
private Dictionary<string, double> BuildComponents(SourceTrustInput input)
{
var components = new Dictionary<string, double>
{
["issuer_type"] = (double)input.IssuerType,
["issuer_multiplier"] = GetIssuerMultiplier(input.IssuerType),
["provenance_trust"] = input.ProvenanceTrust,
["coverage_completeness"] = input.CoverageCompleteness,
["replayability"] = input.Replayability,
["trust_vector_score"] = CalculateTrustVectorScore(input),
["attestation_bonus"] = CalculateAttestationBonus(input),
["corroboration_bonus"] = CalculateCorroborationBonus(input),
["historical_bonus"] = CalculateHistoricalBonus(input),
["cryptographically_attested"] = input.IsCryptographicallyAttested ? 1.0 : 0.0,
["independently_verified"] = input.IndependentlyVerified ? 1.0 : 0.0,
["corroborating_sources"] = input.CorroboratingSourceCount
};
if (input.HistoricalAccuracy.HasValue)
components["historical_accuracy"] = input.HistoricalAccuracy.Value;
return components;
}
private string GenerateExplanation(SourceTrustInput input, double score)
{
var sb = new StringBuilder();
var issuerDesc = input.IssuerType switch
{
IssuerType.GovernmentAgency => "government agency",
IssuerType.Cna => "CVE Numbering Authority",
IssuerType.Vendor => "software vendor",
IssuerType.Distribution => "distribution maintainer",
IssuerType.Upstream => "upstream project",
IssuerType.SecurityResearcher => "security researcher",
IssuerType.Community => "community source",
IssuerType.Unknown => "unknown source",
_ => $"unknown type ({input.IssuerType})"
};
sb.Append($"From {issuerDesc}");
if (!string.IsNullOrEmpty(input.IssuerId))
sb.Append($" ({input.IssuerId})");
// Trust vector summary
var trustVectorScore = CalculateTrustVectorScore(input);
sb.Append($" with trust vector {trustVectorScore:P0}");
// Attestation
if (input.IsCryptographicallyAttested)
sb.Append(", cryptographically attested");
if (input.IndependentlyVerified)
sb.Append(", independently verified");
// Corroboration
if (input.CorroboratingSourceCount > 0)
sb.Append($", {input.CorroboratingSourceCount} corroborating source(s)");
// Historical accuracy
if (input.HistoricalAccuracy.HasValue)
sb.Append($", {input.HistoricalAccuracy.Value:P0} historical accuracy");
sb.Append($" → SRC={score:F2}");
return sb.ToString();
}
}
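A review-only sketch for the SRC dimension, assuming SourceTrustInput is init-settable and the default 0.40/0.35/0.25 trust-vector weights quoted in the remarks; the issuer id is hypothetical.
var normalizer = new SourceTrustNormalizer();
var input = new SourceTrustInput
{
    IssuerType = IssuerType.Vendor,
    IssuerId = "example-vendor",          // hypothetical issuer id
    ProvenanceTrust = 0.9,
    CoverageCompleteness = 0.8,
    Replayability = 0.7,
    IsCryptographicallyAttested = true,
    CorroboratingSourceCount = 2
};
double src = normalizer.Normalize(input);
// trust vector = 0.40*0.9 + 0.35*0.8 + 0.25*0.7 = 0.815, scaled by the vendor multiplier,
// then attestation (SignedBonus) and corroboration (+0.04) bonuses, clamped to [0, 1]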

View File

@@ -0,0 +1,577 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using FluentAssertions;
using StellaOps.Signals.EvidenceWeightedScore;
using System.Collections.Concurrent;
using System.Diagnostics;
using Xunit;
namespace StellaOps.Signals.Tests.EvidenceWeightedScore;
/// <summary>
/// Determinism and quality gate tests for Evidence-Weighted Score calculator.
/// These tests ensure reproducibility, ordering independence, thread safety, and performance.
/// </summary>
public class EvidenceWeightedScoreDeterminismTests
{
private readonly IEvidenceWeightedScoreCalculator _calculator = new EvidenceWeightedScoreCalculator();
private readonly EvidenceWeightPolicy _defaultPolicy = EvidenceWeightPolicy.DefaultProduction;
#region Task 51: Determinism Tests
[Fact]
public void SameInputs_SamePolicy_ProducesIdenticalScore()
{
var input = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-12345",
Rch = 0.8,
Rts = 0.7,
Bkp = 0.5,
Xpl = 0.6,
Src = 0.5,
Mit = 0.1
};
var result1 = _calculator.Calculate(input, _defaultPolicy);
var result2 = _calculator.Calculate(input, _defaultPolicy);
result1.Score.Should().Be(result2.Score);
result1.PolicyDigest.Should().Be(result2.PolicyDigest);
}
[Fact]
public void SameInputs_SamePolicy_MultipleIterations_AllIdentical()
{
var input = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-12345",
Rch = 0.75,
Rts = 0.65,
Bkp = 0.45,
Xpl = 0.55,
Src = 0.35,
Mit = 0.15
};
var results = Enumerable.Range(0, 1000)
.Select(_ => _calculator.Calculate(input, _defaultPolicy))
.ToList();
var firstScore = results[0].Score;
var firstDigest = results[0].PolicyDigest;
results.Should().OnlyContain(r => r.Score == firstScore);
results.Should().OnlyContain(r => r.PolicyDigest == firstDigest);
}
[Fact]
public void PolicyDigest_IsStable_AcrossCalculations()
{
var input1 = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-00001",
Rch = 0.1, Rts = 0.2, Bkp = 0.3, Xpl = 0.4, Src = 0.5, Mit = 0.1
};
var input2 = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-00002",
Rch = 0.9, Rts = 0.8, Bkp = 0.7, Xpl = 0.6, Src = 0.5, Mit = 0.2
};
var result1 = _calculator.Calculate(input1, _defaultPolicy);
var result2 = _calculator.Calculate(input2, _defaultPolicy);
// Same policy should produce same digest regardless of inputs
result1.PolicyDigest.Should().Be(result2.PolicyDigest);
}
[Fact]
public void DifferentPolicies_ProduceDifferentDigests()
{
var input = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-12345",
Rch = 0.5, Rts = 0.5, Bkp = 0.5, Xpl = 0.5, Src = 0.5, Mit = 0.1
};
var policy1 = EvidenceWeightPolicy.DefaultProduction;
var policy2 = new EvidenceWeightPolicy
{
Profile = "custom",
Version = "v2",
Weights = new EvidenceWeights
{
Rch = 0.25, Rts = 0.25, Bkp = 0.20, Xpl = 0.15, Src = 0.10, Mit = 0.05
}
};
var result1 = _calculator.Calculate(input, policy1);
var result2 = _calculator.Calculate(input, policy2);
result1.PolicyDigest.Should().NotBe(result2.PolicyDigest);
}
[Fact]
public void Breakdown_IsConsistent_AcrossCalculations()
{
var input = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-12345",
Rch = 0.8, Rts = 0.7, Bkp = 0.5, Xpl = 0.6, Src = 0.5, Mit = 0.1
};
var result1 = _calculator.Calculate(input, _defaultPolicy);
var result2 = _calculator.Calculate(input, _defaultPolicy);
// Breakdown is a list of DimensionContribution records
result1.Breakdown.Should().HaveCount(result2.Breakdown.Count);
for (int i = 0; i < result1.Breakdown.Count; i++)
{
result1.Breakdown[i].Symbol.Should().Be(result2.Breakdown[i].Symbol);
result1.Breakdown[i].Contribution.Should().Be(result2.Breakdown[i].Contribution);
result1.Breakdown[i].InputValue.Should().Be(result2.Breakdown[i].InputValue);
result1.Breakdown[i].Weight.Should().Be(result2.Breakdown[i].Weight);
}
}
[Fact]
public void Flags_AreConsistent_AcrossCalculations()
{
var input = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-12345",
Rch = 0.8,
Rts = 0.85, // Should trigger live-signal
Bkp = 0.5,
Xpl = 0.6,
Src = 0.5,
Mit = 0.1
};
var results = Enumerable.Range(0, 100)
.Select(_ => _calculator.Calculate(input, _defaultPolicy))
.ToList();
var firstFlags = results[0].Flags.ToList();
results.Should().OnlyContain(r => r.Flags.SequenceEqual(firstFlags));
}
[Fact]
public void Bucket_IsConsistent_AcrossCalculations()
{
var input = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-12345",
Rch = 0.8, Rts = 0.7, Bkp = 0.5, Xpl = 0.6, Src = 0.5, Mit = 0.1
};
var results = Enumerable.Range(0, 100)
.Select(_ => _calculator.Calculate(input, _defaultPolicy))
.ToList();
var firstBucket = results[0].Bucket;
results.Should().OnlyContain(r => r.Bucket == firstBucket);
}
#endregion
#region Task 52: Ordering Independence Tests
[Fact]
public void InputOrder_DoesNotAffectScore()
{
// Create inputs in different orders - score should be identical
var input1 = new EvidenceWeightedScoreInput
{
FindingId = "test",
Rch = 0.8,
Rts = 0.7,
Bkp = 0.5,
Xpl = 0.6,
Src = 0.5,
Mit = 0.1
};
var input2 = new EvidenceWeightedScoreInput
{
FindingId = "test",
Mit = 0.1, // Different init order
Src = 0.5,
Xpl = 0.6,
Bkp = 0.5,
Rts = 0.7,
Rch = 0.8
};
var result1 = _calculator.Calculate(input1, _defaultPolicy);
var result2 = _calculator.Calculate(input2, _defaultPolicy);
result1.Score.Should().Be(result2.Score);
}
[Fact]
public void PolicyWeightOrder_DoesNotAffectScore()
{
var input = new EvidenceWeightedScoreInput
{
FindingId = "test",
Rch = 0.8, Rts = 0.7, Bkp = 0.5, Xpl = 0.6, Src = 0.5, Mit = 0.1
};
var policy1 = new EvidenceWeightPolicy
{
Profile = "test1",
Version = "v1",
Weights = new EvidenceWeights
{
Rch = 0.30, Rts = 0.25, Bkp = 0.15, Xpl = 0.15, Src = 0.10, Mit = 0.10
}
};
var policy2 = new EvidenceWeightPolicy
{
Profile = "test2",
Version = "v1",
Weights = new EvidenceWeights
{
Mit = 0.10, Src = 0.10, Xpl = 0.15, Bkp = 0.15, Rts = 0.25, Rch = 0.30
}
};
var result1 = _calculator.Calculate(input, policy1);
var result2 = _calculator.Calculate(input, policy2);
result1.Score.Should().Be(result2.Score);
}
[Theory]
[MemberData(nameof(GetRandomizedInputs))]
public void RandomizedInputOrder_ProducesConsistentScore(
double rch, double rts, double bkp, double xpl, double src, double mit)
{
var input = new EvidenceWeightedScoreInput
{
FindingId = "test",
Rch = rch, Rts = rts, Bkp = bkp, Xpl = xpl, Src = src, Mit = mit
};
var results = Enumerable.Range(0, 10)
.Select(_ => _calculator.Calculate(input, _defaultPolicy).Score)
.Distinct()
.ToList();
results.Should().ContainSingle("all calculations should produce identical scores");
}
public static IEnumerable<object[]> GetRandomizedInputs()
{
// Use fixed seed for reproducibility
var random = new Random(42);
for (int i = 0; i < 10; i++)
{
yield return new object[]
{
random.NextDouble(),
random.NextDouble(),
random.NextDouble(),
random.NextDouble(),
random.NextDouble(),
random.NextDouble() * 0.5 // MIT typically smaller
};
}
}
#endregion
#region Task 53: Concurrent Calculation Tests
[Fact]
public void ConcurrentCalculations_AreThreadSafe()
{
var input = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-12345",
Rch = 0.8, Rts = 0.7, Bkp = 0.5, Xpl = 0.6, Src = 0.5, Mit = 0.1
};
var results = new ConcurrentBag<double>();
var digests = new ConcurrentBag<string>();
Parallel.For(0, 1000, _ =>
{
var result = _calculator.Calculate(input, _defaultPolicy);
results.Add(result.Score);
digests.Add(result.PolicyDigest);
});
results.Distinct().Should().ContainSingle("all concurrent calculations should produce identical scores");
digests.Distinct().Should().ContainSingle("all concurrent calculations should produce identical digests");
}
[Fact]
public void ConcurrentCalculations_WithDifferentInputs_AllComplete()
{
var inputs = Enumerable.Range(0, 100).Select(i => new EvidenceWeightedScoreInput
{
FindingId = $"CVE-2024-{i:D5}",
Rch = 0.1 + (i % 10) * 0.08,
Rts = 0.1 + ((i + 1) % 10) * 0.08,
Bkp = 0.1 + ((i + 2) % 10) * 0.08,
Xpl = 0.1 + ((i + 3) % 10) * 0.08,
Src = 0.1 + ((i + 4) % 10) * 0.08,
Mit = 0.05 + ((i + 5) % 10) * 0.04
}).ToList();
var results = new ConcurrentDictionary<string, double>();
Parallel.ForEach(inputs, input =>
{
var result = _calculator.Calculate(input, _defaultPolicy);
results[input.FindingId] = result.Score;
});
results.Should().HaveCount(100);
results.Values.Should().OnlyContain(s => s >= 0 && s <= 100);
}
[Fact]
public void ConcurrentCalculations_WithDifferentPolicies_AllComplete()
{
var input = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-12345",
Rch = 0.5, Rts = 0.5, Bkp = 0.5, Xpl = 0.5, Src = 0.5, Mit = 0.1
};
var policies = Enumerable.Range(0, 50).Select(i => new EvidenceWeightPolicy
{
Profile = $"policy-{i}",
Version = "v1",
Weights = new EvidenceWeights
{
Rch = 0.20 + (i * 0.002),
Rts = 0.20,
Bkp = 0.15,
Xpl = 0.15,
Src = 0.10,
Mit = 0.10
}
}).ToList();
var results = new ConcurrentDictionary<string, (double Score, string Digest)>();
Parallel.ForEach(policies, policy =>
{
var result = _calculator.Calculate(input, policy);
results[policy.Profile] = (result.Score, result.PolicyDigest);
});
results.Should().HaveCount(50);
// Different policies should produce different digests
results.Values.Select(v => v.Digest).Distinct().Should().HaveCount(50);
}
[Fact]
public void HighConcurrency_NoDeadlocksOrRaceConditions()
{
var inputs = Enumerable.Range(0, 1000).Select(i => new EvidenceWeightedScoreInput
{
FindingId = $"CVE-{i}",
Rch = (i % 100) / 100.0,
Rts = ((i + 10) % 100) / 100.0,
Bkp = ((i + 20) % 100) / 100.0,
Xpl = ((i + 30) % 100) / 100.0,
Src = ((i + 40) % 100) / 100.0,
Mit = ((i + 50) % 100) / 200.0
}).ToList();
var completed = 0;
var exceptions = new ConcurrentBag<Exception>();
Parallel.ForEach(inputs, new ParallelOptions { MaxDegreeOfParallelism = Environment.ProcessorCount * 2 }, input =>
{
try
{
var result = _calculator.Calculate(input, _defaultPolicy);
if (result.Score >= 0 && result.Score <= 100)
{
Interlocked.Increment(ref completed);
}
}
catch (Exception ex)
{
exceptions.Add(ex);
}
});
exceptions.Should().BeEmpty("no exceptions should occur during concurrent calculations");
completed.Should().Be(1000, "all calculations should complete successfully");
}
#endregion
#region Task 54: Benchmark Tests
[Fact]
public void Performance_Calculate10KScores_Under1Second()
{
var inputs = Enumerable.Range(0, 10_000).Select(i => new EvidenceWeightedScoreInput
{
FindingId = $"CVE-2024-{i:D5}",
Rch = (i % 100) / 100.0,
Rts = ((i + 10) % 100) / 100.0,
Bkp = ((i + 20) % 100) / 100.0,
Xpl = ((i + 30) % 100) / 100.0,
Src = ((i + 40) % 100) / 100.0,
Mit = ((i + 50) % 100) / 200.0
}).ToList();
// Warmup
for (int i = 0; i < 100; i++)
{
_calculator.Calculate(inputs[i], _defaultPolicy);
}
var stopwatch = Stopwatch.StartNew();
foreach (var input in inputs)
{
_calculator.Calculate(input, _defaultPolicy);
}
stopwatch.Stop();
stopwatch.ElapsedMilliseconds.Should().BeLessThan(1000,
"calculating 10,000 scores should complete in under 1 second");
}
[Fact]
public void Performance_AverageCalculation_Under100Microseconds()
{
var input = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-12345",
Rch = 0.8, Rts = 0.7, Bkp = 0.5, Xpl = 0.6, Src = 0.5, Mit = 0.1
};
// Warmup
for (int i = 0; i < 1000; i++)
{
_calculator.Calculate(input, _defaultPolicy);
}
const int iterations = 10_000;
var stopwatch = Stopwatch.StartNew();
for (int i = 0; i < iterations; i++)
{
_calculator.Calculate(input, _defaultPolicy);
}
stopwatch.Stop();
var averageMicroseconds = stopwatch.Elapsed.TotalMicroseconds / iterations;
averageMicroseconds.Should().BeLessThan(100,
"average calculation time should be under 100 microseconds");
}
[Fact]
public void Performance_ParallelCalculation_ScalesWithCores()
{
var inputs = Enumerable.Range(0, 10_000).Select(i => new EvidenceWeightedScoreInput
{
FindingId = $"CVE-{i}",
Rch = (i % 100) / 100.0,
Rts = ((i + 10) % 100) / 100.0,
Bkp = ((i + 20) % 100) / 100.0,
Xpl = ((i + 30) % 100) / 100.0,
Src = ((i + 40) % 100) / 100.0,
Mit = ((i + 50) % 100) / 200.0
}).ToList();
// Warmup
Parallel.ForEach(inputs.Take(100), input => _calculator.Calculate(input, _defaultPolicy));
var stopwatch = Stopwatch.StartNew();
Parallel.ForEach(inputs, input => _calculator.Calculate(input, _defaultPolicy));
stopwatch.Stop();
// Parallel should be faster than 1 second (sequential is already under 1s)
stopwatch.ElapsedMilliseconds.Should().BeLessThan(1000,
"parallel calculation of 10,000 scores should be very fast");
}
[Fact]
public void Performance_PolicyDigestComputation_IsCached()
{
var input = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-12345",
Rch = 0.5, Rts = 0.5, Bkp = 0.5, Xpl = 0.5, Src = 0.5, Mit = 0.1
};
// First calculation (may involve digest computation)
var result1 = _calculator.Calculate(input, _defaultPolicy);
// Subsequent calculations should reuse cached digest
const int iterations = 10_000;
var stopwatch = Stopwatch.StartNew();
for (int i = 0; i < iterations; i++)
{
_calculator.Calculate(input, _defaultPolicy);
}
stopwatch.Stop();
// Should be very fast since digest is cached
stopwatch.ElapsedMilliseconds.Should().BeLessThan(500,
"calculations with cached policy digest should be very fast");
}
[Fact]
public void Performance_MemoryAllocation_IsReasonable()
{
var input = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-12345",
Rch = 0.8, Rts = 0.7, Bkp = 0.5, Xpl = 0.6, Src = 0.5, Mit = 0.1
};
// Force GC to get baseline
GC.Collect();
GC.WaitForPendingFinalizers();
GC.Collect();
var beforeMemory = GC.GetTotalMemory(forceFullCollection: true);
const int iterations = 10_000;
for (int i = 0; i < iterations; i++)
{
var result = _calculator.Calculate(input, _defaultPolicy);
// Prevent aggressive optimization from eliding the calculation
if (result.Score < 0) throw new InvalidOperationException();
}
GC.Collect();
GC.WaitForPendingFinalizers();
GC.Collect();
var afterMemory = GC.GetTotalMemory(forceFullCollection: true);
var memoryPerIteration = (afterMemory - beforeMemory) / (double)iterations;
// Each result should allocate roughly the size of the result object
// Should be well under 10KB per calculation
memoryPerIteration.Should().BeLessThan(10_000,
"memory allocation per calculation should be reasonable");
}
#endregion
}

View File

@@ -186,26 +186,28 @@ public class EvidenceWeightedScorePropertyTests
var input = CreateInput(rch, rts, bkp, xpl, src, mit);
var result = Calculator.Calculate(input, Policy);
// Sum contributions: positive dimensions are positive, MIT is stored as negative
var netSum = result.Breakdown.Sum(d => d.Contribution);
// The net sum should roughly equal the score / 100 (before guardrails)
// Allow small rounding tolerance
var expectedScore = Math.Max(0, netSum * 100);
var actualRawScore = result.Caps.OriginalScore;
// Verify each non-subtractive contribution is positive or zero
foreach (var contrib in result.Breakdown.Where(d => !d.IsSubtractive))
{
contrib.Contribution.Should().BeGreaterThanOrEqualTo(0);
contrib.Contribution.Should().BeLessThanOrEqualTo(contrib.Weight * 1.01); // Allow small float tolerance
}
// Verify subtractive contributions are negative or zero
foreach (var contrib in result.Breakdown.Where(d => d.IsSubtractive))
{
contrib.Contribution.Should().BeLessThanOrEqualTo(0);
}
// Net should produce the raw score (approximately)
actualRawScore.Should().BeCloseTo((int)Math.Round(expectedScore), 2);
}
[Fact]

View File

@@ -0,0 +1,317 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using FluentAssertions;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Options;
using StellaOps.Signals.EvidenceWeightedScore;
using Xunit;
namespace StellaOps.Signals.Tests.EvidenceWeightedScore;
/// <summary>
/// Integration tests for the DI registration and full scoring pipeline.
/// </summary>
public class EvidenceWeightedScoringIntegrationTests
{
[Fact]
public void AddEvidenceWeightedScoring_RegistersAllServices()
{
var services = new ServiceCollection();
services.AddEvidenceWeightedScoring();
var provider = services.BuildServiceProvider();
provider.GetService<IEvidenceWeightedScoreCalculator>().Should().NotBeNull();
provider.GetService<IEvidenceWeightPolicyProvider>().Should().NotBeNull();
provider.GetService<IOptions<EvidenceWeightPolicyOptions>>().Should().NotBeNull();
}
[Fact]
public void AddEvidenceWeightedScoring_WithConfiguration_AppliesOptions()
{
var services = new ServiceCollection();
services.AddEvidenceWeightedScoring(opts =>
{
opts.DefaultEnvironment = "test-environment";
opts.ProductionWeights.Rch = 0.40;
});
var provider = services.BuildServiceProvider();
var options = provider.GetRequiredService<IOptions<EvidenceWeightPolicyOptions>>().Value;
options.DefaultEnvironment.Should().Be("test-environment");
options.ProductionWeights.Rch.Should().Be(0.40);
}
[Fact]
public async Task AddEvidenceWeightedScoringWithPolicy_UsesProvidedPolicy()
{
var services = new ServiceCollection();
var customPolicy = new EvidenceWeightPolicy
{
Profile = "custom",
Version = "custom.v1",
Weights = new EvidenceWeights
{
Rch = 0.20,
Rts = 0.20,
Bkp = 0.20,
Xpl = 0.20,
Src = 0.10,
Mit = 0.10
}
};
services.AddEvidenceWeightedScoringWithPolicy(customPolicy);
var provider = services.BuildServiceProvider();
var policyProvider = provider.GetRequiredService<IEvidenceWeightPolicyProvider>();
var policy = await policyProvider.GetDefaultPolicyAsync("custom");
policy.Profile.Should().Be("custom");
policy.Weights.Rch.Should().Be(0.20);
}
[Fact]
public async Task AddEvidenceWeightedScoringWithDefaults_UsesProductionPolicy()
{
var services = new ServiceCollection();
services.AddEvidenceWeightedScoringWithDefaults();
var provider = services.BuildServiceProvider();
var policyProvider = provider.GetRequiredService<IEvidenceWeightPolicyProvider>();
var policy = await policyProvider.GetDefaultPolicyAsync("production");
policy.Profile.Should().Be("production");
policy.Version.Should().Be("ews.v1");
}
[Fact]
public void Calculator_IsSingleton()
{
var services = new ServiceCollection();
services.AddEvidenceWeightedScoring();
var provider = services.BuildServiceProvider();
var calc1 = provider.GetRequiredService<IEvidenceWeightedScoreCalculator>();
var calc2 = provider.GetRequiredService<IEvidenceWeightedScoreCalculator>();
calc1.Should().BeSameAs(calc2);
}
[Fact]
public void PolicyProvider_IsSingleton()
{
var services = new ServiceCollection();
services.AddEvidenceWeightedScoring();
var provider = services.BuildServiceProvider();
var pp1 = provider.GetRequiredService<IEvidenceWeightPolicyProvider>();
var pp2 = provider.GetRequiredService<IEvidenceWeightPolicyProvider>();
pp1.Should().BeSameAs(pp2);
}
[Fact]
public async Task FullPipeline_CalculatesScore()
{
var services = new ServiceCollection();
services.AddEvidenceWeightedScoringWithDefaults();
var provider = services.BuildServiceProvider();
var calculator = provider.GetRequiredService<IEvidenceWeightedScoreCalculator>();
var policyProvider = provider.GetRequiredService<IEvidenceWeightPolicyProvider>();
var input = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-12345",
Rch = 0.8,
Rts = 0.7,
Bkp = 0.5,
Xpl = 0.6,
Src = 0.5,
Mit = 0.1
};
var policy = await policyProvider.GetDefaultPolicyAsync("production");
var result = calculator.Calculate(input, policy);
result.Should().NotBeNull();
result.Score.Should().BeGreaterThan(0);
result.FindingId.Should().Be("CVE-2024-12345");
result.PolicyDigest.Should().NotBeNullOrEmpty();
}
[Fact]
public async Task FullPipeline_WithCustomProvider_Works()
{
var services = new ServiceCollection();
services.AddEvidenceWeightedScoring<TestPolicyProvider>();
var provider = services.BuildServiceProvider();
var calculator = provider.GetRequiredService<IEvidenceWeightedScoreCalculator>();
var policyProvider = provider.GetRequiredService<IEvidenceWeightPolicyProvider>();
var policy = await policyProvider.GetDefaultPolicyAsync("test");
policy.Profile.Should().Be("test");
}
[Fact]
public async Task FullPipeline_WithTenant_ReturnsCorrectPolicy()
{
var services = new ServiceCollection();
var tenant1Policy = new EvidenceWeightPolicy
{
Profile = "production",
Version = "ews.v1",
TenantId = "tenant1",
Weights = EvidenceWeights.Default
};
var tenant2Policy = new EvidenceWeightPolicy
{
Profile = "production",
Version = "ews.v1",
TenantId = "tenant2",
Weights = new EvidenceWeights
{
Rch = 0.40,
Rts = 0.20,
Bkp = 0.10,
Xpl = 0.15,
Src = 0.10,
Mit = 0.05
}
};
var policyProvider = new InMemoryEvidenceWeightPolicyProvider();
policyProvider.SetPolicy(tenant1Policy);
policyProvider.SetPolicy(tenant2Policy);
services.AddSingleton<IEvidenceWeightPolicyProvider>(policyProvider);
services.AddSingleton<IEvidenceWeightedScoreCalculator, EvidenceWeightedScoreCalculator>();
var provider = services.BuildServiceProvider();
var resolvedPolicyProvider = provider.GetRequiredService<IEvidenceWeightPolicyProvider>();
var policy1 = await resolvedPolicyProvider.GetPolicyAsync("tenant1", "production");
var policy2 = await resolvedPolicyProvider.GetPolicyAsync("tenant2", "production");
policy1.TenantId.Should().Be("tenant1");
policy2.TenantId.Should().Be("tenant2");
policy2.Weights.Rch.Should().Be(0.40);
}
[Fact]
public void OptionsMonitor_SupportsHotReload()
{
var services = new ServiceCollection();
services.AddEvidenceWeightedScoring(opts =>
{
opts.DefaultEnvironment = "initial";
});
var provider = services.BuildServiceProvider();
var monitor = provider.GetRequiredService<IOptionsMonitor<EvidenceWeightPolicyOptions>>();
// Initial value
monitor.CurrentValue.DefaultEnvironment.Should().Be("initial");
// Note: Actual hot-reload would require IConfiguration binding,
// but we verify the monitor is wired correctly
monitor.Should().NotBeNull();
}
[Fact]
public void DuplicateRegistration_DoesNotOverwrite()
{
var services = new ServiceCollection();
// Register custom calculator first
services.AddSingleton<IEvidenceWeightedScoreCalculator>(new CustomCalculator());
// Then register scoring services
services.AddEvidenceWeightedScoring();
var provider = services.BuildServiceProvider();
var calculator = provider.GetRequiredService<IEvidenceWeightedScoreCalculator>();
calculator.Should().BeOfType<CustomCalculator>();
}
[Fact]
public void TimeProvider_IsRegistered()
{
var services = new ServiceCollection();
services.AddEvidenceWeightedScoring();
var provider = services.BuildServiceProvider();
var timeProvider = provider.GetService<TimeProvider>();
timeProvider.Should().NotBeNull();
}
[Fact]
public async Task Calculator_WithCustomTimeProvider_UsesIt()
{
var services = new ServiceCollection();
var fixedTime = new DateTimeOffset(2025, 6, 15, 12, 0, 0, TimeSpan.Zero);
var fakeTimeProvider = new FakeTimeProvider(fixedTime);
services.AddSingleton<TimeProvider>(fakeTimeProvider);
services.AddEvidenceWeightedScoringWithDefaults();
var provider = services.BuildServiceProvider();
var calculator = provider.GetRequiredService<IEvidenceWeightedScoreCalculator>();
var policyProvider = provider.GetRequiredService<IEvidenceWeightPolicyProvider>();
var input = new EvidenceWeightedScoreInput
{
FindingId = "test",
Rch = 0.5,
Rts = 0.5,
Bkp = 0.5,
Xpl = 0.5,
Src = 0.5,
Mit = 0.1
};
var policy = await policyProvider.GetDefaultPolicyAsync("production");
var result = calculator.Calculate(input, policy);
result.Should().NotBeNull();
}
// Test helpers
private sealed class TestPolicyProvider : IEvidenceWeightPolicyProvider
{
private readonly EvidenceWeightPolicy _policy = new()
{
Profile = "test",
Version = "test.v1",
Weights = EvidenceWeights.Default
};
public Task<EvidenceWeightPolicy> GetPolicyAsync(string? tenantId, string environment, CancellationToken cancellationToken = default)
=> Task.FromResult(_policy);
public Task<EvidenceWeightPolicy> GetDefaultPolicyAsync(string environment, CancellationToken cancellationToken = default)
=> Task.FromResult(_policy);
public Task<bool> PolicyExistsAsync(string? tenantId, string environment, CancellationToken cancellationToken = default)
=> Task.FromResult(true);
}
private sealed class CustomCalculator : IEvidenceWeightedScoreCalculator
{
public EvidenceWeightedScoreResult Calculate(EvidenceWeightedScoreInput input, EvidenceWeightPolicy policy) =>
throw new NotImplementedException("Custom calculator");
}
private sealed class FakeTimeProvider(DateTimeOffset fixedTime) : TimeProvider
{
public override DateTimeOffset GetUtcNow() => fixedTime;
}
}

View File

@@ -0,0 +1,538 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using FluentAssertions;
using Microsoft.Extensions.Options;
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
using Xunit;
namespace StellaOps.Signals.Tests.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Tests for BackportEvidenceNormalizer.
/// </summary>
public class BackportEvidenceNormalizerTests
{
private readonly BackportNormalizerOptions _defaultOptions = new();
private readonly BackportEvidenceNormalizer _sut;
public BackportEvidenceNormalizerTests()
{
_sut = new BackportEvidenceNormalizer(_defaultOptions);
}
#region Dimension Property Tests
[Fact]
public void Dimension_ReturnsBKP()
{
_sut.Dimension.Should().Be("BKP");
}
#endregion
#region No Evidence Tests
[Fact]
public void Normalize_WithNoEvidence_ReturnsZero()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.None,
Confidence = 0.0,
Status = BackportStatus.Unknown
};
var result = _sut.Normalize(input);
result.Should().Be(0.0);
}
[Fact]
public void Normalize_WithNoEvidence_IgnoresConfidence()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.None,
Confidence = 1.0, // High confidence with no evidence should still stay near zero
Status = BackportStatus.Unknown
};
var result = _sut.Normalize(input);
result.Should().BeLessThan(0.15); // Tier 0 max is 0.10
}
#endregion
#region Tier 1 (Heuristic) Tests
[Fact]
public void Normalize_HeuristicTier_LowConfidence_ReturnsBaseScore()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.Heuristic,
Confidence = 0.0,
Status = BackportStatus.Unknown
};
var result = _sut.Normalize(input);
result.Should().BeApproximately(_defaultOptions.Tier1Range.Min, 0.01);
}
[Fact]
public void Normalize_HeuristicTier_HighConfidence_ReturnsMaxScore()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.Heuristic,
Confidence = 1.0,
Status = BackportStatus.Fixed
};
var result = _sut.Normalize(input);
result.Should().BeApproximately(_defaultOptions.Tier1Range.Max, 0.01);
}
[Fact]
public void Normalize_HeuristicTier_MidConfidence_ReturnsMidScore()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.Heuristic,
Confidence = 0.5,
Status = BackportStatus.Unknown
};
var result = _sut.Normalize(input);
var expected = _defaultOptions.Tier1Range.Min +
(_defaultOptions.Tier1Range.Max - _defaultOptions.Tier1Range.Min) * 0.5;
result.Should().BeApproximately(expected, 0.01);
}
#endregion
#region Tier 2 (PatchSignature) Tests
[Fact]
public void Normalize_PatchSignatureTier_ReturnsHigherThanHeuristic()
{
var heuristicInput = new BackportInput
{
EvidenceTier = BackportEvidenceTier.Heuristic,
Confidence = 0.8,
Status = BackportStatus.Fixed
};
var patchInput = new BackportInput
{
EvidenceTier = BackportEvidenceTier.PatchSignature,
Confidence = 0.8,
Status = BackportStatus.Fixed
};
var heuristicScore = _sut.Normalize(heuristicInput);
var patchScore = _sut.Normalize(patchInput);
patchScore.Should().BeGreaterThan(heuristicScore);
}
[Fact]
public void Normalize_PatchSignatureTier_WithinRange()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.PatchSignature,
Confidence = 0.7,
Status = BackportStatus.Fixed
};
var result = _sut.Normalize(input);
result.Should().BeInRange(_defaultOptions.Tier2Range.Min, _defaultOptions.Tier2Range.Max);
}
#endregion
#region Tier 3 (BinaryDiff) Tests
[Fact]
public void Normalize_BinaryDiffTier_ReturnsHigherThanPatchSignature()
{
var patchInput = new BackportInput
{
EvidenceTier = BackportEvidenceTier.PatchSignature,
Confidence = 0.9,
Status = BackportStatus.Fixed
};
var binaryInput = new BackportInput
{
EvidenceTier = BackportEvidenceTier.BinaryDiff,
Confidence = 0.9,
Status = BackportStatus.Fixed
};
var patchScore = _sut.Normalize(patchInput);
var binaryScore = _sut.Normalize(binaryInput);
binaryScore.Should().BeGreaterThanOrEqualTo(patchScore);
}
#endregion
#region Tier 4 (VendorVex) Tests
[Fact]
public void Normalize_VendorVexTier_HighScore()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.VendorVex,
Confidence = 0.85,
Status = BackportStatus.Fixed
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(0.85);
}
#endregion
#region Tier 5 (SignedProof) Tests
[Fact]
public void Normalize_SignedProofTier_MaxScore()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.SignedProof,
Confidence = 1.0,
Status = BackportStatus.Fixed
};
var result = _sut.Normalize(input);
result.Should().BeApproximately(1.0, 0.01);
}
[Fact]
public void Normalize_SignedProofTier_WithinRange()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.SignedProof,
Confidence = 0.5,
Status = BackportStatus.Fixed
};
var result = _sut.Normalize(input);
result.Should().BeInRange(_defaultOptions.Tier5Range.Min, _defaultOptions.Tier5Range.Max);
}
#endregion
#region Status Tests
[Fact]
public void Normalize_NotAffectedStatus_GetsBonus()
{
var unknownInput = new BackportInput
{
EvidenceTier = BackportEvidenceTier.VendorVex,
Confidence = 0.9,
Status = BackportStatus.Unknown
};
var notAffectedInput = new BackportInput
{
EvidenceTier = BackportEvidenceTier.VendorVex,
Confidence = 0.9,
Status = BackportStatus.NotAffected
};
var unknownScore = _sut.Normalize(unknownInput);
var notAffectedScore = _sut.Normalize(notAffectedInput);
notAffectedScore.Should().BeGreaterThan(unknownScore);
}
[Fact]
public void Normalize_AffectedStatus_ReturnsBaseTierScore()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.Heuristic,
Confidence = 0.5,
Status = BackportStatus.Affected
};
var result = _sut.Normalize(input);
// Should be in heuristic range
result.Should().BeInRange(_defaultOptions.Tier1Range.Min, _defaultOptions.Tier1Range.Max);
}
[Fact]
public void Normalize_UnderInvestigation_ReturnsBaseTierScore()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.PatchSignature,
Confidence = 0.7,
Status = BackportStatus.UnderInvestigation
};
var result = _sut.Normalize(input);
result.Should().BeInRange(_defaultOptions.Tier2Range.Min, _defaultOptions.Tier2Range.Max);
}
#endregion
#region Score Clamping Tests
[Fact]
public void Normalize_NotAffectedBonus_ClampedAtOne()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.SignedProof,
Confidence = 1.0,
Status = BackportStatus.NotAffected
};
var result = _sut.Normalize(input);
result.Should().BeLessThanOrEqualTo(1.0);
}
#endregion
#region NormalizeWithDetails Tests
[Fact]
public void NormalizeWithDetails_ReturnsCorrectDimension()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.PatchSignature,
Confidence = 0.8,
Status = BackportStatus.Fixed
};
var result = _sut.NormalizeWithDetails(input);
result.Dimension.Should().Be("BKP");
}
[Fact]
public void NormalizeWithDetails_ReturnsComponents()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.BinaryDiff,
Confidence = 0.9,
Status = BackportStatus.Fixed
};
var result = _sut.NormalizeWithDetails(input);
result.Components.Should().ContainKey("tier_base");
result.Components.Should().ContainKey("confidence");
result.Components.Should().ContainKey("tier_ordinal");
result.Components["tier_ordinal"].Should().Be((int)BackportEvidenceTier.BinaryDiff);
}
[Fact]
public void NormalizeWithDetails_NotAffected_IncludesStatusBonus()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.VendorVex,
Confidence = 0.85,
Status = BackportStatus.NotAffected
};
var result = _sut.NormalizeWithDetails(input);
result.Components.Should().ContainKey("status_bonus");
result.Components["status_bonus"].Should().Be(0.10);
}
[Fact]
public void NormalizeWithDetails_GeneratesExplanation()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.SignedProof,
Confidence = 0.95,
ProofId = "proof-abc-123",
Status = BackportStatus.Fixed
};
var result = _sut.NormalizeWithDetails(input);
result.Explanation.Should().Contain("Fixed");
result.Explanation.Should().Contain("cryptographically signed proof");
result.Explanation.Should().Contain("high confidence");
result.Explanation.Should().Contain("proof-abc-123");
}
[Fact]
public void NormalizeWithDetails_NoEvidence_ExplainsLack()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.None,
Confidence = 0.0,
Status = BackportStatus.Unknown
};
var result = _sut.NormalizeWithDetails(input);
result.Explanation.Should().Contain("No backport evidence");
}
#endregion
#region Monotonicity Tests
[Fact]
public void Normalize_TiersAreMonotonicallyIncreasing()
{
var tiers = new[]
{
BackportEvidenceTier.None,
BackportEvidenceTier.Heuristic,
BackportEvidenceTier.PatchSignature,
BackportEvidenceTier.BinaryDiff,
BackportEvidenceTier.VendorVex,
BackportEvidenceTier.SignedProof
};
var scores = tiers.Select(tier => _sut.Normalize(new BackportInput
{
EvidenceTier = tier,
Confidence = 0.8,
Status = BackportStatus.Fixed
})).ToList();
// Each tier should produce a score >= previous tier
for (int i = 1; i < scores.Count; i++)
{
scores[i].Should().BeGreaterThanOrEqualTo(scores[i - 1],
$"Tier {tiers[i]} should score >= {tiers[i - 1]}");
}
}
[Fact]
public void Normalize_ConfidenceIsMonotonicallyIncreasing()
{
var confidences = new[] { 0.0, 0.25, 0.5, 0.75, 1.0 };
var scores = confidences.Select(confidence => _sut.Normalize(new BackportInput
{
EvidenceTier = BackportEvidenceTier.PatchSignature,
Confidence = confidence,
Status = BackportStatus.Fixed
})).ToList();
// Higher confidence should produce higher or equal scores
for (int i = 1; i < scores.Count; i++)
{
scores[i].Should().BeGreaterThanOrEqualTo(scores[i - 1],
$"Confidence {confidences[i]} should score >= {confidences[i - 1]}");
}
}
#endregion
#region Null Input Tests
[Fact]
public void Normalize_NullInput_ThrowsArgumentNullException()
{
var act = () => _sut.Normalize(null!);
act.Should().Throw<ArgumentNullException>();
}
[Fact]
public void NormalizeWithDetails_NullInput_ThrowsArgumentNullException()
{
var act = () => _sut.NormalizeWithDetails(null!);
act.Should().Throw<ArgumentNullException>();
}
#endregion
#region DI Integration Tests
[Fact]
public void Constructor_WithIOptionsMonitor_WorksCorrectly()
{
var options = new NormalizerOptions
{
Backport = new BackportNormalizerOptions
{
Tier5Range = (0.95, 1.00)
}
};
var optionsMonitor = new TestOptionsMonitor(options);
var normalizer = new BackportEvidenceNormalizer(optionsMonitor);
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.SignedProof,
Confidence = 0.5,
Status = BackportStatus.Fixed
};
var result = normalizer.Normalize(input);
// Should use custom Tier5Range
result.Should().BeInRange(0.95, 1.00);
}
private sealed class TestOptionsMonitor(NormalizerOptions value) : IOptionsMonitor<NormalizerOptions>
{
public NormalizerOptions CurrentValue => value;
public NormalizerOptions Get(string? name) => value;
public IDisposable? OnChange(Action<NormalizerOptions, string?> listener) => null;
}
#endregion
#region Determinism Tests
[Fact]
public void Normalize_SameInput_ProducesSameOutput()
{
var input = new BackportInput
{
EvidenceTier = BackportEvidenceTier.BinaryDiff,
Confidence = 0.87,
ProofId = "proof-xyz",
Status = BackportStatus.Fixed
};
var results = Enumerable.Range(0, 100)
.Select(_ => _sut.Normalize(input))
.Distinct()
.ToList();
results.Should().ContainSingle("Deterministic normalizer should produce identical results");
}
#endregion
}

View File

@@ -0,0 +1,371 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using FluentAssertions;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Options;
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
using Xunit;
namespace StellaOps.Signals.Tests.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Tests for EvidenceNormalizersServiceCollectionExtensions.
/// </summary>
public class EvidenceNormalizersServiceCollectionExtensionsTests
{
#region AddEvidenceNormalizers (Default) Tests
[Fact]
public void AddEvidenceNormalizers_RegistersAllNormalizers()
{
var services = new ServiceCollection();
services.AddEvidenceNormalizers();
var provider = services.BuildServiceProvider();
// Verify all normalizers are registered
provider.GetService<IEvidenceNormalizer<ReachabilityInput>>().Should().NotBeNull();
provider.GetService<IEvidenceNormalizer<RuntimeInput>>().Should().NotBeNull();
provider.GetService<IEvidenceNormalizer<BackportInput>>().Should().NotBeNull();
provider.GetService<IEvidenceNormalizer<ExploitInput>>().Should().NotBeNull();
provider.GetService<IEvidenceNormalizer<SourceTrustInput>>().Should().NotBeNull();
provider.GetService<IEvidenceNormalizer<MitigationInput>>().Should().NotBeNull();
}
[Fact]
public void AddEvidenceNormalizers_RegistersAggregator()
{
var services = new ServiceCollection();
services.AddEvidenceNormalizers();
var provider = services.BuildServiceProvider();
provider.GetService<INormalizerAggregator>().Should().NotBeNull();
}
[Fact]
public void AddEvidenceNormalizers_RegistersAsCorrectTypes()
{
var services = new ServiceCollection();
services.AddEvidenceNormalizers();
var provider = services.BuildServiceProvider();
provider.GetRequiredService<IEvidenceNormalizer<ReachabilityInput>>()
.Should().BeOfType<ReachabilityNormalizer>();
provider.GetRequiredService<IEvidenceNormalizer<RuntimeInput>>()
.Should().BeOfType<RuntimeSignalNormalizer>();
provider.GetRequiredService<IEvidenceNormalizer<BackportInput>>()
.Should().BeOfType<BackportEvidenceNormalizer>();
provider.GetRequiredService<IEvidenceNormalizer<ExploitInput>>()
.Should().BeOfType<ExploitLikelihoodNormalizer>();
provider.GetRequiredService<IEvidenceNormalizer<SourceTrustInput>>()
.Should().BeOfType<SourceTrustNormalizer>();
provider.GetRequiredService<IEvidenceNormalizer<MitigationInput>>()
.Should().BeOfType<MitigationNormalizer>();
provider.GetRequiredService<INormalizerAggregator>()
.Should().BeOfType<NormalizerAggregator>();
}
[Fact]
public void AddEvidenceNormalizers_NormalizersAreSingletons()
{
var services = new ServiceCollection();
services.AddEvidenceNormalizers();
var provider = services.BuildServiceProvider();
var normalizer1 = provider.GetRequiredService<IEvidenceNormalizer<ReachabilityInput>>();
var normalizer2 = provider.GetRequiredService<IEvidenceNormalizer<ReachabilityInput>>();
normalizer1.Should().BeSameAs(normalizer2);
}
[Fact]
public void AddEvidenceNormalizers_AggregatorIsSingleton()
{
var services = new ServiceCollection();
services.AddEvidenceNormalizers();
var provider = services.BuildServiceProvider();
var aggregator1 = provider.GetRequiredService<INormalizerAggregator>();
var aggregator2 = provider.GetRequiredService<INormalizerAggregator>();
aggregator1.Should().BeSameAs(aggregator2);
}
#endregion
#region AddEvidenceNormalizers (With Configuration) Tests
[Fact]
public void AddEvidenceNormalizers_WithConfiguration_AppliesOptions()
{
var services = new ServiceCollection();
services.AddEvidenceNormalizers(options =>
{
options.Reachability.UnknownScore = 0.65;
});
var provider = services.BuildServiceProvider();
var options = provider.GetRequiredService<IOptions<NormalizerOptions>>();
options.Value.Reachability.UnknownScore.Should().Be(0.65);
}
[Fact]
public void AddEvidenceNormalizers_WithConfiguration_NormalizerUsesOptions()
{
var services = new ServiceCollection();
services.AddEvidenceNormalizers(options =>
{
options.Reachability.UnknownScore = 0.70;
});
var provider = services.BuildServiceProvider();
var aggregator = provider.GetRequiredService<INormalizerAggregator>();
// Aggregate with no reachability evidence should use the unknown score
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-1234@pkg:npm/lodash@4.17.0"
};
var result = aggregator.Aggregate(evidence);
result.Rch.Should().BeApproximately(0.70, 0.01);
}
#endregion
#region AddEvidenceNormalizers (With IConfiguration) Tests
[Fact]
public void AddEvidenceNormalizers_WithIConfiguration_BindsFromSection()
{
var inMemorySettings = new Dictionary<string, string?>
{
{ "EvidenceNormalizers:Reachability:UnknownScore", "0.55" },
{ "EvidenceNormalizers:Exploit:NoEpssScore", "0.25" }
};
var configuration = new ConfigurationBuilder()
.AddInMemoryCollection(inMemorySettings)
.Build();
var services = new ServiceCollection();
services.AddEvidenceNormalizers(configuration);
var provider = services.BuildServiceProvider();
var options = provider.GetRequiredService<IOptions<NormalizerOptions>>();
options.Value.Reachability.UnknownScore.Should().Be(0.55);
options.Value.Exploit.NoEpssScore.Should().Be(0.25);
}
[Fact]
public void AddEvidenceNormalizers_WithIConfiguration_CustomSectionName()
{
var inMemorySettings = new Dictionary<string, string?>
{
{ "CustomSection:Reachability:UnknownScore", "0.42" }
};
var configuration = new ConfigurationBuilder()
.AddInMemoryCollection(inMemorySettings)
.Build();
var services = new ServiceCollection();
services.AddEvidenceNormalizers(configuration, "CustomSection");
var provider = services.BuildServiceProvider();
var options = provider.GetRequiredService<IOptions<NormalizerOptions>>();
options.Value.Reachability.UnknownScore.Should().Be(0.42);
}
#endregion
#region AddNormalizerAggregator Tests
[Fact]
public void AddNormalizerAggregator_RegistersAggregatorOnly()
{
var services = new ServiceCollection();
services.AddNormalizerAggregator();
var provider = services.BuildServiceProvider();
provider.GetService<INormalizerAggregator>().Should().NotBeNull();
// Individual normalizers should not be registered
provider.GetService<IEvidenceNormalizer<ReachabilityInput>>().Should().BeNull();
}
#endregion
#region Double Registration Tests
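// Assumes the extension uses TryAdd-style registrations: calling it twice adds no
// duplicate descriptors, and a registration made beforehand for the same service
// type is left untouched rather than replaced.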
[Fact]
public void AddEvidenceNormalizers_CalledTwice_DoesNotDuplicate()
{
var services = new ServiceCollection();
services.AddEvidenceNormalizers();
services.AddEvidenceNormalizers();
// Should only have one registration per type
var descriptors = services.Where(d =>
d.ServiceType == typeof(IEvidenceNormalizer<ReachabilityInput>));
descriptors.Should().HaveCount(1);
}
[Fact]
public void AddEvidenceNormalizers_DoesNotReplaceExistingRegistrations()
{
var services = new ServiceCollection();
// Register a custom normalizer first
var customNormalizer = new CustomReachabilityNormalizer();
services.AddSingleton<IEvidenceNormalizer<ReachabilityInput>>(customNormalizer);
services.AddEvidenceNormalizers();
var provider = services.BuildServiceProvider();
// Should keep the original registration
var normalizer = provider.GetRequiredService<IEvidenceNormalizer<ReachabilityInput>>();
normalizer.Should().BeSameAs(customNormalizer);
}
private sealed class CustomReachabilityNormalizer : IEvidenceNormalizer<ReachabilityInput>
{
public string Dimension => "RCH";
public double Normalize(ReachabilityInput input) => 0.99;
public NormalizationResult NormalizeWithDetails(ReachabilityInput input) =>
NormalizationResult.Simple(0.99, "RCH", "Custom normalizer");
}
#endregion
#region Null Argument Tests
[Fact]
public void AddEvidenceNormalizers_NullServices_ThrowsArgumentNullException()
{
IServiceCollection? services = null;
var act = () => services!.AddEvidenceNormalizers();
act.Should().Throw<ArgumentNullException>()
.WithParameterName("services");
}
[Fact]
public void AddEvidenceNormalizers_NullConfigure_ThrowsArgumentNullException()
{
var services = new ServiceCollection();
var act = () => services.AddEvidenceNormalizers((Action<NormalizerOptions>)null!);
act.Should().Throw<ArgumentNullException>()
.WithParameterName("configure");
}
[Fact]
public void AddEvidenceNormalizers_NullConfiguration_ThrowsArgumentNullException()
{
var services = new ServiceCollection();
var act = () => services.AddEvidenceNormalizers((IConfiguration)null!);
act.Should().Throw<ArgumentNullException>()
.WithParameterName("configuration");
}
#endregion
#region Integration Tests
[Fact]
public void AddEvidenceNormalizers_FullPipeline_Works()
{
var services = new ServiceCollection();
services.AddEvidenceNormalizers(options =>
{
options.Reachability.UnknownScore = 0.50;
options.Runtime.UnknownScore = 0.0;
});
var provider = services.BuildServiceProvider();
var aggregator = provider.GetRequiredService<INormalizerAggregator>();
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-1234@pkg:npm/lodash@4.17.0",
Reachability = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8
},
Exploit = new ExploitInput
{
EpssScore = 0.65,
EpssPercentile = 90.0,
KevStatus = KevStatus.NotInKev
}
};
var result = aggregator.Aggregate(evidence);
// All dimensions should be in valid range
result.Rch.Should().BeInRange(0.0, 1.0);
result.Rts.Should().BeInRange(0.0, 1.0);
result.Bkp.Should().BeInRange(0.0, 1.0);
result.Xpl.Should().BeInRange(0.0, 1.0);
result.Src.Should().BeInRange(0.0, 1.0);
result.Mit.Should().BeInRange(0.0, 1.0);
}
[Fact]
public void AddEvidenceNormalizers_AggregatorWithDetails_Works()
{
var services = new ServiceCollection();
services.AddEvidenceNormalizers();
var provider = services.BuildServiceProvider();
var aggregator = provider.GetRequiredService<INormalizerAggregator>();
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-1234@pkg:npm/lodash@4.17.0",
Reachability = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8
}
};
var result = aggregator.AggregateWithDetails(evidence);
result.Input.Should().NotBeNull();
result.Details.Should().ContainKey("RCH");
result.Warnings.Should().NotBeEmpty(); // Should warn about missing dimensions
}
#endregion
}


@@ -0,0 +1,523 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using FluentAssertions;
using Microsoft.Extensions.Options;
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
using Xunit;
namespace StellaOps.Signals.Tests.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Tests for ExploitLikelihoodNormalizer.
/// </summary>
public class ExploitLikelihoodNormalizerTests
{
private readonly ExploitNormalizerOptions _defaultOptions = new();
private readonly ExploitLikelihoodNormalizer _sut;
public ExploitLikelihoodNormalizerTests()
{
_sut = new ExploitLikelihoodNormalizer(_defaultOptions);
}
#region Dimension Property Tests
[Fact]
public void Dimension_ReturnsXPL()
{
_sut.Dimension.Should().Be("XPL");
}
#endregion
#region EPSS Percentile Band Tests
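// The theory data below encodes the assumed percentile-to-band mapping:
// top 1% -> [0.90, 1.00], top 5% -> [0.70, 0.89], top 25% -> [0.40, 0.69],
// everything below the 75th percentile -> [0.20, 0.39]. Adjust the expectations
// if the ExploitNormalizerOptions band boundaries change.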
[Theory]
[InlineData(99.5, 0.90, 1.00)] // Top 1%
[InlineData(99.0, 0.90, 1.00)] // Top 1% boundary
[InlineData(97.0, 0.70, 0.89)] // Top 5%
[InlineData(95.0, 0.70, 0.89)] // Top 5% boundary
[InlineData(85.0, 0.40, 0.69)] // Top 25%
[InlineData(75.0, 0.40, 0.69)] // Top 25% boundary
[InlineData(50.0, 0.20, 0.39)] // Below 75%
[InlineData(10.0, 0.20, 0.39)] // Low percentile
public void Normalize_EpssPercentile_MapsToCorrectBand(double percentile, double expectedMin, double expectedMax)
{
var input = new ExploitInput
{
EpssScore = percentile / 100.0, // EPSS score chosen to roughly track the percentile
EpssPercentile = percentile,
KevStatus = KevStatus.NotInKev
};
var result = _sut.Normalize(input);
result.Should().BeInRange(expectedMin, expectedMax,
$"Percentile {percentile} should map to range [{expectedMin}, {expectedMax}]");
}
[Fact]
public void Normalize_Top1Percent_ScoresHighest()
{
var input = new ExploitInput
{
EpssScore = 0.95,
EpssPercentile = 99.5,
KevStatus = KevStatus.NotInKev
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(0.90);
}
[Fact]
public void Normalize_VeryLowPercentile_ScoresLowest()
{
var input = new ExploitInput
{
EpssScore = 0.001,
EpssPercentile = 5.0,
KevStatus = KevStatus.NotInKev
};
var result = _sut.Normalize(input);
result.Should().BeInRange(0.20, 0.35);
}
#endregion
#region KEV Status Tests
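// These tests assume the normalizer floors the XPL score at
// ExploitNormalizerOptions.KevFloor while the CVE is listed in KEV, halves that
// floor once the entry has been removed from KEV, and applies no floor otherwise.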
[Fact]
public void Normalize_InKev_AppliesFloor()
{
var input = new ExploitInput
{
EpssScore = 0.01, // Very low EPSS
EpssPercentile = 10.0, // Would normally score low
KevStatus = KevStatus.InKev
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(_defaultOptions.KevFloor,
"KEV status should enforce minimum floor");
}
[Fact]
public void Normalize_InKev_HighEpss_UsesEpssScore()
{
var input = new ExploitInput
{
EpssScore = 0.95,
EpssPercentile = 99.5, // Top 1%
KevStatus = KevStatus.InKev
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThan(_defaultOptions.KevFloor,
"High EPSS score should exceed KEV floor");
}
[Fact]
public void Normalize_RemovedFromKev_ReducedFloor()
{
var input = new ExploitInput
{
EpssScore = 0.01,
EpssPercentile = 10.0,
KevStatus = KevStatus.RemovedFromKev
};
var result = _sut.Normalize(input);
var expectedReducedFloor = _defaultOptions.KevFloor * 0.5;
result.Should().BeGreaterThanOrEqualTo(expectedReducedFloor);
result.Should().BeLessThan(_defaultOptions.KevFloor);
}
[Fact]
public void Normalize_NotInKev_NoFloor()
{
var input = new ExploitInput
{
EpssScore = 0.001,
EpssPercentile = 1.0,
KevStatus = KevStatus.NotInKev
};
var result = _sut.Normalize(input);
result.Should().BeLessThan(_defaultOptions.KevFloor,
"Without KEV status, low EPSS should score below KEV floor");
}
[Fact]
public void Normalize_InKev_WithDates_ScoresCorrectly()
{
var input = new ExploitInput
{
EpssScore = 0.30,
EpssPercentile = 50.0,
KevStatus = KevStatus.InKev,
KevAddedDate = DateTimeOffset.UtcNow.AddDays(-30),
KevDueDate = DateTimeOffset.UtcNow.AddDays(14)
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(_defaultOptions.KevFloor);
}
#endregion
#region Public Exploit Availability Tests
[Fact]
public void Normalize_PublicExploitAvailable_AddsBonus()
{
var inputWithoutExploit = new ExploitInput
{
EpssScore = 0.50,
EpssPercentile = 80.0,
KevStatus = KevStatus.NotInKev,
PublicExploitAvailable = false
};
var inputWithExploit = new ExploitInput
{
EpssScore = 0.50,
EpssPercentile = 80.0,
KevStatus = KevStatus.NotInKev,
PublicExploitAvailable = true
};
var scoreWithout = _sut.Normalize(inputWithoutExploit);
var scoreWith = _sut.Normalize(inputWithExploit);
scoreWith.Should().BeGreaterThan(scoreWithout);
(scoreWith - scoreWithout).Should().BeApproximately(0.10, 0.01);
}
[Fact]
public void Normalize_PublicExploitWithMaturity_ScoresCorrectly()
{
var input = new ExploitInput
{
EpssScore = 0.70,
EpssPercentile = 95.0,
KevStatus = KevStatus.NotInKev,
PublicExploitAvailable = true,
ExploitMaturity = "weaponized"
};
var result = _sut.Normalize(input);
// The top-5% band plus the public-exploit bonus should land around 0.80;
// the 0.79 lower bound leaves headroom for floating-point precision
result.Should().BeGreaterThanOrEqualTo(0.79);
}
#endregion
#region Score Clamping Tests
[Fact]
public void Normalize_MaxScore_ClampedAtOne()
{
var input = new ExploitInput
{
EpssScore = 0.99,
EpssPercentile = 99.9,
KevStatus = KevStatus.InKev,
PublicExploitAvailable = true,
ExploitMaturity = "weaponized"
};
var result = _sut.Normalize(input);
result.Should().BeLessThanOrEqualTo(1.0);
}
#endregion
#region NormalizeWithDetails Tests
[Fact]
public void NormalizeWithDetails_ReturnsCorrectDimension()
{
var input = new ExploitInput
{
EpssScore = 0.50,
EpssPercentile = 75.0,
KevStatus = KevStatus.NotInKev
};
var result = _sut.NormalizeWithDetails(input);
result.Dimension.Should().Be("XPL");
}
[Fact]
public void NormalizeWithDetails_ReturnsComponents()
{
var input = new ExploitInput
{
EpssScore = 0.45,
EpssPercentile = 97.0,
KevStatus = KevStatus.InKev
};
var result = _sut.NormalizeWithDetails(input);
result.Components.Should().ContainKey("epss_score");
result.Components.Should().ContainKey("epss_percentile");
result.Components.Should().ContainKey("epss_based_score");
result.Components.Should().ContainKey("kev_floor");
result.Components.Should().ContainKey("kev_status");
result.Components["kev_status"].Should().Be((int)KevStatus.InKev);
}
[Fact]
public void NormalizeWithDetails_PublicExploit_IncludesBonus()
{
var input = new ExploitInput
{
EpssScore = 0.50,
EpssPercentile = 80.0,
KevStatus = KevStatus.NotInKev,
PublicExploitAvailable = true
};
var result = _sut.NormalizeWithDetails(input);
result.Components.Should().ContainKey("exploit_bonus");
result.Components["exploit_bonus"].Should().Be(0.10);
}
[Fact]
public void NormalizeWithDetails_GeneratesExplanation_TopPercentile()
{
var input = new ExploitInput
{
EpssScore = 0.92,
EpssPercentile = 99.5,
KevStatus = KevStatus.NotInKev
};
var result = _sut.NormalizeWithDetails(input);
result.Explanation.Should().Contain("Very high EPSS");
result.Explanation.Should().Contain("top 1%");
}
[Fact]
public void NormalizeWithDetails_GeneratesExplanation_Kev()
{
var input = new ExploitInput
{
EpssScore = 0.30,
EpssPercentile = 50.0,
KevStatus = KevStatus.InKev,
KevAddedDate = new DateTimeOffset(2024, 6, 15, 0, 0, 0, TimeSpan.Zero)
};
var result = _sut.NormalizeWithDetails(input);
result.Explanation.Should().Contain("actively exploited (KEV)");
result.Explanation.Should().Contain("2024-06-15");
}
[Fact]
public void NormalizeWithDetails_GeneratesExplanation_PublicExploit()
{
var input = new ExploitInput
{
EpssScore = 0.60,
EpssPercentile = 90.0,
KevStatus = KevStatus.NotInKev,
PublicExploitAvailable = true,
ExploitMaturity = "functional"
};
var result = _sut.NormalizeWithDetails(input);
result.Explanation.Should().Contain("public exploit available");
result.Explanation.Should().Contain("functional");
}
#endregion
#region Monotonicity Tests
[Fact]
public void Normalize_PercentileIsMonotonicallyIncreasing()
{
var percentiles = new[] { 10.0, 30.0, 50.0, 70.0, 85.0, 95.0, 99.0 };
var scores = percentiles.Select(p => _sut.Normalize(new ExploitInput
{
EpssScore = p / 100.0,
EpssPercentile = p,
KevStatus = KevStatus.NotInKev
})).ToList();
// Higher percentiles should produce higher or equal scores
for (int i = 1; i < scores.Count; i++)
{
scores[i].Should().BeGreaterThanOrEqualTo(scores[i - 1],
$"Percentile {percentiles[i]} should score >= {percentiles[i - 1]}");
}
}
#endregion
#region Null Input Tests
[Fact]
public void Normalize_NullInput_ThrowsArgumentNullException()
{
var act = () => _sut.Normalize(null!);
act.Should().Throw<ArgumentNullException>();
}
[Fact]
public void NormalizeWithDetails_NullInput_ThrowsArgumentNullException()
{
var act = () => _sut.NormalizeWithDetails(null!);
act.Should().Throw<ArgumentNullException>();
}
#endregion
#region DI Integration Tests
[Fact]
public void Constructor_WithIOptionsMonitor_WorksCorrectly()
{
var options = new NormalizerOptions
{
Exploit = new ExploitNormalizerOptions
{
KevFloor = 0.50 // Custom floor
}
};
var optionsMonitor = new TestOptionsMonitor(options);
var normalizer = new ExploitLikelihoodNormalizer(optionsMonitor);
var input = new ExploitInput
{
EpssScore = 0.01,
EpssPercentile = 5.0,
KevStatus = KevStatus.InKev
};
var result = normalizer.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(0.50); // Custom floor
}
private sealed class TestOptionsMonitor(NormalizerOptions value) : IOptionsMonitor<NormalizerOptions>
{
public NormalizerOptions CurrentValue => value;
public NormalizerOptions Get(string? name) => value;
public IDisposable? OnChange(Action<NormalizerOptions, string?> listener) => null;
}
#endregion
#region Determinism Tests
[Fact]
public void Normalize_SameInput_ProducesSameOutput()
{
var input = new ExploitInput
{
EpssScore = 0.67,
EpssPercentile = 93.5,
KevStatus = KevStatus.InKev,
PublicExploitAvailable = true
};
var results = Enumerable.Range(0, 100)
.Select(_ => _sut.Normalize(input))
.Distinct()
.ToList();
results.Should().ContainSingle("Deterministic normalizer should produce identical results");
}
#endregion
#region Edge Case Tests
[Fact]
public void Normalize_ZeroPercentile_HandlesCorrectly()
{
var input = new ExploitInput
{
EpssScore = 0.0001,
EpssPercentile = 0.0,
KevStatus = KevStatus.NotInKev
};
var result = _sut.Normalize(input);
result.Should().BeInRange(0.20, 0.40);
}
[Fact]
public void Normalize_ExactlyOnBoundary_HandlesCorrectly()
{
// Test exactly on 75th percentile boundary
var input = new ExploitInput
{
EpssScore = 0.30,
EpssPercentile = 75.0,
KevStatus = KevStatus.NotInKev
};
var result = _sut.Normalize(input);
result.Should().BeInRange(0.40, 0.70);
}
[Fact]
public void Normalize_ExactlyOn95Boundary_HandlesCorrectly()
{
var input = new ExploitInput
{
EpssScore = 0.60,
EpssPercentile = 95.0,
KevStatus = KevStatus.NotInKev
};
var result = _sut.Normalize(input);
result.Should().BeInRange(0.70, 0.90);
}
[Fact]
public void Normalize_ExactlyOn99Boundary_HandlesCorrectly()
{
var input = new ExploitInput
{
EpssScore = 0.85,
EpssPercentile = 99.0,
KevStatus = KevStatus.NotInKev
};
var result = _sut.Normalize(input);
result.Should().BeInRange(0.90, 1.00);
}
#endregion
}


@@ -0,0 +1,528 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using FluentAssertions;
using Microsoft.Extensions.Options;
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
using Xunit;
namespace StellaOps.Signals.Tests.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Tests for MitigationNormalizer.
/// </summary>
public class MitigationNormalizerTests
{
private readonly MitigationNormalizerOptions _defaultOptions = new();
private readonly MitigationNormalizer _sut;
public MitigationNormalizerTests()
{
_sut = new MitigationNormalizer(_defaultOptions);
}
#region Dimension Property Tests
[Fact]
public void Dimension_ReturnsMIT()
{
_sut.Dimension.Should().Be("MIT");
}
#endregion
#region No Mitigation Tests
[Fact]
public void Normalize_NoMitigations_ReturnsZero()
{
var input = new MitigationInput
{
ActiveMitigations = [],
CombinedEffectiveness = 0.0
};
var result = _sut.Normalize(input);
result.Should().Be(0.0);
}
[Fact]
public void Normalize_EmptyMitigationsList_ReturnsZero()
{
var input = new MitigationInput
{
ActiveMitigations = Array.Empty<ActiveMitigation>(),
CombinedEffectiveness = 0.0
};
var result = _sut.Normalize(input);
result.Should().Be(0.0);
}
#endregion
#region Single Mitigation Tests
[Fact]
public void Normalize_SingleFeatureFlag_ReturnsEffectiveness()
{
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.FeatureFlag, Effectiveness = 0.30 }
],
CombinedEffectiveness = 0.30
};
var result = _sut.Normalize(input);
result.Should().BeApproximately(0.30, 0.01);
}
[Fact]
public void Normalize_SingleAuthRequired_ReturnsEffectiveness()
{
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.AuthRequired, Effectiveness = 0.15 }
],
CombinedEffectiveness = 0.15
};
var result = _sut.Normalize(input);
result.Should().BeApproximately(0.15, 0.01);
}
[Fact]
public void Normalize_SingleSecurityPolicy_ReturnsEffectiveness()
{
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.SecurityPolicy, Effectiveness = 0.20 }
],
CombinedEffectiveness = 0.20
};
var result = _sut.Normalize(input);
result.Should().BeApproximately(0.20, 0.01);
}
#endregion
#region Multiple Mitigations Tests
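// Assumed aggregation for multiple mitigations: the individual Effectiveness
// values are simply summed (0.25 + 0.15 = 0.40 below) before the
// MaxTotalMitigation cap is applied.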
[Fact]
public void Normalize_MultipleMitigations_SumsEffectiveness()
{
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.FeatureFlag, Effectiveness = 0.25 },
new ActiveMitigation { Type = MitigationType.AuthRequired, Effectiveness = 0.15 }
],
CombinedEffectiveness = 0.40
};
var result = _sut.Normalize(input);
result.Should().BeApproximately(0.40, 0.01);
}
[Fact]
public void Normalize_ManyMitigations_SumsAll()
{
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.FeatureFlag, Effectiveness = 0.20 },
new ActiveMitigation { Type = MitigationType.AuthRequired, Effectiveness = 0.10 },
new ActiveMitigation { Type = MitigationType.SecurityPolicy, Effectiveness = 0.15 },
new ActiveMitigation { Type = MitigationType.Isolation, Effectiveness = 0.10 }
],
CombinedEffectiveness = 0.55
};
var result = _sut.Normalize(input);
result.Should().BeApproximately(0.55, 0.01);
}
#endregion
#region Capping Tests
[Fact]
public void Normalize_ExcessiveMitigations_CappedAtOne()
{
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.FeatureFlag, Effectiveness = 0.40 },
new ActiveMitigation { Type = MitigationType.AuthRequired, Effectiveness = 0.30 },
new ActiveMitigation { Type = MitigationType.SecurityPolicy, Effectiveness = 0.25 },
new ActiveMitigation { Type = MitigationType.Isolation, Effectiveness = 0.20 }
],
CombinedEffectiveness = 1.15 // Exceeds 1.0
};
var result = _sut.Normalize(input);
result.Should().BeLessThanOrEqualTo(1.0);
}
[Fact]
public void Normalize_ComponentRemoval_HighEffectiveness()
{
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.ComponentRemoval, Effectiveness = 0.95 }
],
CombinedEffectiveness = 0.95
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(0.95);
}
#endregion
#region Verification Bonus Tests
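// RuntimeVerified is assumed to add MitigationNormalizerOptions.VerificationBonus
// on top of the combined effectiveness; a Verified flag on an individual mitigation
// is expected to yield only a smaller, partial uplift.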
[Fact]
public void Normalize_RuntimeVerified_GetsBonus()
{
var inputUnverified = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.SecurityPolicy, Effectiveness = 0.20 }
],
CombinedEffectiveness = 0.20,
RuntimeVerified = false
};
var inputVerified = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.SecurityPolicy, Effectiveness = 0.20 }
],
CombinedEffectiveness = 0.20,
RuntimeVerified = true
};
var scoreUnverified = _sut.Normalize(inputUnverified);
var scoreVerified = _sut.Normalize(inputVerified);
scoreVerified.Should().BeGreaterThan(scoreUnverified);
(scoreVerified - scoreUnverified).Should().BeApproximately(_defaultOptions.VerificationBonus, 0.01);
}
[Fact]
public void Normalize_IndividualMitigationVerified_GetsPartialBonus()
{
var inputUnverified = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.SecurityPolicy, Effectiveness = 0.20, Verified = false }
],
CombinedEffectiveness = 0.0, // Force calculation from mitigations
RuntimeVerified = false
};
var inputVerified = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.SecurityPolicy, Effectiveness = 0.20, Verified = true }
],
CombinedEffectiveness = 0.0, // Force calculation from mitigations
RuntimeVerified = false
};
var scoreUnverified = _sut.Normalize(inputUnverified);
var scoreVerified = _sut.Normalize(inputVerified);
scoreVerified.Should().BeGreaterThan(scoreUnverified);
}
#endregion
#region CombinedEffectiveness vs ActiveMitigations Tests
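// Assumed precedence: a non-zero CombinedEffectiveness is used as provided,
// while a value of 0.0 forces the normalizer to recompute the total from
// ActiveMitigations.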
[Fact]
public void Normalize_CombinedEffectivenessProvided_UsesCombined()
{
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.SecurityPolicy, Effectiveness = 0.20 }
],
CombinedEffectiveness = 0.50 // Higher than individual
};
var result = _sut.Normalize(input);
// Should use pre-computed combined effectiveness
result.Should().BeApproximately(0.50, 0.01);
}
[Fact]
public void Normalize_ZeroCombined_CalculatesFromMitigations()
{
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.FeatureFlag, Effectiveness = 0.25 },
new ActiveMitigation { Type = MitigationType.AuthRequired, Effectiveness = 0.15 }
],
CombinedEffectiveness = 0.0 // Zero forces calculation
};
var result = _sut.Normalize(input);
result.Should().BeApproximately(0.40, 0.01);
}
#endregion
#region NormalizeWithDetails Tests
[Fact]
public void NormalizeWithDetails_ReturnsCorrectDimension()
{
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.FeatureFlag, Effectiveness = 0.30 }
],
CombinedEffectiveness = 0.30
};
var result = _sut.NormalizeWithDetails(input);
result.Dimension.Should().Be("MIT");
}
[Fact]
public void NormalizeWithDetails_ReturnsComponents()
{
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.SecurityPolicy, Effectiveness = 0.20 }
],
CombinedEffectiveness = 0.20,
RuntimeVerified = true
};
var result = _sut.NormalizeWithDetails(input);
result.Components.Should().ContainKey("mitigation_count");
result.Components.Should().ContainKey("combined_effectiveness");
result.Components.Should().ContainKey("runtime_verified");
result.Components["mitigation_count"].Should().Be(1);
result.Components["runtime_verified"].Should().Be(1.0);
}
[Fact]
public void NormalizeWithDetails_IncludesIndividualMitigations()
{
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.FeatureFlag, Effectiveness = 0.30 },
new ActiveMitigation { Type = MitigationType.AuthRequired, Effectiveness = 0.15 }
],
CombinedEffectiveness = 0.45
};
var result = _sut.NormalizeWithDetails(input);
result.Components.Should().ContainKey("mitigation_0_type");
result.Components.Should().ContainKey("mitigation_0_effectiveness");
result.Components.Should().ContainKey("mitigation_1_type");
result.Components.Should().ContainKey("mitigation_1_effectiveness");
}
[Fact]
public void NormalizeWithDetails_GeneratesExplanation()
{
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation
{
Type = MitigationType.SecurityPolicy,
Name = "seccomp-strict",
Effectiveness = 0.20,
Verified = true
}
],
CombinedEffectiveness = 0.20,
RuntimeVerified = true,
AssessmentSource = "runtime-scanner"
};
var result = _sut.NormalizeWithDetails(input);
result.Explanation.Should().Contain("seccomp-strict");
result.Explanation.Should().Contain("runtime verified");
result.Explanation.Should().Contain("runtime-scanner");
}
[Fact]
public void NormalizeWithDetails_NoMitigations_ExplainsLack()
{
var input = new MitigationInput
{
ActiveMitigations = [],
CombinedEffectiveness = 0.0
};
var result = _sut.NormalizeWithDetails(input);
result.Explanation.Should().Contain("No active mitigations");
}
#endregion
#region Null Input Tests
[Fact]
public void Normalize_NullInput_ThrowsArgumentNullException()
{
var act = () => _sut.Normalize(null!);
act.Should().Throw<ArgumentNullException>();
}
[Fact]
public void NormalizeWithDetails_NullInput_ThrowsArgumentNullException()
{
var act = () => _sut.NormalizeWithDetails(null!);
act.Should().Throw<ArgumentNullException>();
}
#endregion
#region DI Integration Tests
[Fact]
public void Constructor_WithIOptionsMonitor_WorksCorrectly()
{
var options = new NormalizerOptions
{
Mitigation = new MitigationNormalizerOptions
{
MaxTotalMitigation = 0.80 // Custom cap
}
};
var optionsMonitor = new TestOptionsMonitor(options);
var normalizer = new MitigationNormalizer(optionsMonitor);
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.FeatureFlag, Effectiveness = 0.50 },
new ActiveMitigation { Type = MitigationType.AuthRequired, Effectiveness = 0.50 }
],
CombinedEffectiveness = 1.0 // Would be 1.0 without cap
};
var result = normalizer.Normalize(input);
result.Should().BeLessThanOrEqualTo(0.85); // Custom cap + possible bonus
}
private sealed class TestOptionsMonitor(NormalizerOptions value) : IOptionsMonitor<NormalizerOptions>
{
public NormalizerOptions CurrentValue => value;
public NormalizerOptions Get(string? name) => value;
public IDisposable? OnChange(Action<NormalizerOptions, string?> listener) => null;
}
#endregion
#region Determinism Tests
[Fact]
public void Normalize_SameInput_ProducesSameOutput()
{
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.SecurityPolicy, Effectiveness = 0.22 },
new ActiveMitigation { Type = MitigationType.NetworkControl, Effectiveness = 0.13 }
],
CombinedEffectiveness = 0.35,
RuntimeVerified = true
};
var results = Enumerable.Range(0, 100)
.Select(_ => _sut.Normalize(input))
.Distinct()
.ToList();
results.Should().ContainSingle("Deterministic normalizer should produce identical results");
}
#endregion
#region Mitigation Type Tests
[Theory]
[InlineData(MitigationType.Unknown)]
[InlineData(MitigationType.NetworkControl)]
[InlineData(MitigationType.FeatureFlag)]
[InlineData(MitigationType.SecurityPolicy)]
[InlineData(MitigationType.Isolation)]
[InlineData(MitigationType.InputValidation)]
[InlineData(MitigationType.AuthRequired)]
[InlineData(MitigationType.VirtualPatch)]
[InlineData(MitigationType.ComponentRemoval)]
public void Normalize_AllMitigationTypes_HandleCorrectly(MitigationType type)
{
var input = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = type, Effectiveness = 0.20 }
],
CombinedEffectiveness = 0.20
};
var result = _sut.Normalize(input);
result.Should().BeInRange(0.0, 1.0);
}
#endregion
}


@@ -0,0 +1,452 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using FluentAssertions;
using Microsoft.Extensions.Options;
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
using Xunit;
namespace StellaOps.Signals.Tests.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Tests for NormalizerAggregator.
/// </summary>
public class NormalizerAggregatorTests
{
private readonly NormalizerAggregator _sut;
public NormalizerAggregatorTests()
{
_sut = new NormalizerAggregator();
}
#region Basic Aggregation Tests
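// The aggregator runs one normalizer per dimension (RCH, RTS, BKP, XPL, SRC, MIT)
// and is assumed to fall back to the configured NormalizerOptions.Defaults for any
// dimension without evidence, so every score stays within [0, 1].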
[Fact]
public void Aggregate_EmptyEvidence_ReturnsDefaults()
{
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-1234@pkg:npm/lodash@4.17.0"
};
var result = _sut.Aggregate(evidence);
result.FindingId.Should().Be(evidence.FindingId);
result.Rch.Should().BeInRange(0.0, 1.0);
result.Rts.Should().BeInRange(0.0, 1.0);
result.Bkp.Should().BeInRange(0.0, 1.0);
result.Xpl.Should().BeInRange(0.0, 1.0);
result.Src.Should().BeInRange(0.0, 1.0);
result.Mit.Should().BeInRange(0.0, 1.0);
}
[Fact]
public void Aggregate_WithAllEvidence_NormalizesAll()
{
var evidence = CreateFullEvidence();
var result = _sut.Aggregate(evidence);
result.FindingId.Should().Be(evidence.FindingId);
result.Rch.Should().BeGreaterThan(0.0);
result.Rts.Should().BeGreaterThan(0.0);
result.Bkp.Should().BeGreaterThan(0.0);
result.Xpl.Should().BeGreaterThan(0.0);
result.Src.Should().BeGreaterThan(0.0);
result.Mit.Should().BeGreaterThan(0.0);
}
[Fact]
public void Aggregate_PreservesDetailedInputs()
{
var evidence = CreateFullEvidence();
var result = _sut.Aggregate(evidence);
result.ReachabilityDetails.Should().BeSameAs(evidence.Reachability);
result.RuntimeDetails.Should().BeSameAs(evidence.Runtime);
result.BackportDetails.Should().BeSameAs(evidence.Backport);
result.ExploitDetails.Should().BeSameAs(evidence.Exploit);
result.SourceTrustDetails.Should().BeSameAs(evidence.SourceTrust);
result.MitigationDetails.Should().BeSameAs(evidence.Mitigations);
}
#endregion
#region Partial Evidence Tests
[Fact]
public void Aggregate_OnlyReachability_UsesDefaultsForOthers()
{
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-1234@pkg:npm/lodash@4.17.0",
Reachability = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8
}
};
var result = _sut.Aggregate(evidence);
result.Rch.Should().BeGreaterThan(0.5); // High reachability
result.Rts.Should().Be(0.0); // Default for no runtime
result.Bkp.Should().BeApproximately(0.0, 0.05); // Default for no backport
result.Mit.Should().Be(0.0); // Default for no mitigation
}
[Fact]
public void Aggregate_OnlyExploit_UsesDefaultsForOthers()
{
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-5678@pkg:pypi/requests@2.28.0",
Exploit = new ExploitInput
{
EpssScore = 0.85,
EpssPercentile = 97.0,
KevStatus = KevStatus.InKev
}
};
var result = _sut.Aggregate(evidence);
result.Xpl.Should().BeGreaterThan(0.7); // High exploit risk
}
[Fact]
public void Aggregate_OnlyMitigation_UsesDefaultsForOthers()
{
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-9012@pkg:maven/commons-io@2.11.0",
Mitigations = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.FeatureFlag, Effectiveness = 0.30 },
new ActiveMitigation { Type = MitigationType.AuthRequired, Effectiveness = 0.15 }
],
CombinedEffectiveness = 0.45
}
};
var result = _sut.Aggregate(evidence);
result.Mit.Should().BeGreaterThan(0.4);
}
#endregion
#region AggregateWithDetails Tests
[Fact]
public void AggregateWithDetails_ReturnsAllDimensions()
{
var evidence = CreateFullEvidence();
var result = _sut.AggregateWithDetails(evidence);
result.Input.Should().NotBeNull();
result.Details.Should().ContainKey("RCH");
result.Details.Should().ContainKey("RTS");
result.Details.Should().ContainKey("BKP");
result.Details.Should().ContainKey("XPL");
result.Details.Should().ContainKey("SRC");
result.Details.Should().ContainKey("MIT");
}
[Fact]
public void AggregateWithDetails_IncludesExplanations()
{
var evidence = CreateFullEvidence();
var result = _sut.AggregateWithDetails(evidence);
foreach (var (_, details) in result.Details)
{
details.Explanation.Should().NotBeNullOrEmpty();
details.Score.Should().BeInRange(0.0, 1.0);
details.Dimension.Should().NotBeNullOrEmpty();
}
}
[Fact]
public void AggregateWithDetails_EmptyEvidence_GeneratesWarnings()
{
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-1234@pkg:npm/lodash@4.17.0"
};
var result = _sut.AggregateWithDetails(evidence);
result.Warnings.Should().NotBeEmpty();
result.Warnings.Should().Contain(w => w.Contains("reachability"));
result.Warnings.Should().Contain(w => w.Contains("runtime"));
}
[Fact]
public void AggregateWithDetails_PartialEvidence_WarnsAboutMissing()
{
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-1234@pkg:npm/lodash@4.17.0",
Reachability = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8
}
// Other dimensions missing
};
var result = _sut.AggregateWithDetails(evidence);
result.Details.Should().ContainKey("RCH");
result.Details.Should().NotContainKey("RTS"); // No runtime input
result.Warnings.Should().Contain(w => w.Contains("runtime"));
}
[Fact]
public void AggregateWithDetails_IncludesComponents()
{
var evidence = CreateFullEvidence();
var result = _sut.AggregateWithDetails(evidence);
foreach (var (_, details) in result.Details)
{
details.Components.Should().NotBeEmpty();
}
}
#endregion
#region AggregateAsync Tests
[Fact]
public async Task AggregateAsync_ReturnsValidInput()
{
var findingId = "CVE-2024-1234@pkg:npm/lodash@4.17.0";
var result = await _sut.AggregateAsync(findingId);
result.FindingId.Should().Be(findingId);
result.Rch.Should().BeInRange(0.0, 1.0);
}
[Fact]
public async Task AggregateAsync_NullFindingId_ThrowsArgumentException()
{
var act = () => _sut.AggregateAsync(null!);
await act.Should().ThrowAsync<ArgumentException>();
}
[Fact]
public async Task AggregateAsync_EmptyFindingId_ThrowsArgumentException()
{
var act = () => _sut.AggregateAsync(string.Empty);
await act.Should().ThrowAsync<ArgumentException>();
}
#endregion
#region Null Input Tests
[Fact]
public void Aggregate_NullEvidence_ThrowsArgumentNullException()
{
var act = () => _sut.Aggregate(null!);
act.Should().Throw<ArgumentNullException>();
}
[Fact]
public void AggregateWithDetails_NullEvidence_ThrowsArgumentNullException()
{
var act = () => _sut.AggregateWithDetails(null!);
act.Should().Throw<ArgumentNullException>();
}
#endregion
#region DI Integration Tests
[Fact]
public void Constructor_WithIOptionsMonitor_WorksCorrectly()
{
var options = new NormalizerOptions
{
Reachability = new ReachabilityNormalizerOptions
{
UnknownScore = 0.60
}
};
var optionsMonitor = new TestOptionsMonitor(options);
var aggregator = new NormalizerAggregator(optionsMonitor);
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-1234@pkg:npm/lodash@4.17.0"
};
var result = aggregator.Aggregate(evidence);
// Should use custom unknown score
result.Rch.Should().BeApproximately(0.60, 0.01);
}
private sealed class TestOptionsMonitor(NormalizerOptions value) : IOptionsMonitor<NormalizerOptions>
{
public NormalizerOptions CurrentValue => value;
public NormalizerOptions Get(string? name) => value;
public IDisposable? OnChange(Action<NormalizerOptions, string?> listener) => null;
}
#endregion
#region Determinism Tests
[Fact]
public void Aggregate_SameInput_ProducesSameOutput()
{
var evidence = CreateFullEvidence();
var results = Enumerable.Range(0, 10)
.Select(_ => _sut.Aggregate(evidence))
.ToList();
var firstResult = results[0];
foreach (var result in results.Skip(1))
{
result.Rch.Should().Be(firstResult.Rch);
result.Rts.Should().Be(firstResult.Rts);
result.Bkp.Should().Be(firstResult.Bkp);
result.Xpl.Should().Be(firstResult.Xpl);
result.Src.Should().Be(firstResult.Src);
result.Mit.Should().Be(firstResult.Mit);
}
}
#endregion
#region FromScoreInput Conversion Tests
[Fact]
public void FindingEvidence_FromScoreInput_ExtractsAllDetails()
{
var scoreInput = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-1234@pkg:npm/lodash@4.17.0",
Rch = 0.75,
Rts = 0.60,
Bkp = 0.80,
Xpl = 0.40,
Src = 0.85,
Mit = 0.30,
ReachabilityDetails = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8
},
RuntimeDetails = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 5,
RecencyFactor = 0.7
}
};
var evidence = FindingEvidence.FromScoreInput(scoreInput);
evidence.FindingId.Should().Be(scoreInput.FindingId);
evidence.Reachability.Should().BeSameAs(scoreInput.ReachabilityDetails);
evidence.Runtime.Should().BeSameAs(scoreInput.RuntimeDetails);
}
[Fact]
public void Aggregate_RoundTrip_MaintainsConsistency()
{
var evidence = CreateFullEvidence();
// Aggregate to score input
var scoreInput = _sut.Aggregate(evidence);
// Convert back to evidence
var roundTripEvidence = FindingEvidence.FromScoreInput(scoreInput);
// Re-aggregate
var result = _sut.Aggregate(roundTripEvidence);
// Scores should match
result.Rch.Should().Be(scoreInput.Rch);
result.Rts.Should().Be(scoreInput.Rts);
result.Bkp.Should().Be(scoreInput.Bkp);
result.Xpl.Should().Be(scoreInput.Xpl);
result.Src.Should().Be(scoreInput.Src);
result.Mit.Should().Be(scoreInput.Mit);
}
#endregion
#region Helper Methods
private static FindingEvidence CreateFullEvidence() => new()
{
FindingId = "CVE-2024-1234@pkg:npm/lodash@4.17.0",
Reachability = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8,
HopCount = 2,
HasTaintTracking = true
},
Runtime = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 8,
RecencyFactor = 0.75,
DirectPathObserved = true
},
Backport = new BackportInput
{
EvidenceTier = BackportEvidenceTier.PatchSignature,
Confidence = 0.85,
Status = BackportStatus.Fixed
},
Exploit = new ExploitInput
{
EpssScore = 0.65,
EpssPercentile = 90.0,
KevStatus = KevStatus.NotInKev,
PublicExploitAvailable = true
},
SourceTrust = new SourceTrustInput
{
IssuerType = IssuerType.Distribution,
IssuerId = "debian-security",
ProvenanceTrust = 0.85,
CoverageCompleteness = 0.80,
Replayability = 0.75,
IsCryptographicallyAttested = true
},
Mitigations = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.SecurityPolicy, Effectiveness = 0.20 }
],
CombinedEffectiveness = 0.20,
RuntimeVerified = true
}
};
#endregion
}


@@ -0,0 +1,466 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using FluentAssertions;
using Microsoft.Extensions.DependencyInjection;
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
using Xunit;
namespace StellaOps.Signals.Tests.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Cross-module integration tests for evidence normalization pipeline.
/// Tests the full flow from raw evidence through normalizers to score input.
/// </summary>
public class NormalizerIntegrationTests
{
#region Backport Evidence BKP Score Tests
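// Expected BKP ranges loosely track the assumed tier bands configured in
// NormalizerOptions (e.g. PatchSignature around 0.70-0.85, no evidence below 0.10),
// with confidence and fix status nudging the result up or down.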
[Fact]
public void BackportEvidence_PatchSignatureFixed_ProducesHighBkpScore()
{
// Arrange: High-quality backport evidence (patch signature + fixed)
var aggregator = CreateAggregator();
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-1234@pkg:deb/debian/openssl@1.1.1",
Backport = new BackportInput
{
EvidenceTier = BackportEvidenceTier.PatchSignature,
Confidence = 0.90,
Status = BackportStatus.Fixed,
EvidenceSource = "binary-diff"
}
};
// Act
var result = aggregator.Aggregate(evidence);
// Assert: High BKP score
result.Bkp.Should().BeGreaterThan(0.75, "Patch signature with fixed status should produce high BKP");
result.BackportDetails.Should().BeSameAs(evidence.Backport);
}
[Fact]
public void BackportEvidence_HeuristicNotAffected_ProducesModerateBkpScore()
{
var aggregator = CreateAggregator();
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-5678@pkg:rpm/redhat/kernel@5.14.0",
Backport = new BackportInput
{
EvidenceTier = BackportEvidenceTier.Heuristic,
Confidence = 0.70,
Status = BackportStatus.NotAffected,
EvidenceSource = "manual-review"
}
};
var result = aggregator.Aggregate(evidence);
result.Bkp.Should().BeInRange(0.20, 0.70, "Heuristic tier with not_affected should produce moderate BKP");
}
[Fact]
public void BackportEvidence_NoEvidence_ProducesLowBkpScore()
{
var aggregator = CreateAggregator();
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-9999@pkg:npm/unknown@1.0.0"
// No backport evidence
};
var result = aggregator.Aggregate(evidence);
result.Bkp.Should().BeLessThan(0.10, "No backport evidence should produce low BKP");
}
#endregion
#region EPSS + KEV XPL Score Tests
[Fact]
public void ExploitEvidence_HighEpssAndKev_ProducesHighXplScore()
{
var aggregator = CreateAggregator();
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-1234@pkg:npm/lodash@4.17.20",
Exploit = new ExploitInput
{
EpssScore = 0.85,
EpssPercentile = 97.0,
KevStatus = KevStatus.InKev,
KevAddedDate = DateTimeOffset.UtcNow.AddDays(-30),
PublicExploitAvailable = true,
ExploitMaturity = "weaponized"
}
};
var result = aggregator.Aggregate(evidence);
result.Xpl.Should().BeGreaterThan(0.80, "High EPSS + KEV should produce very high XPL");
}
[Fact]
public void ExploitEvidence_MediumEpssNoKev_ProducesMediumXplScore()
{
var aggregator = CreateAggregator();
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-5678@pkg:pypi/requests@2.28.0",
Exploit = new ExploitInput
{
EpssScore = 0.25,
EpssPercentile = 75.0,
KevStatus = KevStatus.NotInKev
}
};
var result = aggregator.Aggregate(evidence);
result.Xpl.Should().BeInRange(0.25, 0.50, "Medium EPSS without KEV should produce medium XPL");
}
[Fact]
public void ExploitEvidence_LowEpss_ProducesLowXplScore()
{
var aggregator = CreateAggregator();
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-9012@pkg:maven/commons-io@2.11.0",
Exploit = new ExploitInput
{
EpssScore = 0.001,
EpssPercentile = 5.0,
KevStatus = KevStatus.NotInKev
}
};
var result = aggregator.Aggregate(evidence);
result.Xpl.Should().BeLessThan(0.30, "Low EPSS should produce low XPL");
}
#endregion
#region Full Evidence Pipeline Score Input Tests
[Fact]
public void FullEvidence_AllDimensions_ProducesValidScoreInput()
{
var aggregator = CreateAggregator();
var evidence = CreateComprehensiveEvidence();
var result = aggregator.Aggregate(evidence);
// Validate all dimensions are in valid range
result.Rch.Should().BeInRange(0.0, 1.0);
result.Rts.Should().BeInRange(0.0, 1.0);
result.Bkp.Should().BeInRange(0.0, 1.0);
result.Xpl.Should().BeInRange(0.0, 1.0);
result.Src.Should().BeInRange(0.0, 1.0);
result.Mit.Should().BeInRange(0.0, 1.0);
// Validate details are preserved
result.ReachabilityDetails.Should().BeSameAs(evidence.Reachability);
result.RuntimeDetails.Should().BeSameAs(evidence.Runtime);
result.BackportDetails.Should().BeSameAs(evidence.Backport);
result.ExploitDetails.Should().BeSameAs(evidence.Exploit);
result.SourceTrustDetails.Should().BeSameAs(evidence.SourceTrust);
result.MitigationDetails.Should().BeSameAs(evidence.Mitigations);
}
[Fact]
public void FullEvidence_AggregateWithDetails_ProducesExplanations()
{
var aggregator = CreateAggregator();
var evidence = CreateComprehensiveEvidence();
var result = aggregator.AggregateWithDetails(evidence);
// Validate all dimensions have explanations
result.Details.Should().ContainKey("RCH");
result.Details.Should().ContainKey("RTS");
result.Details.Should().ContainKey("BKP");
result.Details.Should().ContainKey("XPL");
result.Details.Should().ContainKey("SRC");
result.Details.Should().ContainKey("MIT");
// Validate explanations are meaningful
foreach (var (dimension, details) in result.Details)
{
details.Score.Should().BeInRange(0.0, 1.0, $"{dimension} score should be in [0,1]");
details.Explanation.Should().NotBeNullOrEmpty($"{dimension} should have explanation");
details.Dimension.Should().NotBeNullOrEmpty($"{dimension} should have dimension name");
}
}
[Fact]
public void FullEvidence_ScoreInputPassesValidation()
{
var aggregator = CreateAggregator();
var evidence = CreateComprehensiveEvidence();
var result = aggregator.Aggregate(evidence);
var validationErrors = result.Validate();
validationErrors.Should().BeEmpty("Aggregated score input should pass validation");
}
#endregion
#region End-to-End Scoring Flow Tests
[Fact]
public void EndToEnd_HighRiskFinding_ProducesHigherScoreComponents()
{
var aggregator = CreateAggregator();
// High-risk scenario: reachable, actively exploited, no mitigation
var highRiskEvidence = new FindingEvidence
{
FindingId = "CVE-2024-CRITICAL@pkg:npm/vulnerable@1.0.0",
Reachability = new ReachabilityInput
{
State = ReachabilityState.DynamicReachable,
Confidence = 0.95,
HasTaintTracking = true
},
Runtime = new RuntimeInput
{
Posture = RuntimePosture.FullInstrumentation,
ObservationCount = 15,
RecencyFactor = 0.95,
DirectPathObserved = true
},
Exploit = new ExploitInput
{
EpssScore = 0.90,
EpssPercentile = 99.0,
KevStatus = KevStatus.InKev,
PublicExploitAvailable = true,
ExploitMaturity = "weaponized"
},
SourceTrust = new SourceTrustInput
{
IssuerType = IssuerType.Vendor,
ProvenanceTrust = 0.95,
CoverageCompleteness = 0.90,
Replayability = 0.85,
IsCryptographicallyAttested = true
}
// No mitigations
};
var result = aggregator.Aggregate(highRiskEvidence);
// High-risk finding should have high risk-increasing dimensions
result.Rch.Should().BeGreaterThan(0.80, "Dynamic reachability should be high");
result.Rts.Should().BeGreaterThan(0.70, "Runtime observed should be high");
result.Xpl.Should().BeGreaterThan(0.85, "KEV + high EPSS should be very high");
result.Mit.Should().BeLessThanOrEqualTo(0.01, "No mitigations should be near zero");
}
[Fact]
public void EndToEnd_LowRiskFinding_ProducesLowerScoreComponents()
{
var aggregator = CreateAggregator();
// Low-risk scenario: not reachable, patched, heavily mitigated
var lowRiskEvidence = new FindingEvidence
{
FindingId = "CVE-2024-MINOR@pkg:npm/safe@2.0.0",
Reachability = new ReachabilityInput
{
State = ReachabilityState.NotReachable,
Confidence = 0.90
},
Backport = new BackportInput
{
EvidenceTier = BackportEvidenceTier.PatchSignature,
Confidence = 0.95,
Status = BackportStatus.Fixed
},
Exploit = new ExploitInput
{
EpssScore = 0.001,
EpssPercentile = 2.0,
KevStatus = KevStatus.NotInKev
},
Mitigations = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.SecurityPolicy, Effectiveness = 0.20 },
new ActiveMitigation { Type = MitigationType.Isolation, Effectiveness = 0.30 },
new ActiveMitigation { Type = MitigationType.AuthRequired, Effectiveness = 0.15 }
],
CombinedEffectiveness = 0.65,
RuntimeVerified = true
}
};
var result = aggregator.Aggregate(lowRiskEvidence);
// Low-risk finding should have low risk-increasing dimensions
result.Rch.Should().BeLessThan(0.20, "Not reachable should be low");
result.Bkp.Should().BeGreaterThan(0.75, "Patched with signature should be high");
result.Xpl.Should().BeLessThan(0.30, "Low EPSS should be low");
result.Mit.Should().BeGreaterThan(0.60, "Heavy mitigations should be high");
}
#endregion
#region DI Integration Tests
[Fact]
public void DiIntegration_ResolvedAggregator_WorksCorrectly()
{
var services = new ServiceCollection();
services.AddEvidenceNormalizers();
using var provider = services.BuildServiceProvider();
var aggregator = provider.GetRequiredService<INormalizerAggregator>();
var evidence = CreateComprehensiveEvidence();
var result = aggregator.Aggregate(evidence);
result.Should().NotBeNull();
result.FindingId.Should().Be(evidence.FindingId);
}
[Fact]
public void DiIntegration_CustomOptions_AffectsNormalization()
{
var services = new ServiceCollection();
services.AddEvidenceNormalizers(options =>
{
options.Reachability.UnknownScore = 0.75;
});
using var provider = services.BuildServiceProvider();
var aggregator = provider.GetRequiredService<INormalizerAggregator>();
// No reachability evidence should use custom unknown score
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-1234@pkg:npm/test@1.0.0"
};
var result = aggregator.Aggregate(evidence);
result.Rch.Should().BeApproximately(0.75, 0.01);
}
#endregion
#region Determinism Tests
[Fact]
public void Determinism_SameEvidence_ProducesSameScores()
{
var aggregator = CreateAggregator();
var evidence = CreateComprehensiveEvidence();
var results = Enumerable.Range(0, 100)
.Select(_ => aggregator.Aggregate(evidence))
.ToList();
var first = results[0];
foreach (var result in results.Skip(1))
{
result.Rch.Should().Be(first.Rch);
result.Rts.Should().Be(first.Rts);
result.Bkp.Should().Be(first.Bkp);
result.Xpl.Should().Be(first.Xpl);
result.Src.Should().Be(first.Src);
result.Mit.Should().Be(first.Mit);
}
}
[Fact]
public void Determinism_DifferentAggregatorInstances_ProduceSameScores()
{
var evidence = CreateComprehensiveEvidence();
var results = Enumerable.Range(0, 10)
.Select(_ => CreateAggregator().Aggregate(evidence))
.ToList();
var first = results[0];
foreach (var result in results.Skip(1))
{
result.Rch.Should().Be(first.Rch);
result.Rts.Should().Be(first.Rts);
result.Bkp.Should().Be(first.Bkp);
result.Xpl.Should().Be(first.Xpl);
result.Src.Should().Be(first.Src);
result.Mit.Should().Be(first.Mit);
}
}
#endregion
#region Helper Methods
private static INormalizerAggregator CreateAggregator()
{
return new NormalizerAggregator();
}
private static FindingEvidence CreateComprehensiveEvidence() => new()
{
FindingId = "CVE-2024-1234@pkg:npm/lodash@4.17.0",
Reachability = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8,
HopCount = 2,
HasTaintTracking = true
},
Runtime = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 8,
RecencyFactor = 0.75,
DirectPathObserved = true
},
Backport = new BackportInput
{
EvidenceTier = BackportEvidenceTier.PatchSignature,
Confidence = 0.85,
Status = BackportStatus.Fixed
},
Exploit = new ExploitInput
{
EpssScore = 0.45,
EpssPercentile = 85.0,
KevStatus = KevStatus.NotInKev,
PublicExploitAvailable = false
},
SourceTrust = new SourceTrustInput
{
IssuerType = IssuerType.Distribution,
IssuerId = "debian-security",
ProvenanceTrust = 0.85,
CoverageCompleteness = 0.80,
Replayability = 0.75,
IsCryptographicallyAttested = true
},
Mitigations = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.SecurityPolicy, Effectiveness = 0.20 }
],
CombinedEffectiveness = 0.20,
RuntimeVerified = true
}
};
#endregion
}


@@ -0,0 +1,446 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using FluentAssertions;
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
using Xunit;
namespace StellaOps.Signals.Tests.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Tests for IEvidenceNormalizer interface and NormalizationResult.
/// </summary>
public class EvidenceNormalizerInterfaceTests
{
#region NormalizationResult Tests
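// NormalizationResult exposes the two factories exercised here:
// Simple(score, dimension, explanation) with an empty component map, and
// WithComponents(...), which is assumed to copy the supplied dictionary so later
// mutations of the caller's dictionary do not leak into the result.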
[Fact]
public void NormalizationResult_Simple_CreatesWithEmptyComponents()
{
var result = NormalizationResult.Simple(0.75, "RCH", "High reachability");
result.Score.Should().Be(0.75);
result.Dimension.Should().Be("RCH");
result.Explanation.Should().Be("High reachability");
result.Components.Should().BeEmpty();
}
[Fact]
public void NormalizationResult_WithComponents_IncludesBreakdown()
{
var components = new Dictionary<string, double>
{
["base_score"] = 0.60,
["confidence_bonus"] = 0.15
};
var result = NormalizationResult.WithComponents(
0.75,
"RCH",
"Static reachable with high confidence",
components);
result.Score.Should().Be(0.75);
result.Components.Should().HaveCount(2);
result.Components["base_score"].Should().Be(0.60);
result.Components["confidence_bonus"].Should().Be(0.15);
}
[Fact]
public void NormalizationResult_IsImmutable()
{
var components = new Dictionary<string, double> { ["test"] = 1.0 };
var result = NormalizationResult.WithComponents(0.5, "TEST", "Test", components);
// Modifying original dictionary shouldn't affect result
components["another"] = 2.0;
result.Components.Should().HaveCount(1);
result.Components.Should().NotContainKey("another");
}
#endregion
#region Extension Method Tests
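// The extension helpers under test are assumed to behave as follows:
// NormalizeClamped clamps a single score into [0, 1]; NormalizeAverage returns the
// mean of per-item scores (0.0 for an empty sequence); NormalizeMax returns the
// largest per-item score, clamped to 1.0.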
[Fact]
public void NormalizeClamped_ClampsAboveOne()
{
var normalizer = new TestNormalizer(1.5);
var result = normalizer.NormalizeClamped("test");
result.Should().Be(1.0);
}
[Fact]
public void NormalizeClamped_ClampsBelowZero()
{
var normalizer = new TestNormalizer(-0.5);
var result = normalizer.NormalizeClamped("test");
result.Should().Be(0.0);
}
[Fact]
public void NormalizeClamped_PassesThroughValidValues()
{
var normalizer = new TestNormalizer(0.75);
var result = normalizer.NormalizeClamped("test");
result.Should().Be(0.75);
}
[Fact]
public void NormalizeAverage_ReturnsAverageOfScores()
{
var normalizer = new SequenceNormalizer([0.2, 0.4, 0.6, 0.8]);
var result = normalizer.NormalizeAverage(["a", "b", "c", "d"]);
result.Should().Be(0.5);
}
[Fact]
public void NormalizeAverage_ReturnsZeroForEmptySequence()
{
var normalizer = new SequenceNormalizer([]);
var result = normalizer.NormalizeAverage(Array.Empty<string>());
result.Should().Be(0.0);
}
[Fact]
public void NormalizeMax_ReturnsMaximumScore()
{
var normalizer = new SequenceNormalizer([0.2, 0.9, 0.4, 0.6]);
var result = normalizer.NormalizeMax(["a", "b", "c", "d"]);
result.Should().Be(0.9);
}
[Fact]
public void NormalizeMax_ReturnsZeroForEmptySequence()
{
var normalizer = new SequenceNormalizer([]);
var result = normalizer.NormalizeMax(Array.Empty<string>());
result.Should().Be(0.0);
}
[Fact]
public void NormalizeMax_ClampsValues()
{
var normalizer = new SequenceNormalizer([0.5, 1.5, 0.3]); // 1.5 should be clamped
var result = normalizer.NormalizeMax(["a", "b", "c"]);
result.Should().Be(1.0);
}
#endregion
#region Test Helpers
private sealed class TestNormalizer(double fixedScore) : IEvidenceNormalizer<string>
{
public string Dimension => "TEST";
public double Normalize(string input) => fixedScore;
public NormalizationResult NormalizeWithDetails(string input) =>
NormalizationResult.Simple(fixedScore, Dimension, $"Fixed score: {fixedScore}");
}
private sealed class SequenceNormalizer(double[] scores) : IEvidenceNormalizer<string>
{
private int _index;
public string Dimension => "SEQ";
public double Normalize(string input) =>
_index < scores.Length ? scores[_index++] : 0.0;
public NormalizationResult NormalizeWithDetails(string input) =>
NormalizationResult.Simple(Normalize(input), Dimension, "Sequence");
}
#endregion
}
/// <summary>
/// Tests for INormalizerAggregator interface and FindingEvidence.
/// </summary>
public class NormalizerAggregatorInterfaceTests
{
#region FindingEvidence Tests
[Fact]
public void FindingEvidence_WithAllEvidence_IsValid()
{
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-12345@pkg:npm/express@4.18.0",
Reachability = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8,
HopCount = 3
},
Runtime = new RuntimeInput
{
Posture = RuntimePosture.EbpfDeep,
ObservationCount = 15,
RecencyFactor = 0.9
},
Backport = new BackportInput
{
EvidenceTier = BackportEvidenceTier.PatchSignature,
Confidence = 0.85,
Status = BackportStatus.Fixed,
ProofId = "proof-123"
},
Exploit = new ExploitInput
{
EpssScore = 0.45,
EpssPercentile = 97.0,
KevStatus = KevStatus.InKev
},
SourceTrust = new SourceTrustInput
{
IssuerType = IssuerType.Vendor,
ProvenanceTrust = 0.95,
CoverageCompleteness = 0.90,
Replayability = 0.85,
IsCryptographicallyAttested = true
},
Mitigations = new MitigationInput
{
ActiveMitigations =
[
new ActiveMitigation { Type = MitigationType.SecurityPolicy, Effectiveness = 0.20 },
new ActiveMitigation { Type = MitigationType.Isolation, Effectiveness = 0.10 }
],
CombinedEffectiveness = 0.30
}
};
evidence.FindingId.Should().NotBeNullOrEmpty();
evidence.Reachability.Should().NotBeNull();
evidence.Runtime.Should().NotBeNull();
evidence.Backport.Should().NotBeNull();
evidence.Exploit.Should().NotBeNull();
evidence.SourceTrust.Should().NotBeNull();
evidence.Mitigations.Should().NotBeNull();
}
[Fact]
public void FindingEvidence_WithPartialEvidence_IsValid()
{
var evidence = new FindingEvidence
{
FindingId = "CVE-2024-12345",
Reachability = new ReachabilityInput
{
State = ReachabilityState.Unknown,
Confidence = 0.5
}
// Other evidence is null - handled by aggregator with defaults
};
evidence.FindingId.Should().Be("CVE-2024-12345");
evidence.Reachability.Should().NotBeNull();
evidence.Runtime.Should().BeNull();
evidence.Backport.Should().BeNull();
evidence.Exploit.Should().BeNull();
evidence.SourceTrust.Should().BeNull();
evidence.Mitigations.Should().BeNull();
}
[Fact]
public void FindingEvidence_FromScoreInput_CopiesDetails()
{
var input = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-12345",
Rch = 0.8,
Rts = 0.7,
Bkp = 0.5,
Xpl = 0.6,
Src = 0.5,
Mit = 0.1,
ReachabilityDetails = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8
}
};
var evidence = FindingEvidence.FromScoreInput(input);
evidence.FindingId.Should().Be("CVE-2024-12345");
evidence.Reachability.Should().NotBeNull();
evidence.Reachability!.State.Should().Be(ReachabilityState.StaticReachable);
}
#endregion
#region AggregationResult Tests
[Fact]
public void AggregationResult_WithDetails_IsValid()
{
var input = new EvidenceWeightedScoreInput
{
FindingId = "CVE-2024-12345",
Rch = 0.8,
Rts = 0.7,
Bkp = 0.6,
Xpl = 0.5,
Src = 0.4,
Mit = 0.1
};
var details = new Dictionary<string, NormalizationResult>
{
["RCH"] = NormalizationResult.Simple(0.8, "RCH", "High reachability"),
["RTS"] = NormalizationResult.Simple(0.7, "RTS", "Strong runtime signal")
};
var result = new AggregationResult
{
Input = input,
Details = details,
Warnings = ["Minor: Missing EPSS data"]
};
result.Input.Should().NotBeNull();
result.Details.Should().HaveCount(2);
result.Warnings.Should().ContainSingle();
}
#endregion
}
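// ---------------------------------------------------------------------------
// Illustrative sketch (an assumption, not the shipped aggregator): the
// partial-evidence test above notes that missing dimensions are "handled by
// aggregator with defaults". Combined with DefaultValuesOptions, a finding
// with no evidence at all would collapse to the configured neutral values,
// roughly as sketched below. The helper name is hypothetical; the real
// aggregator first runs each available normalizer and only falls back per
// missing dimension.
// ---------------------------------------------------------------------------
internal static class AggregationDefaultsSketch
{
public static EvidenceWeightedScoreInput FromDefaultsOnly(string findingId, DefaultValuesOptions defaults) => new()
{
FindingId = findingId,
Rch = defaults.Rch,
Rts = defaults.Rts,
Bkp = defaults.Bkp,
Xpl = defaults.Xpl,
Src = defaults.Src,
Mit = defaults.Mit
};
}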
/// <summary>
/// Tests for NormalizerOptions configuration.
/// </summary>
public class NormalizerOptionsTests
{
[Fact]
public void NormalizerOptions_HasCorrectSectionName()
{
NormalizerOptions.SectionName.Should().Be("EvidenceNormalization");
}
[Fact]
public void NormalizerOptions_HasSensibleDefaults()
{
var options = new NormalizerOptions();
// Reachability defaults
options.Reachability.ConfirmedReachableBase.Should().Be(0.95);
options.Reachability.UnknownScore.Should().Be(0.50);
// Runtime defaults
options.Runtime.HighObservationThreshold.Should().Be(10);
options.Runtime.ContradictsScore.Should().Be(0.10);
// Backport defaults (tiers match BackportEvidenceTier enum: 0=None, 1=Heuristic, etc.)
options.Backport.Tier0Range.Should().Be((0.00, 0.10)); // None
options.Backport.Tier1Range.Should().Be((0.45, 0.60)); // Heuristic
options.Backport.Tier2Range.Should().Be((0.70, 0.85)); // PatchSignature
options.Backport.Tier3Range.Should().Be((0.80, 0.92)); // BinaryDiff
options.Backport.Tier4Range.Should().Be((0.85, 0.95)); // VendorVex
options.Backport.Tier5Range.Should().Be((0.90, 1.00)); // SignedProof
options.Backport.CombinationBonus.Should().Be(0.05);
// Exploit defaults
options.Exploit.KevFloor.Should().Be(0.40);
options.Exploit.NoEpssScore.Should().Be(0.30);
// Source trust defaults
options.SourceTrust.VendorMultiplier.Should().Be(1.0);
options.SourceTrust.CommunityMultiplier.Should().Be(0.60);
// Mitigation defaults
options.Mitigation.MaxTotalMitigation.Should().Be(1.0);
// Default values for missing evidence
options.Defaults.Rch.Should().Be(0.50);
options.Defaults.Rts.Should().Be(0.0);
options.Defaults.Mit.Should().Be(0.0);
}
[Fact]
public void ReachabilityNormalizerOptions_CanBeConfigured()
{
var options = new ReachabilityNormalizerOptions
{
ConfirmedReachableBase = 0.90,
ConfirmedReachableBonus = 0.10,
StaticReachableBase = 0.35,
UnknownScore = 0.45
};
options.ConfirmedReachableBase.Should().Be(0.90);
options.ConfirmedReachableBonus.Should().Be(0.10);
options.StaticReachableBase.Should().Be(0.35);
options.UnknownScore.Should().Be(0.45);
}
[Fact]
public void ExploitNormalizerOptions_PercentileThresholdsAreOrdered()
{
var options = new ExploitNormalizerOptions();
options.Top1PercentThreshold.Should().BeGreaterThan(options.Top5PercentThreshold);
options.Top5PercentThreshold.Should().BeGreaterThan(options.Top25PercentThreshold);
}
[Fact]
public void SourceTrustNormalizerOptions_MultipliersAreOrdered()
{
var options = new SourceTrustNormalizerOptions();
options.VendorMultiplier.Should().BeGreaterThanOrEqualTo(options.DistributionMultiplier);
options.DistributionMultiplier.Should().BeGreaterThanOrEqualTo(options.TrustedThirdPartyMultiplier);
options.TrustedThirdPartyMultiplier.Should().BeGreaterThanOrEqualTo(options.CommunityMultiplier);
options.CommunityMultiplier.Should().BeGreaterThanOrEqualTo(options.UnknownMultiplier);
}
[Fact]
public void MitigationNormalizerOptions_EffectivenessRangesAreValid()
{
var options = new MitigationNormalizerOptions();
// All ranges should have Low <= High
options.FeatureFlagEffectiveness.Low.Should().BeLessThanOrEqualTo(options.FeatureFlagEffectiveness.High);
options.AuthRequiredEffectiveness.Low.Should().BeLessThanOrEqualTo(options.AuthRequiredEffectiveness.High);
options.AdminOnlyEffectiveness.Low.Should().BeLessThanOrEqualTo(options.AdminOnlyEffectiveness.High);
options.NonDefaultConfigEffectiveness.Low.Should().BeLessThanOrEqualTo(options.NonDefaultConfigEffectiveness.High);
options.SeccompEffectiveness.Low.Should().BeLessThanOrEqualTo(options.SeccompEffectiveness.High);
options.MacEffectiveness.Low.Should().BeLessThanOrEqualTo(options.MacEffectiveness.High);
options.NetworkIsolationEffectiveness.Low.Should().BeLessThanOrEqualTo(options.NetworkIsolationEffectiveness.High);
options.ReadOnlyFsEffectiveness.Low.Should().BeLessThanOrEqualTo(options.ReadOnlyFsEffectiveness.High);
}
[Fact]
public void DefaultValuesOptions_AllValuesInValidRange()
{
var options = new DefaultValuesOptions();
options.Rch.Should().BeInRange(0.0, 1.0);
options.Rts.Should().BeInRange(0.0, 1.0);
options.Bkp.Should().BeInRange(0.0, 1.0);
options.Xpl.Should().BeInRange(0.0, 1.0);
options.Src.Should().BeInRange(0.0, 1.0);
options.Mit.Should().BeInRange(0.0, 1.0);
}
}
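// ---------------------------------------------------------------------------
// Wiring sketch (kept as a comment because the required configuration usings
// are not visible in this file): NormalizerOptions.SectionName suggests the
// options are bound from an "EvidenceNormalization" configuration section via
// the standard Microsoft.Extensions.Options pattern. Registration might look
// roughly like the snippet below; the extension method name is hypothetical
// and the real service wiring in this commit may differ.
//
//     public static IServiceCollection AddNormalizerOptionsSketch(
//         this IServiceCollection services, IConfiguration configuration) =>
//         services.Configure<NormalizerOptions>(
//             configuration.GetSection(NormalizerOptions.SectionName));
// ---------------------------------------------------------------------------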

View File

@@ -0,0 +1,632 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using FluentAssertions;
using Microsoft.Extensions.Options;
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
using Xunit;
namespace StellaOps.Signals.Tests.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Tests for ReachabilityNormalizer.
/// </summary>
public class ReachabilityNormalizerTests
{
private readonly ReachabilityNormalizerOptions _defaultOptions = new();
private readonly ReachabilityNormalizer _sut;
public ReachabilityNormalizerTests()
{
_sut = new ReachabilityNormalizer(_defaultOptions);
}
#region Dimension Property Tests
[Fact]
public void Dimension_ReturnsRCH()
{
_sut.Dimension.Should().Be("RCH");
}
#endregion
#region LiveExploitPath Tests
[Fact]
public void Normalize_LiveExploitPath_HighConfidence_ReturnsHighScore()
{
var input = new ReachabilityInput
{
State = ReachabilityState.LiveExploitPath,
Confidence = 1.0
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(0.95);
result.Should().BeLessThanOrEqualTo(1.0);
}
[Fact]
public void Normalize_LiveExploitPath_LowConfidence_StillHigh()
{
var input = new ReachabilityInput
{
State = ReachabilityState.LiveExploitPath,
Confidence = 0.5
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(0.95);
}
#endregion
#region DynamicReachable Tests
[Fact]
public void Normalize_DynamicReachable_HighConfidence_ReturnsHighScore()
{
var input = new ReachabilityInput
{
State = ReachabilityState.DynamicReachable,
Confidence = 1.0
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(0.90);
result.Should().BeLessThan(1.0);
}
[Fact]
public void Normalize_DynamicReachable_LowerThanLiveExploit()
{
var liveInput = new ReachabilityInput
{
State = ReachabilityState.LiveExploitPath,
Confidence = 1.0
};
var dynamicInput = new ReachabilityInput
{
State = ReachabilityState.DynamicReachable,
Confidence = 1.0
};
var liveScore = _sut.Normalize(liveInput);
var dynamicScore = _sut.Normalize(dynamicInput);
dynamicScore.Should().BeLessThan(liveScore);
}
#endregion
#region StaticReachable Tests
[Fact]
public void Normalize_StaticReachable_HighConfidence_ReturnsMediumHighScore()
{
var input = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 1.0
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(0.70);
result.Should().BeLessThanOrEqualTo(0.95);
}
[Fact]
public void Normalize_StaticReachable_LowConfidence_ReturnsLowerScore()
{
var input = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.3
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(0.40);
result.Should().BeLessThan(0.70);
}
[Fact]
public void Normalize_StaticReachable_ConfidenceScales()
{
var lowConfidence = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.3
};
var highConfidence = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.9
};
var lowScore = _sut.Normalize(lowConfidence);
var highScore = _sut.Normalize(highConfidence);
highScore.Should().BeGreaterThan(lowScore);
}
#endregion
#region PotentiallyReachable Tests
[Fact]
public void Normalize_PotentiallyReachable_ReturnsMediumScore()
{
var input = new ReachabilityInput
{
State = ReachabilityState.PotentiallyReachable,
Confidence = 0.5
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(0.30);
result.Should().BeLessThanOrEqualTo(0.60);
}
[Fact]
public void Normalize_PotentiallyReachable_LowerThanStatic()
{
var potentialInput = new ReachabilityInput
{
State = ReachabilityState.PotentiallyReachable,
Confidence = 0.7
};
var staticInput = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.7
};
var potentialScore = _sut.Normalize(potentialInput);
var staticScore = _sut.Normalize(staticInput);
potentialScore.Should().BeLessThan(staticScore);
}
#endregion
#region Unknown State Tests
[Fact]
public void Normalize_Unknown_ReturnsNeutralScore()
{
var input = new ReachabilityInput
{
State = ReachabilityState.Unknown,
Confidence = 0.0
};
var result = _sut.Normalize(input);
result.Should().BeApproximately(_defaultOptions.UnknownScore, 0.01);
}
[Fact]
public void Normalize_Unknown_ConfidenceDoesNotAffect()
{
var lowConfidence = new ReachabilityInput
{
State = ReachabilityState.Unknown,
Confidence = 0.2
};
var highConfidence = new ReachabilityInput
{
State = ReachabilityState.Unknown,
Confidence = 0.9
};
var lowScore = _sut.Normalize(lowConfidence);
var highScore = _sut.Normalize(highConfidence);
lowScore.Should().BeApproximately(highScore, 0.01);
}
#endregion
#region NotReachable Tests
[Fact]
public void Normalize_NotReachable_HighConfidence_ReturnsLowScore()
{
var input = new ReachabilityInput
{
State = ReachabilityState.NotReachable,
Confidence = 1.0
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(0.0);
result.Should().BeLessThanOrEqualTo(0.10);
}
[Fact]
public void Normalize_NotReachable_LowConfidence_ReturnsSlightlyHigherScore()
{
var input = new ReachabilityInput
{
State = ReachabilityState.NotReachable,
Confidence = 0.3
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(0.0);
result.Should().BeLessThan(0.10);
}
#endregion
#region Hop Count Tests
[Fact]
public void Normalize_StaticReachable_ZeroHops_NoHopPenalty()
{
var input = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8,
HopCount = 0
};
var result = _sut.Normalize(input);
// No hop penalty applies at zero hops, so the score should stay at or above the static-reachable base
result.Should().BeGreaterThanOrEqualTo(0.70);
}
[Fact]
public void Normalize_StaticReachable_ManyHops_PenaltyApplied()
{
var zeroHops = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8,
HopCount = 0
};
var manyHops = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8,
HopCount = 10
};
var zeroHopScore = _sut.Normalize(zeroHops);
var manyHopScore = _sut.Normalize(manyHops);
manyHopScore.Should().BeLessThan(zeroHopScore);
}
[Fact]
public void Normalize_DynamicReachable_HopsNotPenalized()
{
// For dynamic analysis the path was actually observed, so hop count should carry no penalty
var zeroHops = new ReachabilityInput
{
State = ReachabilityState.DynamicReachable,
Confidence = 0.9,
HopCount = 0
};
var manyHops = new ReachabilityInput
{
State = ReachabilityState.DynamicReachable,
Confidence = 0.9,
HopCount = 10
};
var zeroHopScore = _sut.Normalize(zeroHops);
var manyHopScore = _sut.Normalize(manyHops);
// Should be the same (no hop penalty for dynamic)
zeroHopScore.Should().BeApproximately(manyHopScore, 0.01);
}
#endregion
#region Analysis Quality Bonus Tests
[Fact]
public void Normalize_StaticReachable_WithTaintTracking_GetsBonus()
{
var withoutTaint = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8,
HasTaintTracking = false
};
var withTaint = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8,
HasTaintTracking = true
};
var withoutScore = _sut.Normalize(withoutTaint);
var withScore = _sut.Normalize(withTaint);
withScore.Should().BeGreaterThan(withoutScore);
}
[Fact]
public void Normalize_StaticReachable_AllAnalysisFlags_MaxBonus()
{
var minimal = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8,
HasInterproceduralFlow = false,
HasTaintTracking = false,
HasDataFlowSensitivity = false
};
var full = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8,
HasInterproceduralFlow = true,
HasTaintTracking = true,
HasDataFlowSensitivity = true
};
var minimalScore = _sut.Normalize(minimal);
var fullScore = _sut.Normalize(full);
fullScore.Should().BeGreaterThan(minimalScore);
(fullScore - minimalScore).Should().BeApproximately(0.05, 0.01); // 0.02 + 0.02 + 0.01
}
[Fact]
public void Normalize_NotReachable_AnalysisFlagsNoBonus()
{
// Analysis bonuses should not apply to unreachable findings
var withFlags = new ReachabilityInput
{
State = ReachabilityState.NotReachable,
Confidence = 1.0,
HasInterproceduralFlow = true,
HasTaintTracking = true,
HasDataFlowSensitivity = true
};
var withoutFlags = new ReachabilityInput
{
State = ReachabilityState.NotReachable,
Confidence = 1.0,
HasInterproceduralFlow = false,
HasTaintTracking = false,
HasDataFlowSensitivity = false
};
var withScore = _sut.Normalize(withFlags);
var withoutScore = _sut.Normalize(withoutFlags);
withScore.Should().BeApproximately(withoutScore, 0.01);
}
#endregion
#region NormalizeWithDetails Tests
[Fact]
public void NormalizeWithDetails_ReturnsCorrectDimension()
{
var input = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.7
};
var result = _sut.NormalizeWithDetails(input);
result.Dimension.Should().Be("RCH");
}
[Fact]
public void NormalizeWithDetails_ReturnsComponents()
{
var input = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.8,
HopCount = 3,
HasTaintTracking = true
};
var result = _sut.NormalizeWithDetails(input);
result.Components.Should().ContainKey("state");
result.Components.Should().ContainKey("confidence");
result.Components.Should().ContainKey("hop_count");
result.Components.Should().ContainKey("base_score");
result.Components.Should().ContainKey("confidence_modifier");
result.Components.Should().ContainKey("analysis_bonus");
result.Components.Should().ContainKey("hop_penalty");
result.Components.Should().ContainKey("taint_tracking");
result.Components["state"].Should().Be((double)ReachabilityState.StaticReachable);
result.Components["confidence"].Should().Be(0.8);
result.Components["hop_count"].Should().Be(3);
result.Components["taint_tracking"].Should().Be(1.0);
}
[Fact]
public void NormalizeWithDetails_GeneratesExplanation()
{
var input = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.85,
HopCount = 2,
HasTaintTracking = true,
AnalysisMethod = "codeql",
EvidenceSource = "stellaops-native"
};
var result = _sut.NormalizeWithDetails(input);
result.Explanation.Should().Contain("Statically determined reachable");
result.Explanation.Should().Contain("85%");
result.Explanation.Should().Contain("2 hop");
result.Explanation.Should().Contain("taint-tracked");
result.Explanation.Should().Contain("codeql");
result.Explanation.Should().Contain("stellaops-native");
result.Explanation.Should().Contain("RCH=");
}
[Fact]
public void NormalizeWithDetails_Unknown_ExplainsCorrectly()
{
var input = new ReachabilityInput
{
State = ReachabilityState.Unknown,
Confidence = 0.0
};
var result = _sut.NormalizeWithDetails(input);
result.Explanation.Should().Contain("Reachability unknown");
}
#endregion
#region Null Input Tests
[Fact]
public void Normalize_NullInput_ThrowsArgumentNullException()
{
var act = () => _sut.Normalize(null!);
act.Should().Throw<ArgumentNullException>();
}
[Fact]
public void NormalizeWithDetails_NullInput_ThrowsArgumentNullException()
{
var act = () => _sut.NormalizeWithDetails(null!);
act.Should().Throw<ArgumentNullException>();
}
#endregion
#region DI Integration Tests
[Fact]
public void Constructor_WithIOptionsMonitor_WorksCorrectly()
{
var options = new NormalizerOptions
{
Reachability = new ReachabilityNormalizerOptions
{
UnknownScore = 0.60 // Custom unknown score
}
};
var optionsMonitor = new TestOptionsMonitor(options);
var normalizer = new ReachabilityNormalizer(optionsMonitor);
var input = new ReachabilityInput
{
State = ReachabilityState.Unknown,
Confidence = 0.0
};
var result = normalizer.Normalize(input);
result.Should().BeApproximately(0.60, 0.01);
}
private sealed class TestOptionsMonitor(NormalizerOptions value) : IOptionsMonitor<NormalizerOptions>
{
public NormalizerOptions CurrentValue => value;
public NormalizerOptions Get(string? name) => value;
public IDisposable? OnChange(Action<NormalizerOptions, string?> listener) => null;
}
#endregion
#region Determinism Tests
[Fact]
public void Normalize_SameInput_ProducesSameOutput()
{
var input = new ReachabilityInput
{
State = ReachabilityState.StaticReachable,
Confidence = 0.73,
HopCount = 4,
HasInterproceduralFlow = true,
HasTaintTracking = true,
AnalysisMethod = "codeql"
};
var results = Enumerable.Range(0, 100)
.Select(_ => _sut.Normalize(input))
.Distinct()
.ToList();
results.Should().ContainSingle("Deterministic normalizer should produce identical results");
}
#endregion
#region Score Ordering Tests
[Theory]
[InlineData(ReachabilityState.LiveExploitPath)]
[InlineData(ReachabilityState.DynamicReachable)]
[InlineData(ReachabilityState.StaticReachable)]
[InlineData(ReachabilityState.PotentiallyReachable)]
[InlineData(ReachabilityState.Unknown)]
[InlineData(ReachabilityState.NotReachable)]
public void Normalize_AllStates_ReturnValidRange(ReachabilityState state)
{
var input = new ReachabilityInput
{
State = state,
Confidence = 0.75
};
var result = _sut.Normalize(input);
result.Should().BeInRange(0.0, 1.0);
}
[Fact]
public void Normalize_StateOrdering_HigherStatesProduceHigherScores()
{
var states = new[]
{
ReachabilityState.NotReachable,
ReachabilityState.Unknown,
ReachabilityState.PotentiallyReachable,
ReachabilityState.StaticReachable,
ReachabilityState.DynamicReachable,
ReachabilityState.LiveExploitPath
};
var scores = states.Select(state => _sut.Normalize(new ReachabilityInput
{
State = state,
Confidence = 0.8
})).ToList();
// Scores should generally increase (with Unknown being neutral)
scores[0].Should().BeLessThan(scores[2]); // NotReachable < PotentiallyReachable
scores[2].Should().BeLessThan(scores[3]); // PotentiallyReachable < StaticReachable
scores[3].Should().BeLessThan(scores[4]); // StaticReachable < DynamicReachable
scores[4].Should().BeLessThan(scores[5]); // DynamicReachable < LiveExploitPath
}
#endregion
}
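// ---------------------------------------------------------------------------
// Composition sketch (an inference, not the shipped implementation): the
// component keys asserted above (base_score, confidence_modifier,
// analysis_bonus, hop_penalty) and the observed deltas (roughly 0.02 + 0.02 +
// 0.01 for the analysis flags) suggest a composition along the lines of the
// helper below. Whether the confidence modifier multiplies or shifts the base
// score is an assumption; the real arithmetic may differ.
// ---------------------------------------------------------------------------
internal static class ReachabilityCompositionSketch
{
public static double Compose(double baseScore, double confidenceModifier, double analysisBonus, double hopPenalty) =>
Math.Clamp(baseScore * confidenceModifier + analysisBonus - hopPenalty, 0.0, 1.0);
}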

View File

@@ -0,0 +1,616 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using FluentAssertions;
using Microsoft.Extensions.Options;
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
using Xunit;
namespace StellaOps.Signals.Tests.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Tests for RuntimeSignalNormalizer.
/// </summary>
public class RuntimeSignalNormalizerTests
{
private readonly RuntimeNormalizerOptions _defaultOptions = new();
private readonly RuntimeSignalNormalizer _sut;
public RuntimeSignalNormalizerTests()
{
_sut = new RuntimeSignalNormalizer(_defaultOptions);
}
#region Dimension Property Tests
[Fact]
public void Dimension_ReturnsRTS()
{
_sut.Dimension.Should().Be("RTS");
}
#endregion
#region No Observation Tests
[Fact]
public void Normalize_NoPosture_ReturnsUnknownScore()
{
var input = new RuntimeInput
{
Posture = RuntimePosture.None,
ObservationCount = 0,
RecencyFactor = 0.0
};
var result = _sut.Normalize(input);
result.Should().Be(_defaultOptions.UnknownScore);
}
[Fact]
public void Normalize_ZeroObservations_ReturnsUnknownScore()
{
var input = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 0,
RecencyFactor = 0.9
};
var result = _sut.Normalize(input);
result.Should().Be(_defaultOptions.UnknownScore);
}
#endregion
#region Observation Count Scaling Tests
[Fact]
public void Normalize_HighObservations_ReturnsHighScore()
{
var input = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 15,
RecencyFactor = 0.5
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(0.85);
}
[Fact]
public void Normalize_MediumObservations_ReturnsMediumScore()
{
var input = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 7,
RecencyFactor = 0.5
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(0.70);
result.Should().BeLessThan(0.90);
}
[Fact]
public void Normalize_LowObservations_ReturnsLowerScore()
{
var input = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 2,
RecencyFactor = 0.5
};
var result = _sut.Normalize(input);
result.Should().BeGreaterThanOrEqualTo(0.55);
result.Should().BeLessThan(0.75);
}
[Fact]
public void Normalize_ObservationCountScales()
{
var low = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 2,
RecencyFactor = 0.5
};
var high = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 15,
RecencyFactor = 0.5
};
var lowScore = _sut.Normalize(low);
var highScore = _sut.Normalize(high);
highScore.Should().BeGreaterThan(lowScore);
}
#endregion
#region Posture Multiplier Tests
[Fact]
public void Normalize_FullInstrumentation_HighestMultiplier()
{
var fullInst = new RuntimeInput
{
Posture = RuntimePosture.FullInstrumentation,
ObservationCount = 10,
RecencyFactor = 0.5
};
var active = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 10,
RecencyFactor = 0.5
};
var fullScore = _sut.Normalize(fullInst);
var activeScore = _sut.Normalize(active);
fullScore.Should().BeGreaterThan(activeScore);
}
[Fact]
public void Normalize_EbpfDeep_HighMultiplier()
{
var ebpf = new RuntimeInput
{
Posture = RuntimePosture.EbpfDeep,
ObservationCount = 10,
RecencyFactor = 0.5
};
var active = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 10,
RecencyFactor = 0.5
};
var ebpfScore = _sut.Normalize(ebpf);
var activeScore = _sut.Normalize(active);
ebpfScore.Should().BeGreaterThan(activeScore);
}
[Fact]
public void Normalize_Passive_LowerMultiplier()
{
var passive = new RuntimeInput
{
Posture = RuntimePosture.Passive,
ObservationCount = 10,
RecencyFactor = 0.5
};
var active = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 10,
RecencyFactor = 0.5
};
var passiveScore = _sut.Normalize(passive);
var activeScore = _sut.Normalize(active);
passiveScore.Should().BeLessThan(activeScore);
}
#endregion
#region Recency Bonus Tests
[Fact]
public void Normalize_VeryRecentObservations_GetsBonus()
{
var recent = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 5,
RecencyFactor = 0.95
};
var old = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 5,
RecencyFactor = 0.3
};
var recentScore = _sut.Normalize(recent);
var oldScore = _sut.Normalize(old);
recentScore.Should().BeGreaterThan(oldScore);
(recentScore - oldScore).Should().BeApproximately(_defaultOptions.VeryRecentBonus, 0.02);
}
[Fact]
public void Normalize_ModeratelyRecentObservations_GetsPartialBonus()
{
var modRecent = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 5,
RecencyFactor = 0.6
};
var old = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 5,
RecencyFactor = 0.2
};
var modRecentScore = _sut.Normalize(modRecent);
var oldScore = _sut.Normalize(old);
modRecentScore.Should().BeGreaterThan(oldScore);
(modRecentScore - oldScore).Should().BeApproximately(_defaultOptions.RecentBonus, 0.02);
}
[Fact]
public void Normalize_OldObservations_NoBonus()
{
var input = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 5,
RecencyFactor = 0.1
};
var result = _sut.Normalize(input);
// Should be observation score * posture multiplier only
result.Should().BeLessThanOrEqualTo(0.80);
}
#endregion
#region Quality Bonus Tests
[Fact]
public void Normalize_DirectPathObserved_GetsBonus()
{
var withDirect = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 5,
RecencyFactor = 0.5,
DirectPathObserved = true
};
var withoutDirect = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 5,
RecencyFactor = 0.5,
DirectPathObserved = false
};
var withScore = _sut.Normalize(withDirect);
var withoutScore = _sut.Normalize(withoutDirect);
withScore.Should().BeGreaterThan(withoutScore);
(withScore - withoutScore).Should().BeApproximately(0.05, 0.01);
}
[Fact]
public void Normalize_ProductionTraffic_GetsBonus()
{
var production = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 5,
RecencyFactor = 0.5,
IsProductionTraffic = true
};
var nonProd = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 5,
RecencyFactor = 0.5,
IsProductionTraffic = false
};
var prodScore = _sut.Normalize(production);
var nonProdScore = _sut.Normalize(nonProd);
prodScore.Should().BeGreaterThan(nonProdScore);
(prodScore - nonProdScore).Should().BeApproximately(0.03, 0.01);
}
[Fact]
public void Normalize_AllBonuses_Accumulate()
{
var minimal = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 5,
RecencyFactor = 0.2,
DirectPathObserved = false,
IsProductionTraffic = false
};
var full = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 5,
RecencyFactor = 0.95,
DirectPathObserved = true,
IsProductionTraffic = true
};
var minimalScore = _sut.Normalize(minimal);
var fullScore = _sut.Normalize(full);
// Full should have: recency bonus (0.10) + direct (0.05) + production (0.03) = 0.18 extra
(fullScore - minimalScore).Should().BeApproximately(0.18, 0.03);
}
#endregion
#region Score Capping Tests
[Fact]
public void Normalize_MaxBonuses_CappedAtOne()
{
var maxInput = new RuntimeInput
{
Posture = RuntimePosture.FullInstrumentation,
ObservationCount = 100,
RecencyFactor = 1.0,
DirectPathObserved = true,
IsProductionTraffic = true
};
var result = _sut.Normalize(maxInput);
result.Should().BeLessThanOrEqualTo(1.0);
}
#endregion
#region NormalizeWithDetails Tests
[Fact]
public void NormalizeWithDetails_ReturnsCorrectDimension()
{
var input = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 5,
RecencyFactor = 0.7
};
var result = _sut.NormalizeWithDetails(input);
result.Dimension.Should().Be("RTS");
}
[Fact]
public void NormalizeWithDetails_ReturnsComponents()
{
var input = new RuntimeInput
{
Posture = RuntimePosture.EbpfDeep,
ObservationCount = 8,
RecencyFactor = 0.85,
DirectPathObserved = true,
IsProductionTraffic = true
};
var result = _sut.NormalizeWithDetails(input);
result.Components.Should().ContainKey("posture");
result.Components.Should().ContainKey("observation_count");
result.Components.Should().ContainKey("recency_factor");
result.Components.Should().ContainKey("observation_score");
result.Components.Should().ContainKey("posture_multiplier");
result.Components.Should().ContainKey("recency_bonus");
result.Components.Should().ContainKey("quality_bonus");
result.Components.Should().ContainKey("direct_path_observed");
result.Components.Should().ContainKey("is_production_traffic");
result.Components["posture"].Should().Be((double)RuntimePosture.EbpfDeep);
result.Components["observation_count"].Should().Be(8);
result.Components["direct_path_observed"].Should().Be(1.0);
result.Components["is_production_traffic"].Should().Be(1.0);
}
[Fact]
public void NormalizeWithDetails_IncludesSessionCount()
{
var input = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 5,
RecencyFactor = 0.5,
SessionDigests = ["sha256:aaa", "sha256:bbb", "sha256:ccc"]
};
var result = _sut.NormalizeWithDetails(input);
result.Components.Should().ContainKey("session_count");
result.Components["session_count"].Should().Be(3);
}
[Fact]
public void NormalizeWithDetails_GeneratesExplanation()
{
var input = new RuntimeInput
{
Posture = RuntimePosture.EbpfDeep,
ObservationCount = 12,
RecencyFactor = 0.92,
DirectPathObserved = true,
IsProductionTraffic = true,
EvidenceSource = "stellaops-ebpf"
};
var result = _sut.NormalizeWithDetails(input);
result.Explanation.Should().Contain("12 observation(s)");
result.Explanation.Should().Contain("eBPF deep observation");
result.Explanation.Should().Contain("vulnerable path directly observed");
result.Explanation.Should().Contain("production traffic");
result.Explanation.Should().Contain("very recent");
result.Explanation.Should().Contain("stellaops-ebpf");
result.Explanation.Should().Contain("RTS=");
}
[Fact]
public void NormalizeWithDetails_NoObservations_ExplainsCorrectly()
{
var input = new RuntimeInput
{
Posture = RuntimePosture.None,
ObservationCount = 0,
RecencyFactor = 0.0
};
var result = _sut.NormalizeWithDetails(input);
result.Explanation.Should().Contain("No runtime observations");
}
#endregion
#region Null Input Tests
[Fact]
public void Normalize_NullInput_ThrowsArgumentNullException()
{
var act = () => _sut.Normalize(null!);
act.Should().Throw<ArgumentNullException>();
}
[Fact]
public void NormalizeWithDetails_NullInput_ThrowsArgumentNullException()
{
var act = () => _sut.NormalizeWithDetails(null!);
act.Should().Throw<ArgumentNullException>();
}
#endregion
#region DI Integration Tests
[Fact]
public void Constructor_WithIOptionsMonitor_WorksCorrectly()
{
var options = new NormalizerOptions
{
Runtime = new RuntimeNormalizerOptions
{
HighObservationScore = 0.95, // Custom high score
VeryRecentBonus = 0.15 // Custom bonus
}
};
var optionsMonitor = new TestOptionsMonitor(options);
var normalizer = new RuntimeSignalNormalizer(optionsMonitor);
var input = new RuntimeInput
{
Posture = RuntimePosture.ActiveTracing,
ObservationCount = 15,
RecencyFactor = 0.95
};
var result = normalizer.Normalize(input);
// Custom high observation score (0.95) plus custom recency bonus (0.15) exceeds the cap,
// so the normalized result should sit exactly at the 1.0 ceiling
result.Should().BeGreaterThanOrEqualTo(1.0);
}
private sealed class TestOptionsMonitor(NormalizerOptions value) : IOptionsMonitor<NormalizerOptions>
{
public NormalizerOptions CurrentValue => value;
public NormalizerOptions Get(string? name) => value;
public IDisposable? OnChange(Action<NormalizerOptions, string?> listener) => null;
}
#endregion
#region Determinism Tests
[Fact]
public void Normalize_SameInput_ProducesSameOutput()
{
var input = new RuntimeInput
{
Posture = RuntimePosture.EbpfDeep,
ObservationCount = 7,
RecencyFactor = 0.67,
DirectPathObserved = true,
IsProductionTraffic = false,
EvidenceSource = "test-sensor"
};
var results = Enumerable.Range(0, 100)
.Select(_ => _sut.Normalize(input))
.Distinct()
.ToList();
results.Should().ContainSingle("Deterministic normalizer should produce identical results");
}
#endregion
#region Posture Ordering Tests
[Theory]
[InlineData(RuntimePosture.None)]
[InlineData(RuntimePosture.Passive)]
[InlineData(RuntimePosture.ActiveTracing)]
[InlineData(RuntimePosture.EbpfDeep)]
[InlineData(RuntimePosture.FullInstrumentation)]
public void Normalize_AllPostures_ReturnValidRange(RuntimePosture posture)
{
var input = new RuntimeInput
{
Posture = posture,
ObservationCount = posture == RuntimePosture.None ? 0 : 5,
RecencyFactor = 0.5
};
var result = _sut.Normalize(input);
result.Should().BeInRange(0.0, 1.0);
}
[Fact]
public void Normalize_PostureOrdering_BetterPosturesProduceHigherScores()
{
var postures = new[]
{
RuntimePosture.Passive,
RuntimePosture.ActiveTracing,
RuntimePosture.EbpfDeep,
RuntimePosture.FullInstrumentation
};
var scores = postures.Select(posture => _sut.Normalize(new RuntimeInput
{
Posture = posture,
ObservationCount = 10,
RecencyFactor = 0.5
})).ToList();
// Scores should generally increase with better postures
scores[0].Should().BeLessThan(scores[1]); // Passive < ActiveTracing
scores[1].Should().BeLessThan(scores[2]); // ActiveTracing < EbpfDeep
scores[2].Should().BeLessThan(scores[3]); // EbpfDeep < FullInstrumentation
}
#endregion
}
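// ---------------------------------------------------------------------------
// Composition sketch (an inference from the tests above): the component keys
// (observation_score, posture_multiplier, recency_bonus, quality_bonus), the
// "observation score * posture multiplier only" comment, and the cap test
// suggest the runtime signal combines multiplicatively with additive bonuses
// and is clamped to [0, 1], roughly as below. The real arithmetic may differ.
// ---------------------------------------------------------------------------
internal static class RuntimeCompositionSketch
{
public static double Compose(double observationScore, double postureMultiplier, double recencyBonus, double qualityBonus) =>
Math.Clamp(observationScore * postureMultiplier + recencyBonus + qualityBonus, 0.0, 1.0);
}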

View File

@@ -0,0 +1,551 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
using FluentAssertions;
using Microsoft.Extensions.Options;
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
using Xunit;
namespace StellaOps.Signals.Tests.EvidenceWeightedScore.Normalizers;
/// <summary>
/// Tests for SourceTrustNormalizer.
/// </summary>
public class SourceTrustNormalizerTests
{
private readonly SourceTrustNormalizerOptions _defaultOptions = new();
private readonly SourceTrustNormalizer _sut;
public SourceTrustNormalizerTests()
{
_sut = new SourceTrustNormalizer(_defaultOptions);
}
#region Dimension Property Tests
[Fact]
public void Dimension_ReturnsSRC()
{
_sut.Dimension.Should().Be("SRC");
}
#endregion
#region Issuer Type Tests
[Fact]
public void Normalize_GovernmentAgency_HighestMultiplier()
{
var input = CreateBaseInput() with { IssuerType = IssuerType.GovernmentAgency };
var result = _sut.Normalize(input);
result.Should().BeGreaterThan(0.78);
}
[Fact]
public void Normalize_Cna_HighMultiplier()
{
var input = CreateBaseInput() with { IssuerType = IssuerType.Cna };
var result = _sut.Normalize(input);
result.Should().BeGreaterThan(0.75);
}
[Fact]
public void Normalize_Vendor_HighTrust()
{
var input = CreateBaseInput() with { IssuerType = IssuerType.Vendor };
var result = _sut.Normalize(input);
result.Should().BeGreaterThan(0.70);
}
[Fact]
public void Normalize_Distribution_GoodTrust()
{
var input = CreateBaseInput() with { IssuerType = IssuerType.Distribution };
var result = _sut.Normalize(input);
result.Should().BeGreaterThan(0.60);
}
[Fact]
public void Normalize_Community_LowerTrust()
{
var input = CreateBaseInput() with { IssuerType = IssuerType.Community };
var result = _sut.Normalize(input);
result.Should().BeGreaterThan(0.40);
result.Should().BeLessThan(0.70);
}
[Fact]
public void Normalize_Unknown_MinimalTrust()
{
var input = CreateBaseInput() with { IssuerType = IssuerType.Unknown };
var result = _sut.Normalize(input);
result.Should().BeLessThan(0.40);
}
[Fact]
public void Normalize_IssuerTypeOrdering()
{
var issuers = new[]
{
IssuerType.Unknown,
IssuerType.Community,
IssuerType.SecurityResearcher,
IssuerType.Upstream,
IssuerType.Distribution,
IssuerType.Vendor,
IssuerType.Cna,
IssuerType.GovernmentAgency
};
var scores = issuers.Select(issuer => _sut.Normalize(CreateBaseInput() with
{
IssuerType = issuer
})).ToList();
// General ordering: Unknown < Community < ... < GovernmentAgency
scores[0].Should().BeLessThan(scores[1]); // Unknown < Community
scores[1].Should().BeLessThan(scores[5]); // Community < Vendor
scores[5].Should().BeLessThan(scores[7]); // Vendor < GovernmentAgency
}
#endregion
#region Trust Vector Tests
[Fact]
public void Normalize_HighProvenance_HigherScore()
{
var lowProvenance = CreateBaseInput() with { ProvenanceTrust = 0.3 };
var highProvenance = CreateBaseInput() with { ProvenanceTrust = 0.95 };
var lowScore = _sut.Normalize(lowProvenance);
var highScore = _sut.Normalize(highProvenance);
highScore.Should().BeGreaterThan(lowScore);
}
[Fact]
public void Normalize_HighCoverage_HigherScore()
{
var lowCoverage = CreateBaseInput() with { CoverageCompleteness = 0.2 };
var highCoverage = CreateBaseInput() with { CoverageCompleteness = 0.9 };
var lowScore = _sut.Normalize(lowCoverage);
var highScore = _sut.Normalize(highCoverage);
highScore.Should().BeGreaterThan(lowScore);
}
[Fact]
public void Normalize_HighReplayability_HigherScore()
{
var lowReplay = CreateBaseInput() with { Replayability = 0.2 };
var highReplay = CreateBaseInput() with { Replayability = 0.9 };
var lowScore = _sut.Normalize(lowReplay);
var highScore = _sut.Normalize(highReplay);
highScore.Should().BeGreaterThan(lowScore);
}
[Fact]
public void Normalize_ProvenanceWeightedHighest()
{
// Provenance should have highest weight (40%)
var baseInput = CreateBaseInput() with
{
ProvenanceTrust = 0.5,
CoverageCompleteness = 0.5,
Replayability = 0.5
};
// Increase only provenance
var highProvenance = baseInput with { ProvenanceTrust = 1.0 };
// Increase only coverage
var highCoverage = baseInput with { CoverageCompleteness = 1.0 };
var provenanceDelta = _sut.Normalize(highProvenance) - _sut.Normalize(baseInput);
var coverageDelta = _sut.Normalize(highCoverage) - _sut.Normalize(baseInput);
// Provenance increase should have larger impact
provenanceDelta.Should().BeGreaterThan(coverageDelta);
}
#endregion
#region Attestation Bonus Tests
[Fact]
public void Normalize_CryptographicallyAttested_GetsBonus()
{
var unattested = CreateBaseInput() with { IsCryptographicallyAttested = false };
var attested = CreateBaseInput() with { IsCryptographicallyAttested = true };
var unattestedScore = _sut.Normalize(unattested);
var attestedScore = _sut.Normalize(attested);
attestedScore.Should().BeGreaterThan(unattestedScore);
(attestedScore - unattestedScore).Should().BeApproximately(_defaultOptions.SignedBonus, 0.02);
}
[Fact]
public void Normalize_IndependentlyVerified_GetsBonus()
{
var unverified = CreateBaseInput() with { IndependentlyVerified = false };
var verified = CreateBaseInput() with { IndependentlyVerified = true };
var unverifiedScore = _sut.Normalize(unverified);
var verifiedScore = _sut.Normalize(verified);
verifiedScore.Should().BeGreaterThan(unverifiedScore);
(verifiedScore - unverifiedScore).Should().BeApproximately(0.05, 0.01);
}
[Fact]
public void Normalize_BothAttestations_BonusesStack()
{
var none = CreateBaseInput() with
{
IsCryptographicallyAttested = false,
IndependentlyVerified = false
};
var both = CreateBaseInput() with
{
IsCryptographicallyAttested = true,
IndependentlyVerified = true
};
var noneScore = _sut.Normalize(none);
var bothScore = _sut.Normalize(both);
(bothScore - noneScore).Should().BeApproximately(0.15, 0.02); // 0.10 + 0.05
}
#endregion
#region Corroboration Tests
[Fact]
public void Normalize_CorroboratingSources_GetsBonus()
{
var noCorroboration = CreateBaseInput() with { CorroboratingSourceCount = 0 };
var withCorroboration = CreateBaseInput() with { CorroboratingSourceCount = 2 };
var noScore = _sut.Normalize(noCorroboration);
var withScore = _sut.Normalize(withCorroboration);
withScore.Should().BeGreaterThan(noScore);
}
[Fact]
public void Normalize_ManyCorroboratingSources_CappedBonus()
{
var three = CreateBaseInput() with { CorroboratingSourceCount = 3 };
var ten = CreateBaseInput() with { CorroboratingSourceCount = 10 };
var threeScore = _sut.Normalize(three);
var tenScore = _sut.Normalize(ten);
// Both should have same bonus (capped at 3+)
threeScore.Should().BeApproximately(tenScore, 0.01);
}
#endregion
#region Historical Accuracy Tests
[Fact]
public void Normalize_ExcellentHistory_GetsBonus()
{
var noHistory = CreateBaseInput() with { HistoricalAccuracy = null };
var excellentHistory = CreateBaseInput() with { HistoricalAccuracy = 0.98 };
var noScore = _sut.Normalize(noHistory);
var excellentScore = _sut.Normalize(excellentHistory);
excellentScore.Should().BeGreaterThan(noScore);
}
[Fact]
public void Normalize_PoorHistory_GetsPenalty()
{
var noHistory = CreateBaseInput() with { HistoricalAccuracy = null };
var poorHistory = CreateBaseInput() with { HistoricalAccuracy = 0.50 };
var noScore = _sut.Normalize(noHistory);
var poorScore = _sut.Normalize(poorHistory);
poorScore.Should().BeLessThan(noScore);
}
[Theory]
[InlineData(0.96, 0.05)] // Excellent
[InlineData(0.88, 0.03)] // Good
[InlineData(0.72, 0.01)] // Acceptable
[InlineData(0.60, -0.02)] // Poor
public void Normalize_HistoricalAccuracyTiers(double accuracy, double expectedBonus)
{
var noHistory = CreateBaseInput() with { HistoricalAccuracy = null };
var withHistory = CreateBaseInput() with { HistoricalAccuracy = accuracy };
var noScore = _sut.Normalize(noHistory);
var withScore = _sut.Normalize(withHistory);
(withScore - noScore).Should().BeApproximately(expectedBonus, 0.02);
}
#endregion
#region NormalizeWithDetails Tests
[Fact]
public void NormalizeWithDetails_ReturnsCorrectDimension()
{
var input = CreateBaseInput();
var result = _sut.NormalizeWithDetails(input);
result.Dimension.Should().Be("SRC");
}
[Fact]
public void NormalizeWithDetails_ReturnsComponents()
{
var input = CreateBaseInput() with
{
IsCryptographicallyAttested = true,
IndependentlyVerified = true,
CorroboratingSourceCount = 2,
HistoricalAccuracy = 0.92
};
var result = _sut.NormalizeWithDetails(input);
result.Components.Should().ContainKey("issuer_type");
result.Components.Should().ContainKey("issuer_multiplier");
result.Components.Should().ContainKey("provenance_trust");
result.Components.Should().ContainKey("coverage_completeness");
result.Components.Should().ContainKey("replayability");
result.Components.Should().ContainKey("trust_vector_score");
result.Components.Should().ContainKey("attestation_bonus");
result.Components.Should().ContainKey("corroboration_bonus");
result.Components.Should().ContainKey("historical_bonus");
result.Components.Should().ContainKey("cryptographically_attested");
result.Components.Should().ContainKey("independently_verified");
result.Components.Should().ContainKey("corroborating_sources");
result.Components.Should().ContainKey("historical_accuracy");
result.Components["cryptographically_attested"].Should().Be(1.0);
result.Components["independently_verified"].Should().Be(1.0);
result.Components["corroborating_sources"].Should().Be(2);
result.Components["historical_accuracy"].Should().Be(0.92);
}
[Fact]
public void NormalizeWithDetails_GeneratesExplanation()
{
var input = CreateBaseInput() with
{
IssuerType = IssuerType.Vendor,
IssuerId = "redhat-psirt",
IsCryptographicallyAttested = true,
CorroboratingSourceCount = 2,
HistoricalAccuracy = 0.95
};
var result = _sut.NormalizeWithDetails(input);
result.Explanation.Should().Contain("software vendor");
result.Explanation.Should().Contain("redhat-psirt");
result.Explanation.Should().Contain("cryptographically attested");
result.Explanation.Should().Contain("2 corroborating source(s)");
result.Explanation.Should().Contain("95%");
result.Explanation.Should().Contain("SRC=");
}
[Fact]
public void NormalizeWithDetails_UnknownSource_ExplainsCorrectly()
{
var input = CreateBaseInput() with { IssuerType = IssuerType.Unknown };
var result = _sut.NormalizeWithDetails(input);
result.Explanation.Should().Contain("unknown source");
}
#endregion
#region Null Input Tests
[Fact]
public void Normalize_NullInput_ThrowsArgumentNullException()
{
var act = () => _sut.Normalize(null!);
act.Should().Throw<ArgumentNullException>();
}
[Fact]
public void NormalizeWithDetails_NullInput_ThrowsArgumentNullException()
{
var act = () => _sut.NormalizeWithDetails(null!);
act.Should().Throw<ArgumentNullException>();
}
#endregion
#region DI Integration Tests
[Fact]
public void Constructor_WithIOptionsMonitor_WorksCorrectly()
{
var options = new NormalizerOptions
{
SourceTrust = new SourceTrustNormalizerOptions
{
VendorMultiplier = 1.2, // Custom multiplier
SignedBonus = 0.15 // Custom bonus
}
};
var optionsMonitor = new TestOptionsMonitor(options);
var normalizer = new SourceTrustNormalizer(optionsMonitor);
var input = CreateBaseInput() with
{
IssuerType = IssuerType.Vendor,
IsCryptographicallyAttested = true
};
var result = normalizer.Normalize(input);
// Should reflect custom options
result.Should().BeGreaterThan(0.90);
}
private sealed class TestOptionsMonitor(NormalizerOptions value) : IOptionsMonitor<NormalizerOptions>
{
public NormalizerOptions CurrentValue => value;
public NormalizerOptions Get(string? name) => value;
public IDisposable? OnChange(Action<NormalizerOptions, string?> listener) => null;
}
#endregion
#region Score Capping Tests
[Fact]
public void Normalize_MaxBonuses_CappedAtOne()
{
var maxInput = new SourceTrustInput
{
IssuerType = IssuerType.GovernmentAgency,
ProvenanceTrust = 1.0,
CoverageCompleteness = 1.0,
Replayability = 1.0,
IsCryptographicallyAttested = true,
IndependentlyVerified = true,
HistoricalAccuracy = 0.99,
CorroboratingSourceCount = 10
};
var result = _sut.Normalize(maxInput);
result.Should().BeLessThanOrEqualTo(1.0);
}
[Fact]
public void Normalize_MinimalInput_NotNegative()
{
var minInput = new SourceTrustInput
{
IssuerType = IssuerType.Unknown,
ProvenanceTrust = 0.0,
CoverageCompleteness = 0.0,
Replayability = 0.0,
HistoricalAccuracy = 0.2 // Poor history = penalty
};
var result = _sut.Normalize(minInput);
result.Should().BeGreaterThanOrEqualTo(0.0);
}
#endregion
#region Determinism Tests
[Fact]
public void Normalize_SameInput_ProducesSameOutput()
{
var input = new SourceTrustInput
{
IssuerType = IssuerType.Distribution,
IssuerId = "debian-security",
ProvenanceTrust = 0.82,
CoverageCompleteness = 0.75,
Replayability = 0.88,
IsCryptographicallyAttested = true,
CorroboratingSourceCount = 1,
HistoricalAccuracy = 0.90
};
var results = Enumerable.Range(0, 100)
.Select(_ => _sut.Normalize(input))
.Distinct()
.ToList();
results.Should().ContainSingle("Deterministic normalizer should produce identical results");
}
#endregion
#region All IssuerTypes Valid Range Tests
[Theory]
[InlineData(IssuerType.Unknown)]
[InlineData(IssuerType.Community)]
[InlineData(IssuerType.SecurityResearcher)]
[InlineData(IssuerType.Distribution)]
[InlineData(IssuerType.Upstream)]
[InlineData(IssuerType.Vendor)]
[InlineData(IssuerType.Cna)]
[InlineData(IssuerType.GovernmentAgency)]
public void Normalize_AllIssuerTypes_ReturnValidRange(IssuerType issuerType)
{
var input = CreateBaseInput() with { IssuerType = issuerType };
var result = _sut.Normalize(input);
result.Should().BeInRange(0.0, 1.0);
}
#endregion
#region Helper Methods
private static SourceTrustInput CreateBaseInput() => new()
{
IssuerType = IssuerType.Vendor,
ProvenanceTrust = 0.80,
CoverageCompleteness = 0.75,
Replayability = 0.70
};
#endregion
}
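// ---------------------------------------------------------------------------
// Composition sketch (an inference from the tests above): the component keys
// (trust_vector_score, issuer_multiplier, attestation_bonus,
// corroboration_bonus, historical_bonus), the "provenance weighted highest
// (40%)" test, and the floor/cap tests suggest a weighted trust vector scaled
// by the issuer multiplier plus additive bonuses, clamped to [0, 1]. The 0.30
// weights for coverage and replayability are assumptions; the real weights
// and arithmetic may differ.
// ---------------------------------------------------------------------------
internal static class SourceTrustCompositionSketch
{
public static double Compose(
double provenance, double coverage, double replayability,
double issuerMultiplier, double attestationBonus, double corroborationBonus, double historicalBonus)
{
var trustVector = 0.40 * provenance + 0.30 * coverage + 0.30 * replayability; // illustrative weights
return Math.Clamp(trustVector * issuerMultiplier + attestationBonus + corroborationBonus + historicalBonus, 0.0, 1.0);
}
}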