sprints work

This commit is contained in:
StellaOps Bot
2025-12-25 12:19:12 +02:00
parent 223843f1d1
commit 2a06f780cf
224 changed files with 41796 additions and 1515 deletions

View File

@@ -0,0 +1,696 @@
// -----------------------------------------------------------------------------
// ScoreProvenanceChain.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-030
// Description: Score provenance chain linking Finding → Evidence → Score → Verdict
// -----------------------------------------------------------------------------
using System.Collections.Immutable;
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
using System.Text.Json.Serialization;
namespace StellaOps.Policy.Engine.Attestation;
/// <summary>
/// Complete provenance chain tracking a vulnerability finding through
/// evidence collection, score calculation, and policy verdict.
/// </summary>
/// <remarks>
/// This chain provides audit-grade traceability:
/// 1. **Finding**: The vulnerability that triggered evaluation (CVE, PURL, digest).
/// 2. **Evidence**: The attestations/documents considered (SBOM, VEX, reachability).
/// 3. **Score**: The EWS calculation with all inputs and weights.
/// 4. **Verdict**: The final policy decision with rule chain.
///
/// Each step includes content-addressed references for deterministic replay.
/// </remarks>
public sealed record ScoreProvenanceChain
{
    /// <summary>
    /// Creates a new ScoreProvenanceChain.
    /// </summary>
    /// <param name="finding">Reference to the finding under evaluation.</param>
    /// <param name="evidenceSet">Evidence attestations considered during scoring.</param>
    /// <param name="score">Score computation node (inputs, weights, result).</param>
    /// <param name="verdict">Reference to the final policy verdict.</param>
    /// <param name="createdAt">Chain creation timestamp; participates in <see cref="ChainDigest"/>.</param>
    /// <exception cref="ArgumentNullException">Thrown when any reference argument is null.</exception>
    public ScoreProvenanceChain(
        ProvenanceFindingRef finding,
        ProvenanceEvidenceSet evidenceSet,
        ProvenanceScoreNode score,
        ProvenanceVerdictRef verdict,
        DateTimeOffset createdAt)
    {
        Finding = finding ?? throw new ArgumentNullException(nameof(finding));
        EvidenceSet = evidenceSet ?? throw new ArgumentNullException(nameof(evidenceSet));
        Score = score ?? throw new ArgumentNullException(nameof(score));
        Verdict = verdict ?? throw new ArgumentNullException(nameof(verdict));
        CreatedAt = createdAt;
        // Digest is computed once at construction over all chain members;
        // ValidateIntegrity recomputes it later to detect tampering.
        ChainDigest = ComputeChainDigest();
    }

    /// <summary>
    /// Reference to the vulnerability finding that triggered evaluation.
    /// </summary>
    public ProvenanceFindingRef Finding { get; }

    /// <summary>
    /// Set of evidence attestations that were considered.
    /// </summary>
    public ProvenanceEvidenceSet EvidenceSet { get; }

    /// <summary>
    /// Score computation node with inputs, weights, and result.
    /// </summary>
    public ProvenanceScoreNode Score { get; }

    /// <summary>
    /// Reference to the final policy verdict.
    /// </summary>
    public ProvenanceVerdictRef Verdict { get; }

    /// <summary>
    /// Chain creation timestamp (UTC).
    /// </summary>
    public DateTimeOffset CreatedAt { get; }

    /// <summary>
    /// Digest of the entire provenance chain for tamper detection.
    /// Lowercase hex SHA-256 over the canonical projection built in
    /// <see cref="ComputeChainDigest"/>.
    /// </summary>
    public string ChainDigest { get; }

    /// <summary>
    /// Validates the chain integrity by recomputing the digest.
    /// </summary>
    /// <returns>True when the stored digest matches the recomputed one.</returns>
    public bool ValidateIntegrity()
    {
        var recomputed = ComputeChainDigest();
        // Ordinal comparison: digests are lowercase hex, not culture-sensitive text.
        return string.Equals(ChainDigest, recomputed, StringComparison.Ordinal);
    }

    /// <summary>
    /// Gets a summary of the provenance chain for logging.
    /// </summary>
    public string GetSummary()
    {
        return $"[{Finding.VulnerabilityId}] " +
            $"Evidence({EvidenceSet.TotalCount}) → " +
            $"Score({Score.FinalScore}, {Score.Bucket}) → " +
            $"Verdict({Verdict.Status})";
    }

    // Computes the SHA-256 digest over a canonical projection of the chain.
    // WARNING: the anonymous-type member order below defines the serialized key
    // order and therefore the digest value. Do not reorder, rename, add, or
    // remove members without versioning the digest scheme.
    private string ComputeChainDigest()
    {
        // Canonical structure for hashing
        var canonical = new
        {
            finding = new
            {
                vuln_id = Finding.VulnerabilityId,
                component_purl = Finding.ComponentPurl,
                finding_digest = Finding.FindingDigest
            },
            evidence_set = new
            {
                // Counts plus the evidence-set digest; individual refs are already
                // folded into SetDigest by ProvenanceEvidenceSet.
                sbom_count = EvidenceSet.SbomRefs.Length,
                vex_count = EvidenceSet.VexRefs.Length,
                reachability_count = EvidenceSet.ReachabilityRefs.Length,
                scan_count = EvidenceSet.ScanRefs.Length,
                evidence_digest = EvidenceSet.SetDigest
            },
            score = new
            {
                final_score = Score.FinalScore,
                bucket = Score.Bucket,
                policy_digest = Score.PolicyDigest,
                input_digest = Score.InputDigest
            },
            verdict = new
            {
                status = Verdict.Status,
                severity = Verdict.Severity,
                rule_name = Verdict.MatchedRuleName,
                verdict_digest = Verdict.VerdictDigest
            },
            // Round-trip ("O") format keeps the timestamp culture-invariant.
            created_at = CreatedAt.ToUniversalTime().ToString("O")
        };
        var json = JsonSerializer.Serialize(canonical, ProvenanceJsonOptions.Default);
        var hash = SHA256.HashData(Encoding.UTF8.GetBytes(json));
        return Convert.ToHexStringLower(hash);
    }

    /// <summary>
    /// Creates a ScoreProvenanceChain from a VerdictPredicate and supporting context.
    /// </summary>
    /// <exception cref="ArgumentNullException">Thrown when any argument is null.</exception>
    public static ScoreProvenanceChain FromVerdictPredicate(
        VerdictPredicate predicate,
        ProvenanceFindingRef finding,
        ProvenanceEvidenceSet evidenceSet)
    {
        ArgumentNullException.ThrowIfNull(predicate);
        ArgumentNullException.ThrowIfNull(finding);
        ArgumentNullException.ThrowIfNull(evidenceSet);
        var scoreNode = ProvenanceScoreNode.FromVerdictEws(predicate.EvidenceWeightedScore, predicate.FindingId);
        var verdictRef = ProvenanceVerdictRef.FromVerdictPredicate(predicate);
        // NOTE(review): CreatedAt uses wall-clock time, so two chains built from
        // identical inputs yield different ChainDigests. Consider injecting a
        // TimeProvider if byte-for-byte replay of the chain itself is required.
        return new ScoreProvenanceChain(
            finding: finding,
            evidenceSet: evidenceSet,
            score: scoreNode,
            verdict: verdictRef,
            createdAt: DateTimeOffset.UtcNow
        );
    }
}
/// <summary>
/// Reference to the vulnerability finding that triggered evaluation.
/// </summary>
public sealed record ProvenanceFindingRef
{
    /// <summary>
    /// Creates a new ProvenanceFindingRef.
    /// </summary>
    /// <param name="vulnerabilityId">Vulnerability identifier; required.</param>
    /// <param name="componentPurl">Affected component PURL; optional.</param>
    /// <param name="findingDigest">Content digest of the finding document; optional.</param>
    /// <param name="advisorySource">Advisory source name; optional.</param>
    /// <param name="publishedAt">Advisory publication date; optional.</param>
    /// <exception cref="ArgumentNullException">Thrown when <paramref name="vulnerabilityId"/> normalizes to null.</exception>
    public ProvenanceFindingRef(
        string vulnerabilityId,
        string? componentPurl = null,
        string? findingDigest = null,
        string? advisorySource = null,
        DateTimeOffset? publishedAt = null)
    {
        // Validation.TrimToNull presumably trims and maps empty/whitespace input
        // to null — TODO confirm against the helper's definition.
        VulnerabilityId = Validation.TrimToNull(vulnerabilityId)
            ?? throw new ArgumentNullException(nameof(vulnerabilityId));
        ComponentPurl = Validation.TrimToNull(componentPurl);
        FindingDigest = Validation.TrimToNull(findingDigest);
        AdvisorySource = Validation.TrimToNull(advisorySource);
        PublishedAt = publishedAt;
    }

    // NOTE(review): this record is an attestation payload; property declaration
    // order defines the serialized field order — keep it stable.

    /// <summary>
    /// Vulnerability identifier (CVE, GHSA, etc.).
    /// </summary>
    public string VulnerabilityId { get; }

    /// <summary>
    /// Package URL of the affected component (optional).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? ComponentPurl { get; }

    /// <summary>
    /// Content digest of the finding document (optional).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? FindingDigest { get; }

    /// <summary>
    /// Advisory source (NVD, OSV, vendor, etc.).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? AdvisorySource { get; }

    /// <summary>
    /// Advisory publication date (optional).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public DateTimeOffset? PublishedAt { get; }
}
/// <summary>
/// Set of evidence attestations considered during scoring.
/// </summary>
public sealed record ProvenanceEvidenceSet
{
    /// <summary>
    /// Creates a new ProvenanceEvidenceSet. Each category is normalized
    /// (nulls dropped, sorted by type then digest) and the set digest is
    /// computed once at construction.
    /// </summary>
    /// <param name="sbomRefs">SBOM attestation references.</param>
    /// <param name="vexRefs">VEX document references.</param>
    /// <param name="reachabilityRefs">Reachability attestation references.</param>
    /// <param name="scanRefs">Scan result attestation references.</param>
    /// <param name="otherRefs">Any other evidence references.</param>
    public ProvenanceEvidenceSet(
        IEnumerable<ProvenanceEvidenceRef>? sbomRefs = null,
        IEnumerable<ProvenanceEvidenceRef>? vexRefs = null,
        IEnumerable<ProvenanceEvidenceRef>? reachabilityRefs = null,
        IEnumerable<ProvenanceEvidenceRef>? scanRefs = null,
        IEnumerable<ProvenanceEvidenceRef>? otherRefs = null)
    {
        SbomRefs = NormalizeRefs(sbomRefs);
        VexRefs = NormalizeRefs(vexRefs);
        ReachabilityRefs = NormalizeRefs(reachabilityRefs);
        ScanRefs = NormalizeRefs(scanRefs);
        OtherRefs = NormalizeRefs(otherRefs);
        SetDigest = ComputeSetDigest();
    }

    /// <summary>
    /// SBOM attestation references.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<ProvenanceEvidenceRef> SbomRefs { get; }

    /// <summary>
    /// VEX document references.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<ProvenanceEvidenceRef> VexRefs { get; }

    /// <summary>
    /// Reachability analysis attestation references.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<ProvenanceEvidenceRef> ReachabilityRefs { get; }

    /// <summary>
    /// Scan result attestation references.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<ProvenanceEvidenceRef> ScanRefs { get; }

    /// <summary>
    /// Other evidence references.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<ProvenanceEvidenceRef> OtherRefs { get; }

    /// <summary>
    /// Digest of the entire evidence set: SHA-256 over the sorted, colon-joined
    /// evidence digests, or the literal "empty" sentinel when no digests exist.
    /// </summary>
    public string SetDigest { get; }

    /// <summary>
    /// Total count of all evidence references.
    /// </summary>
    public int TotalCount =>
        SbomRefs.Length + VexRefs.Length + ReachabilityRefs.Length + ScanRefs.Length + OtherRefs.Length;

    /// <summary>
    /// Whether any evidence was considered.
    /// </summary>
    public bool HasEvidence => TotalCount > 0;

    /// <summary>
    /// Gets all references in deterministic order (each category is pre-sorted
    /// by NormalizeRefs; categories are concatenated in a fixed sequence).
    /// </summary>
    public IEnumerable<ProvenanceEvidenceRef> GetAllRefs()
    {
        return SbomRefs
            .Concat(VexRefs)
            .Concat(ReachabilityRefs)
            .Concat(ScanRefs)
            .Concat(OtherRefs);
    }

    // Drops nulls and sorts by (Type, Digest) with ordinal comparison so the
    // resulting array — and anything hashed over it — is input-order independent.
    // Note: duplicates are intentionally kept; callers own de-duplication.
    private static ImmutableArray<ProvenanceEvidenceRef> NormalizeRefs(IEnumerable<ProvenanceEvidenceRef>? refs)
    {
        if (refs is null)
        {
            return [];
        }
        return refs
            .Where(static r => r is not null)
            .OrderBy(static r => r.Type, StringComparer.Ordinal)
            .ThenBy(static r => r.Digest, StringComparer.Ordinal)
            .ToImmutableArray();
    }

    // Folds every evidence digest (sorted, colon-joined) into one SHA-256.
    // "empty" is a stable sentinel distinguishing "no evidence" from a hash.
    private string ComputeSetDigest()
    {
        var digests = GetAllRefs()
            .Select(static r => r.Digest)
            .Where(static d => !string.IsNullOrEmpty(d))
            .OrderBy(static d => d, StringComparer.Ordinal)
            .ToArray();
        if (digests.Length == 0)
        {
            return "empty";
        }
        var combined = string.Join(":", digests);
        var hash = SHA256.HashData(Encoding.UTF8.GetBytes(combined));
        return Convert.ToHexStringLower(hash);
    }

    /// <summary>
    /// Empty evidence set.
    /// </summary>
    // Cached: the previous `=> new()` allocated a fresh instance (and re-derived
    // SetDigest) on every access. The record is immutable, so a single shared
    // instance is safe, and record value-equality semantics are unchanged.
    public static ProvenanceEvidenceSet Empty { get; } = new();
}
/// <summary>
/// Reference to a single evidence attestation.
/// </summary>
public sealed record ProvenanceEvidenceRef
{
    /// <summary>
    /// Creates a new ProvenanceEvidenceRef.
    /// </summary>
    /// <param name="type">Evidence type, e.g. one of <see cref="Types"/>; required.</param>
    /// <param name="digest">Content digest of the evidence; required.</param>
    /// <param name="uri">URI reference to the evidence; optional.</param>
    /// <param name="provider">Evidence provider; optional.</param>
    /// <param name="createdAt">Evidence creation timestamp; optional.</param>
    /// <param name="status">Evidence status (e.g. VEX status); optional.</param>
    /// <exception cref="ArgumentNullException">Thrown when <paramref name="type"/> or <paramref name="digest"/> normalizes to null.</exception>
    public ProvenanceEvidenceRef(
        string type,
        string digest,
        string? uri = null,
        string? provider = null,
        DateTimeOffset? createdAt = null,
        string? status = null)
    {
        Type = Validation.TrimToNull(type) ?? throw new ArgumentNullException(nameof(type));
        Digest = Validation.TrimToNull(digest) ?? throw new ArgumentNullException(nameof(digest));
        Uri = Validation.TrimToNull(uri);
        Provider = Validation.TrimToNull(provider);
        CreatedAt = createdAt;
        Status = Validation.TrimToNull(status);
    }

    /// <summary>
    /// Evidence type (sbom, vex, reachability, scan, etc.).
    /// Used as the primary sort key when normalizing evidence sets.
    /// </summary>
    public string Type { get; }

    /// <summary>
    /// Content digest of the evidence attestation. Feeds the evidence-set digest.
    /// </summary>
    public string Digest { get; }

    /// <summary>
    /// URI reference to the evidence (optional).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? Uri { get; }

    /// <summary>
    /// Evidence provider (vendor, tool, etc.).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? Provider { get; }

    /// <summary>
    /// Evidence creation timestamp.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public DateTimeOffset? CreatedAt { get; }

    /// <summary>
    /// Evidence status (e.g., VEX status).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? Status { get; }

    /// <summary>
    /// Well-known evidence types. Values are wire-format identifiers — do not
    /// change the string constants without a schema migration.
    /// </summary>
    public static class Types
    {
        public const string Sbom = "sbom";
        public const string Vex = "vex";
        public const string Reachability = "reachability";
        public const string Scan = "scan";
        public const string Advisory = "advisory";
        public const string RuntimeSignal = "runtime-signal";
        public const string BackportAnalysis = "backport-analysis";
        public const string ExploitIntel = "exploit-intel";
    }
}
/// <summary>
/// Score computation node in the provenance chain.
/// </summary>
public sealed record ProvenanceScoreNode
{
    /// <summary>
    /// Creates a new ProvenanceScoreNode.
    /// </summary>
    /// <param name="finalScore">Final computed score.</param>
    /// <param name="bucket">Score bucket name; required.</param>
    /// <param name="inputs">Normalized input values; required.</param>
    /// <param name="weights">Per-dimension weights; required.</param>
    /// <param name="policyDigest">Digest of the policy used; required.</param>
    /// <param name="calculatorVersion">Calculator version; required.</param>
    /// <param name="calculatedAt">Calculation timestamp.</param>
    /// <param name="appliedFlags">Flags applied during scoring; optional.</param>
    /// <param name="guardrails">Guardrails applied during scoring; optional.</param>
    /// <exception cref="ArgumentNullException">Thrown when a required argument is null or normalizes to null.</exception>
    public ProvenanceScoreNode(
        int finalScore,
        string bucket,
        VerdictEvidenceInputs inputs,
        VerdictEvidenceWeights weights,
        string policyDigest,
        string calculatorVersion,
        DateTimeOffset calculatedAt,
        IEnumerable<string>? appliedFlags = null,
        VerdictAppliedGuardrails? guardrails = null)
    {
        FinalScore = finalScore;
        Bucket = Validation.TrimToNull(bucket) ?? throw new ArgumentNullException(nameof(bucket));
        Inputs = inputs ?? throw new ArgumentNullException(nameof(inputs));
        Weights = weights ?? throw new ArgumentNullException(nameof(weights));
        PolicyDigest = Validation.TrimToNull(policyDigest) ?? throw new ArgumentNullException(nameof(policyDigest));
        CalculatorVersion = Validation.TrimToNull(calculatorVersion) ?? throw new ArgumentNullException(nameof(calculatorVersion));
        CalculatedAt = calculatedAt;
        AppliedFlags = NormalizeFlags(appliedFlags);
        Guardrails = guardrails;
        InputDigest = ComputeInputDigest();
    }

    /// <summary>
    /// Final computed score [0, 100].
    /// </summary>
    public int FinalScore { get; }

    /// <summary>
    /// Score bucket (ActNow, ScheduleNext, Investigate, Watchlist).
    /// </summary>
    public string Bucket { get; }

    /// <summary>
    /// Normalized input values used for calculation.
    /// </summary>
    public VerdictEvidenceInputs Inputs { get; }

    /// <summary>
    /// Weights applied to each dimension.
    /// </summary>
    public VerdictEvidenceWeights Weights { get; }

    /// <summary>
    /// Policy digest used for calculation.
    /// </summary>
    public string PolicyDigest { get; }

    /// <summary>
    /// Calculator version for reproducibility.
    /// </summary>
    public string CalculatorVersion { get; }

    /// <summary>
    /// Calculation timestamp (UTC).
    /// </summary>
    public DateTimeOffset CalculatedAt { get; }

    /// <summary>
    /// Flags applied during scoring (trimmed, case-insensitively de-duplicated,
    /// ordinal-sorted).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<string> AppliedFlags { get; }

    /// <summary>
    /// Guardrails applied during scoring.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public VerdictAppliedGuardrails? Guardrails { get; }

    /// <summary>
    /// Digest of inputs for verification.
    /// </summary>
    public string InputDigest { get; }

    // Normalizes flags deterministically: trim, drop blanks, de-duplicate
    // case-insensitively keeping the Ordinal-smallest spelling per group, then
    // sort with Ordinal comparison.
    // FIX: the previous form chained OrderBy(...Ordinal).Distinct(...IgnoreCase)
    // and relied on Distinct preserving encounter order — the BCL documents
    // Distinct's result sequence as unordered, so the output ordering (and any
    // digest derived from serialized flags) was not formally guaranteed.
    private static ImmutableArray<string> NormalizeFlags(IEnumerable<string>? flags)
    {
        if (flags is null)
        {
            return [];
        }
        return flags
            .Select(static f => f?.Trim())
            .Where(static f => !string.IsNullOrEmpty(f))
            .Select(static f => f!)
            .GroupBy(static f => f, StringComparer.OrdinalIgnoreCase)
            .Select(static g => g.OrderBy(static f => f, StringComparer.Ordinal).First())
            .OrderBy(static f => f, StringComparer.Ordinal)
            .ToImmutableArray();
    }

    // Computes the SHA-256 digest over the normalized inputs and weights.
    // WARNING: anonymous-type member order defines the canonical JSON key order
    // and therefore the digest — do not reorder or rename members without
    // versioning the digest scheme.
    private string ComputeInputDigest()
    {
        var canonical = new
        {
            rch = Inputs.Reachability,
            rts = Inputs.Runtime,
            bkp = Inputs.Backport,
            xpl = Inputs.Exploit,
            src = Inputs.SourceTrust,
            mit = Inputs.Mitigation,
            w_rch = Weights.Reachability,
            w_rts = Weights.Runtime,
            w_bkp = Weights.Backport,
            w_xpl = Weights.Exploit,
            w_src = Weights.SourceTrust,
            w_mit = Weights.Mitigation
        };
        var json = JsonSerializer.Serialize(canonical, ProvenanceJsonOptions.Default);
        var hash = SHA256.HashData(Encoding.UTF8.GetBytes(json));
        return Convert.ToHexStringLower(hash);
    }

    /// <summary>
    /// Creates a ProvenanceScoreNode from a VerdictEvidenceWeightedScore.
    /// Degrades gracefully: a null EWS yields a zeroed placeholder node, and an
    /// EWS without a proof yields a node with "unknown" provenance fields.
    /// </summary>
    public static ProvenanceScoreNode FromVerdictEws(VerdictEvidenceWeightedScore? ews, string findingId)
    {
        if (ews is null)
        {
            // No EWS - create a placeholder node
            return new ProvenanceScoreNode(
                finalScore: 0,
                bucket: "Unknown",
                inputs: new VerdictEvidenceInputs(0, 0, 0, 0, 0, 0),
                weights: new VerdictEvidenceWeights(0, 0, 0, 0, 0, 0),
                policyDigest: "none",
                calculatorVersion: "none",
                calculatedAt: DateTimeOffset.UtcNow
            );
        }
        var proof = ews.Proof;
        if (proof is null)
        {
            // EWS without proof - use available data
            return new ProvenanceScoreNode(
                finalScore: ews.Score,
                bucket: ews.Bucket,
                inputs: new VerdictEvidenceInputs(0, 0, 0, 0, 0, 0),
                weights: new VerdictEvidenceWeights(0, 0, 0, 0, 0, 0),
                policyDigest: ews.PolicyDigest ?? "unknown",
                calculatorVersion: "unknown",
                calculatedAt: ews.CalculatedAt ?? DateTimeOffset.UtcNow,
                appliedFlags: ews.Flags,
                guardrails: ews.Guardrails
            );
        }
        return new ProvenanceScoreNode(
            finalScore: ews.Score,
            bucket: ews.Bucket,
            inputs: proof.Inputs,
            weights: proof.Weights,
            policyDigest: proof.PolicyDigest,
            calculatorVersion: proof.CalculatorVersion,
            calculatedAt: proof.CalculatedAt,
            appliedFlags: ews.Flags,
            guardrails: ews.Guardrails
        );
    }
}
/// <summary>
/// Reference to the final policy verdict.
/// </summary>
public sealed record ProvenanceVerdictRef
{
    /// <summary>
    /// Creates a new ProvenanceVerdictRef.
    /// </summary>
    /// <param name="status">Verdict status; required.</param>
    /// <param name="severity">Final severity; required.</param>
    /// <param name="matchedRuleName">Name of the rule that matched; required.</param>
    /// <param name="matchedRulePriority">Priority of the matched rule.</param>
    /// <param name="verdictDigest">Content digest of the verdict; required.</param>
    /// <param name="evaluatedAt">Evaluation timestamp.</param>
    /// <param name="rationale">Human-readable rationale; optional.</param>
    /// <exception cref="ArgumentNullException">Thrown when a required string normalizes to null.</exception>
    public ProvenanceVerdictRef(
        string status,
        string severity,
        string matchedRuleName,
        int matchedRulePriority,
        string verdictDigest,
        DateTimeOffset evaluatedAt,
        string? rationale = null)
    {
        Status = Validation.TrimToNull(status) ?? throw new ArgumentNullException(nameof(status));
        Severity = Validation.TrimToNull(severity) ?? throw new ArgumentNullException(nameof(severity));
        MatchedRuleName = Validation.TrimToNull(matchedRuleName) ?? throw new ArgumentNullException(nameof(matchedRuleName));
        MatchedRulePriority = matchedRulePriority;
        VerdictDigest = Validation.TrimToNull(verdictDigest) ?? throw new ArgumentNullException(nameof(verdictDigest));
        EvaluatedAt = evaluatedAt;
        Rationale = Validation.TrimToNull(rationale);
    }

    /// <summary>
    /// Verdict status (affected, not_affected, fixed, etc.).
    /// </summary>
    public string Status { get; }

    /// <summary>
    /// Final severity determination.
    /// </summary>
    public string Severity { get; }

    /// <summary>
    /// Name of the policy rule that matched.
    /// </summary>
    public string MatchedRuleName { get; }

    /// <summary>
    /// Priority of the matched rule.
    /// </summary>
    public int MatchedRulePriority { get; }

    /// <summary>
    /// Content digest of the verdict for verification (lowercase hex SHA-256).
    /// </summary>
    public string VerdictDigest { get; }

    /// <summary>
    /// Evaluation timestamp (UTC).
    /// </summary>
    public DateTimeOffset EvaluatedAt { get; }

    /// <summary>
    /// Human-readable rationale (optional).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? Rationale { get; }

    /// <summary>
    /// Creates a ProvenanceVerdictRef from a VerdictPredicate.
    /// </summary>
    /// <exception cref="ArgumentNullException">Thrown when <paramref name="predicate"/> is null.</exception>
    public static ProvenanceVerdictRef FromVerdictPredicate(VerdictPredicate predicate)
    {
        ArgumentNullException.ThrowIfNull(predicate);
        // Compute verdict digest from key fields
        // WARNING: the anonymous-type member order defines the canonical JSON key
        // order and therefore the digest — do not reorder without versioning.
        var canonical = new
        {
            tenant_id = predicate.TenantId,
            policy_id = predicate.PolicyId,
            policy_version = predicate.PolicyVersion,
            finding_id = predicate.FindingId,
            status = predicate.Verdict.Status,
            severity = predicate.Verdict.Severity,
            score = predicate.Verdict.Score,
            evaluated_at = predicate.EvaluatedAt.ToUniversalTime().ToString("O")
        };
        var json = JsonSerializer.Serialize(canonical, ProvenanceJsonOptions.Default);
        var hash = SHA256.HashData(Encoding.UTF8.GetBytes(json));
        var verdictDigest = Convert.ToHexStringLower(hash);
        // Get matched rule name from rule chain
        // NOTE(review): assumes the first entry of RuleChain is the decisive rule
        // — confirm the chain's ordering contract; "default" is used when empty.
        var matchedRule = predicate.RuleChain.FirstOrDefault();
        var matchedRuleName = matchedRule?.RuleId ?? "default";
        return new ProvenanceVerdictRef(
            status: predicate.Verdict.Status,
            severity: predicate.Verdict.Severity,
            matchedRuleName: matchedRuleName,
            matchedRulePriority: 0, // Priority not directly available from predicate
            verdictDigest: verdictDigest,
            evaluatedAt: predicate.EvaluatedAt,
            rationale: predicate.Verdict.Rationale
        );
    }
}
/// <summary>
/// JSON serialization options for provenance chain.
/// </summary>
internal static class ProvenanceJsonOptions
{
    /// <summary>
    /// Shared canonical serializer options: snake_case keys, compact output,
    /// null members omitted. Built once and reused for every digest computation.
    /// </summary>
    public static JsonSerializerOptions Default { get; } = Create();

    // Single construction point keeps the canonical settings in one place.
    private static JsonSerializerOptions Create()
    {
        var options = new JsonSerializerOptions
        {
            PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseLower,
            WriteIndented = false,
            DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull
        };
        return options;
    }
}

View File

@@ -0,0 +1,237 @@
// -----------------------------------------------------------------------------
// ScoringDeterminismVerifier.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-029
// Description: Scoring determinism verification for attestation verification
// -----------------------------------------------------------------------------
using Microsoft.Extensions.Logging;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Attestation;
/// <summary>
/// Result of scoring determinism verification.
/// </summary>
public sealed record ScoringVerificationResult
{
    /// <summary>
    /// Whether the score verification passed (recalculated matches attested).
    /// </summary>
    public required bool IsValid { get; init; }

    /// <summary>
    /// The attested score from the verdict.
    /// </summary>
    public required int AttestedScore { get; init; }

    /// <summary>
    /// The recalculated score using the proof inputs.
    /// </summary>
    public required int RecalculatedScore { get; init; }

    /// <summary>
    /// Difference between attested and recalculated (should be 0 for valid).
    /// </summary>
    public int Difference
    {
        get { return Math.Abs(AttestedScore - RecalculatedScore); }
    }

    /// <summary>
    /// Error message if verification failed.
    /// </summary>
    public string? Error { get; init; }

    /// <summary>
    /// Builds a passing result where both scores agree.
    /// </summary>
    public static ScoringVerificationResult Success(int score)
    {
        return new ScoringVerificationResult
        {
            IsValid = true,
            AttestedScore = score,
            RecalculatedScore = score,
            Error = null
        };
    }

    /// <summary>
    /// Builds a failing result carrying both scores and a mismatch message.
    /// </summary>
    public static ScoringVerificationResult ScoreMismatch(int attested, int recalculated)
    {
        var diff = Math.Abs(attested - recalculated);
        return new ScoringVerificationResult
        {
            IsValid = false,
            AttestedScore = attested,
            RecalculatedScore = recalculated,
            Error = $"Score mismatch: attested={attested}, recalculated={recalculated}, diff={diff}"
        };
    }

    /// <summary>
    /// Builds a failing result for an attested score that has no proof to replay.
    /// </summary>
    public static ScoringVerificationResult MissingProof(int attestedScore)
    {
        return new ScoringVerificationResult
        {
            IsValid = false,
            AttestedScore = attestedScore,
            RecalculatedScore = 0,
            Error = "No scoring proof available for verification"
        };
    }

    /// <summary>
    /// Builds a vacuously-valid result used when no EWS is present at all.
    /// </summary>
    public static ScoringVerificationResult Skipped()
    {
        return new ScoringVerificationResult
        {
            IsValid = true,
            AttestedScore = 0,
            RecalculatedScore = 0,
            Error = null
        };
    }
}
/// <summary>
/// Interface for scoring determinism verification.
/// </summary>
public interface IScoringDeterminismVerifier
{
    /// <summary>
    /// Verifies that the attested score can be reproduced from the proof.
    /// </summary>
    /// <param name="ews">The attested evidence-weighted score. May be null, in
    /// which case verification is skipped (treated as valid).</param>
    /// <returns>Verification result.</returns>
    ScoringVerificationResult Verify(VerdictEvidenceWeightedScore? ews);

    /// <summary>
    /// Verifies that a verdict predicate's score is deterministically reproducible.
    /// </summary>
    /// <param name="predicate">The verdict predicate to verify. May be null, in
    /// which case verification is skipped (treated as valid).</param>
    /// <returns>Verification result.</returns>
    ScoringVerificationResult VerifyPredicate(VerdictPredicate? predicate);
}
/// <summary>
/// Verifies scoring determinism by recalculating from proof inputs.
/// </summary>
public sealed class ScoringDeterminismVerifier : IScoringDeterminismVerifier
{
    private readonly IEvidenceWeightedScoreCalculator _calculator;
    private readonly ILogger<ScoringDeterminismVerifier> _logger;

    /// <summary>
    /// Creates a new ScoringDeterminismVerifier.
    /// </summary>
    public ScoringDeterminismVerifier(
        IEvidenceWeightedScoreCalculator calculator,
        ILogger<ScoringDeterminismVerifier> logger)
    {
        ArgumentNullException.ThrowIfNull(calculator);
        ArgumentNullException.ThrowIfNull(logger);
        _calculator = calculator;
        _logger = logger;
    }

    /// <inheritdoc />
    public ScoringVerificationResult Verify(VerdictEvidenceWeightedScore? ews)
    {
        // Nothing attested → nothing to verify.
        if (ews is null)
        {
            _logger.LogDebug("No EWS present in verdict, skipping determinism verification");
            return ScoringVerificationResult.Skipped();
        }

        // A score without its proof cannot be replayed.
        if (ews.Proof is null)
        {
            _logger.LogWarning(
                "EWS present but no proof available for determinism verification (score={Score})",
                ews.Score);
            return ScoringVerificationResult.MissingProof(ews.Score);
        }

        try
        {
            // Rebuild the calculator input from the attested proof values.
            var replayInput = new EvidenceWeightedScoreInput
            {
                FindingId = "verification", // Placeholder - not used in calculation
                Rch = ews.Proof.Inputs.Reachability,
                Rts = ews.Proof.Inputs.Runtime,
                Bkp = ews.Proof.Inputs.Backport,
                Xpl = ews.Proof.Inputs.Exploit,
                Src = ews.Proof.Inputs.SourceTrust,
                Mit = ews.Proof.Inputs.Mitigation,
            };

            // Rebuild the weights exactly as recorded in the proof.
            var replayWeights = new EvidenceWeights
            {
                Rch = ews.Proof.Weights.Reachability,
                Rts = ews.Proof.Weights.Runtime,
                Bkp = ews.Proof.Weights.Backport,
                Xpl = ews.Proof.Weights.Exploit,
                Src = ews.Proof.Weights.SourceTrust,
                Mit = ews.Proof.Weights.Mitigation,
            };

            // Wrap the proof weights in a throwaway policy for the calculator.
            var replayPolicy = new EvidenceWeightPolicy
            {
                Version = "ews.v1",
                Profile = "verification",
                Weights = replayWeights,
            };

            // Recalculate and compare against the attested score.
            var recalculated = _calculator.Calculate(replayInput, replayPolicy);
            if (recalculated.Score != ews.Score)
            {
                _logger.LogWarning(
                    "Scoring determinism failed: attested={Attested}, recalculated={Recalculated}",
                    ews.Score,
                    recalculated.Score);
                return ScoringVerificationResult.ScoreMismatch(ews.Score, recalculated.Score);
            }

            _logger.LogDebug(
                "Scoring determinism verified: score={Score}",
                ews.Score);
            return ScoringVerificationResult.Success(ews.Score);
        }
        catch (Exception ex)
        {
            // Any replay failure is reported as an invalid result rather than
            // propagated — verification must not crash the evaluation pipeline.
            _logger.LogError(ex, "Error during scoring determinism verification");
            return new ScoringVerificationResult
            {
                IsValid = false,
                AttestedScore = ews.Score,
                RecalculatedScore = 0,
                Error = $"Verification error: {ex.Message}"
            };
        }
    }

    /// <inheritdoc />
    public ScoringVerificationResult VerifyPredicate(VerdictPredicate? predicate)
    {
        if (predicate is null)
        {
            _logger.LogDebug("No predicate provided, skipping determinism verification");
            return ScoringVerificationResult.Skipped();
        }

        // Delegate to the EWS-level verification.
        return Verify(predicate.EvidenceWeightedScore);
    }
}
/// <summary>
/// Factory for creating scoring determinism verifiers.
/// </summary>
public static class ScoringDeterminismVerifierFactory
{
    /// <summary>
    /// Creates a new ScoringDeterminismVerifier with default calculator.
    /// </summary>
    public static IScoringDeterminismVerifier Create(ILogger<ScoringDeterminismVerifier> logger) =>
        new ScoringDeterminismVerifier(new EvidenceWeightedScoreCalculator(), logger);
}

View File

@@ -0,0 +1,266 @@
// -----------------------------------------------------------------------------
// VerdictBudgetCheck.cs
// Sprint: SPRINT_8200_0001_0006_budget_threshold_attestation
// Tasks: BUDGET-8200-006, BUDGET-8200-007
// Description: Budget check attestation data for verdict predicates
// -----------------------------------------------------------------------------
using System.Collections.Immutable;
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
using System.Text.Json.Serialization;
namespace StellaOps.Policy.Engine.Attestation;
/// <summary>
/// Budget check information for verdict attestation.
/// Captures the budget configuration and evaluation result at decision time.
/// </summary>
public sealed record VerdictBudgetCheck
{
    // Cached canonical options for ComputeConfigHash. Creating a new
    // JsonSerializerOptions per call defeats its internal metadata caching
    // (analyzer rule CA1869) and this hash runs on every attestation.
    private static readonly JsonSerializerOptions ConfigHashOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
        WriteIndented = false,
        DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull
    };

    /// <summary>
    /// Creates a new VerdictBudgetCheck.
    /// </summary>
    /// <param name="environment">Environment the budget was evaluated for; required.</param>
    /// <param name="config">Budget configuration applied; required.</param>
    /// <param name="actualCounts">Observed counts at evaluation time; required.</param>
    /// <param name="result">Check result (pass, warn, fail); required.</param>
    /// <param name="configHash">Deterministic hash of <paramref name="config"/>; required.</param>
    /// <param name="evaluatedAt">Evaluation timestamp.</param>
    /// <param name="violations">Limit violations, if any; normalized and sorted.</param>
    /// <exception cref="ArgumentNullException">Thrown when a required argument is null or normalizes to null.</exception>
    public VerdictBudgetCheck(
        string environment,
        VerdictBudgetConfig config,
        VerdictBudgetActualCounts actualCounts,
        string result,
        string configHash,
        DateTimeOffset evaluatedAt,
        IEnumerable<VerdictBudgetViolation>? violations = null)
    {
        Environment = Validation.TrimToNull(environment) ?? throw new ArgumentNullException(nameof(environment));
        Config = config ?? throw new ArgumentNullException(nameof(config));
        ActualCounts = actualCounts ?? throw new ArgumentNullException(nameof(actualCounts));
        Result = Validation.TrimToNull(result) ?? throw new ArgumentNullException(nameof(result));
        ConfigHash = Validation.TrimToNull(configHash) ?? throw new ArgumentNullException(nameof(configHash));
        EvaluatedAt = evaluatedAt;
        Violations = NormalizeViolations(violations);
    }

    /// <summary>
    /// Environment for which the budget was evaluated.
    /// </summary>
    public string Environment { get; }

    /// <summary>
    /// Budget configuration that was applied.
    /// </summary>
    public VerdictBudgetConfig Config { get; }

    /// <summary>
    /// Actual counts observed at evaluation time.
    /// </summary>
    public VerdictBudgetActualCounts ActualCounts { get; }

    /// <summary>
    /// Budget check result: pass, warn, fail.
    /// </summary>
    public string Result { get; }

    /// <summary>
    /// SHA-256 hash of budget configuration for determinism proof.
    /// Format: sha256:{64 hex characters}
    /// </summary>
    public string ConfigHash { get; }

    /// <summary>
    /// Timestamp when the budget was evaluated.
    /// </summary>
    public DateTimeOffset EvaluatedAt { get; }

    /// <summary>
    /// Violations if any limits were exceeded.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<VerdictBudgetViolation> Violations { get; }

    /// <summary>
    /// Computes a deterministic hash of a budget configuration.
    /// </summary>
    /// <exception cref="ArgumentNullException">Thrown when <paramref name="config"/> is null.</exception>
    public static string ComputeConfigHash(VerdictBudgetConfig config)
    {
        ArgumentNullException.ThrowIfNull(config);
        // Serialize with cached canonical options for deterministic output.
        var json = JsonSerializer.Serialize(config, ConfigHashOptions);
        var bytes = Encoding.UTF8.GetBytes(json);
        var hash = SHA256.HashData(bytes);
        // Convert.ToHexStringLower matches the digest style used elsewhere in
        // this file (previously ToHexString().ToLowerInvariant(); same output).
        return $"sha256:{Convert.ToHexStringLower(hash)}";
    }

    // Drops nulls and sorts by (Type, Reason) ordinally so the serialized
    // violation list is independent of input order.
    private static ImmutableArray<VerdictBudgetViolation> NormalizeViolations(
        IEnumerable<VerdictBudgetViolation>? violations)
    {
        if (violations is null)
        {
            return [];
        }
        return violations
            .Where(static v => v is not null)
            .OrderBy(static v => v.Type, StringComparer.Ordinal)
            .ThenBy(static v => v.Reason ?? string.Empty, StringComparer.Ordinal)
            .ToImmutableArray();
    }
}
/// <summary>
/// Budget configuration that was applied during evaluation.
/// </summary>
public sealed record VerdictBudgetConfig
{
    /// <summary>
    /// Creates a new VerdictBudgetConfig.
    /// </summary>
    /// <param name="maxUnknownCount">Maximum number of unknowns allowed.</param>
    /// <param name="maxCumulativeUncertainty">Maximum cumulative uncertainty score allowed.</param>
    /// <param name="action">Action on exceeded budget; blank input defaults to "warn".</param>
    /// <param name="reasonLimits">Optional per-reason-code limits; keys are trimmed.</param>
    public VerdictBudgetConfig(
        int maxUnknownCount,
        double maxCumulativeUncertainty,
        string action,
        IReadOnlyDictionary<string, int>? reasonLimits = null)
    {
        MaxUnknownCount = maxUnknownCount;
        MaxCumulativeUncertainty = maxCumulativeUncertainty;
        Action = Validation.TrimToNull(action) ?? "warn";
        ReasonLimits = NormalizeReasonLimits(reasonLimits);
    }

    /// <summary>
    /// Maximum number of unknowns allowed.
    /// </summary>
    public int MaxUnknownCount { get; }

    /// <summary>
    /// Maximum cumulative uncertainty score allowed.
    /// </summary>
    public double MaxCumulativeUncertainty { get; }

    /// <summary>
    /// Action to take when budget is exceeded: warn, block.
    /// </summary>
    public string Action { get; }

    /// <summary>
    /// Per-reason code limits (optional). Sorted ordinally by trimmed key.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableSortedDictionary<string, int> ReasonLimits { get; }

    // Trims keys, drops blank keys, and sorts ordinally for determinism.
    // FIX: two input keys may collide once trimmed (e.g. "kev" and "kev "),
    // and ToImmutableSortedDictionary throws ArgumentException on duplicate
    // keys; merge such collisions deterministically, keeping the strictest
    // (smallest) limit for the reason.
    private static ImmutableSortedDictionary<string, int> NormalizeReasonLimits(
        IReadOnlyDictionary<string, int>? limits)
    {
        if (limits is null || limits.Count == 0)
        {
            return ImmutableSortedDictionary<string, int>.Empty;
        }
        return limits
            .Where(static kv => !string.IsNullOrWhiteSpace(kv.Key))
            .GroupBy(static kv => kv.Key.Trim(), StringComparer.Ordinal)
            .ToImmutableSortedDictionary(
                static g => g.Key,
                static g => g.Min(static kv => kv.Value),
                StringComparer.Ordinal);
    }
}
/// <summary>
/// Actual counts observed at evaluation time.
/// </summary>
public sealed record VerdictBudgetActualCounts
{
    /// <summary>
    /// Creates a new VerdictBudgetActualCounts.
    /// </summary>
    /// <param name="total">Total number of unknowns.</param>
    /// <param name="cumulativeUncertainty">Cumulative uncertainty score across all unknowns.</param>
    /// <param name="byReason">Optional breakdown by reason code; keys are trimmed.</param>
    public VerdictBudgetActualCounts(
        int total,
        double cumulativeUncertainty,
        IReadOnlyDictionary<string, int>? byReason = null)
    {
        Total = total;
        CumulativeUncertainty = cumulativeUncertainty;
        ByReason = NormalizeByReason(byReason);
    }

    /// <summary>
    /// Total number of unknowns.
    /// </summary>
    public int Total { get; }

    /// <summary>
    /// Cumulative uncertainty score across all unknowns.
    /// </summary>
    public double CumulativeUncertainty { get; }

    /// <summary>
    /// Breakdown by reason code. Sorted ordinally by trimmed key.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableSortedDictionary<string, int> ByReason { get; }

    // Trims keys, drops blank keys, and sorts ordinally for determinism.
    // FIX: two input keys may collide once trimmed (e.g. "x" and "x "), and
    // ToImmutableSortedDictionary throws ArgumentException on duplicate keys;
    // colliding entries describe the same reason, so their counts are summed.
    private static ImmutableSortedDictionary<string, int> NormalizeByReason(
        IReadOnlyDictionary<string, int>? byReason)
    {
        if (byReason is null || byReason.Count == 0)
        {
            return ImmutableSortedDictionary<string, int>.Empty;
        }
        return byReason
            .Where(static kv => !string.IsNullOrWhiteSpace(kv.Key))
            .GroupBy(static kv => kv.Key.Trim(), StringComparer.Ordinal)
            .ToImmutableSortedDictionary(
                static g => g.Key,
                static g => g.Sum(static kv => kv.Value),
                StringComparer.Ordinal);
    }
}
/// <summary>
/// Represents a budget limit violation.
/// </summary>
public sealed record VerdictBudgetViolation
{
    /// <summary>
    /// Creates a new VerdictBudgetViolation.
    /// </summary>
    /// <param name="type">Violation kind: total, cumulative, or reason.</param>
    /// <param name="limit">The configured limit that was exceeded.</param>
    /// <param name="actual">The observed value that exceeded the limit.</param>
    /// <param name="reason">Reason code for per-reason violations; blank values collapse to null.</param>
    /// <exception cref="ArgumentNullException">When <paramref name="type"/> is null or whitespace.</exception>
    public VerdictBudgetViolation(
        string type,
        int limit,
        int actual,
        string? reason = null)
    {
        var normalizedType = Validation.TrimToNull(type);
        Type = normalizedType ?? throw new ArgumentNullException(nameof(type));
        Limit = limit;
        Actual = actual;
        Reason = Validation.TrimToNull(reason);
    }

    /// <summary>
    /// Type of violation: total, cumulative, reason.
    /// </summary>
    public string Type { get; }

    /// <summary>
    /// The limit that was exceeded.
    /// </summary>
    public int Limit { get; }

    /// <summary>
    /// The actual value that exceeded the limit.
    /// </summary>
    public int Actual { get; }

    /// <summary>
    /// Reason code, if this is a per-reason violation.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? Reason { get; }
}

View File

@@ -0,0 +1,521 @@
// -----------------------------------------------------------------------------
// VerdictEvidenceWeightedScore.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-025, PINT-8200-028
// Description: Serializable EWS decomposition and ScoringProof for verdict attestation
// -----------------------------------------------------------------------------
using System.Collections.Immutable;
using System.Text.Json.Serialization;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Attestation;
/// <summary>
/// Evidence-Weighted Score (EWS) decomposition for verdict serialization.
/// Includes score, bucket, dimension breakdown, flags, and calculation metadata.
/// </summary>
public sealed record VerdictEvidenceWeightedScore
{
    /// <summary>
    /// Creates a new VerdictEvidenceWeightedScore from its components.
    /// </summary>
    /// <param name="score">Final score; must be in [0, 100].</param>
    /// <param name="bucket">Triage bucket name; must be non-blank.</param>
    /// <param name="breakdown">Optional per-dimension contributions; ordered by contribution magnitude.</param>
    /// <param name="flags">Optional badge flags; trimmed, deduplicated, and sorted.</param>
    /// <param name="explanations">Optional human-readable explanations; original order preserved.</param>
    /// <param name="policyDigest">Optional policy digest for determinism verification.</param>
    /// <param name="calculatedAt">Optional calculation timestamp.</param>
    /// <param name="guardrails">Optional applied-guardrails record.</param>
    /// <param name="proof">Optional scoring proof for reproducibility.</param>
    /// <exception cref="ArgumentOutOfRangeException">When <paramref name="score"/> is outside [0, 100].</exception>
    /// <exception cref="ArgumentNullException">When <paramref name="bucket"/> is null or whitespace.</exception>
    public VerdictEvidenceWeightedScore(
        int score,
        string bucket,
        IEnumerable<VerdictDimensionContribution>? breakdown = null,
        IEnumerable<string>? flags = null,
        IEnumerable<string>? explanations = null,
        string? policyDigest = null,
        DateTimeOffset? calculatedAt = null,
        VerdictAppliedGuardrails? guardrails = null,
        VerdictScoringProof? proof = null)
    {
        Score = score is < 0 or > 100
            ? throw new ArgumentOutOfRangeException(nameof(score), score, "Score must be between 0 and 100.")
            : score;
        Bucket = Validation.TrimToNull(bucket) ?? throw new ArgumentNullException(nameof(bucket));
        Breakdown = NormalizeBreakdown(breakdown);
        Flags = NormalizeFlags(flags);
        Explanations = NormalizeExplanations(explanations);
        PolicyDigest = Validation.TrimToNull(policyDigest);
        CalculatedAt = calculatedAt;
        Guardrails = guardrails;
        Proof = proof;
    }

    /// <summary>
    /// Final score [0, 100]. Higher = more evidence of real risk.
    /// </summary>
    public int Score { get; }

    /// <summary>
    /// Score bucket for quick triage (ActNow, ScheduleNext, Investigate, Watchlist).
    /// </summary>
    public string Bucket { get; }

    /// <summary>
    /// Per-dimension score contributions (breakdown).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<VerdictDimensionContribution> Breakdown { get; }

    /// <summary>
    /// Active flags for badges (e.g., "live-signal", "proven-path", "vendor-na").
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<string> Flags { get; }

    /// <summary>
    /// Human-readable explanations of top contributing factors.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<string> Explanations { get; }

    /// <summary>
    /// Policy digest for determinism verification.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? PolicyDigest { get; }

    /// <summary>
    /// Calculation timestamp (UTC ISO-8601).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public DateTimeOffset? CalculatedAt { get; }

    /// <summary>
    /// Applied guardrails (caps/floors) during calculation.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public VerdictAppliedGuardrails? Guardrails { get; }

    /// <summary>
    /// Scoring proof for reproducibility verification.
    /// Contains raw inputs and weights to allow deterministic recalculation.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public VerdictScoringProof? Proof { get; }

    /// <summary>
    /// Creates a VerdictEvidenceWeightedScore from an EvidenceWeightedScoreResult.
    /// Returns null when the source result is null.
    /// </summary>
    public static VerdictEvidenceWeightedScore? FromEwsResult(EvidenceWeightedScoreResult? ewsResult)
    {
        if (ewsResult is null)
        {
            return null;
        }

        return new VerdictEvidenceWeightedScore(
            score: ewsResult.Score,
            bucket: ewsResult.Bucket.ToString(),
            breakdown: ewsResult.Breakdown.Select(d => VerdictDimensionContribution.FromDimensionContribution(d)),
            flags: ewsResult.Flags,
            explanations: ewsResult.Explanations,
            policyDigest: ewsResult.PolicyDigest,
            calculatedAt: ewsResult.CalculatedAt,
            guardrails: VerdictAppliedGuardrails.FromAppliedGuardrails(ewsResult.Caps),
            proof: VerdictScoringProof.FromEwsResult(ewsResult)
        );
    }

    private static ImmutableArray<VerdictDimensionContribution> NormalizeBreakdown(
        IEnumerable<VerdictDimensionContribution>? breakdown)
    {
        if (breakdown is null)
        {
            return [];
        }

        // OrderByDescending is stable, so equal-magnitude contributions keep source order.
        return breakdown
            .Where(static b => b is not null)
            .OrderByDescending(static b => Math.Abs(b.Contribution))
            .ToImmutableArray();
    }

    private static ImmutableArray<string> NormalizeFlags(IEnumerable<string>? flags)
    {
        if (flags is null)
        {
            return [];
        }

        // FIX: deduplicate (case-insensitively) BEFORE sorting. Enumerable.Distinct
        // is documented as returning an unordered sequence, so sorting must be the
        // final step to guarantee a deterministic, ordinally sorted result.
        return flags
            .Select(static f => f?.Trim())
            .Where(static f => !string.IsNullOrEmpty(f))
            .Select(static f => f!)
            .Distinct(StringComparer.OrdinalIgnoreCase)
            .OrderBy(static f => f, StringComparer.Ordinal)
            .ToImmutableArray();
    }

    private static ImmutableArray<string> NormalizeExplanations(IEnumerable<string>? explanations)
    {
        if (explanations is null)
        {
            return [];
        }

        // Explanations keep their original (ranked) order; only trim and drop blanks.
        return explanations
            .Select(static e => e?.Trim())
            .Where(static e => !string.IsNullOrEmpty(e))
            .Select(static e => e!)
            .ToImmutableArray();
    }
}
/// <summary>
/// Per-dimension contribution to the evidence-weighted score.
/// </summary>
public sealed record VerdictDimensionContribution
{
    /// <summary>
    /// Creates a new VerdictDimensionContribution.
    /// </summary>
    /// <param name="dimension">Dimension name; must be non-blank.</param>
    /// <param name="symbol">Short dimension symbol; must be non-blank.</param>
    /// <param name="inputValue">Normalized input value.</param>
    /// <param name="weight">Weight applied to the dimension.</param>
    /// <param name="contribution">Signed contribution to the raw score.</param>
    /// <param name="isSubtractive">True when the dimension reduces the score (e.g. MIT).</param>
    /// <exception cref="ArgumentNullException">
    /// When <paramref name="dimension"/> or <paramref name="symbol"/> is null or whitespace.
    /// </exception>
    public VerdictDimensionContribution(
        string dimension,
        string symbol,
        double inputValue,
        double weight,
        double contribution,
        bool isSubtractive = false)
    {
        var trimmedDimension = Validation.TrimToNull(dimension);
        var trimmedSymbol = Validation.TrimToNull(symbol);
        Dimension = trimmedDimension ?? throw new ArgumentNullException(nameof(dimension));
        Symbol = trimmedSymbol ?? throw new ArgumentNullException(nameof(symbol));
        InputValue = inputValue;
        Weight = weight;
        Contribution = contribution;
        IsSubtractive = isSubtractive;
    }

    /// <summary>
    /// Dimension name (e.g., "Reachability", "Runtime").
    /// </summary>
    public string Dimension { get; }

    /// <summary>
    /// Symbol (RCH, RTS, BKP, XPL, SRC, MIT).
    /// </summary>
    public string Symbol { get; }

    /// <summary>
    /// Normalized input value [0, 1].
    /// </summary>
    public double InputValue { get; }

    /// <summary>
    /// Weight applied to this dimension.
    /// </summary>
    public double Weight { get; }

    /// <summary>
    /// Contribution to raw score (weight * input, or negative for MIT).
    /// </summary>
    public double Contribution { get; }

    /// <summary>
    /// Whether this is a subtractive dimension (like MIT).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public bool IsSubtractive { get; }

    /// <summary>
    /// Creates a VerdictDimensionContribution from a DimensionContribution.
    /// </summary>
    public static VerdictDimensionContribution FromDimensionContribution(DimensionContribution contribution)
    {
        ArgumentNullException.ThrowIfNull(contribution);
        return new VerdictDimensionContribution(
            contribution.Dimension,
            contribution.Symbol,
            contribution.InputValue,
            contribution.Weight,
            contribution.Contribution,
            contribution.IsSubtractive);
    }
}
/// <summary>
/// Record of applied guardrails during EWS calculation.
/// </summary>
public sealed record VerdictAppliedGuardrails
{
    /// <summary>
    /// Creates a new VerdictAppliedGuardrails.
    /// </summary>
    /// <param name="speculativeCap">True if the speculative cap fired.</param>
    /// <param name="notAffectedCap">True if the not-affected cap fired.</param>
    /// <param name="runtimeFloor">True if the runtime floor fired.</param>
    /// <param name="originalScore">Score before guardrails were applied.</param>
    /// <param name="adjustedScore">Score after guardrails were applied.</param>
    public VerdictAppliedGuardrails(
        bool speculativeCap,
        bool notAffectedCap,
        bool runtimeFloor,
        int originalScore,
        int adjustedScore)
    {
        SpeculativeCap = speculativeCap;
        NotAffectedCap = notAffectedCap;
        RuntimeFloor = runtimeFloor;
        OriginalScore = originalScore;
        AdjustedScore = adjustedScore;
    }

    /// <summary>
    /// Whether the speculative cap was applied.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public bool SpeculativeCap { get; }

    /// <summary>
    /// Whether the not-affected cap was applied.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public bool NotAffectedCap { get; }

    /// <summary>
    /// Whether the runtime floor was applied.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public bool RuntimeFloor { get; }

    /// <summary>
    /// Original score before guardrails.
    /// </summary>
    public int OriginalScore { get; }

    /// <summary>
    /// Score after guardrails.
    /// </summary>
    public int AdjustedScore { get; }

    /// <summary>
    /// Check if any guardrail was applied.
    /// </summary>
    [JsonIgnore]
    public bool AnyApplied => SpeculativeCap || NotAffectedCap || RuntimeFloor;

    /// <summary>
    /// Creates a VerdictAppliedGuardrails from an AppliedGuardrails.
    /// Returns null when the source is null or no guardrail actually fired,
    /// so idle guardrails are omitted from serialized verdicts.
    /// </summary>
    public static VerdictAppliedGuardrails? FromAppliedGuardrails(AppliedGuardrails? guardrails)
    {
        if (guardrails is null || !guardrails.AnyApplied)
        {
            return null;
        }

        return new VerdictAppliedGuardrails(
            guardrails.SpeculativeCap,
            guardrails.NotAffectedCap,
            guardrails.RuntimeFloor,
            guardrails.OriginalScore,
            guardrails.AdjustedScore);
    }
}
/// <summary>
/// Scoring proof for deterministic reproducibility verification.
/// Contains all inputs needed to recalculate and verify the score.
/// </summary>
public sealed record VerdictScoringProof
{
    /// <summary>
    /// Creates a new VerdictScoringProof.
    /// </summary>
    /// <param name="inputs">Normalized dimension inputs; required.</param>
    /// <param name="weights">Dimension weights used for scoring; required.</param>
    /// <param name="policyDigest">Policy digest used for the calculation; must be non-blank.</param>
    /// <param name="calculatorVersion">Calculator version string; must be non-blank.</param>
    /// <param name="calculatedAt">Calculation timestamp (UTC).</param>
    /// <exception cref="ArgumentNullException">When any required argument is null or blank.</exception>
    public VerdictScoringProof(
        VerdictEvidenceInputs inputs,
        VerdictEvidenceWeights weights,
        string policyDigest,
        string calculatorVersion,
        DateTimeOffset calculatedAt)
    {
        Inputs = inputs ?? throw new ArgumentNullException(nameof(inputs));
        Weights = weights ?? throw new ArgumentNullException(nameof(weights));
        PolicyDigest = Validation.TrimToNull(policyDigest) ?? throw new ArgumentNullException(nameof(policyDigest));
        CalculatorVersion = Validation.TrimToNull(calculatorVersion) ?? throw new ArgumentNullException(nameof(calculatorVersion));
        CalculatedAt = calculatedAt;
    }

    /// <summary>
    /// Normalized input values [0, 1] for each dimension.
    /// </summary>
    public VerdictEvidenceInputs Inputs { get; }

    /// <summary>
    /// Weight values used for scoring.
    /// </summary>
    public VerdictEvidenceWeights Weights { get; }

    /// <summary>
    /// Policy digest (SHA256) used for calculation.
    /// </summary>
    public string PolicyDigest { get; }

    /// <summary>
    /// Calculator version string for reproducibility.
    /// </summary>
    public string CalculatorVersion { get; }

    /// <summary>
    /// Calculation timestamp (UTC).
    /// </summary>
    public DateTimeOffset CalculatedAt { get; }

    /// <summary>
    /// Creates a VerdictScoringProof from an EvidenceWeightedScoreResult.
    /// Returns null when the source result is null.
    /// </summary>
    public static VerdictScoringProof? FromEwsResult(EvidenceWeightedScoreResult? ewsResult)
    {
        if (ewsResult is null)
        {
            return null;
        }

        var inputs = VerdictEvidenceInputs.FromEvidenceInputValues(ewsResult.Inputs);
        var weights = VerdictEvidenceWeights.FromEvidenceWeights(ewsResult.Weights);
        return new VerdictScoringProof(
            inputs,
            weights,
            ewsResult.PolicyDigest,
            "1.0.0", // TODO: Get from calculator metadata
            ewsResult.CalculatedAt);
    }
}
/// <summary>
/// Normalized input values for scoring.
/// </summary>
public sealed record VerdictEvidenceInputs
{
    /// <summary>
    /// Creates a new VerdictEvidenceInputs.
    /// </summary>
    /// <param name="reachability">Reachability input.</param>
    /// <param name="runtime">Runtime signal input.</param>
    /// <param name="backport">Backport analysis input.</param>
    /// <param name="exploit">Exploit evidence input.</param>
    /// <param name="sourceTrust">Source trust input.</param>
    /// <param name="mitigation">Mitigation factor input.</param>
    public VerdictEvidenceInputs(
        double reachability,
        double runtime,
        double backport,
        double exploit,
        double sourceTrust,
        double mitigation)
    {
        Reachability = reachability;
        Runtime = runtime;
        Backport = backport;
        Exploit = exploit;
        SourceTrust = sourceTrust;
        Mitigation = mitigation;
    }

    /// <summary>Reachability input [0, 1].</summary>
    [JsonPropertyName("rch")]
    public double Reachability { get; }

    /// <summary>Runtime signal input [0, 1].</summary>
    [JsonPropertyName("rts")]
    public double Runtime { get; }

    /// <summary>Backport analysis input [0, 1].</summary>
    [JsonPropertyName("bkp")]
    public double Backport { get; }

    /// <summary>Exploit evidence input [0, 1].</summary>
    [JsonPropertyName("xpl")]
    public double Exploit { get; }

    /// <summary>Source trust input [0, 1].</summary>
    [JsonPropertyName("src")]
    public double SourceTrust { get; }

    /// <summary>Mitigation factor input [0, 1].</summary>
    [JsonPropertyName("mit")]
    public double Mitigation { get; }

    /// <summary>
    /// Creates from an EvidenceInputValues, mapping short field names
    /// (Rch/Rts/Bkp/Xpl/Src/Mit) onto the descriptive properties.
    /// </summary>
    public static VerdictEvidenceInputs FromEvidenceInputValues(EvidenceInputValues inputs)
    {
        ArgumentNullException.ThrowIfNull(inputs);
        return new VerdictEvidenceInputs(
            inputs.Rch,
            inputs.Rts,
            inputs.Bkp,
            inputs.Xpl,
            inputs.Src,
            inputs.Mit);
    }
}
/// <summary>
/// Weight values for scoring dimensions.
/// </summary>
public sealed record VerdictEvidenceWeights
{
    /// <summary>
    /// Creates a new VerdictEvidenceWeights.
    /// </summary>
    /// <param name="reachability">Reachability weight.</param>
    /// <param name="runtime">Runtime signal weight.</param>
    /// <param name="backport">Backport analysis weight.</param>
    /// <param name="exploit">Exploit evidence weight.</param>
    /// <param name="sourceTrust">Source trust weight.</param>
    /// <param name="mitigation">Mitigation factor weight.</param>
    public VerdictEvidenceWeights(
        double reachability,
        double runtime,
        double backport,
        double exploit,
        double sourceTrust,
        double mitigation)
    {
        Reachability = reachability;
        Runtime = runtime;
        Backport = backport;
        Exploit = exploit;
        SourceTrust = sourceTrust;
        Mitigation = mitigation;
    }

    /// <summary>Reachability weight [0, 1].</summary>
    [JsonPropertyName("rch")]
    public double Reachability { get; }

    /// <summary>Runtime signal weight [0, 1].</summary>
    [JsonPropertyName("rts")]
    public double Runtime { get; }

    /// <summary>Backport analysis weight [0, 1].</summary>
    [JsonPropertyName("bkp")]
    public double Backport { get; }

    /// <summary>Exploit evidence weight [0, 1].</summary>
    [JsonPropertyName("xpl")]
    public double Exploit { get; }

    /// <summary>Source trust weight [0, 1].</summary>
    [JsonPropertyName("src")]
    public double SourceTrust { get; }

    /// <summary>Mitigation factor weight [0, 1].</summary>
    [JsonPropertyName("mit")]
    public double Mitigation { get; }

    /// <summary>
    /// Creates from an EvidenceWeights, mapping short field names
    /// (Rch/Rts/Bkp/Xpl/Src/Mit) onto the descriptive properties.
    /// </summary>
    public static VerdictEvidenceWeights FromEvidenceWeights(EvidenceWeights weights)
    {
        ArgumentNullException.ThrowIfNull(weights);
        return new VerdictEvidenceWeights(
            weights.Rch,
            weights.Rts,
            weights.Bkp,
            weights.Xpl,
            weights.Src,
            weights.Mit);
    }
}

View File

@@ -23,6 +23,8 @@ public sealed record VerdictPredicate
IEnumerable<VerdictEvidence>? evidence = null,
IEnumerable<VerdictVexImpact>? vexImpacts = null,
VerdictReachability? reachability = null,
VerdictEvidenceWeightedScore? evidenceWeightedScore = null,
VerdictBudgetCheck? budgetCheck = null,
ImmutableSortedDictionary<string, string>? metadata = null)
{
Type = PredicateType;
@@ -47,6 +49,8 @@ public sealed record VerdictPredicate
Evidence = NormalizeEvidence(evidence);
VexImpacts = NormalizeVexImpacts(vexImpacts);
Reachability = reachability;
EvidenceWeightedScore = evidenceWeightedScore;
BudgetCheck = budgetCheck;
Metadata = NormalizeMetadata(metadata);
}
@@ -77,6 +81,19 @@ public sealed record VerdictPredicate
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public VerdictReachability? Reachability { get; }
/// <summary>
/// Evidence-weighted score decomposition for scoring transparency.
/// </summary>
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public VerdictEvidenceWeightedScore? EvidenceWeightedScore { get; }
/// <summary>
/// Budget check information for unknown budget enforcement.
/// Captures the budget configuration and result at decision time.
/// </summary>
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public VerdictBudgetCheck? BudgetCheck { get; }
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
public ImmutableSortedDictionary<string, string> Metadata { get; }

View File

@@ -76,6 +76,9 @@ public sealed class VerdictPredicateBuilder
// Extract reachability (if present in metadata)
var reachability = ExtractReachability(trace);
// Extract evidence-weighted score (if present)
var evidenceWeightedScore = VerdictEvidenceWeightedScore.FromEwsResult(trace.EvidenceWeightedScore);
// Build metadata with determinism hash
var metadata = BuildMetadata(trace, evidence);
@@ -91,6 +94,7 @@ public sealed class VerdictPredicateBuilder
evidence: evidence,
vexImpacts: vexImpacts,
reachability: reachability,
evidenceWeightedScore: evidenceWeightedScore,
metadata: metadata
);
}
@@ -249,6 +253,8 @@ public sealed class VerdictPredicateBuilder
evidence: evidence,
vexImpacts: null,
reachability: null,
evidenceWeightedScore: null,
budgetCheck: null,
metadata: null
);

View File

@@ -7,6 +7,7 @@ using StellaOps.Policy.Confidence.Models;
using StellaOps.Policy.Exceptions.Models;
using StellaOps.Policy.Unknowns.Models;
using StellaOps.PolicyDsl;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Evaluation;
@@ -128,7 +129,8 @@ internal sealed record PolicyEvaluationResult(
ConfidenceScore? Confidence,
PolicyFailureReason? FailureReason = null,
string? FailureMessage = null,
BudgetStatusSummary? UnknownBudgetStatus = null)
BudgetStatusSummary? UnknownBudgetStatus = null,
EvidenceWeightedScoreResult? EvidenceWeightedScore = null)
{
public static PolicyEvaluationResult CreateDefault(string? severity) => new(
Matched: false,
@@ -139,7 +141,8 @@ internal sealed record PolicyEvaluationResult(
Annotations: ImmutableDictionary<string, string>.Empty,
Warnings: ImmutableArray<string>.Empty,
AppliedException: null,
Confidence: null);
Confidence: null,
EvidenceWeightedScore: null);
}
internal enum PolicyFailureReason

View File

@@ -10,10 +10,15 @@ using StellaOps.Policy;
using StellaOps.Policy.Confidence.Configuration;
using StellaOps.Policy.Confidence.Models;
using StellaOps.Policy.Confidence.Services;
using StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
using StellaOps.Policy.Unknowns.Models;
using StellaOps.Policy.Unknowns.Services;
using StellaOps.PolicyDsl;
// Alias Confidence types to avoid ambiguity with EWS types
using ConfidenceReachabilityState = StellaOps.Policy.Confidence.Models.ReachabilityState;
using ConfidenceRuntimePosture = StellaOps.Policy.Confidence.Models.RuntimePosture;
namespace StellaOps.Policy.Engine.Evaluation;
/// <summary>
@@ -23,15 +28,18 @@ internal sealed class PolicyEvaluator
{
private readonly IConfidenceCalculator _confidenceCalculator;
private readonly IUnknownBudgetService? _budgetService;
private readonly IFindingScoreEnricher? _scoreEnricher;
public PolicyEvaluator(
IConfidenceCalculator? confidenceCalculator = null,
IUnknownBudgetService? budgetService = null)
IUnknownBudgetService? budgetService = null,
IFindingScoreEnricher? scoreEnricher = null)
{
_confidenceCalculator = confidenceCalculator
?? new ConfidenceCalculator(
new StaticOptionsMonitor<ConfidenceWeightOptions>(new ConfidenceWeightOptions()));
_budgetService = budgetService;
_scoreEnricher = scoreEnricher;
}
public PolicyEvaluationResult Evaluate(PolicyEvaluationRequest request)
@@ -46,7 +54,10 @@ internal sealed class PolicyEvaluator
throw new ArgumentNullException(nameof(request.Document));
}
var evaluator = new PolicyExpressionEvaluator(request.Context);
// Pre-compute EWS so it's available during rule evaluation for score-based rules
var precomputedScore = PrecomputeEvidenceWeightedScore(request.Context);
var evaluator = new PolicyExpressionEvaluator(request.Context, precomputedScore);
var orderedRules = request.Document.Rules
.Select(static (rule, index) => new { rule, index })
.OrderBy(x => x.rule.Priority)
@@ -85,13 +96,15 @@ internal sealed class PolicyEvaluator
var result = ApplyExceptions(request, baseResult);
var budgeted = ApplyUnknownBudget(request.Context, result);
return ApplyConfidence(request.Context, budgeted);
var withConfidence = ApplyConfidence(request.Context, budgeted);
return ApplyEvidenceWeightedScore(request.Context, withConfidence, precomputedScore);
}
var defaultResult = PolicyEvaluationResult.CreateDefault(request.Context.Severity.Normalized);
var defaultWithExceptions = ApplyExceptions(request, defaultResult);
var budgetedDefault = ApplyUnknownBudget(request.Context, defaultWithExceptions);
return ApplyConfidence(request.Context, budgetedDefault);
var defaultWithConfidence = ApplyConfidence(request.Context, budgetedDefault);
return ApplyEvidenceWeightedScore(request.Context, defaultWithConfidence, precomputedScore);
}
private static void ApplyAction(
@@ -513,6 +526,139 @@ internal sealed class PolicyEvaluator
return baseResult with { Confidence = confidence };
}
/// <summary>
/// Pre-computes the Evidence-Weighted Score before rule evaluation so it's available
/// for score-based policy rules (e.g., "when score >= 80 then block").
/// </summary>
/// <param name="context">Evaluation context whose advisory metadata supplies EPSS/KEV signals.</param>
/// <returns>
/// The computed score, or <c>null</c> when no enricher is configured, the enricher is
/// disabled, enrichment reports failure, or any exception occurs during computation.
/// </returns>
private global::StellaOps.Signals.EvidenceWeightedScore.EvidenceWeightedScoreResult? PrecomputeEvidenceWeightedScore(
PolicyEvaluationContext context)
{
// Skip if no enricher configured
if (_scoreEnricher is null || !_scoreEnricher.IsEnabled)
{
return null;
}
try
{
// Generate finding ID from context
var findingId = GenerateFindingIdFromContext(context);
// Extract evidence from context.
// Metadata values are parsed best-effort: unparseable numbers/dates fall back to null,
// and a missing "kev.status" key yields false.
// NOTE(review): double.TryParse / DateTimeOffset.TryParse here use the current culture;
// confirm whether metadata is always invariant-formatted (CA1305).
var evidence = context.ExtractEwsEvidence(
findingId,
epssScore: context.Advisory.Metadata.TryGetValue("epss.score", out var epssStr)
? double.TryParse(epssStr, out var epss) ? epss : null
: null,
epssPercentile: context.Advisory.Metadata.TryGetValue("epss.percentile", out var epssPercStr)
? double.TryParse(epssPercStr, out var epssPerc) ? epssPerc : null
: null,
isInKev: context.Advisory.Metadata.TryGetValue("kev.status", out var kevStatus)
&& kevStatus.Equals("true", StringComparison.OrdinalIgnoreCase),
kevAddedDate: context.Advisory.Metadata.TryGetValue("kev.added", out var kevAddedStr)
? DateTimeOffset.TryParse(kevAddedStr, out var kevAdded) ? kevAdded : null
: null);
// Calculate score synchronously
var enrichmentResult = _scoreEnricher.Enrich(evidence);
return enrichmentResult.IsSuccess ? enrichmentResult.Score : null;
}
catch
{
// Pre-computation should not fail the evaluation
// (deliberate best-effort: any enrichment error is swallowed and treated as "no score").
return null;
}
}
/// <summary>
/// Generates a deterministic finding ID from context (without requiring result).
/// Prefers a "finding:{cve}:{source}" form when the advisory metadata carries a CVE ID;
/// otherwise falls back to a truncated SHA-256 over source, severity, and timestamp.
/// </summary>
/// <param name="context">Evaluation context providing advisory source, severity, and metadata.</param>
/// <returns>A finding identifier string.</returns>
private static string GenerateFindingIdFromContext(PolicyEvaluationContext context)
{
var source = context.Advisory.Source ?? "unknown";
var severity = context.Severity.Normalized ?? "unknown";
// Use advisory metadata CVE ID if available
if (context.Advisory.Metadata.TryGetValue("cve", out var cve) && !string.IsNullOrEmpty(cve))
{
return $"finding:{cve}:{source}";
}
// Fall back to deterministic hash
// NOTE(review): the hash input includes context.Now, so the fallback ID is only stable
// for evaluations sharing the same timestamp — confirm that is the intended scope of
// "deterministic" here.
var input = $"{source}|{severity}|{context.Now:O}";
Span<byte> hash = stackalloc byte[32];
SHA256.HashData(Encoding.UTF8.GetBytes(input), hash);
// Keep the first 16 hex chars (64 bits) of the digest for a compact identifier.
return $"finding:sha256:{Convert.ToHexString(hash).ToLowerInvariant()[..16]}";
}
/// <summary>
/// Applies Evidence-Weighted Score enrichment if the enricher is available and enabled.
/// Uses pre-computed score if available to avoid recalculation.
/// </summary>
/// <param name="context">Evaluation context used when the score must be computed on demand.</param>
/// <param name="baseResult">Result to enrich; returned unchanged when no score is available.</param>
/// <param name="precomputedScore">Score computed earlier in the evaluation pass, if any.</param>
/// <returns>
/// The result with <c>EvidenceWeightedScore</c> set and "ews.score"/"ews.bucket"
/// annotations added, or <paramref name="baseResult"/> unchanged on no score or error.
/// </returns>
private PolicyEvaluationResult ApplyEvidenceWeightedScore(
PolicyEvaluationContext context,
PolicyEvaluationResult baseResult,
global::StellaOps.Signals.EvidenceWeightedScore.EvidenceWeightedScoreResult? precomputedScore = null)
{
// Use precomputed score if available
var score = precomputedScore;
// If no precomputed score and enricher is enabled, compute now
if (score is null && _scoreEnricher is not null && _scoreEnricher.IsEnabled)
{
score = PrecomputeEvidenceWeightedScore(context);
}
// Skip if no score available
if (score is null)
{
return baseResult;
}
try
{
// Add score to annotations for DSL access
// (invariant culture keeps annotation values machine-readable regardless of host locale).
var annotations = baseResult.Annotations.ToBuilder();
annotations["ews.score"] = score.Score.ToString("F2", CultureInfo.InvariantCulture);
annotations["ews.bucket"] = score.Bucket.ToString();
return baseResult with
{
EvidenceWeightedScore = score,
Annotations = annotations.ToImmutable()
};
}
catch
{
// Score enrichment should not fail the evaluation
// Return base result unchanged
return baseResult;
}
}
/// <summary>
/// Generates a deterministic finding ID from evaluation context.
/// Prefers a "finding:{cve}:{source}" form when the advisory metadata carries a CVE ID;
/// otherwise falls back to a truncated SHA-256 over source, severity, rule name, and timestamp.
/// NOTE(review): near-duplicate of GenerateFindingIdFromContext — the only difference is
/// that the rule name participates in the hash fallback (not the CVE path); consider
/// consolidating the two helpers.
/// </summary>
/// <param name="context">Evaluation context providing advisory source, severity, and metadata.</param>
/// <param name="result">Evaluation result supplying the matched rule name (or "default").</param>
/// <returns>A finding identifier string.</returns>
private static string GenerateFindingId(PolicyEvaluationContext context, PolicyEvaluationResult result)
{
var source = context.Advisory.Source ?? "unknown";
var severity = context.Severity.Normalized ?? "unknown";
var ruleName = result.RuleName ?? "default";
// Use advisory metadata CVE ID if available
if (context.Advisory.Metadata.TryGetValue("cve", out var cve) && !string.IsNullOrEmpty(cve))
{
return $"finding:{cve}:{source}";
}
// Fall back to deterministic hash
// NOTE(review): the hash input includes context.Now, so the fallback ID varies with the
// evaluation timestamp — confirm intended.
var input = $"{source}|{severity}|{ruleName}|{context.Now:O}";
Span<byte> hash = stackalloc byte[32];
SHA256.HashData(Encoding.UTF8.GetBytes(input), hash);
// Keep the first 16 hex chars (64 bits) of the digest for a compact identifier.
return $"finding:sha256:{Convert.ToHexString(hash).ToLowerInvariant()[..16]}";
}
private static ConfidenceInput BuildConfidenceInput(PolicyEvaluationContext context, PolicyEvaluationResult result)
{
return new ConfidenceInput
@@ -535,10 +681,10 @@ internal sealed class PolicyEvaluator
}
var state = reachability.IsReachable
? (reachability.HasRuntimeEvidence ? ReachabilityState.ConfirmedReachable : ReachabilityState.StaticReachable)
? (reachability.HasRuntimeEvidence ? ConfidenceReachabilityState.ConfirmedReachable : ConfidenceReachabilityState.StaticReachable)
: reachability.IsUnreachable
? (reachability.HasRuntimeEvidence ? ReachabilityState.ConfirmedUnreachable : ReachabilityState.StaticUnreachable)
: ReachabilityState.Unknown;
? (reachability.HasRuntimeEvidence ? ConfidenceReachabilityState.ConfirmedUnreachable : ConfidenceReachabilityState.StaticUnreachable)
: ConfidenceReachabilityState.Unknown;
var digests = string.IsNullOrWhiteSpace(reachability.EvidenceRef)
? Array.Empty<string>()
@@ -560,8 +706,8 @@ internal sealed class PolicyEvaluator
}
var posture = context.Reachability.IsReachable || context.Reachability.IsUnreachable
? RuntimePosture.Supports
: RuntimePosture.Unknown;
? ConfidenceRuntimePosture.Supports
: ConfidenceRuntimePosture.Unknown;
return new RuntimeEvidence
{

View File

@@ -4,6 +4,7 @@ using System.Collections.Immutable;
using System.Globalization;
using System.Linq;
using StellaOps.PolicyDsl;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Evaluation;
@@ -23,10 +24,14 @@ internal sealed class PolicyExpressionEvaluator
};
private readonly PolicyEvaluationContext context;
private readonly EvidenceWeightedScoreResult? _evidenceWeightedScore;
public PolicyExpressionEvaluator(PolicyEvaluationContext context)
public PolicyExpressionEvaluator(
PolicyEvaluationContext context,
EvidenceWeightedScoreResult? evidenceWeightedScore = null)
{
this.context = context ?? throw new ArgumentNullException(nameof(context));
_evidenceWeightedScore = evidenceWeightedScore;
}
public EvaluationValue Evaluate(PolicyExpression expression, EvaluationScope? scope = null)
@@ -65,6 +70,9 @@ internal sealed class PolicyExpressionEvaluator
"sbom" => new EvaluationValue(new SbomScope(context.Sbom)),
"reachability" => new EvaluationValue(new ReachabilityScope(context.Reachability)),
"entropy" => new EvaluationValue(new EntropyScope(context.Entropy)),
"score" => _evidenceWeightedScore is not null
? new EvaluationValue(new ScoreScope(_evidenceWeightedScore))
: EvaluationValue.Null,
"now" => new EvaluationValue(context.Now),
"true" => EvaluationValue.True,
"false" => EvaluationValue.False,
@@ -111,6 +119,11 @@ internal sealed class PolicyExpressionEvaluator
return entropy.Get(member.Member);
}
if (raw is ScoreScope scoreScope)
{
return scoreScope.Get(member.Member);
}
if (raw is ComponentScope componentScope)
{
return componentScope.Get(member.Member);
@@ -202,6 +215,22 @@ internal sealed class PolicyExpressionEvaluator
{
return advisoryScope.Invoke(member.Member, invocation.Arguments, scope, this);
}
if (root.Name == "score" && targetRaw is ScoreScope scoreScope)
{
return member.Member.ToLowerInvariant() switch
{
"has_flag" or "hasflag" => invocation.Arguments.Length > 0
? scoreScope.HasFlag(Evaluate(invocation.Arguments[0], scope).AsString() ?? "")
: EvaluationValue.False,
"between" => invocation.Arguments.Length >= 2
? scoreScope.Between(
Evaluate(invocation.Arguments[0], scope).AsDecimal() ?? 0m,
Evaluate(invocation.Arguments[1], scope).AsDecimal() ?? 100m)
: EvaluationValue.False,
_ => EvaluationValue.Null,
};
}
}
}
@@ -915,6 +944,94 @@ internal sealed class PolicyExpressionEvaluator
};
}
/// <summary>
/// SPL scope for Evidence-Weighted Score predicates.
/// Provides access to score value, bucket, flags, and individual dimensions.
/// </summary>
/// <example>
/// SPL predicates supported:
/// - score >= 80
/// - score.value >= 80
/// - score.bucket == "ActNow"
/// - score.is_act_now == true
/// - score.rch > 0.8
/// - score.runt > 0.5
/// - score.has_flag("live-signal")
/// - score.flags contains "kev"
/// </example>
private sealed class ScoreScope
{
private readonly EvidenceWeightedScoreResult score;
public ScoreScope(EvidenceWeightedScoreResult score)
{
this.score = score;
}
public EvaluationValue Get(string member) => member.ToLowerInvariant() switch
{
// Core score value (allows direct comparison: score >= 80)
"value" => new EvaluationValue(score.Score),
// Bucket access
"bucket" => new EvaluationValue(score.Bucket.ToString()),
"is_act_now" or "isactnow" => new EvaluationValue(score.Bucket == ScoreBucket.ActNow),
"is_schedule_next" or "isschedulenext" => new EvaluationValue(score.Bucket == ScoreBucket.ScheduleNext),
"is_investigate" or "isinvestigate" => new EvaluationValue(score.Bucket == ScoreBucket.Investigate),
"is_watchlist" or "iswatchlist" => new EvaluationValue(score.Bucket == ScoreBucket.Watchlist),
// Individual dimension scores (0-1 normalized) - using Breakdown
"rch" or "reachability" => new EvaluationValue(GetDimensionInput("RCH")),
"rts" or "runtime" => new EvaluationValue(GetDimensionInput("RTS")),
"bkp" or "backport" => new EvaluationValue(GetDimensionInput("BKP")),
"xpl" or "exploit" => new EvaluationValue(GetDimensionInput("XPL")),
"src" or "source_trust" => new EvaluationValue(GetDimensionInput("SRC")),
"mit" or "mitigation" => new EvaluationValue(GetDimensionInput("MIT")),
// Flags as array
"flags" => new EvaluationValue(score.Flags.Select(f => (object?)f).ToImmutableArray()),
// Policy info
"policy_digest" or "policydigest" => new EvaluationValue(score.PolicyDigest),
// Calculation metadata
"calculated_at" or "calculatedat" => new EvaluationValue(score.CalculatedAt),
// Explanations
"explanations" => new EvaluationValue(score.Explanations.Select(e => (object?)e).ToImmutableArray()),
_ => EvaluationValue.Null,
};
private double GetDimensionInput(string symbol)
{
var contribution = score.Breakdown.FirstOrDefault(c =>
c.Symbol.Equals(symbol, StringComparison.OrdinalIgnoreCase));
return contribution?.InputValue ?? 0.0;
}
/// <summary>
/// Check if score has a specific flag.
/// </summary>
public EvaluationValue HasFlag(string flagName)
{
if (string.IsNullOrWhiteSpace(flagName))
{
return EvaluationValue.False;
}
return new EvaluationValue(score.Flags.Contains(flagName, StringComparer.OrdinalIgnoreCase));
}
/// <summary>
/// Check if score lies within the closed interval [min, max].
/// </summary>
public EvaluationValue Between(decimal min, decimal max)
{
    var inRange = min <= score.Score && score.Score <= max;
    return new EvaluationValue(inRange);
}
}
/// <summary>
/// SPL scope for macOS component predicates.
/// Provides access to bundle signing, entitlements, sandboxing, and package receipt information.

View File

@@ -0,0 +1,323 @@
// -----------------------------------------------------------------------------
// VerdictSummary.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-024
// Description: VerdictSummary extension for including EWS bucket and top factors
// -----------------------------------------------------------------------------
using System.Collections.Immutable;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Evaluation;
/// <summary>
/// A summarized view of a policy evaluation result, including evidence-weighted
/// score bucket and top contributing factors for quick triage visualization.
/// All properties are init-only; instances are immutable once constructed.
/// </summary>
public sealed record VerdictSummary
{
    /// <summary>The overall verdict status (e.g., "affected", "not_affected").</summary>
    public required string Status { get; init; }
    /// <summary>The severity level (Critical, High, Medium, Low, Info); null when not assigned.</summary>
    public string? Severity { get; init; }
    /// <summary>Whether a rule matched this finding.</summary>
    public bool RuleMatched { get; init; }
    /// <summary>Name of the matching rule, if any.</summary>
    public string? RuleName { get; init; }
    /// <summary>Rule priority, if applicable.</summary>
    public int? Priority { get; init; }
    /// <summary>Evidence-weighted score bucket for quick triage (e.g., "ActNow", "Watchlist"); null when EWS was not calculated.</summary>
    public string? ScoreBucket { get; init; }
    /// <summary>Numeric score (0-100) from evidence-weighted scoring; null when EWS was not calculated.</summary>
    public int? Score { get; init; }
    /// <summary>
    /// Top contributing factors from EWS breakdown, ordered by contribution magnitude.
    /// Each entry contains the dimension name and its contribution.
    /// </summary>
    public ImmutableArray<VerdictFactor> TopFactors { get; init; } = [];
    /// <summary>Active flags from EWS (e.g., "live-signal", "kev", "vendor-na"). Empty when EWS was not calculated.</summary>
    public ImmutableArray<string> Flags { get; init; } = [];
    /// <summary>Human-readable explanations for the score.</summary>
    public ImmutableArray<string> Explanations { get; init; } = [];
    /// <summary>Whether guardrails (caps/floors) were applied to the score.</summary>
    public bool GuardrailsApplied { get; init; }
    /// <summary>Warnings emitted during evaluation.</summary>
    public ImmutableArray<string> Warnings { get; init; } = [];
    /// <summary>Whether an exception was applied to this finding.</summary>
    public bool ExceptionApplied { get; init; }
    /// <summary>Legacy confidence score, if available. Presumably in [0, 1] — confirm against the Confidence model.</summary>
    public decimal? ConfidenceScore { get; init; }
    /// <summary>Legacy confidence band/tier name, if available.</summary>
    public string? ConfidenceBand { get; init; }
}
/// <summary>
/// A single contributing factor to the evidence-weighted score, copied from
/// one entry of the EWS breakdown.
/// </summary>
public sealed record VerdictFactor
{
    /// <summary>Full dimension name (e.g., "Reachability", "Runtime Signal").</summary>
    public required string Dimension { get; init; }
    /// <summary>Short symbol (e.g., "RCH", "RTS", "XPL").</summary>
    public required string Symbol { get; init; }
    /// <summary>Contribution to the score (positive for additive, negative for subtractive).</summary>
    public required double Contribution { get; init; }
    /// <summary>Weight applied to this dimension.</summary>
    public required double Weight { get; init; }
    /// <summary>Normalized input value [0, 1].</summary>
    public required double InputValue { get; init; }
    /// <summary>Whether this is a subtractive factor (like Mitigation).</summary>
    public bool IsSubtractive { get; init; }
}
/// <summary>
/// Extension methods for creating <see cref="VerdictSummary"/> from evaluation results.
/// </summary>
internal static class VerdictSummaryExtensions
{
    /// <summary>
    /// Maximum number of top factors to include in the summary.
    /// </summary>
    private const int MaxTopFactors = 5;

    /// <summary>
    /// Creates a <see cref="VerdictSummary"/> from a <see cref="PolicyEvaluationResult"/>.
    /// </summary>
    /// <param name="result">The policy evaluation result.</param>
    /// <returns>A summarized view of the verdict including EWS bucket and top factors.</returns>
    internal static VerdictSummary ToSummary(this PolicyEvaluationResult result)
    {
        ArgumentNullException.ThrowIfNull(result);
        var ews = result.EvidenceWeightedScore;
        return new VerdictSummary
        {
            Status = result.Status,
            Severity = result.Severity,
            RuleMatched = result.Matched,
            RuleName = result.RuleName,
            Priority = result.Priority,
            ScoreBucket = ews?.Bucket.ToString(),
            Score = ews?.Score,
            TopFactors = ExtractTopFactors(ews),
            Flags = ews?.Flags.ToImmutableArray() ?? [],
            Explanations = ews?.Explanations.ToImmutableArray() ?? [],
            GuardrailsApplied = ews?.Caps.AnyApplied ?? false,
            Warnings = result.Warnings,
            ExceptionApplied = result.AppliedException is not null,
            ConfidenceScore = result.Confidence?.Value,
            ConfidenceBand = result.Confidence?.Tier.ToString(),
        };
    }

    /// <summary>
    /// Creates a minimal <see cref="VerdictSummary"/> with only status and rule info.
    /// Use this for quick serialization when EWS details are not needed.
    /// </summary>
    /// <param name="result">The policy evaluation result.</param>
    /// <returns>A minimal summarized view.</returns>
    internal static VerdictSummary ToMinimalSummary(this PolicyEvaluationResult result)
    {
        ArgumentNullException.ThrowIfNull(result);
        return new VerdictSummary
        {
            Status = result.Status,
            Severity = result.Severity,
            RuleMatched = result.Matched,
            RuleName = result.RuleName,
            Priority = result.Priority,
            ScoreBucket = result.EvidenceWeightedScore?.Bucket.ToString(),
            Score = result.EvidenceWeightedScore?.Score,
            Warnings = result.Warnings,
            ExceptionApplied = result.AppliedException is not null,
        };
    }

    /// <summary>
    /// Extracts the top contributing factors from the EWS breakdown,
    /// ordered by absolute contribution magnitude (descending).
    /// Returns an empty array when no breakdown is available.
    /// </summary>
    private static ImmutableArray<VerdictFactor> ExtractTopFactors(EvidenceWeightedScoreResult? ews)
    {
        if (ews?.Breakdown is null || ews.Breakdown.Count == 0)
        {
            return [];
        }
        return ews.Breakdown
            .OrderByDescending(d => Math.Abs(d.Contribution))
            .Take(MaxTopFactors)
            .Select(d => new VerdictFactor
            {
                Dimension = d.Dimension,
                Symbol = d.Symbol,
                Contribution = d.Contribution,
                Weight = d.Weight,
                InputValue = d.InputValue,
                IsSubtractive = d.IsSubtractive,
            })
            .ToImmutableArray();
    }

    /// <summary>
    /// Gets the primary contributing factor from the EWS breakdown.
    /// Returns null if no breakdown is available.
    /// </summary>
    /// <param name="ews">The evidence-weighted score result.</param>
    /// <returns>The highest-contributing factor, or null.</returns>
    public static VerdictFactor? GetPrimaryFactor(this EvidenceWeightedScoreResult? ews)
    {
        // Reuse the same ordering as ExtractTopFactors so the "primary" factor
        // is always TopFactors[0] of the corresponding summary.
        var factors = ExtractTopFactors(ews);
        return factors.IsDefaultOrEmpty ? null : factors[0];
    }

    /// <summary>
    /// Formats the verdict summary as a single-line triage string.
    /// Example: "[ActNow 92] CVE-2024-1234: RCH(+35), XPL(+28), RTS(+20) | live-signal"
    /// </summary>
    /// <param name="summary">The verdict summary.</param>
    /// <param name="findingId">Optional finding ID to include.</param>
    /// <returns>A formatted triage string.</returns>
    public static string FormatTriageLine(this VerdictSummary summary, string? findingId = null)
    {
        ArgumentNullException.ThrowIfNull(summary);
        var parts = new List<string>();
        // Score bucket and value
        if (summary.Score.HasValue)
        {
            parts.Add($"[{summary.ScoreBucket ?? "?"} {summary.Score}]");
        }
        // Finding ID if provided
        if (!string.IsNullOrEmpty(findingId))
        {
            parts.Add($"{findingId}:");
        }
        // Top factors (at most three, signed and rounded to whole points)
        if (summary.TopFactors.Length > 0)
        {
            var factors = summary.TopFactors
                .Take(3)
                .Select(f => $"{f.Symbol}({(f.Contribution >= 0 ? "+" : "")}{f.Contribution:F0})")
                .ToArray();
            parts.Add(string.Join(", ", factors));
        }
        // Flags
        if (summary.Flags.Length > 0)
        {
            parts.Add($"| {string.Join(", ", summary.Flags.Take(3))}");
        }
        return string.Join(" ", parts);
    }

    /// <summary>
    /// Gets a brief explanation of why this verdict received its score bucket.
    /// </summary>
    /// <param name="summary">The verdict summary.</param>
    /// <returns>A human-readable explanation.</returns>
    public static string GetBucketExplanation(this VerdictSummary summary)
    {
        ArgumentNullException.ThrowIfNull(summary);
        if (!summary.Score.HasValue)
        {
            return "No evidence-weighted score available.";
        }
        var bucket = summary.ScoreBucket;
        var score = summary.Score.Value;
        var explanation = bucket switch
        {
            "ActNow" => $"Score {score}/100: Strong evidence of exploitable risk. Immediate action recommended.",
            "ScheduleNext" => $"Score {score}/100: Likely real risk. Schedule remediation for next sprint.",
            "Investigate" => $"Score {score}/100: Moderate evidence. Investigate when working on this component.",
            "Watchlist" => $"Score {score}/100: Insufficient evidence. Monitor for changes.",
            _ => $"Score {score}/100."
        };
        // Add primary factor context
        if (summary.TopFactors.Length > 0)
        {
            var primary = summary.TopFactors[0];
            var factorContext = primary.Symbol switch
            {
                "RCH" => "Reachability analysis is the primary driver.",
                "RTS" => "Runtime signals detected exploitation activity.",
                "XPL" => "Known exploit evidence is significant.",
                "BKP" => "Backport information affects the score.",
                "SRC" => "Source trust levels impact the assessment.",
                "MIT" => "Mitigations reduce the effective risk.",
                _ => null
            };
            if (factorContext is not null)
            {
                explanation = $"{explanation} {factorContext}";
            }
        }
        // Add flag context. Flags are matched case-insensitively for
        // consistency with the flag handling elsewhere in this codebase
        // (e.g. HasFlag uses StringComparer.OrdinalIgnoreCase).
        if (summary.Flags.Contains("live-signal", StringComparer.OrdinalIgnoreCase))
        {
            explanation = $"{explanation} ALERT: Live exploitation signal detected!";
        }
        else if (summary.Flags.Contains("kev", StringComparer.OrdinalIgnoreCase))
        {
            explanation = $"{explanation} This is a Known Exploited Vulnerability (KEV).";
        }
        else if (summary.Flags.Contains("vendor-na", StringComparer.OrdinalIgnoreCase))
        {
            explanation = $"{explanation} Vendor has confirmed not affected.";
        }
        return explanation;
    }
}

View File

@@ -1,6 +1,7 @@
using System;
using System.Collections.Immutable;
using StellaOps.Policy;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Materialization;
@@ -60,6 +61,11 @@ public sealed record PolicyExplainTrace
/// </summary>
public ImmutableArray<PolicyExplainVexImpact> VexImpacts { get; init; } = ImmutableArray<PolicyExplainVexImpact>.Empty;
/// <summary>
/// Evidence-weighted score result (if calculated).
/// </summary>
public EvidenceWeightedScoreResult? EvidenceWeightedScore { get; init; }
/// <summary>
/// Additional metadata (component PURL, SBOM ID, trace ID, reachability status, etc.).
/// </summary>

View File

@@ -0,0 +1,446 @@
// -----------------------------------------------------------------------------
// ConfidenceToEwsAdapter.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-032
// Description: Adapter to translate legacy Confidence scores to EWS format
// -----------------------------------------------------------------------------
using StellaOps.Policy.Confidence.Models;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
/// <summary>
/// Outcome of translating a legacy Confidence score into the EWS model,
/// bundling the adapted score, the source score, and the mapping details.
/// </summary>
public sealed record ConfidenceToEwsAdaptationResult
{
    /// <summary>
    /// Creates a new ConfidenceToEwsAdaptationResult.
    /// </summary>
    /// <exception cref="ArgumentNullException">Any argument is null.</exception>
    public ConfidenceToEwsAdaptationResult(
        EvidenceWeightedScoreResult ewsResult,
        ConfidenceScore originalConfidence,
        AdaptationDetails details)
    {
        ArgumentNullException.ThrowIfNull(ewsResult);
        ArgumentNullException.ThrowIfNull(originalConfidence);
        ArgumentNullException.ThrowIfNull(details);

        EwsResult = ewsResult;
        OriginalConfidence = originalConfidence;
        Details = details;
    }

    /// <summary>The adapted EWS result.</summary>
    public EvidenceWeightedScoreResult EwsResult { get; }

    /// <summary>The original Confidence score the adaptation started from.</summary>
    public ConfidenceScore OriginalConfidence { get; }

    /// <summary>How the adaptation was performed (mappings, strategy, warnings).</summary>
    public AdaptationDetails Details { get; }
}
/// <summary>
/// Describes how a Confidence-to-EWS adaptation was carried out:
/// per-dimension mappings, the strategy name, and any caveats.
/// </summary>
public sealed record AdaptationDetails
{
    /// <summary>
    /// Creates new AdaptationDetails.
    /// </summary>
    /// <exception cref="ArgumentNullException">Any argument is null.</exception>
    public AdaptationDetails(
        IReadOnlyDictionary<string, double> dimensionMappings,
        string mappingStrategy,
        IReadOnlyList<string> warnings)
    {
        ArgumentNullException.ThrowIfNull(dimensionMappings);
        ArgumentNullException.ThrowIfNull(mappingStrategy);
        ArgumentNullException.ThrowIfNull(warnings);

        DimensionMappings = dimensionMappings;
        MappingStrategy = mappingStrategy;
        Warnings = warnings;
    }

    /// <summary>How each Confidence factor was mapped to EWS dimensions.</summary>
    public IReadOnlyDictionary<string, double> DimensionMappings { get; }

    /// <summary>The strategy used for mapping (e.g., "direct", "interpolated").</summary>
    public string MappingStrategy { get; }

    /// <summary>Any warnings about the adaptation.</summary>
    public IReadOnlyList<string> Warnings { get; }
}
/// <summary>
/// Adapter to translate legacy Confidence scores to Evidence-Weighted Scores.
/// </summary>
/// <remarks>
/// <para>
/// The Confidence system uses a 0.0-1.0 scale where higher = more confidence in NOT being affected.
/// The EWS system uses a 0-100 scale where higher = more evidence of real risk.
/// </para>
/// <para>
/// Key differences:
/// - Confidence: High = likely not affected = lower risk
/// - EWS: High = likely affected = higher risk
/// </para>
/// <para>
/// Mapping strategy:
/// - Invert Confidence factors that measure "safety" to measure "risk"
/// - Map Confidence factors to closest EWS dimensions
/// - Apply EWS scaling (0-100 instead of 0.0-1.0)
/// </para>
/// </remarks>
public sealed class ConfidenceToEwsAdapter
{
    // Calculator used to turn the mapped input into a scored result.
    private readonly IEvidenceWeightedScoreCalculator _calculator;
    /// <summary>
    /// Creates a new ConfidenceToEwsAdapter.
    /// </summary>
    /// <param name="calculator">Calculator to use; defaults to <see cref="EvidenceWeightedScoreCalculator"/> when null.</param>
    public ConfidenceToEwsAdapter(IEvidenceWeightedScoreCalculator? calculator = null)
    {
        _calculator = calculator ?? new EvidenceWeightedScoreCalculator();
    }
    /// <summary>
    /// Adapts a Confidence score to an EWS result.
    /// </summary>
    /// <param name="confidence">The Confidence score to adapt.</param>
    /// <param name="findingId">The finding ID for the EWS result.</param>
    /// <returns>The adapted EWS result with details.</returns>
    /// <exception cref="ArgumentNullException"><paramref name="confidence"/> is null.</exception>
    /// <exception cref="ArgumentException"><paramref name="findingId"/> is null or whitespace.</exception>
    public ConfidenceToEwsAdaptationResult Adapt(ConfidenceScore confidence, string findingId)
    {
        ArgumentNullException.ThrowIfNull(confidence);
        ArgumentException.ThrowIfNullOrWhiteSpace(findingId);
        var (input, mappings, warnings) = MapConfidenceToEwsInput(confidence, findingId);
        // NOTE(review): always scores under DefaultProduction weights — confirm
        // this is intended when callers evaluate under a non-default policy.
        var ewsResult = _calculator.Calculate(input, EvidenceWeightPolicy.DefaultProduction);
        var details = new AdaptationDetails(
            dimensionMappings: mappings,
            mappingStrategy: "inverted-factor-mapping",
            warnings: warnings
        );
        return new ConfidenceToEwsAdaptationResult(
            ewsResult: ewsResult,
            originalConfidence: confidence,
            details: details
        );
    }
    /// <summary>
    /// Compares a Confidence score with an EWS result to assess alignment.
    /// </summary>
    /// <param name="confidence">The Confidence score.</param>
    /// <param name="ewsResult">The EWS result.</param>
    /// <returns>Comparison result with alignment details.</returns>
    public ConfidenceEwsComparison Compare(ConfidenceScore confidence, EvidenceWeightedScoreResult ewsResult)
    {
        ArgumentNullException.ThrowIfNull(confidence);
        ArgumentNullException.ThrowIfNull(ewsResult);
        // Adapt Confidence to EWS for comparison
        var adapted = Adapt(confidence, ewsResult.FindingId);
        // Calculate alignment
        var scoreDifference = Math.Abs(adapted.EwsResult.Score - ewsResult.Score);
        var bucketMatch = adapted.EwsResult.Bucket == ewsResult.Bucket;
        // Bucket alignment purely on absolute point difference (0-100 scale).
        var alignment = scoreDifference switch
        {
            < 5 => AlignmentLevel.Excellent,
            < 10 => AlignmentLevel.Good,
            < 20 => AlignmentLevel.Moderate,
            < 30 => AlignmentLevel.Poor,
            _ => AlignmentLevel.Divergent
        };
        return new ConfidenceEwsComparison(
            originalConfidence: confidence,
            originalEws: ewsResult,
            adaptedEws: adapted.EwsResult,
            scoreDifference: scoreDifference,
            bucketMatch: bucketMatch,
            alignment: alignment
        );
    }
    /// <summary>
    /// Builds the EWS input from Confidence factors, recording how each
    /// dimension was derived (mappings) and any gaps (warnings).
    /// </summary>
    private static (EvidenceWeightedScoreInput Input, Dictionary<string, double> Mappings, List<string> Warnings)
        MapConfidenceToEwsInput(ConfidenceScore confidence, string findingId)
    {
        var mappings = new Dictionary<string, double>(StringComparer.OrdinalIgnoreCase);
        var warnings = new List<string>();
        // Find factors by type
        var reachabilityFactor = confidence.Factors.FirstOrDefault(f => f.Type == ConfidenceFactorType.Reachability);
        var runtimeFactor = confidence.Factors.FirstOrDefault(f => f.Type == ConfidenceFactorType.Runtime);
        var vexFactor = confidence.Factors.FirstOrDefault(f => f.Type == ConfidenceFactorType.Vex);
        var provenanceFactor = confidence.Factors.FirstOrDefault(f => f.Type == ConfidenceFactorType.Provenance);
        var advisoryFactor = confidence.Factors.FirstOrDefault(f => f.Type == ConfidenceFactorType.Advisory);
        // Map Reachability (Confidence) → RCH (EWS)
        // Confidence: high = unreachable (safe) → EWS: invert so high = reachable (risky)
        var rch = InvertConfidenceFactor(reachabilityFactor, "Reachability", mappings, warnings);
        // Map Runtime (Confidence) → RTS (EWS)
        // Confidence: high = runtime contradicts (safe) → EWS: invert so high = runtime confirms (risky)
        var rts = InvertConfidenceFactor(runtimeFactor, "Runtime", mappings, warnings);
        // Map VEX (Confidence) → BKP (EWS)
        // VEX not_affected with high trust → BKP high means vendor confirmed safe
        // Note: This is a loose mapping since VEX and Backport are different concepts
        var bkp = MapVexToBackport(vexFactor, mappings, warnings);
        // Map Provenance/Advisory → SRC (EWS)
        // Provenance quality affects source trust
        var src = MapProvenanceToSourceTrust(provenanceFactor, advisoryFactor, mappings, warnings);
        // XPL (Exploit) - no direct Confidence equivalent
        // Default to neutral (0.5) as Confidence doesn't track exploit intelligence
        var xpl = 0.5;
        mappings["xpl"] = xpl;
        warnings.Add("No exploit factor in Confidence; defaulting XPL to 0.5");
        // MIT (Mitigation) - no direct Confidence equivalent
        // Default to 0 (no mitigation assumed)
        var mit = 0.0;
        mappings["mit"] = mit;
        warnings.Add("No mitigation factor in Confidence; defaulting MIT to 0.0");
        var input = new EvidenceWeightedScoreInput
        {
            FindingId = findingId,
            Rch = rch,
            Rts = rts,
            Bkp = bkp,
            Xpl = xpl,
            Src = src,
            Mit = mit
        };
        return (input, mappings, warnings);
    }
    /// <summary>
    /// Inverts a "safety" factor into a "risk" dimension value and records the
    /// mapping under "&lt;name&gt;_to_ews". Missing factors default to neutral 0.5.
    /// Assumes factor.RawValue lies in [0, 1] — TODO confirm against ConfidenceFactor.
    /// </summary>
    private static double InvertConfidenceFactor(
        ConfidenceFactor? factor,
        string name,
        Dictionary<string, double> mappings,
        List<string> warnings)
    {
        if (factor is null)
        {
            var defaultValue = 0.5;
            mappings[$"{name.ToLowerInvariant()}_to_ews"] = defaultValue;
            warnings.Add($"No {name} factor in Confidence; defaulting to {defaultValue}");
            return defaultValue;
        }
        // Invert: high confidence (safe) → low EWS (safe)
        // Low confidence (risky) → high EWS (risky)
        var inverted = 1.0 - (double)factor.RawValue;
        mappings[$"{name.ToLowerInvariant()}_to_ews"] = inverted;
        return inverted;
    }
    /// <summary>
    /// Maps the VEX trust factor onto the Backport (BKP) dimension; always
    /// records a warning because the two concepts only approximate each other.
    /// </summary>
    private static double MapVexToBackport(
        ConfidenceFactor? vexFactor,
        Dictionary<string, double> mappings,
        List<string> warnings)
    {
        if (vexFactor is null)
        {
            var defaultValue = 0.5;
            mappings["vex_to_bkp"] = defaultValue;
            warnings.Add("No VEX factor in Confidence; defaulting BKP to 0.5");
            return defaultValue;
        }
        // VEX high trust (not affected) → BKP high (backport confirms safe)
        // This is an approximation - VEX and backport serve different purposes
        // VEX says "vendor says not affected"
        // BKP says "version comparison shows patched"
        // We treat high VEX trust as evidence of being "handled" similarly to backport
        var bkp = (double)vexFactor.RawValue;
        mappings["vex_to_bkp"] = bkp;
        warnings.Add("VEX factor mapped to BKP (approximation - different semantic meanings)");
        return bkp;
    }
    /// <summary>
    /// Averages the Provenance and Advisory factors (each defaulting to 0.5
    /// when absent) into the Source Trust (SRC) dimension.
    /// </summary>
    private static double MapProvenanceToSourceTrust(
        ConfidenceFactor? provenanceFactor,
        ConfidenceFactor? advisoryFactor,
        Dictionary<string, double> mappings,
        List<string> warnings)
    {
        double provenanceValue = provenanceFactor is not null ? (double)provenanceFactor.RawValue : 0.5;
        double advisoryValue = advisoryFactor is not null ? (double)advisoryFactor.RawValue : 0.5;
        // Average provenance and advisory factors for source trust
        // High provenance quality + fresh advisory = high source trust
        var src = (provenanceValue + advisoryValue) / 2.0;
        mappings["provenance_to_src"] = provenanceValue;
        mappings["advisory_to_src"] = advisoryValue;
        mappings["src_combined"] = src;
        if (provenanceFactor is null && advisoryFactor is null)
        {
            warnings.Add("No Provenance or Advisory factors; defaulting SRC to 0.5");
        }
        return src;
    }
}
/// <summary>
/// Side-by-side comparison of a legacy Confidence score, a directly calculated
/// EWS result, and the EWS result derived from the Confidence score.
/// </summary>
public sealed record ConfidenceEwsComparison
{
    /// <summary>
    /// Creates a new ConfidenceEwsComparison.
    /// </summary>
    public ConfidenceEwsComparison(
        ConfidenceScore originalConfidence,
        EvidenceWeightedScoreResult originalEws,
        EvidenceWeightedScoreResult adaptedEws,
        int scoreDifference,
        bool bucketMatch,
        AlignmentLevel alignment)
    {
        OriginalConfidence = originalConfidence;
        OriginalEws = originalEws;
        AdaptedEws = adaptedEws;
        ScoreDifference = scoreDifference;
        BucketMatch = bucketMatch;
        Alignment = alignment;
    }

    /// <summary>The original Confidence score.</summary>
    public ConfidenceScore OriginalConfidence { get; }

    /// <summary>The original EWS result (from direct calculation).</summary>
    public EvidenceWeightedScoreResult OriginalEws { get; }

    /// <summary>EWS result adapted from Confidence score.</summary>
    public EvidenceWeightedScoreResult AdaptedEws { get; }

    /// <summary>Absolute difference between original and adapted EWS scores.</summary>
    public int ScoreDifference { get; }

    /// <summary>Whether the bucket assignment matches.</summary>
    public bool BucketMatch { get; }

    /// <summary>Overall alignment level.</summary>
    public AlignmentLevel Alignment { get; }

    /// <summary>
    /// Whether the scores are considered aligned (Moderate or better).
    /// </summary>
    public bool IsAligned => Alignment switch
    {
        AlignmentLevel.Excellent or AlignmentLevel.Good or AlignmentLevel.Moderate => true,
        _ => false,
    };

    /// <summary>
    /// Gets a single-line summary of the comparison.
    /// </summary>
    public string GetSummary() =>
        $"Confidence {OriginalConfidence.Value:P0} ({OriginalConfidence.Tier}) ↔ EWS {OriginalEws.Score} ({OriginalEws.Bucket}) | Adapted EWS {AdaptedEws.Score} ({AdaptedEws.Bucket}) | Diff={ScoreDifference}, Alignment={Alignment}";
}
/// <summary>
/// Level of alignment between Confidence and EWS scores, ordered from
/// closest agreement to widest divergence. Underlying values are explicit
/// so serialized/persisted values stay stable.
/// </summary>
public enum AlignmentLevel
{
    /// <summary>Score difference &lt; 5 points.</summary>
    Excellent = 0,

    /// <summary>Score difference &lt; 10 points.</summary>
    Good = 1,

    /// <summary>Score difference &lt; 20 points.</summary>
    Moderate = 2,

    /// <summary>Score difference &lt; 30 points.</summary>
    Poor = 3,

    /// <summary>Score difference ≥ 30 points.</summary>
    Divergent = 4,
}
/// <summary>
/// Convenience extensions for rough Confidence → EWS conversions.
/// </summary>
public static class ConfidenceToEwsExtensions
{
    /// <summary>
    /// Adapts a Confidence score to an approximate EWS score value (0-100).
    /// </summary>
    /// <remarks>
    /// This is a quick approximation that inverts the Confidence value.
    /// For accurate mapping, use ConfidenceToEwsAdapter.Adapt().
    /// </remarks>
    public static int ToApproximateEwsScore(this ConfidenceScore confidence)
    {
        // Confidence 1.0 (very confident safe) maps to EWS 0 (low risk);
        // Confidence 0.0 (no confidence) maps to EWS 100 (high risk).
        var inverted = 1.0m - confidence.Value;
        return (int)Math.Round(inverted * 100m);
    }

    /// <summary>
    /// Gets the approximate EWS bucket for a Confidence score.
    /// </summary>
    public static ScoreBucket ToApproximateEwsBucket(this ConfidenceScore confidence)
    {
        var approxScore = confidence.ToApproximateEwsScore();
        if (approxScore >= 90)
        {
            return ScoreBucket.ActNow;
        }
        if (approxScore >= 70)
        {
            return ScoreBucket.ScheduleNext;
        }
        return approxScore >= 40 ? ScoreBucket.Investigate : ScoreBucket.Watchlist;
    }

    /// <summary>
    /// Maps ConfidenceTier to approximate EWS ScoreBucket.
    /// Semantics are inverted: high confidence (safe) → low-priority bucket.
    /// </summary>
    public static ScoreBucket ToApproximateEwsBucket(this ConfidenceTier tier) => tier switch
    {
        ConfidenceTier.VeryHigh or ConfidenceTier.High => ScoreBucket.Watchlist,
        ConfidenceTier.Medium => ScoreBucket.Investigate,
        ConfidenceTier.Low => ScoreBucket.ScheduleNext,
        ConfidenceTier.VeryLow => ScoreBucket.ActNow,
        _ => ScoreBucket.Investigate
    };
}

View File

@@ -0,0 +1,390 @@
// -----------------------------------------------------------------------------
// DualEmitVerdictEnricher.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-033
// Description: Dual-emit mode for Confidence and EWS scores in verdicts
// -----------------------------------------------------------------------------
using System.Diagnostics.Metrics;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Policy.Confidence.Models;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
/// <summary>
/// Carries both scoring outputs for a verdict — the legacy Confidence score
/// and the Evidence-Weighted Score — plus a comparison when both exist.
/// </summary>
public sealed record DualEmitResult
{
    /// <summary>
    /// Creates a new DualEmitResult.
    /// </summary>
    public DualEmitResult(
        ConfidenceScore? confidence,
        EvidenceWeightedScoreResult? evidenceWeightedScore,
        DualEmitComparison? comparison)
    {
        Confidence = confidence;
        EvidenceWeightedScore = evidenceWeightedScore;
        Comparison = comparison;
    }

    /// <summary>The Confidence score (legacy); may be null.</summary>
    public ConfidenceScore? Confidence { get; }

    /// <summary>The Evidence-Weighted Score (new); may be null.</summary>
    public EvidenceWeightedScoreResult? EvidenceWeightedScore { get; }

    /// <summary>Comparison between the two scores when both are present.</summary>
    public DualEmitComparison? Comparison { get; }

    /// <summary>Whether both scores are present.</summary>
    public bool HasBothScores
    {
        get
        {
            return Confidence is not null
                && EvidenceWeightedScore is not null;
        }
    }

    /// <summary>
    /// Whether the scores are aligned; true when no comparison is available.
    /// </summary>
    public bool IsAligned
    {
        get
        {
            return Comparison is null || Comparison.IsAligned;
        }
    }
}
/// <summary>
/// Point-in-time comparison between a Confidence score and an EWS result,
/// including the normalized score gap and tier/bucket agreement.
/// </summary>
public sealed record DualEmitComparison
{
    /// <summary>
    /// Creates a new DualEmitComparison.
    /// </summary>
    public DualEmitComparison(
        decimal confidenceValue,
        int ewsScore,
        string confidenceTier,
        string ewsBucket,
        int scoreDifference,
        bool tierBucketMatch,
        bool isAligned)
    {
        ConfidenceValue = confidenceValue;
        EwsScore = ewsScore;
        ConfidenceTier = confidenceTier;
        EwsBucket = ewsBucket;
        ScoreDifference = scoreDifference;
        TierBucketMatch = tierBucketMatch;
        IsAligned = isAligned;
    }

    /// <summary>Confidence value [0, 1].</summary>
    public decimal ConfidenceValue { get; }

    /// <summary>EWS score [0, 100].</summary>
    public int EwsScore { get; }

    /// <summary>Confidence tier (VeryHigh, High, Medium, Low, VeryLow).</summary>
    public string ConfidenceTier { get; }

    /// <summary>EWS bucket (ActNow, ScheduleNext, Investigate, Watchlist).</summary>
    public string EwsBucket { get; }

    /// <summary>Absolute difference when Confidence is mapped to 0-100 scale.</summary>
    public int ScoreDifference { get; }

    /// <summary>Whether tier/bucket semantically match (High→Watchlist, Low→ActNow).</summary>
    public bool TierBucketMatch { get; }

    /// <summary>Whether scores are considered aligned (diff &lt; 20 and tier matches).</summary>
    public bool IsAligned { get; }

    /// <summary>
    /// Builds a comparison from a Confidence score and an EWS result.
    /// </summary>
    public static DualEmitComparison Create(ConfidenceScore confidence, EvidenceWeightedScoreResult ews)
    {
        ArgumentNullException.ThrowIfNull(confidence);
        ArgumentNullException.ThrowIfNull(ews);

        // Project Confidence onto the 0-100 risk scale (inverted: high
        // confidence in "not affected" means low risk).
        var invertedConfidenceScore = (int)Math.Round((1.0m - confidence.Value) * 100m);
        var difference = Math.Abs(invertedConfidenceScore - ews.Score);

        // Tier and bucket agree only under the inverted-semantics mapping.
        var tiersAgree = IsTierBucketMatch(confidence.Tier, ews.Bucket);

        // Aligned requires both a small gap (< 20 points) and tier agreement.
        var aligned = difference < 20 && tiersAgree;

        return new DualEmitComparison(
            confidence.Value,
            ews.Score,
            confidence.Tier.ToString(),
            ews.Bucket.ToString(),
            difference,
            tiersAgree,
            aligned);
    }

    /// <summary>
    /// Inverted-semantics agreement table: high Confidence (safe) pairs with
    /// low-priority buckets, low Confidence (risky) with high-priority ones.
    /// </summary>
    private static bool IsTierBucketMatch(Confidence.Models.ConfidenceTier tier, ScoreBucket bucket)
    {
        return tier switch
        {
            Confidence.Models.ConfidenceTier.VeryHigh => bucket == ScoreBucket.Watchlist,
            Confidence.Models.ConfidenceTier.High => bucket is ScoreBucket.Watchlist or ScoreBucket.Investigate,
            Confidence.Models.ConfidenceTier.Medium => bucket is ScoreBucket.Investigate or ScoreBucket.ScheduleNext,
            Confidence.Models.ConfidenceTier.Low => bucket is ScoreBucket.ScheduleNext or ScoreBucket.ActNow,
            Confidence.Models.ConfidenceTier.VeryLow => bucket == ScoreBucket.ActNow,
            _ => false
        };
    }
}
/// <summary>
/// Service for dual-emit mode that enriches verdicts with both Confidence and
/// EWS scores, producing a comparison when both are available.
/// </summary>
public interface IDualEmitVerdictEnricher
{
    /// <summary>
    /// Whether dual-emit mode is currently enabled (may change at runtime
    /// if the implementation reads live configuration).
    /// </summary>
    bool IsEnabled { get; }
    /// <summary>
    /// Enriches a verdict with both Confidence and EWS scores.
    /// </summary>
    /// <param name="confidence">The Confidence score (may be null).</param>
    /// <param name="ewsScore">The EWS score (may be null).</param>
    /// <returns>The dual-emit result with comparison if both present.</returns>
    DualEmitResult Enrich(ConfidenceScore? confidence, EvidenceWeightedScoreResult? ewsScore);
}
/// <summary>
/// Default <see cref="IDualEmitVerdictEnricher"/>: pairs legacy Confidence
/// scores with EWS results and records comparison metrics when enabled.
/// </summary>
public sealed class DualEmitVerdictEnricher : IDualEmitVerdictEnricher
{
    private readonly IOptionsMonitor<PolicyEvidenceWeightedScoreOptions> _options;
    private readonly ILogger<DualEmitVerdictEnricher> _logger;
    private readonly Counter<long> _verdictCounter;
    private readonly Counter<long> _alignmentStatusCounter;
    private readonly Histogram<double> _scoreGapHistogram;

    /// <summary>
    /// Creates a new DualEmitVerdictEnricher.
    /// </summary>
    /// <param name="options">Live options controlling dual-emit behavior.</param>
    /// <param name="logger">Logger for misalignment/telemetry diagnostics.</param>
    /// <param name="meterFactory">Optional meter factory; a standalone meter is created when absent.</param>
    public DualEmitVerdictEnricher(
        IOptionsMonitor<PolicyEvidenceWeightedScoreOptions> options,
        ILogger<DualEmitVerdictEnricher> logger,
        IMeterFactory? meterFactory = null)
    {
        ArgumentNullException.ThrowIfNull(options);
        ArgumentNullException.ThrowIfNull(logger);
        _options = options;
        _logger = logger;

        var meter = meterFactory?.Create("StellaOps.Policy.DualEmit")
            ?? new Meter("StellaOps.Policy.DualEmit");
        _verdictCounter = meter.CreateCounter<long>(
            "stellaops.policy.dual_emit.verdicts",
            "verdicts",
            "Number of verdicts processed in dual-emit mode");
        _alignmentStatusCounter = meter.CreateCounter<long>(
            "stellaops.policy.dual_emit.alignment",
            "verdicts",
            "Number of aligned/misaligned verdicts in dual-emit mode");
        _scoreGapHistogram = meter.CreateHistogram<double>(
            "stellaops.policy.dual_emit.score_difference",
            "points",
            "Distribution of score differences between Confidence and EWS");
    }

    /// <inheritdoc />
    public bool IsEnabled
    {
        get
        {
            var current = _options.CurrentValue;
            return current.Enabled && current.DualEmitMode;
        }
    }

    /// <inheritdoc />
    public DualEmitResult Enrich(ConfidenceScore? confidence, EvidenceWeightedScoreResult? ewsScore)
    {
        // Disabled, or one score missing: pass both through without comparing.
        if (!IsEnabled || confidence is null || ewsScore is null)
        {
            return new DualEmitResult(confidence, ewsScore, null);
        }

        var comparison = DualEmitComparison.Create(confidence, ewsScore);
        EmitTelemetry(comparison);
        return new DualEmitResult(confidence, ewsScore, comparison);
    }

    /// <summary>
    /// Records comparison metrics and logs misalignments; never throws.
    /// </summary>
    private void EmitTelemetry(DualEmitComparison comparison)
    {
        if (!_options.CurrentValue.EmitComparisonTelemetry)
        {
            return;
        }

        try
        {
            _verdictCounter.Add(1, new KeyValuePair<string, object?>("has_both", true));
            _alignmentStatusCounter.Add(1, new KeyValuePair<string, object?>(
                "status", comparison.IsAligned ? "aligned" : "misaligned"));
            _scoreGapHistogram.Record(comparison.ScoreDifference);

            if (!comparison.IsAligned)
            {
                _logger.LogDebug(
                    "Dual-emit score misalignment: Confidence={ConfidenceValue:P0} ({ConfidenceTier}) ↔ EWS={EwsScore} ({EwsBucket}), diff={ScoreDiff}",
                    comparison.ConfidenceValue,
                    comparison.ConfidenceTier,
                    comparison.EwsScore,
                    comparison.EwsBucket,
                    comparison.ScoreDifference);
            }
        }
        catch (Exception ex)
        {
            // Telemetry must never fail the enrichment path.
            _logger.LogWarning(ex, "Failed to emit dual-emit telemetry");
        }
    }
}
/// <summary>
/// Extension methods for dual-emit mode.
/// </summary>
public static class DualEmitExtensions
{
    /// <summary>
    /// Gets the primary score value based on configuration.
    /// </summary>
    /// <param name="result">The dual-emit result.</param>
    /// <param name="useEwsAsPrimary">Whether to use EWS as primary (otherwise Confidence).</param>
    /// <returns>The primary score as a value 0-100.</returns>
    public static int GetPrimaryScore(this DualEmitResult result, bool useEwsAsPrimary)
    {
        if (useEwsAsPrimary && result.EvidenceWeightedScore is { } ews)
        {
            return ews.Score;
        }

        // Confidence is [0,1] with high = good, so invert onto the 0-100 risk
        // scale; fall back to a neutral 50 when neither score is available.
        return result.Confidence is { } confidence
            ? (int)Math.Round((1.0m - confidence.Value) * 100m)
            : 50;
    }

    /// <summary>
    /// Gets the primary bucket/tier based on configuration.
    /// </summary>
    /// <param name="result">The dual-emit result.</param>
    /// <param name="useEwsAsPrimary">Whether to use EWS as primary.</param>
    /// <returns>The primary bucket/tier as a string.</returns>
    public static string GetPrimaryBucket(this DualEmitResult result, bool useEwsAsPrimary)
    {
        if (useEwsAsPrimary && result.EvidenceWeightedScore is { } ews)
        {
            return ews.Bucket.ToString();
        }

        if (result.Confidence is not { } confidence)
        {
            return "Investigate";
        }

        // Inverted mapping: high confidence implies low urgency.
        return confidence.Tier switch
        {
            ConfidenceTier.VeryHigh or ConfidenceTier.High => "Watchlist",
            ConfidenceTier.Medium => "Investigate",
            ConfidenceTier.Low => "ScheduleNext",
            ConfidenceTier.VeryLow => "ActNow",
            _ => "Investigate"
        };
    }

    /// <summary>
    /// Gets a summary string for the dual-emit result, listing whichever of the
    /// Confidence score, EWS score, and comparison are present, separated by " | ".
    /// </summary>
    public static string GetSummary(this DualEmitResult result)
    {
        var segments = new List<string>(3);

        if (result.Confidence is { } confidence)
        {
            segments.Add($"Confidence={confidence.Value:P0}({confidence.Tier})");
        }

        if (result.EvidenceWeightedScore is { } ews)
        {
            segments.Add($"EWS={ews.Score}({ews.Bucket})");
        }

        if (result.Comparison is { } comparison)
        {
            segments.Add($"Aligned={comparison.IsAligned}(diff={comparison.ScoreDifference})");
        }

        return string.Join(" | ", segments);
    }
}
/// <summary>
/// Registration helper for dual-emit mode.
/// Note: Actual DI registration will be handled by the host assembly
/// that has access to Microsoft.Extensions.DependencyInjection.
/// </summary>
internal static class DualEmitServiceCollectionHelpers
{
    /// <summary>
    /// Returns the service registration types for dual-emit services.
    /// </summary>
    public static (Type Service, Type Implementation) GetDualEmitServices()
        => (typeof(IDualEmitVerdictEnricher), typeof(DualEmitVerdictEnricher));
}

View File

@@ -0,0 +1,304 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-004 - Implement EvidenceWeightedScoreEnricher
using System.Collections.Concurrent;
using System.Diagnostics;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
/// <summary>
/// Enriches findings with Evidence-Weighted Scores by calling the normalizer aggregator and calculator.
/// </summary>
/// <remarks>
/// Per finding: honor the Enabled flag, consult the optional cache, aggregate
/// evidence into normalized input, resolve the weight policy (with option
/// overrides applied), calculate the score, then cache it. Exceptions are
/// converted to <see cref="ScoreEnrichmentResult.Failure"/> rather than thrown,
/// so scoring failures never break policy evaluation.
/// </remarks>
public sealed class EvidenceWeightedScoreEnricher : IFindingScoreEnricher
{
    private readonly INormalizerAggregator _aggregator;
    private readonly IEvidenceWeightedScoreCalculator _calculator;
    private readonly IEvidenceWeightPolicyProvider _policyProvider;
    private readonly IOptionsMonitor<PolicyEvidenceWeightedScoreOptions> _options;
    // Optional collaborators: both may be null and every use below is null-guarded.
    private readonly ILogger<EvidenceWeightedScoreEnricher>? _logger;
    private readonly IScoreEnrichmentCache? _cache;
    /// <summary>
    /// Creates a new enricher.
    /// </summary>
    /// <param name="aggregator">Aggregates raw finding evidence into calculator input.</param>
    /// <param name="calculator">Computes the Evidence-Weighted Score.</param>
    /// <param name="policyProvider">Supplies the default weight policy.</param>
    /// <param name="options">Live options (enable flag, caching, weight/bucket overrides).</param>
    /// <param name="logger">Optional; when null, logging is a no-op.</param>
    /// <param name="cache">Optional; when null, caching is skipped even if enabled in options.</param>
    public EvidenceWeightedScoreEnricher(
        INormalizerAggregator aggregator,
        IEvidenceWeightedScoreCalculator calculator,
        IEvidenceWeightPolicyProvider policyProvider,
        IOptionsMonitor<PolicyEvidenceWeightedScoreOptions> options,
        ILogger<EvidenceWeightedScoreEnricher>? logger = null,
        IScoreEnrichmentCache? cache = null)
    {
        _aggregator = aggregator ?? throw new ArgumentNullException(nameof(aggregator));
        _calculator = calculator ?? throw new ArgumentNullException(nameof(calculator));
        _policyProvider = policyProvider ?? throw new ArgumentNullException(nameof(policyProvider));
        _options = options ?? throw new ArgumentNullException(nameof(options));
        _logger = logger;
        _cache = cache;
    }
    /// <inheritdoc />
    public bool IsEnabled => _options.CurrentValue.Enabled;
    /// <inheritdoc />
    public ValueTask<ScoreEnrichmentResult> EnrichAsync(
        FindingEvidence evidence,
        CancellationToken cancellationToken = default)
    {
        // For now, the implementation is synchronous - async is for future when
        // we might need to fetch additional evidence asynchronously
        // (cancellationToken is therefore unused here).
        return ValueTask.FromResult(Enrich(evidence));
    }
    /// <inheritdoc />
    public ScoreEnrichmentResult Enrich(FindingEvidence evidence)
    {
        ArgumentNullException.ThrowIfNull(evidence);
        // Snapshot the options once so one evaluation sees a consistent view.
        var options = _options.CurrentValue;
        // Check if feature is enabled
        if (!options.Enabled)
        {
            return ScoreEnrichmentResult.Skipped(evidence.FindingId);
        }
        // Check cache first if enabled
        if (options.EnableCaching && _cache is not null)
        {
            if (_cache.TryGet(evidence.FindingId, out var cachedScore) && cachedScore is not null)
            {
                _logger?.LogDebug(
                    "Cache hit for EWS: FindingId={FindingId}, Score={Score}",
                    evidence.FindingId, cachedScore.Score);
                return ScoreEnrichmentResult.Success(
                    evidence.FindingId,
                    cachedScore,
                    fromCache: true);
            }
        }
        try
        {
            // Stopwatch covers aggregation + policy resolution + calculation.
            var stopwatch = Stopwatch.StartNew();
            // Aggregate evidence into normalized input
            var input = _aggregator.Aggregate(evidence);
            // Get policy (use configured digest or default)
            var policy = GetPolicy(options);
            // Calculate score
            var score = _calculator.Calculate(input, policy);
            stopwatch.Stop();
            // Cache the result if enabled. The Count check caps cache growth;
            // the check-then-set is racy under concurrency but only means the
            // cap may be slightly exceeded, which is benign.
            if (options.EnableCaching && _cache is not null && _cache.Count < options.MaxCachedScoresPerContext)
            {
                _cache.Set(evidence.FindingId, score);
            }
            _logger?.LogDebug(
                "Calculated EWS: FindingId={FindingId}, Score={Score}, Bucket={Bucket}, Duration={Duration}ms",
                evidence.FindingId, score.Score, score.Bucket, stopwatch.ElapsedMilliseconds);
            return ScoreEnrichmentResult.Success(
                evidence.FindingId,
                score,
                fromCache: false,
                duration: stopwatch.Elapsed);
        }
        catch (Exception ex)
        {
            // Scoring is best-effort: report the failure instead of propagating it.
            _logger?.LogWarning(
                ex,
                "Failed to calculate EWS for FindingId={FindingId}: {Error}",
                evidence.FindingId, ex.Message);
            return ScoreEnrichmentResult.Failure(evidence.FindingId, ex.Message);
        }
    }
    /// <inheritdoc />
    public async IAsyncEnumerable<ScoreEnrichmentResult> EnrichBatchAsync(
        IEnumerable<FindingEvidence> evidenceList,
        [System.Runtime.CompilerServices.EnumeratorCancellation] CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(evidenceList);
        // Yields results in input order; cancellation stops the stream quietly
        // (no OperationCanceledException) so partial batches remain usable.
        foreach (var evidence in evidenceList)
        {
            if (cancellationToken.IsCancellationRequested)
            {
                yield break;
            }
            yield return await EnrichAsync(evidence, cancellationToken);
        }
    }
    // Resolves the base policy and applies any option-level overrides.
    // NOTE(review): this performs a blocking async call per finding; if the
    // provider is not in-memory this is a per-finding cost — consider caching
    // the resolved default policy. Confirm provider cost before changing.
    private EvidenceWeightPolicy GetPolicy(PolicyEvidenceWeightedScoreOptions options)
    {
        // Get default policy synchronously (blocking call) - use cached policy in production
        // The async API is available but for the sync Enrich method we need sync access
        var defaultPolicy = _policyProvider
            .GetDefaultPolicyAsync("default", CancellationToken.None)
            .GetAwaiter()
            .GetResult();
        return ApplyWeightOverrides(defaultPolicy, options);
    }
    // Pure function: returns a copy of the policy with configured weight and
    // bucket-threshold overrides applied (records are non-destructively updated
    // via `with`).
    private static EvidenceWeightPolicy ApplyWeightOverrides(
        EvidenceWeightPolicy policy,
        PolicyEvidenceWeightedScoreOptions options)
    {
        // Apply weight overrides if configured
        if (options.Weights is not null)
        {
            var newWeights = options.Weights.ToWeights(policy.Weights);
            policy = policy with { Weights = newWeights };
        }
        // Apply bucket threshold overrides if configured
        if (options.BucketThresholds is not null)
        {
            var newThresholds = options.BucketThresholds.ToThresholds(policy.Buckets);
            policy = policy with { Buckets = newThresholds };
        }
        return policy;
    }
}
/// <summary>
/// In-memory cache for EWS scores within an evaluation context.
/// Thread-safe for concurrent access.
/// </summary>
public sealed class InMemoryScoreEnrichmentCache : IScoreEnrichmentCache
{
    // Finding ids are matched case-insensitively.
    private readonly ConcurrentDictionary<string, EvidenceWeightedScoreResult> _entries =
        new(StringComparer.OrdinalIgnoreCase);

    // Telemetry counters, read/written via Interlocked.
    private long _hits;
    private long _misses;
    private long _sets;

    /// <inheritdoc />
    public int Count => _entries.Count;

    /// <inheritdoc />
    public bool TryGet(string findingId, out EvidenceWeightedScoreResult? score)
    {
        ArgumentException.ThrowIfNullOrEmpty(findingId);

        if (!_entries.TryGetValue(findingId, out var cached))
        {
            Interlocked.Increment(ref _misses);
            score = null;
            return false;
        }

        Interlocked.Increment(ref _hits);
        score = cached;
        return true;
    }

    /// <inheritdoc />
    public void Set(string findingId, EvidenceWeightedScoreResult score)
    {
        ArgumentException.ThrowIfNullOrEmpty(findingId);
        ArgumentNullException.ThrowIfNull(score);

        // Last write wins for a given finding id.
        _entries[findingId] = score;
        Interlocked.Increment(ref _sets);
    }

    /// <inheritdoc />
    public void Clear() => _entries.Clear();

    /// <summary>
    /// Number of cache hits.
    /// </summary>
    public long Hits => Interlocked.Read(ref _hits);

    /// <summary>
    /// Number of cache misses.
    /// </summary>
    public long Misses => Interlocked.Read(ref _misses);

    /// <summary>
    /// Number of cache sets.
    /// </summary>
    public long Sets => Interlocked.Read(ref _sets);

    /// <summary>
    /// Cache hit rate (0-1); 0 when no lookups have been recorded.
    /// </summary>
    public double HitRate
        => (Hits + Misses) switch
        {
            0 => 0.0,
            var total => (double)Hits / total,
        };

    /// <summary>
    /// Gets cache statistics for telemetry.
    /// </summary>
    public CacheStatistics GetStatistics() => new(
        Count: Count,
        Hits: Hits,
        Misses: Misses,
        Sets: Sets,
        HitRate: HitRate);

    /// <summary>
    /// Resets telemetry counters; cached entries are left intact.
    /// </summary>
    public void ResetStatistics()
    {
        Interlocked.Exchange(ref _hits, 0);
        Interlocked.Exchange(ref _misses, 0);
        Interlocked.Exchange(ref _sets, 0);
    }
}
/// <summary>
/// Cache statistics for telemetry.
/// </summary>
/// <param name="Count">Current number of cached entries.</param>
/// <param name="Hits">Number of cache hits recorded.</param>
/// <param name="Misses">Number of cache misses recorded.</param>
/// <param name="Sets">Number of cache writes recorded.</param>
/// <param name="HitRate">Hit rate in the range 0-1 (0 when no lookups have been recorded).</param>
public readonly record struct CacheStatistics(
    int Count,
    long Hits,
    long Misses,
    long Sets,
    double HitRate);
/// <summary>
/// Factory for creating score enrichment caches.
/// </summary>
/// <remarks>
/// Allows a fresh cache instance per evaluation context instead of a shared one.
/// </remarks>
public interface IScoreEnrichmentCacheFactory
{
    /// <summary>
    /// Creates a new cache for an evaluation context.
    /// </summary>
    IScoreEnrichmentCache Create();
}
/// <summary>
/// Default factory that creates in-memory caches.
/// </summary>
public sealed class InMemoryScoreEnrichmentCacheFactory : IScoreEnrichmentCacheFactory
{
    /// <inheritdoc />
    public IScoreEnrichmentCache Create()
    {
        return new InMemoryScoreEnrichmentCache();
    }
}

View File

@@ -0,0 +1,130 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-037 - Extend AddPolicyEngine() to include EWS services
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Options;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
/// <summary>
/// Extension methods for registering Evidence-Weighted Score services in the Policy Engine.
/// </summary>
public static class EvidenceWeightedScoreServiceCollectionExtensions
{
    /// <summary>
    /// Adds Evidence-Weighted Score services to the Policy Engine.
    /// </summary>
    /// <remarks>
    /// Registers:
    /// - <see cref="PolicyEvidenceWeightedScoreOptions"/> via configuration binding
    /// - <see cref="IFindingScoreEnricher"/> for score calculation during policy evaluation
    /// - <see cref="IScoreEnrichmentCache"/> for caching (when enabled)
    /// - <see cref="IDualEmitVerdictEnricher"/> for dual-emit mode
    /// - <see cref="IMigrationTelemetryService"/> for migration metrics
    /// - <see cref="ConfidenceToEwsAdapter"/> for legacy score translation
    /// All service registrations use TryAdd, so host-supplied implementations win.
    /// </remarks>
    /// <param name="services">Service collection.</param>
    /// <returns>The service collection for chaining.</returns>
    public static IServiceCollection AddEvidenceWeightedScore(this IServiceCollection services)
    {
        // Options binding
        services.AddOptions<PolicyEvidenceWeightedScoreOptions>()
            .BindConfiguration(PolicyEvidenceWeightedScoreOptions.SectionName);
        // Core calculator from Signals library (if not already registered)
        services.TryAddSingleton<IEvidenceWeightedScoreCalculator, EvidenceWeightedScoreCalculator>();
        // Score enricher (invokes calculator during policy evaluation)
        services.TryAddSingleton<IFindingScoreEnricher, EvidenceWeightedScoreEnricher>();
        // Cache for scores within evaluation context
        services.TryAddSingleton<IScoreEnrichmentCache, InMemoryScoreEnrichmentCache>();
        // Dual-emit enricher for migration
        services.TryAddSingleton<IDualEmitVerdictEnricher, DualEmitVerdictEnricher>();
        // Migration telemetry
        services.TryAddSingleton<IMigrationTelemetryService, MigrationTelemetryService>();
        // Confidence adapter for legacy comparison
        services.TryAddSingleton<ConfidenceToEwsAdapter>();
        return services;
    }

    /// <summary>
    /// Adds Evidence-Weighted Score services with custom configuration.
    /// </summary>
    /// <remarks>
    /// The delegate is registered AFTER configuration binding so that code-supplied
    /// values take precedence: IConfigureOptions instances run in registration
    /// order, and later registrations overwrite earlier ones. (Registering the
    /// delegate first would let configuration silently override it.)
    /// </remarks>
    /// <param name="services">Service collection.</param>
    /// <param name="configure">Configuration action applied on top of bound configuration.</param>
    /// <returns>The service collection for chaining.</returns>
    public static IServiceCollection AddEvidenceWeightedScore(
        this IServiceCollection services,
        Action<PolicyEvidenceWeightedScoreOptions> configure)
    {
        // Bind configuration defaults first, then apply the delegate so it wins.
        services.AddEvidenceWeightedScore();
        services.Configure(configure);
        return services;
    }

    /// <summary>
    /// Conditionally adds Evidence-Weighted Score services based on configuration.
    /// </summary>
    /// <remarks>
    /// This method reads the configuration at registration time and only registers
    /// services if <see cref="PolicyEvidenceWeightedScoreOptions.Enabled"/> is true.
    /// Use this when you want zero overhead when EWS is disabled.
    /// </remarks>
    /// <param name="services">Service collection.</param>
    /// <param name="configuration">Configuration root for reading options.</param>
    /// <returns>The service collection for chaining.</returns>
    public static IServiceCollection AddEvidenceWeightedScoreIfEnabled(
        this IServiceCollection services,
        Microsoft.Extensions.Configuration.IConfiguration configuration)
    {
        var options = configuration
            .GetSection(PolicyEvidenceWeightedScoreOptions.SectionName)
            .Get<PolicyEvidenceWeightedScoreOptions>();
        if (options?.Enabled == true)
        {
            services.AddEvidenceWeightedScore();
        }
        else
        {
            // Register the no-op enricher by instance so activation does not
            // depend on the container finding an accessible constructor on
            // NullFindingScoreEnricher (its constructor is not public).
            services.TryAddSingleton<IFindingScoreEnricher>(NullFindingScoreEnricher.Instance);
        }
        return services;
    }

    /// <summary>
    /// Adds only the migration support services (telemetry, adapter) without full EWS.
    /// </summary>
    /// <remarks>
    /// Use this during Phase 1 (feature flag) when you want to prepare for migration
    /// but not yet enable EWS calculation.
    /// </remarks>
    /// <param name="services">Service collection.</param>
    /// <returns>The service collection for chaining.</returns>
    public static IServiceCollection AddEvidenceWeightedScoreMigrationSupport(
        this IServiceCollection services)
    {
        // Options binding
        services.AddOptions<PolicyEvidenceWeightedScoreOptions>()
            .BindConfiguration(PolicyEvidenceWeightedScoreOptions.SectionName);
        // Migration services only
        services.TryAddSingleton<IMigrationTelemetryService, MigrationTelemetryService>();
        services.TryAddSingleton<ConfidenceToEwsAdapter>();
        // Null enricher (no actual EWS calculation); registered by instance —
        // see AddEvidenceWeightedScoreIfEnabled for the rationale.
        services.TryAddSingleton<IFindingScoreEnricher>(NullFindingScoreEnricher.Instance);
        return services;
    }
}

View File

@@ -0,0 +1,197 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-003 - Create IFindingScoreEnricher interface
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
// Use FindingEvidence from the Normalizers namespace
// StellaOps.Signals.EvidenceWeightedScore.Normalizers.FindingEvidence
/// <summary>
/// Result of score enrichment for a finding.
/// </summary>
public sealed record ScoreEnrichmentResult
{
    /// <summary>Finding identifier.</summary>
    public required string FindingId { get; init; }

    /// <summary>
    /// The calculated Evidence-Weighted Score result.
    /// Null if scoring was not performed (e.g., feature disabled or error).
    /// </summary>
    public EvidenceWeightedScoreResult? Score { get; init; }

    /// <summary>
    /// Whether scoring was successful (i.e. a score was produced).
    /// </summary>
    public bool IsSuccess => Score is not null;

    /// <summary>
    /// Error message if scoring failed.
    /// </summary>
    public string? Error { get; init; }

    /// <summary>
    /// Whether the result came from cache.
    /// </summary>
    public bool FromCache { get; init; }

    /// <summary>
    /// Duration of score calculation (if not from cache).
    /// </summary>
    public TimeSpan? CalculationDuration { get; init; }

    /// <summary>
    /// Creates a successful result carrying the calculated score.
    /// </summary>
    public static ScoreEnrichmentResult Success(
        string findingId,
        EvidenceWeightedScoreResult score,
        bool fromCache = false,
        TimeSpan? duration = null)
    {
        return new ScoreEnrichmentResult
        {
            FindingId = findingId,
            Score = score,
            FromCache = fromCache,
            CalculationDuration = duration,
        };
    }

    /// <summary>
    /// Creates a failed result carrying the error message.
    /// </summary>
    public static ScoreEnrichmentResult Failure(string findingId, string error)
    {
        return new ScoreEnrichmentResult
        {
            FindingId = findingId,
            Error = error,
        };
    }

    /// <summary>
    /// Creates a skipped result (feature disabled); both Score and Error are null.
    /// </summary>
    public static ScoreEnrichmentResult Skipped(string findingId)
    {
        return new ScoreEnrichmentResult
        {
            FindingId = findingId,
        };
    }
}
/// <summary>
/// Interface for enriching findings with Evidence-Weighted Scores during policy evaluation.
/// </summary>
/// <remarks>
/// Callers can check <see cref="IsEnabled"/> to skip enrichment work entirely
/// when the feature is turned off.
/// </remarks>
public interface IFindingScoreEnricher
{
    /// <summary>
    /// Enriches a finding with an Evidence-Weighted Score.
    /// </summary>
    /// <param name="evidence">Evidence collected for the finding.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Score enrichment result.</returns>
    ValueTask<ScoreEnrichmentResult> EnrichAsync(
        FindingEvidence evidence,
        CancellationToken cancellationToken = default);
    /// <summary>
    /// Enriches a finding synchronously (for pipeline integration).
    /// </summary>
    /// <param name="evidence">Evidence collected for the finding.</param>
    /// <returns>Score enrichment result.</returns>
    ScoreEnrichmentResult Enrich(FindingEvidence evidence);
    /// <summary>
    /// Enriches multiple findings in batch.
    /// </summary>
    /// <remarks>
    /// The in-file implementations yield results in input order and end the
    /// stream quietly (no exception) once the token is cancelled.
    /// </remarks>
    /// <param name="evidenceList">List of evidence for findings.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>Enumerable of score enrichment results.</returns>
    IAsyncEnumerable<ScoreEnrichmentResult> EnrichBatchAsync(
        IEnumerable<FindingEvidence> evidenceList,
        CancellationToken cancellationToken = default);
    /// <summary>
    /// Whether EWS enrichment is enabled.
    /// </summary>
    bool IsEnabled { get; }
}
/// <summary>
/// Cache for EWS scores within an evaluation context.
/// Thread-safe for concurrent access.
/// </summary>
/// <remarks>
/// The cache itself enforces no capacity limit; callers in this file check
/// <see cref="Count"/> against their configured maximum before calling
/// <see cref="Set"/>.
/// </remarks>
public interface IScoreEnrichmentCache
{
    /// <summary>
    /// Tries to get a cached score for a finding.
    /// </summary>
    /// <param name="findingId">Finding identifier.</param>
    /// <param name="score">Cached score if found; null otherwise.</param>
    /// <returns>True if found in cache.</returns>
    bool TryGet(string findingId, out EvidenceWeightedScoreResult? score);
    /// <summary>
    /// Caches a score for a finding (the in-memory implementation overwrites
    /// any existing entry for the same id).
    /// </summary>
    /// <param name="findingId">Finding identifier.</param>
    /// <param name="score">Score to cache.</param>
    void Set(string findingId, EvidenceWeightedScoreResult score);
    /// <summary>
    /// Current cache size.
    /// </summary>
    int Count { get; }
    /// <summary>
    /// Clears the cache.
    /// </summary>
    void Clear();
}
/// <summary>
/// Null implementation of score enricher for when EWS is disabled.
/// Always reports disabled and returns skipped results without computing anything.
/// </summary>
public sealed class NullFindingScoreEnricher : IFindingScoreEnricher
{
    /// <summary>
    /// Singleton instance for callers that construct the enricher directly.
    /// </summary>
    public static NullFindingScoreEnricher Instance { get; } = new();

    /// <summary>
    /// Public (rather than private) so DI registrations such as
    /// <c>TryAddSingleton&lt;IFindingScoreEnricher, NullFindingScoreEnricher&gt;()</c>
    /// can activate the type — the default container requires an accessible
    /// constructor. Prefer <see cref="Instance"/> when not resolving from DI.
    /// </summary>
    public NullFindingScoreEnricher() { }

    /// <inheritdoc />
    public bool IsEnabled => false;

    /// <inheritdoc />
    public ValueTask<ScoreEnrichmentResult> EnrichAsync(
        FindingEvidence evidence,
        CancellationToken cancellationToken = default)
    {
        return ValueTask.FromResult(ScoreEnrichmentResult.Skipped(evidence.FindingId));
    }

    /// <inheritdoc />
    public ScoreEnrichmentResult Enrich(FindingEvidence evidence)
    {
        return ScoreEnrichmentResult.Skipped(evidence.FindingId);
    }

    /// <inheritdoc />
    public async IAsyncEnumerable<ScoreEnrichmentResult> EnrichBatchAsync(
        IEnumerable<FindingEvidence> evidenceList,
        [System.Runtime.CompilerServices.EnumeratorCancellation] CancellationToken cancellationToken = default)
    {
        // Satisfies the compiler's requirement for an await in an async iterator
        // without introducing any real asynchrony.
        await Task.CompletedTask;
        foreach (var evidence in evidenceList)
        {
            if (cancellationToken.IsCancellationRequested)
            {
                yield break;
            }
            yield return ScoreEnrichmentResult.Skipped(evidence.FindingId);
        }
    }
}

View File

@@ -0,0 +1,468 @@
// -----------------------------------------------------------------------------
// MigrationTelemetryService.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-034
// Description: Migration telemetry comparing Confidence vs EWS rankings
// -----------------------------------------------------------------------------
using System.Collections.Concurrent;
using System.Diagnostics.Metrics;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Policy.Confidence.Models;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
/// <summary>
/// Aggregated statistics for migration telemetry.
/// </summary>
public sealed record MigrationTelemetryStats
{
    /// <summary>
    /// Total verdicts processed.
    /// </summary>
    public long TotalVerdicts { get; init; }

    /// <summary>
    /// Verdicts with both Confidence and EWS scores.
    /// </summary>
    public long DualScoredVerdicts { get; init; }

    /// <summary>
    /// Verdicts where scores are aligned (diff &lt; 20).
    /// </summary>
    public long AlignedVerdicts { get; init; }

    /// <summary>
    /// Verdicts where tier/bucket match semantically.
    /// </summary>
    public long TierMatchVerdicts { get; init; }

    /// <summary>
    /// Fraction of dual-scored verdicts that were aligned (0-1);
    /// 0 when nothing has been dual-scored.
    /// </summary>
    public double AlignmentRate
        => DualScoredVerdicts <= 0 ? 0 : AlignedVerdicts / (double)DualScoredVerdicts;

    /// <summary>
    /// Fraction of dual-scored verdicts whose tier and bucket matched (0-1);
    /// 0 when nothing has been dual-scored.
    /// </summary>
    public double TierMatchRate
        => DualScoredVerdicts <= 0 ? 0 : TierMatchVerdicts / (double)DualScoredVerdicts;

    /// <summary>
    /// Average score difference when both scores present.
    /// </summary>
    public double AverageScoreDifference { get; init; }

    /// <summary>
    /// Distribution of score differences by range.
    /// </summary>
    public IReadOnlyDictionary<string, long> ScoreDifferenceDistribution { get; init; }
        = new Dictionary<string, long>();

    /// <summary>
    /// Distribution by Confidence tier.
    /// </summary>
    public IReadOnlyDictionary<string, long> ByConfidenceTier { get; init; }
        = new Dictionary<string, long>();

    /// <summary>
    /// Distribution by EWS bucket.
    /// </summary>
    public IReadOnlyDictionary<string, long> ByEwsBucket { get; init; }
        = new Dictionary<string, long>();

    /// <summary>
    /// Timestamp when stats were captured.
    /// </summary>
    public DateTimeOffset CapturedAt { get; init; } = DateTimeOffset.UtcNow;
}
/// <summary>
/// Individual ranking comparison sample for detailed analysis.
/// </summary>
public sealed record RankingComparisonSample
{
    /// <summary>
    /// Creates a new RankingComparisonSample.
    /// </summary>
    public RankingComparisonSample(
        string findingId,
        decimal confidenceValue,
        int ewsScore,
        ConfidenceTier confidenceTier,
        ScoreBucket ewsBucket,
        int scoreDifference,
        bool isAligned,
        bool tierBucketMatch,
        DateTimeOffset timestamp)
    {
        FindingId = findingId;
        ConfidenceValue = confidenceValue;
        EwsScore = ewsScore;
        ConfidenceTier = confidenceTier;
        EwsBucket = ewsBucket;
        ScoreDifference = scoreDifference;
        IsAligned = isAligned;
        TierBucketMatch = tierBucketMatch;
        Timestamp = timestamp;
    }
    /// <summary>Identifier of the finding that was compared.</summary>
    public string FindingId { get; }
    /// <summary>Legacy Confidence value; producers treat it as [0,1] and invert it onto a 0-100 scale.</summary>
    public decimal ConfidenceValue { get; }
    /// <summary>EWS score, compared on the same 0-100 scale as the inverted Confidence.</summary>
    public int EwsScore { get; }
    /// <summary>Confidence tier at the time of the comparison.</summary>
    public ConfidenceTier ConfidenceTier { get; }
    /// <summary>EWS bucket at the time of the comparison.</summary>
    public ScoreBucket EwsBucket { get; }
    /// <summary>Absolute difference between the inverted Confidence (0-100) and the EWS score.</summary>
    public int ScoreDifference { get; }
    /// <summary>True when the producer considered the scores aligned (difference below 20).</summary>
    public bool IsAligned { get; }
    /// <summary>True when the Confidence tier and EWS bucket matched semantically.</summary>
    public bool TierBucketMatch { get; }
    /// <summary>Capture time; producers in this file use DateTimeOffset.UtcNow.</summary>
    public DateTimeOffset Timestamp { get; }
}
/// <summary>
/// Service for tracking migration telemetry comparing Confidence vs EWS.
/// </summary>
/// <remarks>
/// Intended for hot paths: the in-file implementation no-ops when disabled and
/// logs-and-swallows internal failures rather than throwing.
/// </remarks>
public interface IMigrationTelemetryService
{
    /// <summary>
    /// Whether migration telemetry is enabled.
    /// </summary>
    bool IsEnabled { get; }
    /// <summary>
    /// Records a comparison between Confidence and EWS scores.
    /// No-op when <see cref="IsEnabled"/> is false.
    /// </summary>
    void RecordComparison(
        string findingId,
        ConfidenceScore confidence,
        EvidenceWeightedScoreResult ewsScore);
    /// <summary>
    /// Gets the current aggregated statistics.
    /// </summary>
    MigrationTelemetryStats GetStats();
    /// <summary>
    /// Gets recent comparison samples (for debugging).
    /// </summary>
    IReadOnlyList<RankingComparisonSample> GetRecentSamples(int count = 100);
    /// <summary>
    /// Resets all telemetry counters.
    /// </summary>
    void Reset();
}
/// <summary>
/// Implementation of migration telemetry service.
/// </summary>
public sealed class MigrationTelemetryService : IMigrationTelemetryService
{
private readonly IOptionsMonitor<PolicyEvidenceWeightedScoreOptions> _options;
private readonly ILogger<MigrationTelemetryService> _logger;
// Counters
private long _totalVerdicts;
private long _dualScoredVerdicts;
private long _alignedVerdicts;
private long _tierMatchVerdicts;
private long _totalScoreDifference;
// Distribution counters
private readonly ConcurrentDictionary<string, long> _scoreDiffDistribution = new();
private readonly ConcurrentDictionary<string, long> _byConfidenceTier = new();
private readonly ConcurrentDictionary<string, long> _byEwsBucket = new();
// Recent samples (circular buffer)
private readonly ConcurrentQueue<RankingComparisonSample> _recentSamples = new();
private const int MaxSamples = 1000;
// Metrics
private readonly Counter<long> _comparisonCounter;
private readonly Counter<long> _alignmentCounter;
private readonly Counter<long> _tierMatchCounter;
private readonly Histogram<double> _scoreDiffHistogram;
/// <summary>
/// Creates a new MigrationTelemetryService.
/// </summary>
public MigrationTelemetryService(
IOptionsMonitor<PolicyEvidenceWeightedScoreOptions> options,
ILogger<MigrationTelemetryService> logger,
IMeterFactory? meterFactory = null)
{
_options = options ?? throw new ArgumentNullException(nameof(options));
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
var meter = meterFactory?.Create("StellaOps.Policy.Migration")
?? new Meter("StellaOps.Policy.Migration");
_comparisonCounter = meter.CreateCounter<long>(
"stellaops.policy.migration.comparisons",
"comparisons",
"Total Confidence vs EWS comparisons");
_alignmentCounter = meter.CreateCounter<long>(
"stellaops.policy.migration.aligned",
"verdicts",
"Aligned verdict count");
_tierMatchCounter = meter.CreateCounter<long>(
"stellaops.policy.migration.tier_match",
"verdicts",
"Tier/bucket match count");
_scoreDiffHistogram = meter.CreateHistogram<double>(
"stellaops.policy.migration.score_diff",
"points",
"Score difference distribution");
// Initialize distribution buckets
foreach (var range in new[] { "0-5", "5-10", "10-20", "20-30", "30+" })
{
_scoreDiffDistribution[range] = 0;
}
foreach (var tier in Enum.GetNames<ConfidenceTier>())
{
_byConfidenceTier[tier] = 0;
}
foreach (var bucket in Enum.GetNames<ScoreBucket>())
{
_byEwsBucket[bucket] = 0;
}
}
/// <inheritdoc />
public bool IsEnabled => _options.CurrentValue.Enabled
&& _options.CurrentValue.DualEmitMode
&& _options.CurrentValue.EmitComparisonTelemetry;
/// <inheritdoc />
public void RecordComparison(
string findingId,
ConfidenceScore confidence,
EvidenceWeightedScoreResult ewsScore)
{
if (!IsEnabled)
{
return;
}
ArgumentException.ThrowIfNullOrWhiteSpace(findingId);
ArgumentNullException.ThrowIfNull(confidence);
ArgumentNullException.ThrowIfNull(ewsScore);
try
{
Interlocked.Increment(ref _totalVerdicts);
Interlocked.Increment(ref _dualScoredVerdicts);
// Calculate comparison metrics
var confidenceAs100 = (int)Math.Round((1.0m - confidence.Value) * 100m);
var scoreDiff = Math.Abs(confidenceAs100 - ewsScore.Score);
var isAligned = scoreDiff < 20;
var tierMatch = IsTierBucketMatch(confidence.Tier, ewsScore.Bucket);
// Update counters
if (isAligned)
{
Interlocked.Increment(ref _alignedVerdicts);
_alignmentCounter.Add(1);
}
if (tierMatch)
{
Interlocked.Increment(ref _tierMatchVerdicts);
_tierMatchCounter.Add(1);
}
Interlocked.Add(ref _totalScoreDifference, scoreDiff);
// Update distributions
var diffRange = scoreDiff switch
{
< 5 => "0-5",
< 10 => "5-10",
< 20 => "10-20",
< 30 => "20-30",
_ => "30+"
};
_scoreDiffDistribution.AddOrUpdate(diffRange, 1, (_, v) => v + 1);
_byConfidenceTier.AddOrUpdate(confidence.Tier.ToString(), 1, (_, v) => v + 1);
_byEwsBucket.AddOrUpdate(ewsScore.Bucket.ToString(), 1, (_, v) => v + 1);
// Record metrics
_comparisonCounter.Add(1, new KeyValuePair<string, object?>("aligned", isAligned));
_scoreDiffHistogram.Record(scoreDiff);
// Store sample
var sample = new RankingComparisonSample(
findingId: findingId,
confidenceValue: confidence.Value,
ewsScore: ewsScore.Score,
confidenceTier: confidence.Tier,
ewsBucket: ewsScore.Bucket,
scoreDifference: scoreDiff,
isAligned: isAligned,
tierBucketMatch: tierMatch,
timestamp: DateTimeOffset.UtcNow
);
_recentSamples.Enqueue(sample);
// Trim samples if needed
while (_recentSamples.Count > MaxSamples)
{
_recentSamples.TryDequeue(out _);
}
// Log significant misalignments
if (!isAligned && scoreDiff >= 30)
{
_logger.LogDebug(
"Significant score misalignment for {FindingId}: Confidence={ConfidenceValue:P0} ({Tier}) vs EWS={EwsScore} ({Bucket}), diff={Diff}",
findingId,
confidence.Value,
confidence.Tier,
ewsScore.Score,
ewsScore.Bucket,
scoreDiff);
}
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Failed to record migration telemetry for {FindingId}", findingId);
}
}
/// <inheritdoc />
public MigrationTelemetryStats GetStats()
{
    // Snapshot every scalar counter first (Interlocked.Read guarantees a
    // torn-read-free 64-bit read), then assemble the immutable stats record.
    var totalVerdicts = Interlocked.Read(ref _totalVerdicts);
    var dualScored = Interlocked.Read(ref _dualScoredVerdicts);
    var alignedCount = Interlocked.Read(ref _alignedVerdicts);
    var tierMatches = Interlocked.Read(ref _tierMatchVerdicts);
    var diffSum = Interlocked.Read(ref _totalScoreDifference);

    // Average is only meaningful over dual-scored verdicts; report 0 otherwise.
    var averageDiff = dualScored > 0
        ? (double)diffSum / dualScored
        : 0;

    return new MigrationTelemetryStats
    {
        TotalVerdicts = totalVerdicts,
        DualScoredVerdicts = dualScored,
        AlignedVerdicts = alignedCount,
        TierMatchVerdicts = tierMatches,
        AverageScoreDifference = averageDiff,
        // Copy the concurrent dictionaries so callers get a stable snapshot.
        ScoreDifferenceDistribution = new Dictionary<string, long>(_scoreDiffDistribution),
        ByConfidenceTier = new Dictionary<string, long>(_byConfidenceTier),
        ByEwsBucket = new Dictionary<string, long>(_byEwsBucket),
        CapturedAt = DateTimeOffset.UtcNow
    };
}
/// <inheritdoc />
public IReadOnlyList<RankingComparisonSample> GetRecentSamples(int count = 100)
{
    // Cap the request at the retention window; the queue is ordered
    // oldest-first, so TakeLast yields the newest samples.
    var take = Math.Min(count, MaxSamples);
    return _recentSamples.TakeLast(take).ToList();
}
/// <inheritdoc />
public void Reset()
{
    // Zero the scalar counters. Readers may observe a partially-reset
    // snapshot mid-way, which is acceptable for diagnostic telemetry.
    Interlocked.Exchange(ref _totalVerdicts, 0);
    Interlocked.Exchange(ref _dualScoredVerdicts, 0);
    Interlocked.Exchange(ref _alignedVerdicts, 0);
    Interlocked.Exchange(ref _tierMatchVerdicts, 0);
    Interlocked.Exchange(ref _totalScoreDifference, 0);

    // Clear the histogram-style distributions.
    _scoreDiffDistribution.Clear();
    _byConfidenceTier.Clear();
    _byEwsBucket.Clear();

    // Drain the bounded sample queue.
    while (_recentSamples.TryDequeue(out _))
    {
    }

    _logger.LogInformation("Migration telemetry reset");
}
/// <summary>
/// Determines whether a legacy confidence tier and an EWS bucket agree.
/// The mapping is deliberately inverted: the comparison pipeline treats
/// (1 - confidence) * 100 as the score analogue, so LOW confidence
/// corresponds to HIGH-urgency buckets (ActNow) and very high confidence
/// to Watchlist. Each bucket accepts two adjacent tiers to tolerate
/// boundary effects.
/// </summary>
private static bool IsTierBucketMatch(ConfidenceTier tier, ScoreBucket bucket)
{
    return bucket switch
    {
        ScoreBucket.Watchlist => tier is ConfidenceTier.VeryHigh or ConfidenceTier.High,
        ScoreBucket.Investigate => tier is ConfidenceTier.High or ConfidenceTier.Medium,
        ScoreBucket.ScheduleNext => tier is ConfidenceTier.Medium or ConfidenceTier.Low,
        ScoreBucket.ActNow => tier is ConfidenceTier.Low or ConfidenceTier.VeryLow,
        _ => false
    };
}
}
/// <summary>
/// Extension methods for migration telemetry reporting.
/// </summary>
public static class MigrationTelemetryExtensions
{
    /// <summary>
    /// Generates a human-readable, multi-line report from migration stats.
    /// </summary>
    /// <param name="stats">The captured telemetry snapshot.</param>
    /// <returns>A plain-text report joined with <see cref="Environment.NewLine"/> (no trailing newline).</returns>
    public static string ToReport(this MigrationTelemetryStats stats)
    {
        var lines = new List<string>
        {
            "=== Migration Telemetry Report ===",
            $"Captured: {stats.CapturedAt:O}",
            "",
            "--- Summary ---",
            $"Total Verdicts: {stats.TotalVerdicts:N0}",
            $"Dual-Scored: {stats.DualScoredVerdicts:N0}",
            $"Aligned: {stats.AlignedVerdicts:N0} ({stats.AlignmentRate:P1})",
            $"Tier Match: {stats.TierMatchVerdicts:N0} ({stats.TierMatchRate:P1})",
            $"Avg Score Diff: {stats.AverageScoreDifference:F1}",
            "",
            "--- Score Difference Distribution ---"
        };

        // Sort range labels by their numeric lower bound so the report reads
        // "0-5, 5-10, 10-20, 20-30, 30+". A plain alphabetical key sort would
        // place "5-10" after "30+".
        foreach (var (range, count) in stats.ScoreDifferenceDistribution
                     .OrderBy(kv => RangeLowerBound(kv.Key)))
        {
            var pct = stats.DualScoredVerdicts > 0 ? (double)count / stats.DualScoredVerdicts : 0;
            lines.Add($"  {range}: {count:N0} ({pct:P1})");
        }

        lines.Add("");
        lines.Add("--- By Confidence Tier ---");
        foreach (var (tier, count) in stats.ByConfidenceTier.OrderBy(kv => kv.Key))
        {
            lines.Add($"  {tier}: {count:N0}");
        }

        lines.Add("");
        lines.Add("--- By EWS Bucket ---");
        foreach (var (bucket, count) in stats.ByEwsBucket.OrderBy(kv => kv.Key))
        {
            lines.Add($"  {bucket}: {count:N0}");
        }

        return string.Join(Environment.NewLine, lines);
    }

    /// <summary>
    /// Parses the leading digits of a range label ("5-10" → 5, "30+" → 30).
    /// Labels without a leading number sort last.
    /// </summary>
    private static int RangeLowerBound(string range)
    {
        var i = 0;
        while (i < range.Length && char.IsDigit(range[i]))
        {
            i++;
        }

        return i > 0 && int.TryParse(range[..i], out var value) ? value : int.MaxValue;
    }

    /// <summary>
    /// Gets a single-line summary of the stats.
    /// </summary>
    public static string ToSummaryLine(this MigrationTelemetryStats stats)
    {
        return $"Migration: {stats.DualScoredVerdicts:N0} dual-scored, " +
               $"{stats.AlignmentRate:P0} aligned, " +
               $"{stats.TierMatchRate:P0} tier match, " +
               $"avg diff {stats.AverageScoreDifference:F1}";
    }
}

View File

@@ -0,0 +1,314 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-005, PINT-8200-006 - Integrate enricher into PolicyEvaluator pipeline
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
// Type aliases to avoid conflicts with types in StellaOps.Policy.Engine.Scoring
using EwsReachabilityInput = StellaOps.Signals.EvidenceWeightedScore.ReachabilityInput;
using EwsReachabilityState = StellaOps.Signals.EvidenceWeightedScore.ReachabilityState;
using EwsRuntimeInput = StellaOps.Signals.EvidenceWeightedScore.RuntimeInput;
using EwsRuntimePosture = StellaOps.Signals.EvidenceWeightedScore.RuntimePosture;
using EwsBackportInput = StellaOps.Signals.EvidenceWeightedScore.BackportInput;
using EwsBackportStatus = StellaOps.Signals.EvidenceWeightedScore.BackportStatus;
using EwsBackportEvidenceTier = StellaOps.Signals.EvidenceWeightedScore.BackportEvidenceTier;
using EwsExploitInput = StellaOps.Signals.EvidenceWeightedScore.ExploitInput;
using EwsKevStatus = StellaOps.Signals.EvidenceWeightedScore.KevStatus;
using EwsSourceTrustInput = StellaOps.Signals.EvidenceWeightedScore.SourceTrustInput;
using EwsIssuerType = StellaOps.Signals.EvidenceWeightedScore.IssuerType;
using EwsMitigationInput = StellaOps.Signals.EvidenceWeightedScore.MitigationInput;
using EwsActiveMitigation = StellaOps.Signals.EvidenceWeightedScore.ActiveMitigation;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
/// <summary>
/// Extends PolicyEvaluationContext with EWS evidence extraction.
/// Internal because PolicyEvaluationContext is internal.
/// </summary>
internal static class PolicyEvaluationContextEwsExtensions
{
    /// <summary>
    /// Extracts FindingEvidence from a policy evaluation context for EWS calculation.
    /// Maps existing context data to the normalizer input format. Each dimension
    /// extractor returns null when no usable evidence exists for that dimension.
    /// </summary>
    /// <param name="context">The policy evaluation context.</param>
    /// <param name="findingId">The finding identifier.</param>
    /// <param name="epssScore">EPSS score if available (0-1).</param>
    /// <param name="epssPercentile">EPSS percentile if available (0-100).</param>
    /// <param name="isInKev">Whether the CVE is in the KEV catalog.</param>
    /// <param name="kevAddedDate">When the CVE was added to KEV.</param>
    /// <returns>FindingEvidence for EWS calculation.</returns>
    /// <exception cref="ArgumentNullException">If <paramref name="context"/> is null.</exception>
    /// <exception cref="ArgumentException">If <paramref name="findingId"/> is null or empty.</exception>
    public static FindingEvidence ExtractEwsEvidence(
        this Evaluation.PolicyEvaluationContext context,
        string findingId,
        double? epssScore = null,
        double? epssPercentile = null,
        bool isInKev = false,
        DateTimeOffset? kevAddedDate = null)
    {
        ArgumentNullException.ThrowIfNull(context);
        ArgumentException.ThrowIfNullOrEmpty(findingId);

        return new FindingEvidence
        {
            FindingId = findingId,
            Reachability = ExtractReachability(context),
            Runtime = ExtractRuntime(context),
            Backport = ExtractBackport(context),
            Exploit = ExtractExploit(epssScore, epssPercentile, isInKev, kevAddedDate),
            SourceTrust = ExtractSourceTrust(context),
            Mitigations = ExtractMitigations(context)
        };
    }

    /// <summary>
    /// Maps the context's reachability facts onto an EWS reachability input,
    /// or null when there is no evidence at all.
    /// </summary>
    private static EwsReachabilityInput? ExtractReachability(Evaluation.PolicyEvaluationContext context)
    {
        var reachability = context.Reachability;

        // Map the free-form context state onto the ReachabilityState enum.
        // "reachable" is upgraded to DynamicReachable when runtime evidence
        // corroborates the static analysis.
        var state = reachability.State?.ToLowerInvariant() switch
        {
            "reachable" => reachability.HasRuntimeEvidence
                ? EwsReachabilityState.DynamicReachable
                : EwsReachabilityState.StaticReachable,
            "unreachable" => EwsReachabilityState.NotReachable,
            "conditional" => EwsReachabilityState.PotentiallyReachable,
            "under_investigation" => EwsReachabilityState.Unknown,
            "live_exploit" => EwsReachabilityState.LiveExploitPath,
            _ => EwsReachabilityState.Unknown
        };

        // Unknown state with zero confidence means no evidence was collected.
        if (state == EwsReachabilityState.Unknown && reachability.Confidence == 0)
        {
            return null;
        }

        return new EwsReachabilityInput
        {
            State = state,
            Confidence = (double)reachability.Confidence,
            HasTaintTracking = reachability.HasRuntimeEvidence,
            HopCount = 0, // Not available in the current context.
            EvidenceSource = reachability.Source
        };
    }

    /// <summary>
    /// Builds a runtime-signal input when the context carries runtime evidence;
    /// otherwise returns null.
    /// </summary>
    private static EwsRuntimeInput? ExtractRuntime(Evaluation.PolicyEvaluationContext context)
    {
        if (!context.Reachability.HasRuntimeEvidence)
        {
            return null;
        }

        // The context does not expose observation timestamps, so any runtime
        // evidence is treated as fully recent (factor 1.0).
        var recencyFactor = 1.0;

        return new EwsRuntimeInput
        {
            Posture = EwsRuntimePosture.ActiveTracing,
            ObservationCount = 1, // Default to 1 if we have evidence.
            LastObservation = context.Now,
            RecencyFactor = recencyFactor
        };
    }

    /// <summary>
    /// Derives backport evidence from VEX statements: the first statement whose
    /// status is "fixed" or "not_affected" wins. Returns null when no such
    /// statement exists.
    /// </summary>
    private static EwsBackportInput? ExtractBackport(Evaluation.PolicyEvaluationContext context)
    {
        var vexStatements = context.Vex.Statements;
        if (vexStatements.IsDefaultOrEmpty)
        {
            return null;
        }

        // Single pass over the statements (avoids a separate Any + FirstOrDefault
        // pair that enumerated twice with a duplicated predicate).
        var statement = vexStatements.FirstOrDefault(s =>
            s.Status.Equals("fixed", StringComparison.OrdinalIgnoreCase) ||
            s.Status.Equals("not_affected", StringComparison.OrdinalIgnoreCase));
        if (statement is null)
        {
            return null;
        }

        var status = statement.Status.Equals("fixed", StringComparison.OrdinalIgnoreCase)
            ? EwsBackportStatus.Fixed
            : EwsBackportStatus.NotAffected;

        return new EwsBackportInput
        {
            Status = status,
            EvidenceTier = EwsBackportEvidenceTier.VendorVex, // VEX-based evidence.
            EvidenceSource = context.Advisory.Source ?? "unknown",
            Confidence = 0.8, // VEX statements have high confidence.
            ProofId = statement.StatementId
        };
    }

    /// <summary>
    /// Builds exploit-likelihood input from the EPSS/KEV data supplied by the
    /// caller, or null when neither an EPSS score nor a KEV flag is present.
    /// </summary>
    private static EwsExploitInput? ExtractExploit(
        double? epssScore,
        double? epssPercentile,
        bool isInKev,
        DateTimeOffset? kevAddedDate)
    {
        if (!epssScore.HasValue && !isInKev)
        {
            return null;
        }

        return new EwsExploitInput
        {
            EpssScore = epssScore ?? 0.0,
            EpssPercentile = epssPercentile ?? 0.0,
            KevStatus = isInKev ? EwsKevStatus.InKev : EwsKevStatus.NotInKev,
            KevAddedDate = kevAddedDate,
            PublicExploitAvailable = false // Would need an additional data source.
        };
    }

    /// <summary>
    /// Scores trust in the advisory source from issuer type, VEX coverage and
    /// provenance attestation; null when the source is unknown.
    /// </summary>
    private static EwsSourceTrustInput? ExtractSourceTrust(Evaluation.PolicyEvaluationContext context)
    {
        var source = context.Advisory.Source;
        if (string.IsNullOrEmpty(source))
        {
            return null;
        }

        var issuerType = MapSourceToIssuerType(source);

        // Coverage is higher when any VEX statements exist at all.
        var vexCoverage = context.Vex.Statements.IsDefaultOrEmpty ? 0.3 : 0.7;

        // Cryptographic provenance attestation lifts both trust and replayability.
        var provenanceScore = context.ProvenanceAttested == true ? 0.8 : 0.4;
        var replayability = context.ProvenanceAttested == true ? 0.9 : 0.5;

        return new EwsSourceTrustInput
        {
            IssuerType = issuerType,
            ProvenanceTrust = provenanceScore,
            CoverageCompleteness = vexCoverage,
            Replayability = replayability,
            IsCryptographicallyAttested = context.ProvenanceAttested == true
        };
    }

    /// <summary>
    /// Classifies an advisory source string into an issuer type via
    /// case-insensitive substring probes, checked from most to least trusted.
    /// </summary>
    private static EwsIssuerType MapSourceToIssuerType(string source)
    {
        // Ordinal case-insensitive probe; avoids allocating a lowered copy of
        // the source string for every comparison.
        static bool Has(string haystack, string needle) =>
            haystack.Contains(needle, StringComparison.OrdinalIgnoreCase);

        // Vendor sources.
        if (Has(source, "vendor") ||
            Has(source, "red hat") ||
            Has(source, "redhat") ||
            Has(source, "microsoft") ||
            Has(source, "google") ||
            Has(source, "oracle") ||
            Has(source, "vmware") ||
            Has(source, "cisco") ||
            Has(source, "apache"))
        {
            return EwsIssuerType.Vendor;
        }

        // Distribution sources.
        if (Has(source, "distro") ||
            Has(source, "ubuntu") ||
            Has(source, "debian") ||
            Has(source, "alpine") ||
            Has(source, "fedora") ||
            Has(source, "centos") ||
            Has(source, "suse") ||
            Has(source, "canonical"))
        {
            return EwsIssuerType.Distribution;
        }

        // CNA / government databases.
        if (Has(source, "nvd") ||
            Has(source, "cve") ||
            Has(source, "nist") ||
            Has(source, "cisa") ||
            Has(source, "mitre"))
        {
            return EwsIssuerType.Cna;
        }

        // Security researchers and scanners.
        if (Has(source, "research") ||
            Has(source, "security") ||
            Has(source, "vuln") ||
            Has(source, "snyk") ||
            Has(source, "qualys"))
        {
            return EwsIssuerType.SecurityResearcher;
        }

        // Anything else is community-sourced.
        return EwsIssuerType.Community;
    }

    /// <summary>
    /// Extracts active mitigations. Currently always returns null: no mitigation
    /// sources are wired into the evaluation context yet.
    /// </summary>
    private static EwsMitigationInput? ExtractMitigations(Evaluation.PolicyEvaluationContext context)
    {
        var mitigations = new List<EwsActiveMitigation>();

        // TODO: In a full implementation, this would check context for:
        // - Network isolation flags
        // - Feature flags
        // - Seccomp/AppArmor profiles
        // - Runtime protections
        if (mitigations.Count == 0)
        {
            return null;
        }

        return new EwsMitigationInput
        {
            ActiveMitigations = mitigations,
            CombinedEffectiveness = CalculateCombinedEffectiveness(mitigations)
        };
    }

    /// <summary>
    /// Combines mitigation effectiveness with diminishing returns; the result
    /// equals 1 - Π(1 - e_i), clamped to [0, 1].
    /// </summary>
    private static double CalculateCombinedEffectiveness(IReadOnlyList<EwsActiveMitigation> mitigations)
    {
        if (mitigations.Count == 0)
        {
            return 0.0;
        }

        // Each mitigation only reduces the residual risk left over by the
        // mitigations already applied.
        var combined = 0.0;
        var remaining = 1.0;
        foreach (var mitigation in mitigations.OrderByDescending(m => m.Effectiveness))
        {
            combined += mitigation.Effectiveness * remaining;
            remaining *= (1.0 - mitigation.Effectiveness);
        }

        return Math.Clamp(combined, 0.0, 1.0);
    }
}

View File

@@ -0,0 +1,232 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-001 - Create PolicyEvidenceWeightedScoreOptions
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
/// <summary>
/// Configuration options for Evidence-Weighted Score integration in the Policy Engine.
/// </summary>
public sealed class PolicyEvidenceWeightedScoreOptions
{
    /// <summary>
    /// Name of the configuration section these options bind to.
    /// </summary>
    public const string SectionName = "PolicyEngine:EvidenceWeightedScore";

    /// <summary>
    /// Master switch for EWS in the policy engine. Defaults to false so a
    /// rollout starts with legacy Confidence scoring only.
    /// </summary>
    public bool Enabled { get; set; }

    /// <summary>
    /// When true (the default), both Confidence and EWS scores are emitted
    /// during migration, enabling A/B comparison and gradual rollout.
    /// Has no effect unless <see cref="Enabled"/> is true.
    /// </summary>
    public bool DualEmitMode { get; set; } = true;

    /// <summary>
    /// When true, EWS drives verdict status decisions; when false (the
    /// default), EWS is still calculated but Confidence decides.
    /// Has no effect unless <see cref="Enabled"/> is true.
    /// </summary>
    public bool UseAsPrimaryScore { get; set; }

    /// <summary>
    /// Emit comparison telemetry between EWS and Confidence scores.
    /// Only meaningful while <see cref="DualEmitMode"/> is true.
    /// </summary>
    public bool EmitComparisonTelemetry { get; set; } = true;

    /// <summary>
    /// Cache EWS results within a single evaluation context.
    /// Defaults to true for performance.
    /// </summary>
    public bool EnableCaching { get; set; } = true;

    /// <summary>
    /// Upper bound on cached scores per evaluation context, guarding against
    /// unbounded memory use during large batch evaluations.
    /// </summary>
    public int MaxCachedScoresPerContext { get; set; } = 10_000;

    /// <summary>
    /// Policy version/digest to evaluate with; null selects the default
    /// policy from options. May be overridden per tenant.
    /// </summary>
    public string? PolicyDigest { get; set; }

    /// <summary>
    /// Per-dimension weight overrides; null keeps the underlying
    /// calculator's defaults.
    /// </summary>
    public EvidenceWeightsConfiguration? Weights { get; set; }

    /// <summary>
    /// Bucket threshold overrides; null keeps the default thresholds.
    /// </summary>
    public BucketThresholdsConfiguration? BucketThresholds { get; set; }

    /// <summary>
    /// Include the full EWS breakdown in verdicts. Disabling shrinks the
    /// verdict payload at the cost of explainability.
    /// </summary>
    public bool IncludeBreakdownInVerdict { get; set; } = true;

    /// <summary>
    /// Include score attestation proofs in verdicts; required for audit
    /// trails and reproducibility verification.
    /// </summary>
    public bool IncludeScoringProof { get; set; } = true;

    /// <summary>
    /// Validates the configured values, throwing
    /// <see cref="InvalidOperationException"/> on the first violation, then
    /// cascades into the nested weight/threshold configurations when present.
    /// </summary>
    public void Validate()
    {
        switch (MaxCachedScoresPerContext)
        {
            case < 100:
                throw new InvalidOperationException(
                    $"{nameof(MaxCachedScoresPerContext)} must be at least 100, got {MaxCachedScoresPerContext}");
            case > 1_000_000:
                throw new InvalidOperationException(
                    $"{nameof(MaxCachedScoresPerContext)} must not exceed 1,000,000, got {MaxCachedScoresPerContext}");
        }

        Weights?.Validate();
        BucketThresholds?.Validate();
    }
}
/// <summary>
/// Custom weight configuration for EWS dimensions.
/// </summary>
public sealed class EvidenceWeightsConfiguration
{
    /// <summary>Reachability weight (0-1).</summary>
    public double? Rch { get; set; }

    /// <summary>Runtime signal weight (0-1).</summary>
    public double? Rts { get; set; }

    /// <summary>Backport evidence weight (0-1).</summary>
    public double? Bkp { get; set; }

    /// <summary>Exploit likelihood weight (0-1).</summary>
    public double? Xpl { get; set; }

    /// <summary>Source trust weight (0-1).</summary>
    public double? Src { get; set; }

    /// <summary>Mitigation weight (0-1, subtractive).</summary>
    public double? Mit { get; set; }

    /// <summary>
    /// Materializes an <see cref="EvidenceWeights"/> value, falling back to
    /// <paramref name="defaults"/> for every dimension left unset.
    /// </summary>
    public EvidenceWeights ToWeights(EvidenceWeights defaults)
    {
        return defaults with
        {
            Rch = Rch ?? defaults.Rch,
            Rts = Rts ?? defaults.Rts,
            Bkp = Bkp ?? defaults.Bkp,
            Xpl = Xpl ?? defaults.Xpl,
            Src = Src ?? defaults.Src,
            Mit = Mit ?? defaults.Mit
        };
    }

    /// <summary>
    /// Throws <see cref="InvalidOperationException"/> if any configured
    /// weight falls outside the inclusive [0, 1] range; unset weights are
    /// skipped.
    /// </summary>
    public void Validate()
    {
        // Walk name/value pairs so each dimension reports under its own name.
        foreach (var (name, value) in new[]
        {
            (nameof(Rch), Rch),
            (nameof(Rts), Rts),
            (nameof(Bkp), Bkp),
            (nameof(Xpl), Xpl),
            (nameof(Src), Src),
            (nameof(Mit), Mit)
        })
        {
            // A null value means "use default" and is always valid.
            if (value is < 0.0 or > 1.0)
            {
                throw new InvalidOperationException(
                    $"Weight '{name}' must be between 0 and 1, got {value.Value}");
            }
        }
    }
}
/// <summary>
/// Custom bucket threshold configuration.
/// </summary>
public sealed class BucketThresholdsConfiguration
{
    /// <summary>Minimum score for ActNow bucket (default: 90).</summary>
    public int? ActNowMin { get; set; }

    /// <summary>Minimum score for ScheduleNext bucket (default: 70).</summary>
    public int? ScheduleNextMin { get; set; }

    /// <summary>Minimum score for Investigate bucket (default: 40).</summary>
    public int? InvestigateMin { get; set; }

    /// <summary>
    /// Materializes <see cref="BucketThresholds"/>, keeping the value from
    /// <paramref name="defaults"/> for any threshold left unset.
    /// </summary>
    public BucketThresholds ToThresholds(BucketThresholds defaults)
    {
        return defaults with
        {
            ActNowMin = ActNowMin ?? defaults.ActNowMin,
            ScheduleNextMin = ScheduleNextMin ?? defaults.ScheduleNextMin,
            InvestigateMin = InvestigateMin ?? defaults.InvestigateMin
        };
    }

    /// <summary>
    /// Verifies the effective thresholds descend from ActNow through
    /// ScheduleNext to Investigate, that Investigate is non-negative, and
    /// that ActNow does not exceed 100. Throws
    /// <see cref="InvalidOperationException"/> on the first violation.
    /// </summary>
    public void Validate()
    {
        // Effective values: unset thresholds fall back to documented defaults.
        var actNow = ActNowMin ?? 90;
        var scheduleNext = ScheduleNextMin ?? 70;
        var investigate = InvestigateMin ?? 40;

        // Local helper keeps each ordering check to a single readable branch.
        static void Fail(string message) => throw new InvalidOperationException(message);

        if (actNow < scheduleNext)
        {
            Fail($"ActNowMin threshold ({actNow}) must be >= ScheduleNextMin threshold ({scheduleNext})");
        }

        if (scheduleNext < investigate)
        {
            Fail($"ScheduleNextMin threshold ({scheduleNext}) must be >= InvestigateMin threshold ({investigate})");
        }

        if (investigate < 0)
        {
            Fail($"InvestigateMin threshold ({investigate}) must be >= 0");
        }

        if (actNow > 100)
        {
            Fail($"ActNowMin threshold ({actNow}) must be <= 100");
        }
    }
}

View File

@@ -0,0 +1,554 @@
// -----------------------------------------------------------------------------
// DslCompletionProvider.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-019
// Description: Provides DSL autocomplete hints for score fields and other constructs
// -----------------------------------------------------------------------------
using System.Collections.Immutable;
namespace StellaOps.PolicyDsl;
/// <summary>
/// Provides completion hints for the Stella Policy DSL.
/// This provider generates structured completion suggestions that can be used
/// by any editor client (Monaco, VS Code, etc.).
/// </summary>
public static class DslCompletionProvider
{
    /// <summary>
    /// Gets the full catalog of completion items grouped by category.
    /// </summary>
    public static DslCompletionCatalog GetCompletionCatalog() => DslCompletionCatalog.Instance;

    /// <summary>
    /// Gets completion items relevant for the given context.
    /// </summary>
    /// <param name="context">The completion context including cursor position and text.</param>
    /// <returns>Completion items filtered down to what fits the trigger text.</returns>
    public static ImmutableArray<DslCompletionItem> GetCompletionsForContext(DslCompletionContext context)
    {
        ArgumentNullException.ThrowIfNull(context);

        var catalog = DslCompletionCatalog.Instance;
        var text = context.TriggerText;

        // 1) Member access on a known namespace ("score.", "vex.", ...):
        //    offer only that namespace's fields.
        foreach (var (suffix, fields) in new (string, ImmutableArray<DslCompletionItem>)[]
        {
            ("score.", catalog.ScoreFields),
            ("sbom.", catalog.SbomFields),
            ("advisory.", catalog.AdvisoryFields),
            ("vex.", catalog.VexFields),
            ("signals.", catalog.SignalFields),
            ("reachability.", catalog.ReachabilityFields)
        })
        {
            if (text.EndsWith(suffix, StringComparison.Ordinal))
            {
                return fields;
            }
        }

        // 2) Value positions: offer the legal constants for the field being
        //    compared. Probe order is significant (bucket/flags before the
        //    broader status/justification checks).
        if (IsScoreBucketContext(text))
        {
            return catalog.ScoreBuckets;
        }

        if (IsScoreFlagContext(text))
        {
            return catalog.ScoreFlags;
        }

        if (IsVexStatusContext(text))
        {
            return catalog.VexStatuses;
        }

        if (IsVexJustificationContext(text))
        {
            return catalog.VexJustifications;
        }

        // 3) Action position (directly after 'then' or 'else').
        if (IsActionContext(text))
        {
            return catalog.Actions;
        }

        // 4) Default: all top-level completions, in catalog order.
        return [.. catalog.Keywords, .. catalog.Functions, .. catalog.Namespaces];
    }

    /// <summary>True when the text ends with any of the given suffixes (ordinal).</summary>
    private static bool EndsWithAny(string text, params string[] suffixes)
    {
        foreach (var suffix in suffixes)
        {
            if (text.EndsWith(suffix, StringComparison.Ordinal))
            {
                return true;
            }
        }

        return false;
    }

    // Cursor sits where a score.bucket value belongs (after ==, != or "in [").
    private static bool IsScoreBucketContext(string text) =>
        text.Contains("score.bucket", StringComparison.OrdinalIgnoreCase) &&
        EndsWithAny(text, "== ", "!= ", "in [", "== \"");

    // Cursor sits where a score flag literal belongs.
    private static bool IsScoreFlagContext(string text) =>
        text.Contains("score.flags", StringComparison.OrdinalIgnoreCase) &&
        EndsWithAny(text, "contains ", "contains \"", "in [");

    // Cursor sits where a VEX status value belongs (also after the ':=' assignment).
    private static bool IsVexStatusContext(string text) =>
        text.Contains("status", StringComparison.OrdinalIgnoreCase) &&
        EndsWithAny(text, "== ", ":= ", "!= ", "== \"", ":= \"");

    // Cursor sits where a VEX justification value belongs.
    private static bool IsVexJustificationContext(string text) =>
        text.Contains("justification", StringComparison.OrdinalIgnoreCase) &&
        EndsWithAny(text, "== ", "!= ", "== \"");

    // Cursor directly follows a 'then' or 'else' keyword (trailing spaces ignored).
    private static bool IsActionContext(string text)
    {
        var trimmed = text.TrimEnd();
        return trimmed.EndsWith(" then", StringComparison.OrdinalIgnoreCase) ||
               trimmed.EndsWith(" else", StringComparison.OrdinalIgnoreCase);
    }
}
/// <summary>
/// Context for completion requests.
/// </summary>
/// <remarks>
/// <see cref="DslCompletionProvider.GetCompletionsForContext"/> currently
/// inspects only <paramref name="TriggerText"/>; line and column are carried
/// for editor clients that want positional filtering.
/// </remarks>
/// <param name="TriggerText">The text up to and including the cursor position.</param>
/// <param name="LineNumber">The 1-based line number of the cursor (defaults to 1).</param>
/// <param name="Column">The 1-based column number of the cursor (defaults to 1).</param>
public sealed record DslCompletionContext(
    string TriggerText,
    int LineNumber = 1,
    int Column = 1);
/// <summary>
/// A single completion item.
/// </summary>
/// <remarks>
/// When <paramref name="IsSnippet"/> is true, <paramref name="InsertText"/>
/// uses ${n:placeholder} tab-stop syntax, as seen in the catalog's keyword
/// and function entries.
/// </remarks>
/// <param name="Label">The display label for the completion.</param>
/// <param name="Kind">The kind of completion (keyword, field, function, etc.).</param>
/// <param name="InsertText">The text to insert when the completion is accepted.</param>
/// <param name="Documentation">Documentation describing the completion item.</param>
/// <param name="Detail">Additional detail shown in the completion list (optional).</param>
/// <param name="IsSnippet">Whether the insert text is a snippet with placeholders.</param>
public sealed record DslCompletionItem(
    string Label,
    DslCompletionKind Kind,
    string InsertText,
    string Documentation,
    string? Detail = null,
    bool IsSnippet = false);
/// <summary>
/// The kind of completion item.
/// </summary>
/// <remarks>
/// NOTE(review): the explicit numeric values look chosen to mirror an editor
/// completion-kind enum (LSP / Monaco) so they can be passed through to the
/// client unchanged — confirm against the intended client before renumbering.
/// </remarks>
public enum DslCompletionKind
{
    Keyword = 14,
    Function = 1,
    Field = 5,
    Constant = 21,
    Namespace = 9,
    Snippet = 15,
}
/// <summary>
/// Catalog of all completion items, organized by category.
/// </summary>
public sealed class DslCompletionCatalog
{
/// <summary>
/// Singleton instance of the completion catalog.
/// </summary>
public static DslCompletionCatalog Instance { get; } = new();
private DslCompletionCatalog()
{
// Initialize all completion categories
Keywords = BuildKeywords();
Functions = BuildFunctions();
Namespaces = BuildNamespaces();
ScoreFields = BuildScoreFields();
ScoreBuckets = BuildScoreBuckets();
ScoreFlags = BuildScoreFlags();
SbomFields = BuildSbomFields();
AdvisoryFields = BuildAdvisoryFields();
VexFields = BuildVexFields();
VexStatuses = BuildVexStatuses();
VexJustifications = BuildVexJustifications();
SignalFields = BuildSignalFields();
ReachabilityFields = BuildReachabilityFields();
Actions = BuildActions();
}
/// <summary>DSL keywords (policy, rule, when, then, etc.).</summary>
public ImmutableArray<DslCompletionItem> Keywords { get; }
/// <summary>Built-in functions.</summary>
public ImmutableArray<DslCompletionItem> Functions { get; }
/// <summary>Top-level namespaces (score, sbom, advisory, etc.).</summary>
public ImmutableArray<DslCompletionItem> Namespaces { get; }
/// <summary>Score namespace fields.</summary>
public ImmutableArray<DslCompletionItem> ScoreFields { get; }
/// <summary>Score bucket values.</summary>
public ImmutableArray<DslCompletionItem> ScoreBuckets { get; }
/// <summary>Score flag values.</summary>
public ImmutableArray<DslCompletionItem> ScoreFlags { get; }
/// <summary>SBOM namespace fields.</summary>
public ImmutableArray<DslCompletionItem> SbomFields { get; }
/// <summary>Advisory namespace fields.</summary>
public ImmutableArray<DslCompletionItem> AdvisoryFields { get; }
/// <summary>VEX namespace fields.</summary>
public ImmutableArray<DslCompletionItem> VexFields { get; }
/// <summary>VEX status values.</summary>
public ImmutableArray<DslCompletionItem> VexStatuses { get; }
/// <summary>VEX justification values.</summary>
public ImmutableArray<DslCompletionItem> VexJustifications { get; }
/// <summary>Signal namespace fields.</summary>
public ImmutableArray<DslCompletionItem> SignalFields { get; }
/// <summary>Reachability namespace fields.</summary>
public ImmutableArray<DslCompletionItem> ReachabilityFields { get; }
/// <summary>Action keywords and patterns.</summary>
public ImmutableArray<DslCompletionItem> Actions { get; }
private static ImmutableArray<DslCompletionItem> BuildKeywords() =>
[
new("policy", DslCompletionKind.Keyword, "policy \"${1:PolicyName}\" syntax \"stella-dsl@1\" {\n\t$0\n}",
"Define a new policy document.", "Policy Declaration", true),
new("rule", DslCompletionKind.Keyword, "rule ${1:rule_name} priority ${2:10} {\n\twhen ${3:condition}\n\tthen ${4:action}\n\tbecause \"${5:rationale}\";\n}",
"Define a policy rule with when/then logic.", "Rule Definition", true),
new("when", DslCompletionKind.Keyword, "when ${1:condition}",
"Condition clause for rule execution.", "Rule Condition", true),
new("then", DslCompletionKind.Keyword, "then ${1:action}",
"Action clause executed when condition is true.", "Rule Action", true),
new("else", DslCompletionKind.Keyword, "else ${1:action}",
"Fallback action clause.", "Rule Else Action", true),
new("because", DslCompletionKind.Keyword, "because \"${1:rationale}\"",
"Mandatory rationale for status/severity changes.", "Rule Rationale", true),
new("metadata", DslCompletionKind.Keyword, "metadata {\n\tdescription = \"${1:description}\"\n\ttags = [$2]\n}",
"Define metadata for the policy.", "Metadata Section", true),
new("settings", DslCompletionKind.Keyword, "settings {\n\t${1:shadow} = ${2:true};\n}",
"Configure evaluation settings.", "Settings Section", true),
new("profile", DslCompletionKind.Keyword, "profile ${1:severity} {\n\t$0\n}",
"Define a profile block for scoring modifiers.", "Profile Section", true),
new("and", DslCompletionKind.Keyword, "and", "Logical AND operator."),
new("or", DslCompletionKind.Keyword, "or", "Logical OR operator."),
new("not", DslCompletionKind.Keyword, "not", "Logical NOT operator."),
new("in", DslCompletionKind.Keyword, "in", "Membership test operator."),
new("between", DslCompletionKind.Keyword, "between ${1:min} and ${2:max}",
"Range comparison operator.", "Range Check", true),
new("contains", DslCompletionKind.Keyword, "contains", "Array contains operator."),
];
private static ImmutableArray<DslCompletionItem> BuildFunctions() =>
[
new("normalize_cvss", DslCompletionKind.Function, "normalize_cvss(${1:advisory})",
"Parse advisory for CVSS data and return severity scalar.", "Advisory → SeverityScalar", true),
new("severity_band", DslCompletionKind.Function, "severity_band(\"${1:severity}\")",
"Normalise severity string to band.", "string → SeverityBand", true),
new("risk_score", DslCompletionKind.Function, "risk_score(${1:base}, ${2:modifier})",
"Calculate risk by multiplying severity × trust × reachability.", "Variadic", true),
new("exists", DslCompletionKind.Function, "exists(${1:expression})",
"Return true when value is non-null/empty.", "→ bool", true),
new("coalesce", DslCompletionKind.Function, "coalesce(${1:a}, ${2:b})",
"Return first non-null argument.", "→ value", true),
new("days_between", DslCompletionKind.Function, "days_between(${1:dateA}, ${2:dateB})",
"Calculate absolute day difference (UTC).", "→ int", true),
];
private static ImmutableArray<DslCompletionItem> BuildNamespaces() =>
[
new("score", DslCompletionKind.Namespace, "score",
"Evidence-weighted score object. Access via score.value, score.bucket, etc."),
new("sbom", DslCompletionKind.Namespace, "sbom",
"SBOM (Software Bill of Materials) data for the finding."),
new("advisory", DslCompletionKind.Namespace, "advisory",
"Security advisory information."),
new("vex", DslCompletionKind.Namespace, "vex",
"VEX (Vulnerability Exploitability eXchange) statements."),
new("severity", DslCompletionKind.Namespace, "severity",
"Severity information for the finding."),
new("signals", DslCompletionKind.Namespace, "signals",
"Signal data including trust scores and runtime evidence."),
new("reachability", DslCompletionKind.Namespace, "reachability",
"Reachability analysis results."),
new("entropy", DslCompletionKind.Namespace, "entropy",
"Entropy and uncertainty metrics."),
new("env", DslCompletionKind.Namespace, "env",
"Environment context (dev, staging, prod, etc.)."),
new("run", DslCompletionKind.Namespace, "run",
"Runtime context (policy ID, tenant, timestamp)."),
];
private static ImmutableArray<DslCompletionItem> BuildScoreFields() =>
[
// Core score value
new("value", DslCompletionKind.Field, "value",
"Numeric score value (0-100). Use in comparisons like: score.value >= 80",
"decimal"),
// Bucket access
new("bucket", DslCompletionKind.Field, "bucket",
"Score bucket: ActNow, ScheduleNext, Investigate, or Watchlist.",
"string"),
new("is_act_now", DslCompletionKind.Field, "is_act_now",
"True if bucket is ActNow (highest priority).",
"bool"),
new("is_schedule_next", DslCompletionKind.Field, "is_schedule_next",
"True if bucket is ScheduleNext.",
"bool"),
new("is_investigate", DslCompletionKind.Field, "is_investigate",
"True if bucket is Investigate.",
"bool"),
new("is_watchlist", DslCompletionKind.Field, "is_watchlist",
"True if bucket is Watchlist (lowest priority).",
"bool"),
// Individual dimension scores (0-1 normalized)
new("rch", DslCompletionKind.Field, "rch",
"Reachability dimension score (0-1 normalized). Alias: reachability",
"double"),
new("reachability", DslCompletionKind.Field, "reachability",
"Reachability dimension score (0-1 normalized). Alias: rch",
"double"),
new("rts", DslCompletionKind.Field, "rts",
"Runtime signal dimension score (0-1 normalized). Alias: runtime",
"double"),
new("runtime", DslCompletionKind.Field, "runtime",
"Runtime signal dimension score (0-1 normalized). Alias: rts",
"double"),
new("bkp", DslCompletionKind.Field, "bkp",
"Backport dimension score (0-1 normalized). Alias: backport",
"double"),
new("backport", DslCompletionKind.Field, "backport",
"Backport dimension score (0-1 normalized). Alias: bkp",
"double"),
new("xpl", DslCompletionKind.Field, "xpl",
"Exploit evidence dimension score (0-1 normalized). Alias: exploit",
"double"),
new("exploit", DslCompletionKind.Field, "exploit",
"Exploit evidence dimension score (0-1 normalized). Alias: xpl",
"double"),
new("src", DslCompletionKind.Field, "src",
"Source trust dimension score (0-1 normalized). Alias: source_trust",
"double"),
new("source_trust", DslCompletionKind.Field, "source_trust",
"Source trust dimension score (0-1 normalized). Alias: src",
"double"),
new("mit", DslCompletionKind.Field, "mit",
"Mitigation dimension score (0-1 normalized). Alias: mitigation",
"double"),
new("mitigation", DslCompletionKind.Field, "mitigation",
"Mitigation dimension score (0-1 normalized). Alias: mit",
"double"),
// Flags
new("flags", DslCompletionKind.Field, "flags",
"Array of score flags (e.g., \"kev\", \"live-signal\", \"vendor-na\").",
"string[]"),
// Metadata
new("policy_digest", DslCompletionKind.Field, "policy_digest",
"SHA-256 digest of the policy used for scoring.",
"string"),
new("calculated_at", DslCompletionKind.Field, "calculated_at",
"ISO 8601 timestamp when score was calculated.",
"DateTime"),
new("explanations", DslCompletionKind.Field, "explanations",
"Array of human-readable explanations for the score.",
"string[]"),
];
private static ImmutableArray<DslCompletionItem> BuildScoreBuckets() =>
[
new("ActNow", DslCompletionKind.Constant, "\"ActNow\"",
"Highest priority: immediate action required."),
new("ScheduleNext", DslCompletionKind.Constant, "\"ScheduleNext\"",
"High priority: schedule remediation soon."),
new("Investigate", DslCompletionKind.Constant, "\"Investigate\"",
"Medium priority: requires investigation."),
new("Watchlist", DslCompletionKind.Constant, "\"Watchlist\"",
"Low priority: monitor for changes."),
];
private static ImmutableArray<DslCompletionItem> BuildScoreFlags() =>
[
new("kev", DslCompletionKind.Constant, "\"kev\"",
"Known Exploited Vulnerability (CISA KEV list)."),
new("live-signal", DslCompletionKind.Constant, "\"live-signal\"",
"Runtime evidence detected active exploitation."),
new("vendor-na", DslCompletionKind.Constant, "\"vendor-na\"",
"Vendor confirms not affected."),
new("epss-high", DslCompletionKind.Constant, "\"epss-high\"",
"High EPSS probability score."),
new("reachable", DslCompletionKind.Constant, "\"reachable\"",
"Code is statically or dynamically reachable."),
new("unreachable", DslCompletionKind.Constant, "\"unreachable\"",
"Code is confirmed unreachable."),
new("backported", DslCompletionKind.Constant, "\"backported\"",
"Fix has been backported by vendor."),
];
private static ImmutableArray<DslCompletionItem> BuildSbomFields() =>
[
new("purl", DslCompletionKind.Field, "purl", "Package URL of the component."),
new("name", DslCompletionKind.Field, "name", "Component name."),
new("version", DslCompletionKind.Field, "version", "Component version."),
new("licenses", DslCompletionKind.Field, "licenses", "Component licenses."),
new("layerDigest", DslCompletionKind.Field, "layerDigest", "Container layer digest."),
new("tags", DslCompletionKind.Field, "tags", "Component tags."),
new("usedByEntrypoint", DslCompletionKind.Field, "usedByEntrypoint",
"Whether component is used by entrypoint."),
];
private static ImmutableArray<DslCompletionItem> BuildAdvisoryFields() =>
[
new("id", DslCompletionKind.Field, "id", "Advisory identifier."),
new("source", DslCompletionKind.Field, "source", "Advisory source (GHSA, OSV, etc.)."),
new("aliases", DslCompletionKind.Field, "aliases", "Advisory aliases (CVE, etc.)."),
new("severity", DslCompletionKind.Field, "severity", "Advisory severity."),
new("cvss", DslCompletionKind.Field, "cvss", "CVSS score."),
new("publishedAt", DslCompletionKind.Field, "publishedAt", "Publication date."),
new("modifiedAt", DslCompletionKind.Field, "modifiedAt", "Last modification date."),
];
private static ImmutableArray<DslCompletionItem> BuildVexFields() =>
[
new("status", DslCompletionKind.Field, "status", "VEX status."),
new("justification", DslCompletionKind.Field, "justification", "VEX justification."),
new("statementId", DslCompletionKind.Field, "statementId", "VEX statement ID."),
new("timestamp", DslCompletionKind.Field, "timestamp", "VEX timestamp."),
new("scope", DslCompletionKind.Field, "scope", "VEX scope."),
new("any", DslCompletionKind.Function, "any(${1:predicate})",
"True if any VEX statement satisfies the predicate.", "(Statement → bool) → bool", true),
new("all", DslCompletionKind.Function, "all(${1:predicate})",
"True if all VEX statements satisfy the predicate.", "(Statement → bool) → bool", true),
new("latest", DslCompletionKind.Function, "latest()",
"Return the lexicographically newest VEX statement.", "→ Statement", true),
new("count", DslCompletionKind.Function, "count(${1:predicate})",
"Count VEX statements matching predicate.", "→ int", true),
];
private static ImmutableArray<DslCompletionItem> BuildVexStatuses() =>
[
new("affected", DslCompletionKind.Constant, "\"affected\"",
"Component is affected by the vulnerability."),
new("not_affected", DslCompletionKind.Constant, "\"not_affected\"",
"Component is not affected."),
new("fixed", DslCompletionKind.Constant, "\"fixed\"",
"Vulnerability has been fixed."),
new("suppressed", DslCompletionKind.Constant, "\"suppressed\"",
"Finding is suppressed."),
new("under_investigation", DslCompletionKind.Constant, "\"under_investigation\"",
"Under investigation."),
new("escalated", DslCompletionKind.Constant, "\"escalated\"",
"Finding has been escalated."),
];
private static ImmutableArray<DslCompletionItem> BuildVexJustifications() =>
[
new("component_not_present", DslCompletionKind.Constant, "\"component_not_present\"",
"Component is not present in the product."),
new("vulnerable_code_not_present", DslCompletionKind.Constant, "\"vulnerable_code_not_present\"",
"Vulnerable code is not present."),
new("vulnerable_code_not_in_execute_path", DslCompletionKind.Constant, "\"vulnerable_code_not_in_execute_path\"",
"Vulnerable code is not in execution path."),
new("vulnerable_code_cannot_be_controlled_by_adversary", DslCompletionKind.Constant, "\"vulnerable_code_cannot_be_controlled_by_adversary\"",
"Vulnerable code cannot be controlled by adversary."),
new("inline_mitigations_already_exist", DslCompletionKind.Constant, "\"inline_mitigations_already_exist\"",
"Inline mitigations already exist."),
];
private static ImmutableArray<DslCompletionItem> BuildSignalFields() =>
[
new("trust_score", DslCompletionKind.Field, "trust_score",
"Trust score (01)."),
new("reachability.state", DslCompletionKind.Field, "reachability.state",
"Reachability state."),
new("reachability.score", DslCompletionKind.Field, "reachability.score",
"Reachability score (01)."),
new("entropy_penalty", DslCompletionKind.Field, "entropy_penalty",
"Entropy penalty (00.3)."),
new("uncertainty.level", DslCompletionKind.Field, "uncertainty.level",
"Uncertainty level (U1U3)."),
new("runtime_hits", DslCompletionKind.Field, "runtime_hits",
"Runtime hit indicator."),
];
private static ImmutableArray<DslCompletionItem> BuildReachabilityFields() =>
[
new("state", DslCompletionKind.Field, "state",
"Reachability state (reachable, unreachable, unknown)."),
new("score", DslCompletionKind.Field, "score",
"Reachability confidence score (01)."),
new("callchain", DslCompletionKind.Field, "callchain",
"Call chain evidence if reachable."),
new("tool", DslCompletionKind.Field, "tool",
"Tool that determined reachability."),
];
private static ImmutableArray<DslCompletionItem> BuildActions() =>
[
new("status :=", DslCompletionKind.Keyword, "status := \"${1:status}\"",
"Set the finding status.", "Status Assignment", true),
new("severity :=", DslCompletionKind.Keyword, "severity := ${1:expression}",
"Set the finding severity.", "Severity Assignment", true),
new("ignore", DslCompletionKind.Keyword, "ignore until ${1:date} because \"${2:rationale}\"",
"Temporarily suppress finding until date.", "Ignore Action", true),
new("escalate", DslCompletionKind.Keyword, "escalate to severity_band(\"${1:severity}\") when ${2:condition}",
"Escalate severity when condition is true.", "Escalate Action", true),
new("warn", DslCompletionKind.Keyword, "warn message \"${1:text}\"",
"Add warning verdict.", "Warn Action", true),
new("defer", DslCompletionKind.Keyword, "defer until ${1:condition}",
"Defer finding evaluation.", "Defer Action", true),
new("annotate", DslCompletionKind.Keyword, "annotate ${1:key} := ${2:value}",
"Add free-form annotation to explain payload.", "Annotate Action", true),
new("requireVex", DslCompletionKind.Keyword, "requireVex {\n\tvendors = [${1:\"Vendor\"}]\n\tjustifications = [${2:\"component_not_present\"}]\n}",
"Require matching VEX evidence.", "Require VEX Action", true),
];
}

View File

@@ -66,6 +66,24 @@ public sealed record BudgetCheckResult
public IReadOnlyDictionary<UnknownReasonCode, BudgetViolation> Violations { get; init; }
= new Dictionary<UnknownReasonCode, BudgetViolation>();
public string? Message { get; init; }
/// <summary>
/// The budget configuration that was applied during evaluation.
/// Required for attestation to capture the policy at decision time.
/// </summary>
public UnknownBudget? Budget { get; init; }
/// <summary>
/// Breakdown of unknown counts by reason code.
/// Required for attestation detail.
/// </summary>
public IReadOnlyDictionary<UnknownReasonCode, int> CountsByReason { get; init; }
= new Dictionary<UnknownReasonCode, int>();
/// <summary>
/// Cumulative uncertainty score across all unknowns.
/// </summary>
public double CumulativeUncertainty { get; init; }
}
/// <summary>

View File

@@ -92,6 +92,9 @@ public sealed class UnknownBudgetService : IUnknownBudgetService
? null
: budget.ExceededMessage ?? $"Unknown budget exceeded: {total} unknowns in {normalized}";
// Calculate cumulative uncertainty from unknown uncertainty factors
var cumulativeUncertainty = safeUnknowns.Sum(u => (double)u.UncertaintyFactor);
return new BudgetCheckResult
{
IsWithinBudget = isWithinBudget,
@@ -99,7 +102,10 @@ public sealed class UnknownBudgetService : IUnknownBudgetService
TotalUnknowns = total,
TotalLimit = budget.TotalLimit,
Violations = violations,
Message = message
Message = message,
Budget = budget,
CountsByReason = byReason,
CumulativeUncertainty = cumulativeUncertainty
};
}

View File

@@ -0,0 +1,244 @@
// -----------------------------------------------------------------------------
// VerdictBudgetCheckTests.cs
// Sprint: SPRINT_8200_0001_0006_budget_threshold_attestation
// Tasks: BUDGET-8200-011, BUDGET-8200-012, BUDGET-8200-013
// Description: Unit tests for budget check attestation
// -----------------------------------------------------------------------------
using FluentAssertions;
using StellaOps.Policy.Engine.Attestation;
using Xunit;
namespace StellaOps.Policy.Engine.Tests.Attestation;
/// <summary>
/// Unit tests for <see cref="VerdictBudgetCheck"/>: construction, violation ordering,
/// deterministic config hashing, input normalization, and inclusion in
/// <see cref="VerdictPredicate"/>.
/// </summary>
public class VerdictBudgetCheckTests
{
    // Fixed timestamp so test data is deterministic — avoids reading the wall clock
    // (DateTimeOffset.UtcNow) inside tests.
    private static readonly DateTimeOffset FixedEvaluatedAt =
        new(2025, 1, 1, 0, 0, 0, TimeSpan.Zero);

    [Fact]
    public void VerdictBudgetCheck_WithAllFields_CreatesSuccessfully()
    {
        // Arrange
        var config = new VerdictBudgetConfig(
            maxUnknownCount: 10,
            maxCumulativeUncertainty: 2.5,
            action: "warn",
            reasonLimits: new Dictionary<string, int> { ["Reachability"] = 5 });
        var actualCounts = new VerdictBudgetActualCounts(
            total: 3,
            cumulativeUncertainty: 1.2,
            byReason: new Dictionary<string, int> { ["Reachability"] = 2 });
        var configHash = VerdictBudgetCheck.ComputeConfigHash(config);

        // Act
        var budgetCheck = new VerdictBudgetCheck(
            environment: "production",
            config: config,
            actualCounts: actualCounts,
            result: "pass",
            configHash: configHash,
            evaluatedAt: FixedEvaluatedAt,
            violations: null);

        // Assert
        budgetCheck.Environment.Should().Be("production");
        budgetCheck.Config.MaxUnknownCount.Should().Be(10);
        budgetCheck.ActualCounts.Total.Should().Be(3);
        budgetCheck.Result.Should().Be("pass");
        budgetCheck.ConfigHash.Should().StartWith("sha256:");
        budgetCheck.Violations.Should().BeEmpty();
    }

    [Fact]
    public void VerdictBudgetCheck_WithViolations_IncludesAllViolations()
    {
        // Arrange
        var config = new VerdictBudgetConfig(5, 2.0, "fail");
        var actualCounts = new VerdictBudgetActualCounts(10, 3.0);
        var violations = new[]
        {
            new VerdictBudgetViolation("total", 5, 10),
            new VerdictBudgetViolation("reason", 3, 5, "Reachability")
        };

        // Act
        var budgetCheck = new VerdictBudgetCheck(
            "staging",
            config,
            actualCounts,
            "fail",
            VerdictBudgetCheck.ComputeConfigHash(config),
            FixedEvaluatedAt,
            violations);

        // Assert
        budgetCheck.Violations.Should().HaveCount(2);
        budgetCheck.Violations[0].Type.Should().Be("reason"); // Sorted
        budgetCheck.Violations[1].Type.Should().Be("total");
    }

    [Fact]
    public void ComputeConfigHash_SameConfig_ProducesSameHash()
    {
        // Arrange
        var config1 = new VerdictBudgetConfig(10, 2.5, "warn",
            new Dictionary<string, int> { ["Reachability"] = 5 });
        var config2 = new VerdictBudgetConfig(10, 2.5, "warn",
            new Dictionary<string, int> { ["Reachability"] = 5 });

        // Act
        var hash1 = VerdictBudgetCheck.ComputeConfigHash(config1);
        var hash2 = VerdictBudgetCheck.ComputeConfigHash(config2);

        // Assert
        hash1.Should().Be(hash2);
    }

    [Fact]
    public void ComputeConfigHash_DifferentConfig_ProducesDifferentHash()
    {
        // Arrange
        var config1 = new VerdictBudgetConfig(10, 2.5, "warn");
        var config2 = new VerdictBudgetConfig(20, 2.5, "warn");

        // Act
        var hash1 = VerdictBudgetCheck.ComputeConfigHash(config1);
        var hash2 = VerdictBudgetCheck.ComputeConfigHash(config2);

        // Assert
        hash1.Should().NotBe(hash2);
    }

    [Fact]
    public void ComputeConfigHash_IsDeterministic()
    {
        // Arrange
        var config = new VerdictBudgetConfig(10, 2.5, "warn",
            new Dictionary<string, int>
            {
                ["Reachability"] = 5,
                ["Identity"] = 3,
                ["Provenance"] = 2
            });

        // Act - compute multiple times
        var hashes = Enumerable.Range(0, 10)
            .Select(_ => VerdictBudgetCheck.ComputeConfigHash(config))
            .Distinct()
            .ToList();

        // Assert
        hashes.Should().HaveCount(1, "same config should always produce same hash");
    }

    [Fact]
    public void VerdictBudgetConfig_NormalizesReasonLimits()
    {
        // Arrange
        var limits = new Dictionary<string, int>
        {
            ["  Reachability  "] = 5,
            ["  Identity  "] = 3,
            [""] = 0 // Should be filtered out
        };

        // Act
        var config = new VerdictBudgetConfig(10, 2.5, "warn", limits);

        // Assert
        config.ReasonLimits.Should().ContainKey("Reachability");
        config.ReasonLimits.Should().ContainKey("Identity");
        config.ReasonLimits.Should().NotContainKey("");
    }

    [Fact]
    public void VerdictBudgetActualCounts_NormalizesByReason()
    {
        // Arrange
        var byReason = new Dictionary<string, int>
        {
            ["  Reachability  "] = 5,
            ["  Identity  "] = 3
        };

        // Act
        var counts = new VerdictBudgetActualCounts(8, 2.0, byReason);

        // Assert
        counts.ByReason.Should().ContainKey("Reachability");
        counts.ByReason.Should().ContainKey("Identity");
    }

    [Fact]
    public void VerdictBudgetViolation_WithReason_IncludesReason()
    {
        // Act
        var violation = new VerdictBudgetViolation("reason", 5, 10, "Reachability");

        // Assert
        violation.Type.Should().Be("reason");
        violation.Limit.Should().Be(5);
        violation.Actual.Should().Be(10);
        violation.Reason.Should().Be("Reachability");
    }

    [Fact]
    public void VerdictBudgetViolation_WithoutReason_HasNullReason()
    {
        // Act
        var violation = new VerdictBudgetViolation("total", 5, 10);

        // Assert
        violation.Reason.Should().BeNull();
    }

    [Fact]
    public void DifferentEnvironments_ProduceDifferentBudgetChecks()
    {
        // Arrange
        var config = new VerdictBudgetConfig(10, 2.5, "warn");
        var actualCounts = new VerdictBudgetActualCounts(3, 1.2);
        var configHash = VerdictBudgetCheck.ComputeConfigHash(config);
        var now = FixedEvaluatedAt;

        // Act
        var prodCheck = new VerdictBudgetCheck("production", config, actualCounts, "pass", configHash, now);
        var devCheck = new VerdictBudgetCheck("development", config, actualCounts, "pass", configHash, now);

        // Assert
        prodCheck.Environment.Should().Be("production");
        devCheck.Environment.Should().Be("development");
        prodCheck.ConfigHash.Should().Be(devCheck.ConfigHash, "same config should have same hash");
    }

    [Fact]
    public void VerdictPredicate_IncludesBudgetCheck()
    {
        // Arrange
        var config = new VerdictBudgetConfig(10, 2.5, "warn");
        var actualCounts = new VerdictBudgetActualCounts(3, 1.2);
        var budgetCheck = new VerdictBudgetCheck(
            "production",
            config,
            actualCounts,
            "pass",
            VerdictBudgetCheck.ComputeConfigHash(config),
            FixedEvaluatedAt);

        // Act
        var predicate = new VerdictPredicate(
            tenantId: "tenant-1",
            policyId: "policy-1",
            policyVersion: 1,
            runId: "run-1",
            findingId: "finding-1",
            evaluatedAt: FixedEvaluatedAt,
            verdict: new VerdictInfo("passed", "low", 25.0),
            budgetCheck: budgetCheck);

        // Assert
        predicate.BudgetCheck.Should().NotBeNull();
        predicate.BudgetCheck!.Environment.Should().Be("production");
        predicate.BudgetCheck!.Result.Should().Be("pass");
    }
}

View File

@@ -6,7 +6,7 @@
using FluentAssertions;
using StellaOps.Policy.Engine;
using StellaOps.DeltaVerdict;
using StellaOps.Excititor.Core.Vex;
using StellaOps.Excititor.Core;
using StellaOps.Policy.Unknowns;
using Xunit;

View File

@@ -0,0 +1,608 @@
// -----------------------------------------------------------------------------
// VerdictSummaryTests.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-024
// Description: Unit tests for VerdictSummary extension methods
// -----------------------------------------------------------------------------
using System.Collections.Immutable;
using FluentAssertions;
using StellaOps.Policy.Confidence.Models;
using StellaOps.Policy.Engine.Evaluation;
using StellaOps.Signals.EvidenceWeightedScore;
using Xunit;
namespace StellaOps.Policy.Engine.Tests.Evaluation;
/// <summary>
/// Unit tests for <see cref="VerdictSummary"/> and <see cref="VerdictSummaryExtensions"/>.
/// </summary>
public class VerdictSummaryTests
{
#region ToSummary Tests
[Fact]
public void ToSummary_WithFullEws_ReturnsCompleteSummary()
{
// Arrange
var ews = CreateEwsResult(
score: 85,
bucket: ScoreBucket.ScheduleNext,
flags: ["kev", "reachable"],
explanations: ["High EPSS score", "Confirmed reachable"]);
var result = CreatePolicyResult(
status: "affected",
severity: "High",
matched: true,
ruleName: "block-kev",
ews: ews);
// Act
var summary = result.ToSummary();
// Assert
summary.Status.Should().Be("affected");
summary.Severity.Should().Be("High");
summary.RuleMatched.Should().BeTrue();
summary.RuleName.Should().Be("block-kev");
summary.ScoreBucket.Should().Be("ScheduleNext");
summary.Score.Should().Be(85);
summary.Flags.Should().BeEquivalentTo(["kev", "reachable"]);
summary.Explanations.Should().BeEquivalentTo(["High EPSS score", "Confirmed reachable"]);
}
[Fact]
public void ToSummary_WithoutEws_ReturnsPartialSummary()
{
// Arrange
var result = CreatePolicyResult(
status: "not_affected",
severity: "Medium",
matched: false,
ews: null);
// Act
var summary = result.ToSummary();
// Assert
summary.Status.Should().Be("not_affected");
summary.Severity.Should().Be("Medium");
summary.RuleMatched.Should().BeFalse();
summary.ScoreBucket.Should().BeNull();
summary.Score.Should().BeNull();
summary.TopFactors.Should().BeEmpty();
summary.Flags.Should().BeEmpty();
}
[Fact]
public void ToSummary_ExtractsTopFactorsOrderedByContribution()
{
// Arrange
var breakdown = new List<DimensionContribution>
{
CreateContribution("Runtime", "RTS", 0.8, 20, 16.0),
CreateContribution("Reachability", "RCH", 0.9, 25, 22.5),
CreateContribution("Exploit", "XPL", 0.5, 15, 7.5),
CreateContribution("Mitigation", "MIT", 0.3, 10, -3.0, isSubtractive: true),
};
var ews = CreateEwsResultWithBreakdown(85, ScoreBucket.ScheduleNext, breakdown);
var result = CreatePolicyResult(ews: ews);
// Act
var summary = result.ToSummary();
// Assert
summary.TopFactors.Should().HaveCount(4);
summary.TopFactors[0].Symbol.Should().Be("RCH"); // 22.5 contribution
summary.TopFactors[1].Symbol.Should().Be("RTS"); // 16.0 contribution
summary.TopFactors[2].Symbol.Should().Be("XPL"); // 7.5 contribution
summary.TopFactors[3].Symbol.Should().Be("MIT"); // -3.0 (abs = 3.0)
}
[Fact]
public void ToSummary_LimitsTopFactorsToFive()
{
// Arrange
var breakdown = new List<DimensionContribution>
{
CreateContribution("Reachability", "RCH", 0.9, 25, 22.5),
CreateContribution("Runtime", "RTS", 0.8, 20, 16.0),
CreateContribution("Exploit", "XPL", 0.5, 15, 7.5),
CreateContribution("Source", "SRC", 0.4, 10, 4.0),
CreateContribution("Backport", "BKP", 0.3, 10, 3.0),
CreateContribution("Mitigation", "MIT", 0.2, 5, -1.0, isSubtractive: true),
};
var ews = CreateEwsResultWithBreakdown(85, ScoreBucket.ScheduleNext, breakdown);
var result = CreatePolicyResult(ews: ews);
// Act
var summary = result.ToSummary();
// Assert
summary.TopFactors.Should().HaveCount(5);
}
[Fact]
public void ToSummary_IncludesGuardrailsApplied()
{
// Arrange
var ews = CreateEwsResult(
score: 65,
bucket: ScoreBucket.Investigate,
guardrails: new AppliedGuardrails
{
SpeculativeCap = true,
OriginalScore = 85,
AdjustedScore = 65
});
var result = CreatePolicyResult(ews: ews);
// Act
var summary = result.ToSummary();
// Assert
summary.GuardrailsApplied.Should().BeTrue();
}
[Fact]
public void ToSummary_IncludesExceptionApplied()
{
// Arrange
var result = CreatePolicyResult(
exception: new PolicyExceptionApplication(
ExceptionId: "EXC-001",
EffectId: "effect-001",
EffectType: PolicyExceptionEffectType.Suppress,
OriginalStatus: "affected",
OriginalSeverity: "high",
AppliedStatus: "not_affected",
AppliedSeverity: null,
Metadata: ImmutableDictionary<string, string>.Empty));
// Act
var summary = result.ToSummary();
// Assert
summary.ExceptionApplied.Should().BeTrue();
}
[Fact]
public void ToSummary_IncludesLegacyConfidence()
{
// Arrange - Value=0.75 gives Tier=High
var confidence = new ConfidenceScore
{
Value = 0.75m,
Factors = [],
Explanation = "Medium confidence test"
};
var result = CreatePolicyResult(confidence: confidence);
// Act
var summary = result.ToSummary();
// Assert
summary.ConfidenceScore.Should().Be(0.75m);
summary.ConfidenceBand.Should().Be("High");
}
#endregion
#region ToMinimalSummary Tests
[Fact]
public void ToMinimalSummary_IncludesOnlyEssentialFields()
{
// Arrange
var ews = CreateEwsResult(
score: 92,
bucket: ScoreBucket.ActNow,
flags: ["live-signal", "kev"],
explanations: ["Runtime exploitation detected"]);
var result = CreatePolicyResult(
status: "affected",
severity: "Critical",
matched: true,
ruleName: "block-live-signal",
ews: ews);
// Act
var summary = result.ToMinimalSummary();
// Assert
summary.Status.Should().Be("affected");
summary.Severity.Should().Be("Critical");
summary.RuleMatched.Should().BeTrue();
summary.RuleName.Should().Be("block-live-signal");
summary.ScoreBucket.Should().Be("ActNow");
summary.Score.Should().Be(92);
// Minimal summary should NOT include top factors, flags, explanations
summary.TopFactors.Should().BeEmpty();
summary.Flags.Should().BeEmpty();
summary.Explanations.Should().BeEmpty();
}
#endregion
#region GetPrimaryFactor Tests
[Fact]
public void GetPrimaryFactor_ReturnsHighestContributor()
{
// Arrange
var breakdown = new List<DimensionContribution>
{
CreateContribution("Runtime", "RTS", 0.8, 20, 16.0),
CreateContribution("Reachability", "RCH", 0.9, 25, 22.5),
CreateContribution("Exploit", "XPL", 0.5, 15, 7.5),
};
var ews = CreateEwsResultWithBreakdown(85, ScoreBucket.ScheduleNext, breakdown);
// Act
var primary = ews.GetPrimaryFactor();
// Assert
primary.Should().NotBeNull();
primary!.Symbol.Should().Be("RCH");
primary.Contribution.Should().Be(22.5);
}
[Fact]
public void GetPrimaryFactor_WithNullEws_ReturnsNull()
{
// Arrange
EvidenceWeightedScoreResult? ews = null;
// Act
var primary = ews.GetPrimaryFactor();
// Assert
primary.Should().BeNull();
}
[Fact]
public void GetPrimaryFactor_WithEmptyBreakdown_ReturnsNull()
{
// Arrange
var ews = CreateEwsResultWithBreakdown(50, ScoreBucket.Investigate, []);
// Act
var primary = ews.GetPrimaryFactor();
// Assert
primary.Should().BeNull();
}
#endregion
#region FormatTriageLine Tests
[Fact]
public void FormatTriageLine_IncludesAllComponents()
{
// Arrange
var summary = new VerdictSummary
{
Status = "affected",
Score = 92,
ScoreBucket = "ActNow",
TopFactors =
[
new VerdictFactor { Dimension = "Reachability", Symbol = "RCH", Contribution = 25, Weight = 25, InputValue = 1.0 },
new VerdictFactor { Dimension = "Runtime", Symbol = "RTS", Contribution = 20, Weight = 20, InputValue = 1.0 },
new VerdictFactor { Dimension = "Exploit", Symbol = "XPL", Contribution = 15, Weight = 15, InputValue = 1.0 },
],
Flags = ["live-signal", "kev"],
};
// Act
var line = summary.FormatTriageLine("CVE-2024-1234");
// Assert
line.Should().Contain("[ActNow 92]");
line.Should().Contain("CVE-2024-1234:");
line.Should().Contain("RCH(+25)");
line.Should().Contain("RTS(+20)");
line.Should().Contain("XPL(+15)");
line.Should().Contain("| live-signal, kev");
}
[Fact]
public void FormatTriageLine_HandlesNegativeContributions()
{
// Arrange
var summary = new VerdictSummary
{
Status = "affected",
Score = 45,
ScoreBucket = "Investigate",
TopFactors =
[
new VerdictFactor { Dimension = "Mitigation", Symbol = "MIT", Contribution = -15, Weight = 15, InputValue = 1.0, IsSubtractive = true },
],
};
// Act
var line = summary.FormatTriageLine();
// Assert
line.Should().Contain("MIT(-15)");
}
[Fact]
public void FormatTriageLine_WithoutScore_OmitsScoreSection()
{
// Arrange
var summary = new VerdictSummary
{
Status = "affected",
};
// Act
var line = summary.FormatTriageLine();
// Assert
line.Should().NotContain("[");
line.Should().NotContain("]");
}
#endregion
#region GetBucketExplanation Tests
[Fact]
public void GetBucketExplanation_ActNow_ReturnsUrgentMessage()
{
// Arrange
var summary = new VerdictSummary
{
Status = "affected",
Score = 95,
ScoreBucket = "ActNow",
};
// Act
var explanation = summary.GetBucketExplanation();
// Assert
explanation.Should().Contain("95/100");
explanation.Should().Contain("Strong evidence");
explanation.Should().Contain("Immediate action");
}
[Fact]
public void GetBucketExplanation_WithKevFlag_MentionsKev()
{
// Arrange
var summary = new VerdictSummary
{
Status = "affected",
Score = 85,
ScoreBucket = "ScheduleNext",
Flags = ["kev"],
};
// Act
var explanation = summary.GetBucketExplanation();
// Assert
explanation.Should().Contain("Known Exploited Vulnerability");
}
[Fact]
public void GetBucketExplanation_WithLiveSignal_ShowsAlert()
{
// Arrange
var summary = new VerdictSummary
{
Status = "affected",
Score = 92,
ScoreBucket = "ActNow",
Flags = ["live-signal"],
};
// Act
var explanation = summary.GetBucketExplanation();
// Assert
explanation.Should().Contain("ALERT");
explanation.Should().Contain("Live exploitation");
}
[Fact]
public void GetBucketExplanation_WithVendorNa_MentionsVendorConfirmation()
{
// Arrange
var summary = new VerdictSummary
{
Status = "not_affected",
Score = 15,
ScoreBucket = "Watchlist",
Flags = ["vendor-na"],
};
// Act
var explanation = summary.GetBucketExplanation();
// Assert
explanation.Should().Contain("Vendor has confirmed not affected");
}
[Fact]
public void GetBucketExplanation_WithPrimaryReachabilityFactor_MentionsReachability()
{
// Arrange
var summary = new VerdictSummary
{
Status = "affected",
Score = 75,
ScoreBucket = "ScheduleNext",
TopFactors =
[
new VerdictFactor { Dimension = "Reachability", Symbol = "RCH", Contribution = 25, Weight = 25, InputValue = 1.0 },
],
};
// Act
var explanation = summary.GetBucketExplanation();
// Assert
explanation.Should().Contain("Reachability analysis is the primary driver");
}
[Fact]
public void GetBucketExplanation_WithoutScore_ReturnsNotAvailable()
{
// Arrange
var summary = new VerdictSummary
{
Status = "affected",
};
// Act
var explanation = summary.GetBucketExplanation();
// Assert
explanation.Should().Be("No evidence-weighted score available.");
}
#endregion
#region Null Safety Tests
[Fact]
public void ToSummary_NullResult_ThrowsArgumentNullException()
{
// Arrange
PolicyEvaluationResult? result = null;
// Act & Assert
var action = () => result!.ToSummary();
action.Should().Throw<ArgumentNullException>();
}
[Fact]
public void ToMinimalSummary_NullResult_ThrowsArgumentNullException()
{
// Arrange
PolicyEvaluationResult? result = null;
// Act & Assert
var action = () => result!.ToMinimalSummary();
action.Should().Throw<ArgumentNullException>();
}
[Fact]
public void FormatTriageLine_NullSummary_ThrowsArgumentNullException()
{
// Arrange
VerdictSummary? summary = null;
// Act & Assert
var action = () => summary!.FormatTriageLine();
action.Should().Throw<ArgumentNullException>();
}
[Fact]
public void GetBucketExplanation_NullSummary_ThrowsArgumentNullException()
{
// Arrange
VerdictSummary? summary = null;
// Act & Assert
var action = () => summary!.GetBucketExplanation();
action.Should().Throw<ArgumentNullException>();
}
#endregion
#region Helpers
private static PolicyEvaluationResult CreatePolicyResult(
string status = "affected",
string? severity = null,
bool matched = false,
string? ruleName = null,
int? priority = null,
EvidenceWeightedScoreResult? ews = null,
ConfidenceScore? confidence = null,
PolicyExceptionApplication? exception = null)
{
return new PolicyEvaluationResult(
Matched: matched,
Status: status,
Severity: severity,
RuleName: ruleName,
Priority: priority,
Annotations: ImmutableDictionary<string, string>.Empty,
Warnings: ImmutableArray<string>.Empty,
AppliedException: exception,
Confidence: confidence,
EvidenceWeightedScore: ews);
}
private static EvidenceWeightedScoreResult CreateEwsResult(
int score = 50,
ScoreBucket bucket = ScoreBucket.Investigate,
IEnumerable<string>? flags = null,
IEnumerable<string>? explanations = null,
AppliedGuardrails? guardrails = null)
{
return new EvidenceWeightedScoreResult
{
FindingId = "test-finding-001",
Score = score,
Bucket = bucket,
Inputs = new EvidenceInputValues(0.5, 0.5, 0.5, 0.5, 0.5, 0.5),
Weights = EvidenceWeights.Default,
Breakdown = [],
Flags = flags?.ToList() ?? [],
Explanations = explanations?.ToList() ?? [],
Caps = guardrails ?? AppliedGuardrails.None(score),
PolicyDigest = "sha256:abc123",
CalculatedAt = DateTimeOffset.UtcNow,
};
}
private static EvidenceWeightedScoreResult CreateEwsResultWithBreakdown(
int score,
ScoreBucket bucket,
IReadOnlyList<DimensionContribution> breakdown)
{
return new EvidenceWeightedScoreResult
{
FindingId = "test-finding-001",
Score = score,
Bucket = bucket,
Inputs = new EvidenceInputValues(0.5, 0.5, 0.5, 0.5, 0.5, 0.5),
Weights = EvidenceWeights.Default,
Breakdown = breakdown,
Flags = [],
Explanations = [],
Caps = AppliedGuardrails.None(score),
PolicyDigest = "sha256:abc123",
CalculatedAt = DateTimeOffset.UtcNow,
};
}
/// <summary>
/// Builds a single <c>DimensionContribution</c> row for breakdown assertions.
/// </summary>
private static DimensionContribution CreateContribution(
    string dimension,
    string symbol,
    double inputValue,
    double weight,
    double contribution,
    bool isSubtractive = false)
    => new()
    {
        Dimension = dimension,
        Symbol = symbol,
        InputValue = inputValue,
        Weight = weight,
        Contribution = contribution,
        IsSubtractive = isSubtractive,
    };
#endregion
}

View File

@@ -4,6 +4,7 @@
using FluentAssertions;
using FsCheck;
using FsCheck.Xunit;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using StellaOps.Excititor.Core;
using StellaOps.Excititor.Core.Lattice;

View File

@@ -0,0 +1,571 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-008 - Unit tests for enricher invocation, context population, caching

using FluentAssertions;
using Microsoft.Extensions.Options;
using StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
using Xunit;

namespace StellaOps.Policy.Engine.Tests.Scoring.EvidenceWeightedScore;

/// <summary>
/// Unit tests for EvidenceWeightedScoreEnricher: feature-flag gating, caching,
/// score calculation, async/batch entry points, policy overrides, and error handling.
/// </summary>
[Trait("Category", "Unit")]
[Trait("Sprint", "8200.0012.0003")]
public sealed class EvidenceWeightedScoreEnricherTests
{
    private readonly TestNormalizerAggregator _aggregator;
    private readonly EvidenceWeightedScoreCalculator _calculator;
    private readonly TestPolicyProvider _policyProvider;

    public EvidenceWeightedScoreEnricherTests()
    {
        _aggregator = new TestNormalizerAggregator();
        _calculator = new EvidenceWeightedScoreCalculator();
        _policyProvider = new TestPolicyProvider();
    }

    #region Feature Flag Tests

    [Fact(DisplayName = "Enrich returns skipped when feature disabled")]
    public void Enrich_WhenDisabled_ReturnsSkipped()
    {
        // Arrange
        var options = CreateOptions(enabled: false);
        var enricher = CreateEnricher(options);
        var evidence = CreateEvidence("CVE-2024-1234@pkg:npm/test@1.0.0");

        // Act
        var result = enricher.Enrich(evidence);

        // Assert - no score is produced, but the finding id is still echoed back.
        result.Should().NotBeNull();
        result.IsSuccess.Should().BeFalse();
        result.Score.Should().BeNull();
        result.FindingId.Should().Be("CVE-2024-1234@pkg:npm/test@1.0.0");
    }

    [Fact(DisplayName = "Enrich calculates score when feature enabled")]
    public void Enrich_WhenEnabled_CalculatesScore()
    {
        // Arrange
        var options = CreateOptions(enabled: true);
        var enricher = CreateEnricher(options);
        var evidence = CreateEvidence("CVE-2024-1234@pkg:npm/test@1.0.0");

        // Act
        var result = enricher.Enrich(evidence);

        // Assert
        result.Should().NotBeNull();
        result.IsSuccess.Should().BeTrue();
        result.Score.Should().NotBeNull();
        result.FindingId.Should().Be("CVE-2024-1234@pkg:npm/test@1.0.0");
        result.FromCache.Should().BeFalse();
    }

    [Fact(DisplayName = "IsEnabled reflects options")]
    public void IsEnabled_ReflectsOptions()
    {
        // Arrange
        var enabledOptions = CreateOptions(enabled: true);
        var disabledOptions = CreateOptions(enabled: false);
        var enabledEnricher = CreateEnricher(enabledOptions);
        var disabledEnricher = CreateEnricher(disabledOptions);

        // Assert
        enabledEnricher.IsEnabled.Should().BeTrue();
        disabledEnricher.IsEnabled.Should().BeFalse();
    }

    #endregion

    #region Caching Tests

    [Fact(DisplayName = "Enrich caches result when caching enabled")]
    public void Enrich_WhenCachingEnabled_CachesResult()
    {
        // Arrange
        var options = CreateOptions(enabled: true, enableCaching: true);
        var cache = new InMemoryScoreEnrichmentCache();
        var enricher = CreateEnricher(options, cache);
        var evidence = CreateEvidence("CVE-2024-1234@pkg:npm/test@1.0.0");

        // Act - second call for the same finding must hit the cache.
        var result1 = enricher.Enrich(evidence);
        var result2 = enricher.Enrich(evidence);

        // Assert
        result1.FromCache.Should().BeFalse();
        result2.FromCache.Should().BeTrue();
        cache.Count.Should().Be(1);
    }

    [Fact(DisplayName = "Enrich does not cache when caching disabled")]
    public void Enrich_WhenCachingDisabled_DoesNotCache()
    {
        // Arrange
        var options = CreateOptions(enabled: true, enableCaching: false);
        var cache = new InMemoryScoreEnrichmentCache();
        var enricher = CreateEnricher(options, cache);
        var evidence = CreateEvidence("CVE-2024-1234@pkg:npm/test@1.0.0");

        // Act
        var result1 = enricher.Enrich(evidence);
        var result2 = enricher.Enrich(evidence);

        // Assert - both calls recompute and the cache stays empty.
        result1.FromCache.Should().BeFalse();
        result2.FromCache.Should().BeFalse();
        cache.Count.Should().Be(0);
    }

    [Fact(DisplayName = "Cache respects max size limit")]
    public void Cache_RespectsMaxSizeLimit()
    {
        // Arrange
        var options = CreateOptions(enabled: true, enableCaching: true, maxCachedScores: 2);
        var cache = new InMemoryScoreEnrichmentCache();
        var enricher = CreateEnricher(options, cache);

        // Act - add 3 items
        enricher.Enrich(CreateEvidence("finding-1"));
        enricher.Enrich(CreateEvidence("finding-2"));
        enricher.Enrich(CreateEvidence("finding-3"));

        // Assert - cache should stop at max (third item not cached)
        cache.Count.Should().Be(2);
    }

    #endregion

    #region Score Calculation Tests

    [Fact(DisplayName = "Enrich produces valid score range")]
    public void Enrich_ProducesValidScoreRange()
    {
        // Arrange
        var options = CreateOptions(enabled: true);
        var enricher = CreateEnricher(options);
        var evidence = CreateEvidence("CVE-2024-1234@pkg:npm/test@1.0.0");

        // Act
        var result = enricher.Enrich(evidence);

        // Assert - EWS scores are defined on the closed interval [0, 100].
        result.Score.Should().NotBeNull();
        result.Score!.Score.Should().BeInRange(0, 100);
    }

    [Fact(DisplayName = "Enrich with high evidence produces high score")]
    public void Enrich_HighEvidence_ProducesHighScore()
    {
        // Arrange
        var options = CreateOptions(enabled: true);
        var enricher = CreateEnricher(options);
        var evidence = CreateHighEvidenceData("CVE-2024-1234@pkg:npm/test@1.0.0");

        // Act
        var result = enricher.Enrich(evidence);

        // Assert
        result.Score.Should().NotBeNull();
        result.Score!.Score.Should().BeGreaterThanOrEqualTo(70);
    }

    [Fact(DisplayName = "Enrich with low evidence produces low score")]
    public void Enrich_LowEvidence_ProducesLowScore()
    {
        // Arrange
        var options = CreateOptions(enabled: true);
        var enricher = CreateEnricher(options);
        var evidence = CreateLowEvidenceData("CVE-2024-1234@pkg:npm/test@1.0.0");

        // Act
        var result = enricher.Enrich(evidence);

        // Assert
        result.Score.Should().NotBeNull();
        result.Score!.Score.Should().BeLessThanOrEqualTo(50);
    }

    [Fact(DisplayName = "Enrich records calculation duration")]
    public void Enrich_RecordsCalculationDuration()
    {
        // Arrange
        var options = CreateOptions(enabled: true, enableCaching: false);
        var enricher = CreateEnricher(options);
        var evidence = CreateEvidence("CVE-2024-1234@pkg:npm/test@1.0.0");

        // Act
        var result = enricher.Enrich(evidence);

        // Assert - a very fast calculation can legitimately measure as zero
        // elapsed time at the timer's resolution, so assert non-negative rather
        // than strictly positive to keep this test deterministic.
        result.CalculationDuration.Should().NotBeNull();
        result.CalculationDuration!.Value.Should().BeGreaterThanOrEqualTo(TimeSpan.Zero);
    }

    #endregion

    #region Async Tests

    [Fact(DisplayName = "EnrichAsync returns same result as sync")]
    public async Task EnrichAsync_ReturnsSameResultAsSync()
    {
        // Arrange - caching disabled so both calls compute from scratch.
        var options = CreateOptions(enabled: true, enableCaching: false);
        var enricher = CreateEnricher(options);
        var evidence = CreateEvidence("CVE-2024-1234@pkg:npm/test@1.0.0");

        // Act
        var syncResult = enricher.Enrich(evidence);
        var asyncResult = await enricher.EnrichAsync(evidence);

        // Assert
        asyncResult.IsSuccess.Should().Be(syncResult.IsSuccess);
        asyncResult.Score?.Score.Should().Be(syncResult.Score?.Score);
    }

    [Fact(DisplayName = "EnrichBatchAsync processes all items")]
    public async Task EnrichBatchAsync_ProcessesAllItems()
    {
        // Arrange
        var options = CreateOptions(enabled: true);
        var enricher = CreateEnricher(options);
        var evidenceList = new[]
        {
            CreateEvidence("finding-1"),
            CreateEvidence("finding-2"),
            CreateEvidence("finding-3")
        };

        // Act
        var results = new List<ScoreEnrichmentResult>();
        await foreach (var result in enricher.EnrichBatchAsync(evidenceList))
        {
            results.Add(result);
        }

        // Assert
        results.Should().HaveCount(3);
        results.Should().OnlyContain(r => r.IsSuccess);
    }

    [Fact(DisplayName = "EnrichBatchAsync respects cancellation")]
    public async Task EnrichBatchAsync_RespectsCancellation()
    {
        // Arrange
        var options = CreateOptions(enabled: true, enableCaching: false);
        var enricher = CreateEnricher(options);
        var evidenceList = Enumerable.Range(1, 100)
            .Select(i => CreateEvidence($"finding-{i}"))
            .ToList();
        var cts = new CancellationTokenSource();
        cts.Cancel(); // Cancel immediately

        // Act
        // NOTE(review): this assumes the enricher ends the stream early on a
        // pre-cancelled token instead of throwing OperationCanceledException —
        // confirm against the EnrichBatchAsync implementation.
        var results = new List<ScoreEnrichmentResult>();
        await foreach (var result in enricher.EnrichBatchAsync(evidenceList, cts.Token))
        {
            results.Add(result);
        }

        // Assert
        results.Should().BeEmpty();
    }

    #endregion

    #region Policy Override Tests

    [Fact(DisplayName = "Enrich applies weight overrides")]
    public void Enrich_AppliesWeightOverrides()
    {
        // Arrange
        var options = CreateOptions(enabled: true);
        options.Weights = new EvidenceWeightsConfiguration
        {
            Rch = 0.5,
            Rts = 0.3,
            Bkp = 0.1,
            Xpl = 0.05,
            Src = 0.05,
            Mit = 0.1
        };
        var enricher = CreateEnricher(options);
        var evidence = CreateEvidence("CVE-2024-1234@pkg:npm/test@1.0.0");

        // Act
        var result = enricher.Enrich(evidence);

        // Assert - score calculation should use custom weights
        result.IsSuccess.Should().BeTrue();
        result.Score.Should().NotBeNull();
    }

    [Fact(DisplayName = "Enrich applies bucket threshold overrides")]
    public void Enrich_AppliesBucketThresholdOverrides()
    {
        // Arrange
        var options = CreateOptions(enabled: true);
        options.BucketThresholds = new BucketThresholdsConfiguration
        {
            ActNowMin = 95,
            ScheduleNextMin = 80,
            InvestigateMin = 50
        };
        var enricher = CreateEnricher(options);
        var evidence = CreateEvidence("CVE-2024-1234@pkg:npm/test@1.0.0");

        // Act
        var result = enricher.Enrich(evidence);

        // Assert
        result.IsSuccess.Should().BeTrue();
        result.Score.Should().NotBeNull();
    }

    #endregion

    #region Error Handling Tests

    [Fact(DisplayName = "Enrich handles aggregator exception gracefully")]
    public void Enrich_HandleAggregatorException_Gracefully()
    {
        // Arrange
        var options = CreateOptions(enabled: true);
        var failingAggregator = new FailingNormalizerAggregator();
        var enricher = new EvidenceWeightedScoreEnricher(
            failingAggregator,
            _calculator,
            _policyProvider,
            CreateOptionsMonitor(options));
        var evidence = CreateEvidence("CVE-2024-1234@pkg:npm/test@1.0.0");

        // Act
        var result = enricher.Enrich(evidence);

        // Assert - the failure is captured as an error result, not rethrown.
        result.IsSuccess.Should().BeFalse();
        result.Error.Should().NotBeNullOrEmpty();
        result.Score.Should().BeNull();
    }

    #endregion

    #region Helper Methods

    /// <summary>Builds an enricher wired to the shared test doubles.</summary>
    private EvidenceWeightedScoreEnricher CreateEnricher(
        PolicyEvidenceWeightedScoreOptions options,
        IScoreEnrichmentCache? cache = null)
    {
        return new EvidenceWeightedScoreEnricher(
            _aggregator,
            _calculator,
            _policyProvider,
            CreateOptionsMonitor(options),
            logger: null,
            cache: cache);
    }

    /// <summary>Builds options with the given feature-flag and cache settings.</summary>
    private static PolicyEvidenceWeightedScoreOptions CreateOptions(
        bool enabled = false,
        bool enableCaching = true,
        int maxCachedScores = 10_000)
    {
        return new PolicyEvidenceWeightedScoreOptions
        {
            Enabled = enabled,
            EnableCaching = enableCaching,
            MaxCachedScoresPerContext = maxCachedScores
        };
    }

    /// <summary>Wraps options in a monitor that never raises change notifications.</summary>
    private static IOptionsMonitor<PolicyEvidenceWeightedScoreOptions> CreateOptionsMonitor(
        PolicyEvidenceWeightedScoreOptions options)
    {
        return new StaticOptionsMonitor<PolicyEvidenceWeightedScoreOptions>(options);
    }

    /// <summary>Minimal evidence: finding id only, no signals attached.</summary>
    private static FindingEvidence CreateEvidence(string findingId)
    {
        return new FindingEvidence
        {
            FindingId = findingId
        };
    }

    /// <summary>Evidence with strong reachability, runtime, and exploit signals.</summary>
    private static FindingEvidence CreateHighEvidenceData(string findingId)
    {
        return new FindingEvidence
        {
            FindingId = findingId,
            Reachability = new ReachabilityInput
            {
                State = ReachabilityState.DynamicReachable,
                Confidence = 0.95
            },
            Runtime = new RuntimeInput
            {
                Posture = RuntimePosture.ActiveTracing,
                ObservationCount = 10,
                RecencyFactor = 0.95
            },
            Exploit = new ExploitInput
            {
                EpssScore = 0.85,
                EpssPercentile = 95,
                KevStatus = KevStatus.InKev,
                PublicExploitAvailable = true
            }
        };
    }

    /// <summary>Evidence with only weak, low-confidence reachability data.</summary>
    private static FindingEvidence CreateLowEvidenceData(string findingId)
    {
        return new FindingEvidence
        {
            FindingId = findingId,
            Reachability = new ReachabilityInput
            {
                State = ReachabilityState.Unknown,
                Confidence = 0.1
            }
        };
    }

    #endregion

    #region Test Doubles

    /// <summary>
    /// Deterministic aggregator double: derives normalized inputs from whatever
    /// evidence is present and falls back to fixed defaults otherwise.
    /// </summary>
    private sealed class TestNormalizerAggregator : INormalizerAggregator
    {
        public Task<EvidenceWeightedScoreInput> AggregateAsync(
            string findingId,
            CancellationToken cancellationToken = default)
        {
            return Task.FromResult(Aggregate(new FindingEvidence { FindingId = findingId }));
        }

        public EvidenceWeightedScoreInput Aggregate(FindingEvidence evidence)
        {
            // Simple aggregation - use defaults for missing evidence
            var rch = evidence.Reachability is not null
                ? (evidence.Reachability.Confidence * MapReachabilityState(evidence.Reachability.State))
                : 0.3; // Default
            var rts = evidence.Runtime is not null
                ? 0.7 * (evidence.Runtime.ObservationCount > 0 ? 1.0 : 0.5)
                : 0.0;
            var xpl = evidence.Exploit is not null
                ? (evidence.Exploit.EpssScore +
                   (evidence.Exploit.KevStatus == KevStatus.InKev ? 0.3 : 0.0) +
                   (evidence.Exploit.PublicExploitAvailable ? 0.2 : 0.0)) / 1.5
                : 0.0;
            return new EvidenceWeightedScoreInput
            {
                FindingId = evidence.FindingId,
                Rch = Math.Clamp(rch, 0, 1),
                Rts = Math.Clamp(rts, 0, 1),
                Bkp = 0.0,
                Xpl = Math.Clamp(xpl, 0, 1),
                Src = 0.5,
                Mit = 0.0
            };
        }

        public AggregationResult AggregateWithDetails(FindingEvidence evidence)
        {
            return new AggregationResult
            {
                Input = Aggregate(evidence),
                Details = new Dictionary<string, NormalizationResult>()
            };
        }

        private static double MapReachabilityState(ReachabilityState state) => state switch
        {
            ReachabilityState.LiveExploitPath => 1.0,
            ReachabilityState.DynamicReachable => 0.9,
            ReachabilityState.StaticReachable => 0.7,
            ReachabilityState.PotentiallyReachable => 0.4,
            ReachabilityState.NotReachable => 0.1,
            _ => 0.3
        };
    }

    /// <summary>Aggregator double that always throws, for error-path tests.</summary>
    private sealed class FailingNormalizerAggregator : INormalizerAggregator
    {
        public Task<EvidenceWeightedScoreInput> AggregateAsync(
            string findingId,
            CancellationToken cancellationToken = default)
        {
            throw new InvalidOperationException("Simulated aggregator failure");
        }

        public EvidenceWeightedScoreInput Aggregate(FindingEvidence evidence)
        {
            throw new InvalidOperationException("Simulated aggregator failure");
        }

        public AggregationResult AggregateWithDetails(FindingEvidence evidence)
        {
            throw new InvalidOperationException("Simulated aggregator failure");
        }
    }

    /// <summary>Policy provider double returning a settable in-memory policy.</summary>
    private sealed class TestPolicyProvider : IEvidenceWeightPolicyProvider
    {
        public EvidenceWeightPolicy Policy { get; set; } = EvidenceWeightPolicy.DefaultProduction;

        public Task<EvidenceWeightPolicy> GetPolicyAsync(
            string? tenantId,
            string environment,
            CancellationToken cancellationToken = default)
        {
            return Task.FromResult(Policy);
        }

        public Task<EvidenceWeightPolicy> GetDefaultPolicyAsync(
            string environment,
            CancellationToken cancellationToken = default)
        {
            return Task.FromResult(EvidenceWeightPolicy.DefaultProduction);
        }

        public Task<bool> PolicyExistsAsync(
            string? tenantId,
            string environment,
            CancellationToken cancellationToken = default)
        {
            return Task.FromResult(true);
        }
    }

    /// <summary>Options monitor double that never fires change callbacks.</summary>
    private sealed class StaticOptionsMonitor<T> : IOptionsMonitor<T>
        where T : class
    {
        private readonly T _value;

        public StaticOptionsMonitor(T value)
        {
            _value = value;
        }

        public T CurrentValue => _value;

        public T Get(string? name) => _value;

        public IDisposable? OnChange(Action<T, string?> listener) => null;
    }

    #endregion
}

View File

@@ -0,0 +1,470 @@
// -----------------------------------------------------------------------------
// DslCompletionProviderTests.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-019
// Description: Unit tests for DSL autocomplete hints for score fields
// -----------------------------------------------------------------------------

using FluentAssertions;
using Xunit;

namespace StellaOps.PolicyDsl.Tests;

/// <summary>
/// Tests for DslCompletionProvider and DslCompletionCatalog: catalog contents,
/// context-sensitive completions, completion-item metadata, and edge cases.
/// </summary>
[Trait("Category", "Unit")]
[Trait("Sprint", "8200.0012.0003")]
public sealed class DslCompletionProviderTests
{
    #region Catalog Tests

    [Fact]
    public void GetCompletionCatalog_ReturnsNonNullCatalog()
    {
        // Act
        var catalog = DslCompletionProvider.GetCompletionCatalog();

        // Assert
        catalog.Should().NotBeNull();
    }

    [Fact]
    public void Catalog_ContainsScoreFields()
    {
        // Arrange
        var catalog = DslCompletionProvider.GetCompletionCatalog();

        // Assert
        catalog.ScoreFields.Should().NotBeEmpty();
        catalog.ScoreFields.Should().Contain(f => f.Label == "value");
        catalog.ScoreFields.Should().Contain(f => f.Label == "bucket");
        catalog.ScoreFields.Should().Contain(f => f.Label == "is_act_now");
        catalog.ScoreFields.Should().Contain(f => f.Label == "flags");
        catalog.ScoreFields.Should().Contain(f => f.Label == "rch");
        catalog.ScoreFields.Should().Contain(f => f.Label == "reachability");
    }

    [Fact]
    public void Catalog_ContainsScoreBuckets()
    {
        // Arrange
        var catalog = DslCompletionProvider.GetCompletionCatalog();

        // Assert - exactly the four EWS buckets.
        catalog.ScoreBuckets.Should().NotBeEmpty();
        catalog.ScoreBuckets.Should().HaveCount(4);
        catalog.ScoreBuckets.Should().Contain(b => b.Label == "ActNow");
        catalog.ScoreBuckets.Should().Contain(b => b.Label == "ScheduleNext");
        catalog.ScoreBuckets.Should().Contain(b => b.Label == "Investigate");
        catalog.ScoreBuckets.Should().Contain(b => b.Label == "Watchlist");
    }

    [Fact]
    public void Catalog_ContainsScoreFlags()
    {
        // Arrange
        var catalog = DslCompletionProvider.GetCompletionCatalog();

        // Assert
        catalog.ScoreFlags.Should().NotBeEmpty();
        catalog.ScoreFlags.Should().Contain(f => f.Label == "kev");
        catalog.ScoreFlags.Should().Contain(f => f.Label == "live-signal");
        catalog.ScoreFlags.Should().Contain(f => f.Label == "vendor-na");
        catalog.ScoreFlags.Should().Contain(f => f.Label == "reachable");
        catalog.ScoreFlags.Should().Contain(f => f.Label == "unreachable");
    }

    [Fact]
    public void Catalog_ContainsAllDimensionAliases()
    {
        // Arrange
        var catalog = DslCompletionProvider.GetCompletionCatalog();

        // Assert - short aliases
        catalog.ScoreFields.Should().Contain(f => f.Label == "rch");
        catalog.ScoreFields.Should().Contain(f => f.Label == "rts");
        catalog.ScoreFields.Should().Contain(f => f.Label == "bkp");
        catalog.ScoreFields.Should().Contain(f => f.Label == "xpl");
        catalog.ScoreFields.Should().Contain(f => f.Label == "src");
        catalog.ScoreFields.Should().Contain(f => f.Label == "mit");

        // Assert - long aliases
        catalog.ScoreFields.Should().Contain(f => f.Label == "reachability");
        catalog.ScoreFields.Should().Contain(f => f.Label == "runtime");
        catalog.ScoreFields.Should().Contain(f => f.Label == "backport");
        catalog.ScoreFields.Should().Contain(f => f.Label == "exploit");
        catalog.ScoreFields.Should().Contain(f => f.Label == "source_trust");
        catalog.ScoreFields.Should().Contain(f => f.Label == "mitigation");
    }

    [Fact]
    public void Catalog_ContainsVexStatuses()
    {
        // Arrange
        var catalog = DslCompletionProvider.GetCompletionCatalog();

        // Assert
        catalog.VexStatuses.Should().NotBeEmpty();
        catalog.VexStatuses.Should().Contain(s => s.Label == "affected");
        catalog.VexStatuses.Should().Contain(s => s.Label == "not_affected");
        catalog.VexStatuses.Should().Contain(s => s.Label == "fixed");
    }

    [Fact]
    public void Catalog_ContainsKeywordsAndFunctions()
    {
        // Arrange
        var catalog = DslCompletionProvider.GetCompletionCatalog();

        // Assert - keywords
        catalog.Keywords.Should().NotBeEmpty();
        catalog.Keywords.Should().Contain(k => k.Label == "policy");
        catalog.Keywords.Should().Contain(k => k.Label == "rule");
        catalog.Keywords.Should().Contain(k => k.Label == "when");
        catalog.Keywords.Should().Contain(k => k.Label == "then");

        // Assert - functions
        catalog.Functions.Should().NotBeEmpty();
        catalog.Functions.Should().Contain(f => f.Label == "normalize_cvss");
        catalog.Functions.Should().Contain(f => f.Label == "exists");
    }

    #endregion

    #region Context-Based Completion Tests

    [Fact]
    public void GetCompletionsForContext_ScoreDot_ReturnsScoreFields()
    {
        // Arrange
        var context = new DslCompletionContext("when score.");

        // Act
        var completions = DslCompletionProvider.GetCompletionsForContext(context);

        // Assert - only score-namespace fields are offered after "score.".
        completions.Should().NotBeEmpty();
        completions.Should().Contain(c => c.Label == "value");
        completions.Should().Contain(c => c.Label == "bucket");
        completions.Should().Contain(c => c.Label == "flags");
        completions.Should().OnlyContain(c =>
            DslCompletionProvider.GetCompletionCatalog().ScoreFields.Any(sf => sf.Label == c.Label));
    }

    [Fact]
    public void GetCompletionsForContext_SbomDot_ReturnsSbomFields()
    {
        // Arrange
        var context = new DslCompletionContext("when sbom.");

        // Act
        var completions = DslCompletionProvider.GetCompletionsForContext(context);

        // Assert
        completions.Should().NotBeEmpty();
        completions.Should().Contain(c => c.Label == "purl");
        completions.Should().Contain(c => c.Label == "name");
        completions.Should().Contain(c => c.Label == "version");
    }

    [Fact]
    public void GetCompletionsForContext_AdvisoryDot_ReturnsAdvisoryFields()
    {
        // Arrange
        var context = new DslCompletionContext("when advisory.");

        // Act
        var completions = DslCompletionProvider.GetCompletionsForContext(context);

        // Assert
        completions.Should().NotBeEmpty();
        completions.Should().Contain(c => c.Label == "id");
        completions.Should().Contain(c => c.Label == "source");
        completions.Should().Contain(c => c.Label == "severity");
    }

    [Fact]
    public void GetCompletionsForContext_VexDot_ReturnsVexFields()
    {
        // Arrange
        var context = new DslCompletionContext("when vex.");

        // Act
        var completions = DslCompletionProvider.GetCompletionsForContext(context);

        // Assert
        completions.Should().NotBeEmpty();
        completions.Should().Contain(c => c.Label == "status");
        completions.Should().Contain(c => c.Label == "justification");
        completions.Should().Contain(c => c.Label == "any");
    }

    [Fact]
    public void GetCompletionsForContext_ScoreBucketEquals_ReturnsBuckets()
    {
        // Arrange
        var context = new DslCompletionContext("when score.bucket == ");

        // Act
        var completions = DslCompletionProvider.GetCompletionsForContext(context);

        // Assert
        completions.Should().NotBeEmpty();
        completions.Should().Contain(c => c.Label == "ActNow");
        completions.Should().Contain(c => c.Label == "ScheduleNext");
        completions.Should().Contain(c => c.Label == "Investigate");
        completions.Should().Contain(c => c.Label == "Watchlist");
    }

    [Fact]
    public void GetCompletionsForContext_ScoreBucketEqualsQuote_ReturnsBuckets()
    {
        // Arrange - cursor inside an opening quote after the comparison.
        var context = new DslCompletionContext("when score.bucket == \"");

        // Act
        var completions = DslCompletionProvider.GetCompletionsForContext(context);

        // Assert
        completions.Should().NotBeEmpty();
        completions.Should().HaveCount(4);
    }

    [Fact]
    public void GetCompletionsForContext_ScoreFlagsContains_ReturnsFlags()
    {
        // Arrange
        var context = new DslCompletionContext("when score.flags contains ");

        // Act
        var completions = DslCompletionProvider.GetCompletionsForContext(context);

        // Assert
        completions.Should().NotBeEmpty();
        completions.Should().Contain(c => c.Label == "kev");
        completions.Should().Contain(c => c.Label == "live-signal");
        completions.Should().Contain(c => c.Label == "vendor-na");
    }

    [Fact]
    public void GetCompletionsForContext_StatusEquals_ReturnsVexStatuses()
    {
        // Arrange
        var context = new DslCompletionContext("status == ");

        // Act
        var completions = DslCompletionProvider.GetCompletionsForContext(context);

        // Assert
        completions.Should().NotBeEmpty();
        completions.Should().Contain(c => c.Label == "affected");
        completions.Should().Contain(c => c.Label == "not_affected");
        completions.Should().Contain(c => c.Label == "fixed");
    }

    [Fact]
    public void GetCompletionsForContext_JustificationEquals_ReturnsJustifications()
    {
        // Arrange
        var context = new DslCompletionContext("justification == ");

        // Act
        var completions = DslCompletionProvider.GetCompletionsForContext(context);

        // Assert
        completions.Should().NotBeEmpty();
        completions.Should().Contain(c => c.Label == "component_not_present");
        completions.Should().Contain(c => c.Label == "vulnerable_code_not_present");
    }

    [Fact]
    public void GetCompletionsForContext_AfterThen_ReturnsActions()
    {
        // Arrange
        var context = new DslCompletionContext("when condition then");

        // Act
        var completions = DslCompletionProvider.GetCompletionsForContext(context);

        // Assert
        completions.Should().NotBeEmpty();
        completions.Should().Contain(c => c.Label == "status :=");
        completions.Should().Contain(c => c.Label == "ignore");
        completions.Should().Contain(c => c.Label == "escalate");
    }

    [Fact]
    public void GetCompletionsForContext_AfterElse_ReturnsActions()
    {
        // Arrange
        var context = new DslCompletionContext("then action1 else");

        // Act
        var completions = DslCompletionProvider.GetCompletionsForContext(context);

        // Assert
        completions.Should().NotBeEmpty();
        completions.Should().Contain(c => c.Label == "warn");
        completions.Should().Contain(c => c.Label == "defer");
    }

    [Fact]
    public void GetCompletionsForContext_EmptyContext_ReturnsAllTopLevel()
    {
        // Arrange
        var context = new DslCompletionContext("");

        // Act
        var completions = DslCompletionProvider.GetCompletionsForContext(context);

        // Assert
        completions.Should().NotBeEmpty();
        // Should include keywords
        completions.Should().Contain(c => c.Label == "policy");
        completions.Should().Contain(c => c.Label == "rule");
        // Should include namespaces
        completions.Should().Contain(c => c.Label == "score");
        completions.Should().Contain(c => c.Label == "sbom");
        // Should include functions
        completions.Should().Contain(c => c.Label == "normalize_cvss");
    }

    #endregion

    #region CompletionItem Tests

    [Fact]
    public void ScoreValueField_HasCorrectDocumentation()
    {
        // Arrange
        var catalog = DslCompletionProvider.GetCompletionCatalog();

        // Act
        var valueField = catalog.ScoreFields.First(f => f.Label == "value");

        // Assert
        valueField.Documentation.Should().Contain("0-100");
        valueField.Documentation.Should().Contain("score.value >= 80");
        valueField.Kind.Should().Be(DslCompletionKind.Field);
    }

    [Fact]
    public void ScoreBucketField_HasCorrectDocumentation()
    {
        // Arrange
        var catalog = DslCompletionProvider.GetCompletionCatalog();

        // Act
        var bucketField = catalog.ScoreFields.First(f => f.Label == "bucket");

        // Assert - documentation names every bucket value.
        bucketField.Documentation.Should().Contain("ActNow");
        bucketField.Documentation.Should().Contain("ScheduleNext");
        bucketField.Documentation.Should().Contain("Investigate");
        bucketField.Documentation.Should().Contain("Watchlist");
    }

    [Fact]
    public void ScoreFlags_AllHaveQuotedInsertText()
    {
        // Arrange
        var catalog = DslCompletionProvider.GetCompletionCatalog();

        // Assert - all flags should be quoted for use in DSL
        foreach (var flag in catalog.ScoreFlags)
        {
            flag.InsertText.Should().StartWith("\"");
            flag.InsertText.Should().EndWith("\"");
        }
    }

    [Fact]
    public void ScoreBuckets_AllHaveQuotedInsertText()
    {
        // Arrange
        var catalog = DslCompletionProvider.GetCompletionCatalog();

        // Assert - all buckets should be quoted for use in DSL
        foreach (var bucket in catalog.ScoreBuckets)
        {
            bucket.InsertText.Should().StartWith("\"");
            bucket.InsertText.Should().EndWith("\"");
        }
    }

    [Fact]
    public void SnippetCompletions_HaveSnippetFlag()
    {
        // Arrange
        var catalog = DslCompletionProvider.GetCompletionCatalog();

        // Assert - items with placeholders should have IsSnippet = true
        var policyKeyword = catalog.Keywords.First(k => k.Label == "policy");
        policyKeyword.IsSnippet.Should().BeTrue();
        policyKeyword.InsertText.Should().Contain("${1:");
    }

    [Fact]
    public void SimpleFields_DoNotHaveSnippetFlag()
    {
        // Arrange
        var catalog = DslCompletionProvider.GetCompletionCatalog();

        // Assert - simple field completions should not be snippets
        var valueField = catalog.ScoreFields.First(f => f.Label == "value");
        valueField.IsSnippet.Should().BeFalse();
        valueField.InsertText.Should().NotContain("${");
    }

    #endregion

    #region Edge Cases

    [Fact]
    public void GetCompletionsForContext_NullContext_ThrowsArgumentNullException()
    {
        // Act & Assert
        var action = () => DslCompletionProvider.GetCompletionsForContext(null!);
        action.Should().Throw<ArgumentNullException>();
    }

    [Fact]
    public void GetCompletionsForContext_CaseInsensitive_ScoreBucket()
    {
        // Arrange - mixed case
        var context = new DslCompletionContext("when SCORE.BUCKET == ");

        // Act
        var completions = DslCompletionProvider.GetCompletionsForContext(context);

        // Assert
        completions.Should().NotBeEmpty();
        completions.Should().Contain(c => c.Label == "ActNow");
    }

    [Fact]
    public void GetCompletionsForContext_MultipleContextsInLine_ReturnsCorrectCompletions()
    {
        // Arrange - score.value already used, now typing score.bucket
        var context = new DslCompletionContext("when score.value >= 80 and score.bucket == ");

        // Act
        var completions = DslCompletionProvider.GetCompletionsForContext(context);

        // Assert - the trailing comparison wins, so buckets are offered.
        completions.Should().NotBeEmpty();
        completions.Should().Contain(c => c.Label == "ActNow");
    }

    [Fact]
    public void Catalog_IsSingleton()
    {
        // Act
        var catalog1 = DslCompletionProvider.GetCompletionCatalog();
        var catalog2 = DslCompletionProvider.GetCompletionCatalog();

        // Assert - the catalog is built once and reused.
        catalog1.Should().BeSameAs(catalog2);
    }

    #endregion
}