sprints work

This commit is contained in:
StellaOps Bot
2025-12-25 12:19:12 +02:00
parent 223843f1d1
commit 2a06f780cf
224 changed files with 41796 additions and 1515 deletions

View File

@@ -0,0 +1,696 @@
// -----------------------------------------------------------------------------
// ScoreProvenanceChain.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-030
// Description: Score provenance chain linking Finding → Evidence → Score → Verdict
// -----------------------------------------------------------------------------
using System.Collections.Immutable;
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
using System.Text.Json.Serialization;
namespace StellaOps.Policy.Engine.Attestation;
/// <summary>
/// Complete provenance chain tracking a vulnerability finding through
/// evidence collection, score calculation, and policy verdict.
/// </summary>
/// <remarks>
/// This chain provides audit-grade traceability:
/// 1. **Finding**: The vulnerability that triggered evaluation (CVE, PURL, digest).
/// 2. **Evidence**: The attestations/documents considered (SBOM, VEX, reachability).
/// 3. **Score**: The EWS calculation with all inputs and weights.
/// 4. **Verdict**: The final policy decision with rule chain.
///
/// Each step includes content-addressed references for deterministic replay.
/// </remarks>
public sealed record ScoreProvenanceChain
{
    /// <summary>
    /// Creates a new ScoreProvenanceChain.
    /// </summary>
    /// <param name="finding">Finding reference; required.</param>
    /// <param name="evidenceSet">Evidence attestations considered; required.</param>
    /// <param name="score">Score computation node; required.</param>
    /// <param name="verdict">Final policy verdict reference; required.</param>
    /// <param name="createdAt">Chain creation timestamp.</param>
    /// <exception cref="ArgumentNullException">When any reference argument is null.</exception>
    public ScoreProvenanceChain(
        ProvenanceFindingRef finding,
        ProvenanceEvidenceSet evidenceSet,
        ProvenanceScoreNode score,
        ProvenanceVerdictRef verdict,
        DateTimeOffset createdAt)
    {
        Finding = finding ?? throw new ArgumentNullException(nameof(finding));
        EvidenceSet = evidenceSet ?? throw new ArgumentNullException(nameof(evidenceSet));
        Score = score ?? throw new ArgumentNullException(nameof(score));
        Verdict = verdict ?? throw new ArgumentNullException(nameof(verdict));
        CreatedAt = createdAt;
        // Digest is fixed at construction; ValidateIntegrity recomputes it later
        // to detect tampering with any of the chained components.
        ChainDigest = ComputeChainDigest();
    }
    /// <summary>
    /// Reference to the vulnerability finding that triggered evaluation.
    /// </summary>
    public ProvenanceFindingRef Finding { get; }
    /// <summary>
    /// Set of evidence attestations that were considered.
    /// </summary>
    public ProvenanceEvidenceSet EvidenceSet { get; }
    /// <summary>
    /// Score computation node with inputs, weights, and result.
    /// </summary>
    public ProvenanceScoreNode Score { get; }
    /// <summary>
    /// Reference to the final policy verdict.
    /// </summary>
    public ProvenanceVerdictRef Verdict { get; }
    /// <summary>
    /// Chain creation timestamp (UTC).
    /// </summary>
    public DateTimeOffset CreatedAt { get; }
    /// <summary>
    /// Digest of the entire provenance chain for tamper detection.
    /// Lowercase SHA-256 hex over a canonical JSON projection of the chain.
    /// </summary>
    public string ChainDigest { get; }
    /// <summary>
    /// Validates the chain integrity by recomputing the digest and comparing
    /// it (ordinal) against the one captured at construction time.
    /// </summary>
    public bool ValidateIntegrity()
    {
        var recomputed = ComputeChainDigest();
        return string.Equals(ChainDigest, recomputed, StringComparison.Ordinal);
    }
    /// <summary>
    /// Gets a summary of the provenance chain for logging.
    /// </summary>
    public string GetSummary()
    {
        return $"[{Finding.VulnerabilityId}] " +
            $"Evidence({EvidenceSet.TotalCount}) → " +
            $"Score({Score.FinalScore}, {Score.Bucket}) → " +
            $"Verdict({Verdict.Status})";
    }
    // Builds the canonical representation of the chain and hashes it.
    // WARNING: the anonymous-object shape, member order, and the serializer
    // options all feed the digest — changing any of them invalidates every
    // previously recorded ChainDigest.
    private string ComputeChainDigest()
    {
        // Canonical structure for hashing
        var canonical = new
        {
            finding = new
            {
                vuln_id = Finding.VulnerabilityId,
                component_purl = Finding.ComponentPurl,
                finding_digest = Finding.FindingDigest
            },
            evidence_set = new
            {
                // Only counts plus the set digest are hashed; individual refs are
                // already folded into EvidenceSet.SetDigest.
                sbom_count = EvidenceSet.SbomRefs.Length,
                vex_count = EvidenceSet.VexRefs.Length,
                reachability_count = EvidenceSet.ReachabilityRefs.Length,
                scan_count = EvidenceSet.ScanRefs.Length,
                evidence_digest = EvidenceSet.SetDigest
            },
            score = new
            {
                final_score = Score.FinalScore,
                bucket = Score.Bucket,
                policy_digest = Score.PolicyDigest,
                input_digest = Score.InputDigest
            },
            verdict = new
            {
                status = Verdict.Status,
                severity = Verdict.Severity,
                rule_name = Verdict.MatchedRuleName,
                verdict_digest = Verdict.VerdictDigest
            },
            // Round-trip ("O") format pins an unambiguous timestamp encoding.
            created_at = CreatedAt.ToUniversalTime().ToString("O")
        };
        var json = JsonSerializer.Serialize(canonical, ProvenanceJsonOptions.Default);
        var hash = SHA256.HashData(Encoding.UTF8.GetBytes(json));
        return Convert.ToHexStringLower(hash);
    }
    /// <summary>
    /// Creates a ScoreProvenanceChain from a VerdictPredicate and supporting context.
    /// </summary>
    /// <exception cref="ArgumentNullException">When any argument is null.</exception>
    public static ScoreProvenanceChain FromVerdictPredicate(
        VerdictPredicate predicate,
        ProvenanceFindingRef finding,
        ProvenanceEvidenceSet evidenceSet)
    {
        ArgumentNullException.ThrowIfNull(predicate);
        ArgumentNullException.ThrowIfNull(finding);
        ArgumentNullException.ThrowIfNull(evidenceSet);
        var scoreNode = ProvenanceScoreNode.FromVerdictEws(predicate.EvidenceWeightedScore, predicate.FindingId);
        var verdictRef = ProvenanceVerdictRef.FromVerdictPredicate(predicate);
        // NOTE(review): UtcNow makes CreatedAt — and therefore ChainDigest —
        // differ across replays of the same predicate; confirm an injected
        // clock is not required for deterministic-replay scenarios.
        return new ScoreProvenanceChain(
            finding: finding,
            evidenceSet: evidenceSet,
            score: scoreNode,
            verdict: verdictRef,
            createdAt: DateTimeOffset.UtcNow
        );
    }
}
/// <summary>
/// Identifies the vulnerability finding that triggered a policy evaluation.
/// </summary>
public sealed record ProvenanceFindingRef
{
    /// <summary>
    /// Creates a new ProvenanceFindingRef.
    /// </summary>
    /// <param name="vulnerabilityId">Required vulnerability identifier (CVE, GHSA, ...).</param>
    /// <param name="componentPurl">Optional package URL of the affected component.</param>
    /// <param name="findingDigest">Optional content digest of the finding document.</param>
    /// <param name="advisorySource">Optional advisory source (NVD, OSV, vendor, ...).</param>
    /// <param name="publishedAt">Optional advisory publication date.</param>
    /// <exception cref="ArgumentNullException">When the vulnerability id trims to nothing.</exception>
    public ProvenanceFindingRef(
        string vulnerabilityId,
        string? componentPurl = null,
        string? findingDigest = null,
        string? advisorySource = null,
        DateTimeOffset? publishedAt = null)
    {
        var trimmedId = Validation.TrimToNull(vulnerabilityId);
        if (trimmedId is null)
        {
            throw new ArgumentNullException(nameof(vulnerabilityId));
        }
        VulnerabilityId = trimmedId;
        ComponentPurl = Validation.TrimToNull(componentPurl);
        FindingDigest = Validation.TrimToNull(findingDigest);
        AdvisorySource = Validation.TrimToNull(advisorySource);
        PublishedAt = publishedAt;
    }
    /// <summary>
    /// Vulnerability identifier (CVE, GHSA, etc.); always non-empty.
    /// </summary>
    public string VulnerabilityId { get; }
    /// <summary>
    /// Package URL of the affected component, when known.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? ComponentPurl { get; }
    /// <summary>
    /// Content digest of the finding document, when known.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? FindingDigest { get; }
    /// <summary>
    /// Advisory source (NVD, OSV, vendor, etc.), when known.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? AdvisorySource { get; }
    /// <summary>
    /// Advisory publication date, when known.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public DateTimeOffset? PublishedAt { get; }
}
/// <summary>
/// Set of evidence attestations considered during scoring.
/// </summary>
public sealed record ProvenanceEvidenceSet
{
    /// <summary>
    /// Creates a new ProvenanceEvidenceSet. Every list is optional; each is
    /// normalized (nulls dropped, sorted by type then digest) so the resulting
    /// set digest is deterministic regardless of input order.
    /// </summary>
    public ProvenanceEvidenceSet(
        IEnumerable<ProvenanceEvidenceRef>? sbomRefs = null,
        IEnumerable<ProvenanceEvidenceRef>? vexRefs = null,
        IEnumerable<ProvenanceEvidenceRef>? reachabilityRefs = null,
        IEnumerable<ProvenanceEvidenceRef>? scanRefs = null,
        IEnumerable<ProvenanceEvidenceRef>? otherRefs = null)
    {
        SbomRefs = NormalizeRefs(sbomRefs);
        VexRefs = NormalizeRefs(vexRefs);
        ReachabilityRefs = NormalizeRefs(reachabilityRefs);
        ScanRefs = NormalizeRefs(scanRefs);
        OtherRefs = NormalizeRefs(otherRefs);
        // Digest is fixed at construction over the normalized refs.
        SetDigest = ComputeSetDigest();
    }
    /// <summary>
    /// SBOM attestation references.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<ProvenanceEvidenceRef> SbomRefs { get; }
    /// <summary>
    /// VEX document references.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<ProvenanceEvidenceRef> VexRefs { get; }
    /// <summary>
    /// Reachability analysis attestation references.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<ProvenanceEvidenceRef> ReachabilityRefs { get; }
    /// <summary>
    /// Scan result attestation references.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<ProvenanceEvidenceRef> ScanRefs { get; }
    /// <summary>
    /// Other evidence references.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<ProvenanceEvidenceRef> OtherRefs { get; }
    /// <summary>
    /// Digest of the entire evidence set ("empty" when no refs carry digests).
    /// </summary>
    public string SetDigest { get; }
    /// <summary>
    /// Total count of all evidence references.
    /// </summary>
    public int TotalCount =>
        SbomRefs.Length + VexRefs.Length + ReachabilityRefs.Length + ScanRefs.Length + OtherRefs.Length;
    /// <summary>
    /// Whether any evidence was considered.
    /// </summary>
    public bool HasEvidence => TotalCount > 0;
    /// <summary>
    /// Gets all references in deterministic order (category order, each
    /// category already sorted by type then digest).
    /// </summary>
    public IEnumerable<ProvenanceEvidenceRef> GetAllRefs()
    {
        return SbomRefs
            .Concat(VexRefs)
            .Concat(ReachabilityRefs)
            .Concat(ScanRefs)
            .Concat(OtherRefs);
    }
    // Drops null entries and sorts by (Type, Digest) with ordinal comparison
    // so the set digest does not depend on caller-supplied ordering.
    private static ImmutableArray<ProvenanceEvidenceRef> NormalizeRefs(IEnumerable<ProvenanceEvidenceRef>? refs)
    {
        if (refs is null)
        {
            return [];
        }
        return refs
            .Where(static r => r is not null)
            .OrderBy(static r => r.Type, StringComparer.Ordinal)
            .ThenBy(static r => r.Digest, StringComparer.Ordinal)
            .ToImmutableArray();
    }
    // SHA-256 over the sorted, colon-joined per-ref digests; the sentinel
    // "empty" keeps the property non-null for evidence-free sets.
    private string ComputeSetDigest()
    {
        var digests = GetAllRefs()
            .Select(static r => r.Digest)
            .Where(static d => !string.IsNullOrEmpty(d))
            .OrderBy(static d => d, StringComparer.Ordinal)
            .ToArray();
        if (digests.Length == 0)
        {
            return "empty";
        }
        var combined = string.Join(":", digests);
        var hash = SHA256.HashData(Encoding.UTF8.GetBytes(combined));
        return Convert.ToHexStringLower(hash);
    }
    /// <summary>
    /// Empty evidence set. Cached: the previous expression-bodied property
    /// allocated (and re-hashed) a fresh instance on every access, and two
    /// such instances were not reference-equal.
    /// </summary>
    public static ProvenanceEvidenceSet Empty { get; } = new();
}
/// <summary>
/// Points at a single evidence attestation by type and content digest.
/// </summary>
public sealed record ProvenanceEvidenceRef
{
    /// <summary>
    /// Creates a new ProvenanceEvidenceRef.
    /// </summary>
    /// <param name="type">Required evidence type; see <see cref="Types"/>.</param>
    /// <param name="digest">Required content digest of the attestation.</param>
    /// <param name="uri">Optional URI where the evidence can be retrieved.</param>
    /// <param name="provider">Optional producer of the evidence (vendor, tool, ...).</param>
    /// <param name="createdAt">Optional evidence creation timestamp.</param>
    /// <param name="status">Optional status (e.g. a VEX status).</param>
    /// <exception cref="ArgumentNullException">When type or digest trims to nothing.</exception>
    public ProvenanceEvidenceRef(
        string type,
        string digest,
        string? uri = null,
        string? provider = null,
        DateTimeOffset? createdAt = null,
        string? status = null)
    {
        var normalizedType = Validation.TrimToNull(type);
        var normalizedDigest = Validation.TrimToNull(digest);
        Type = normalizedType ?? throw new ArgumentNullException(nameof(type));
        Digest = normalizedDigest ?? throw new ArgumentNullException(nameof(digest));
        Uri = Validation.TrimToNull(uri);
        Provider = Validation.TrimToNull(provider);
        CreatedAt = createdAt;
        Status = Validation.TrimToNull(status);
    }
    /// <summary>
    /// Evidence type (sbom, vex, reachability, scan, etc.); always non-empty.
    /// </summary>
    public string Type { get; }
    /// <summary>
    /// Content digest of the evidence attestation; always non-empty.
    /// </summary>
    public string Digest { get; }
    /// <summary>
    /// URI reference to the evidence, when known.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? Uri { get; }
    /// <summary>
    /// Evidence provider (vendor, tool, etc.), when known.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? Provider { get; }
    /// <summary>
    /// Evidence creation timestamp, when known.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public DateTimeOffset? CreatedAt { get; }
    /// <summary>
    /// Evidence status (e.g., VEX status), when known.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? Status { get; }
    /// <summary>
    /// Well-known evidence types.
    /// </summary>
    public static class Types
    {
        public const string Sbom = "sbom";
        public const string Vex = "vex";
        public const string Reachability = "reachability";
        public const string Scan = "scan";
        public const string Advisory = "advisory";
        public const string RuntimeSignal = "runtime-signal";
        public const string BackportAnalysis = "backport-analysis";
        public const string ExploitIntel = "exploit-intel";
    }
}
/// <summary>
/// Score computation node in the provenance chain.
/// </summary>
public sealed record ProvenanceScoreNode
{
    /// <summary>
    /// Creates a new ProvenanceScoreNode.
    /// </summary>
    /// <param name="finalScore">Final computed score [0, 100].</param>
    /// <param name="bucket">Required score bucket name.</param>
    /// <param name="inputs">Required normalized input values.</param>
    /// <param name="weights">Required per-dimension weights.</param>
    /// <param name="policyDigest">Required digest of the scoring policy.</param>
    /// <param name="calculatorVersion">Required calculator version string.</param>
    /// <param name="calculatedAt">Calculation timestamp.</param>
    /// <param name="appliedFlags">Optional flags applied during scoring.</param>
    /// <param name="guardrails">Optional guardrails applied during scoring.</param>
    /// <exception cref="ArgumentNullException">When a required argument is null/blank.</exception>
    public ProvenanceScoreNode(
        int finalScore,
        string bucket,
        VerdictEvidenceInputs inputs,
        VerdictEvidenceWeights weights,
        string policyDigest,
        string calculatorVersion,
        DateTimeOffset calculatedAt,
        IEnumerable<string>? appliedFlags = null,
        VerdictAppliedGuardrails? guardrails = null)
    {
        FinalScore = finalScore;
        Bucket = Validation.TrimToNull(bucket) ?? throw new ArgumentNullException(nameof(bucket));
        Inputs = inputs ?? throw new ArgumentNullException(nameof(inputs));
        Weights = weights ?? throw new ArgumentNullException(nameof(weights));
        PolicyDigest = Validation.TrimToNull(policyDigest) ?? throw new ArgumentNullException(nameof(policyDigest));
        CalculatorVersion = Validation.TrimToNull(calculatorVersion) ?? throw new ArgumentNullException(nameof(calculatorVersion));
        CalculatedAt = calculatedAt;
        AppliedFlags = NormalizeFlags(appliedFlags);
        Guardrails = guardrails;
        // Digest over inputs and weights is fixed at construction.
        InputDigest = ComputeInputDigest();
    }
    /// <summary>
    /// Final computed score [0, 100].
    /// </summary>
    public int FinalScore { get; }
    /// <summary>
    /// Score bucket (ActNow, ScheduleNext, Investigate, Watchlist).
    /// </summary>
    public string Bucket { get; }
    /// <summary>
    /// Normalized input values used for calculation.
    /// </summary>
    public VerdictEvidenceInputs Inputs { get; }
    /// <summary>
    /// Weights applied to each dimension.
    /// </summary>
    public VerdictEvidenceWeights Weights { get; }
    /// <summary>
    /// Policy digest used for calculation.
    /// </summary>
    public string PolicyDigest { get; }
    /// <summary>
    /// Calculator version for reproducibility.
    /// </summary>
    public string CalculatorVersion { get; }
    /// <summary>
    /// Calculation timestamp (UTC).
    /// </summary>
    public DateTimeOffset CalculatedAt { get; }
    /// <summary>
    /// Flags applied during scoring, trimmed, deduplicated case-insensitively,
    /// and sorted ordinally.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<string> AppliedFlags { get; }
    /// <summary>
    /// Guardrails applied during scoring.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public VerdictAppliedGuardrails? Guardrails { get; }
    /// <summary>
    /// Digest of inputs for verification.
    /// </summary>
    public string InputDigest { get; }
    // Trims, sorts (ordinal), and dedupes flags deterministically.
    private static ImmutableArray<string> NormalizeFlags(IEnumerable<string>? flags)
    {
        if (flags is null)
        {
            return [];
        }
        var sorted = flags
            .Select(static f => f?.Trim())
            .Where(static f => !string.IsNullOrEmpty(f))
            .Select(static f => f!)
            .OrderBy(static f => f, StringComparer.Ordinal);
        // Fix: the previous implementation called Distinct() *after* OrderBy().
        // Enumerable.Distinct is documented to return an unordered sequence, so
        // the sorted order reaching the digest rested on an implementation
        // detail. Dedupe explicitly while walking the sorted sequence: the
        // first (ordinal-smallest) case variant survives, as before.
        var seen = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
        var builder = ImmutableArray.CreateBuilder<string>();
        foreach (var flag in sorted)
        {
            if (seen.Add(flag))
            {
                builder.Add(flag);
            }
        }
        return builder.ToImmutable();
    }
    // Canonical hash of the six normalized inputs and their weights.
    // WARNING: the anonymous-object shape and member order feed the digest —
    // changing them invalidates previously recorded InputDigest values.
    private string ComputeInputDigest()
    {
        var canonical = new
        {
            rch = Inputs.Reachability,
            rts = Inputs.Runtime,
            bkp = Inputs.Backport,
            xpl = Inputs.Exploit,
            src = Inputs.SourceTrust,
            mit = Inputs.Mitigation,
            w_rch = Weights.Reachability,
            w_rts = Weights.Runtime,
            w_bkp = Weights.Backport,
            w_xpl = Weights.Exploit,
            w_src = Weights.SourceTrust,
            w_mit = Weights.Mitigation
        };
        var json = JsonSerializer.Serialize(canonical, ProvenanceJsonOptions.Default);
        var hash = SHA256.HashData(Encoding.UTF8.GetBytes(json));
        return Convert.ToHexStringLower(hash);
    }
    /// <summary>
    /// Creates a ProvenanceScoreNode from a VerdictEvidenceWeightedScore.
    /// Falls back to placeholder inputs/weights when the EWS or its proof is
    /// absent so a chain can still be recorded.
    /// </summary>
    public static ProvenanceScoreNode FromVerdictEws(VerdictEvidenceWeightedScore? ews, string findingId)
    {
        if (ews is null)
        {
            // No EWS - create a placeholder node
            return new ProvenanceScoreNode(
                finalScore: 0,
                bucket: "Unknown",
                inputs: new VerdictEvidenceInputs(0, 0, 0, 0, 0, 0),
                weights: new VerdictEvidenceWeights(0, 0, 0, 0, 0, 0),
                policyDigest: "none",
                calculatorVersion: "none",
                calculatedAt: DateTimeOffset.UtcNow
            );
        }
        var proof = ews.Proof;
        if (proof is null)
        {
            // EWS without proof - use available data
            return new ProvenanceScoreNode(
                finalScore: ews.Score,
                bucket: ews.Bucket,
                inputs: new VerdictEvidenceInputs(0, 0, 0, 0, 0, 0),
                weights: new VerdictEvidenceWeights(0, 0, 0, 0, 0, 0),
                policyDigest: ews.PolicyDigest ?? "unknown",
                calculatorVersion: "unknown",
                calculatedAt: ews.CalculatedAt ?? DateTimeOffset.UtcNow,
                appliedFlags: ews.Flags,
                guardrails: ews.Guardrails
            );
        }
        // Full proof available: carry through the exact calculation metadata.
        return new ProvenanceScoreNode(
            finalScore: ews.Score,
            bucket: ews.Bucket,
            inputs: proof.Inputs,
            weights: proof.Weights,
            policyDigest: proof.PolicyDigest,
            calculatorVersion: proof.CalculatorVersion,
            calculatedAt: proof.CalculatedAt,
            appliedFlags: ews.Flags,
            guardrails: ews.Guardrails
        );
    }
}
/// <summary>
/// Reference to the final policy verdict.
/// </summary>
public sealed record ProvenanceVerdictRef
{
    /// <summary>
    /// Creates a new ProvenanceVerdictRef.
    /// </summary>
    /// <param name="status">Required verdict status.</param>
    /// <param name="severity">Required final severity.</param>
    /// <param name="matchedRuleName">Required name of the matched policy rule.</param>
    /// <param name="matchedRulePriority">Priority of the matched rule.</param>
    /// <param name="verdictDigest">Required content digest of the verdict.</param>
    /// <param name="evaluatedAt">Evaluation timestamp.</param>
    /// <param name="rationale">Optional human-readable rationale.</param>
    /// <exception cref="ArgumentNullException">When a required string trims to nothing.</exception>
    public ProvenanceVerdictRef(
        string status,
        string severity,
        string matchedRuleName,
        int matchedRulePriority,
        string verdictDigest,
        DateTimeOffset evaluatedAt,
        string? rationale = null)
    {
        Status = Validation.TrimToNull(status) ?? throw new ArgumentNullException(nameof(status));
        Severity = Validation.TrimToNull(severity) ?? throw new ArgumentNullException(nameof(severity));
        MatchedRuleName = Validation.TrimToNull(matchedRuleName) ?? throw new ArgumentNullException(nameof(matchedRuleName));
        MatchedRulePriority = matchedRulePriority;
        VerdictDigest = Validation.TrimToNull(verdictDigest) ?? throw new ArgumentNullException(nameof(verdictDigest));
        EvaluatedAt = evaluatedAt;
        Rationale = Validation.TrimToNull(rationale);
    }
    /// <summary>
    /// Verdict status (affected, not_affected, fixed, etc.).
    /// </summary>
    public string Status { get; }
    /// <summary>
    /// Final severity determination.
    /// </summary>
    public string Severity { get; }
    /// <summary>
    /// Name of the policy rule that matched.
    /// </summary>
    public string MatchedRuleName { get; }
    /// <summary>
    /// Priority of the matched rule.
    /// </summary>
    public int MatchedRulePriority { get; }
    /// <summary>
    /// Content digest of the verdict for verification (lowercase SHA-256 hex).
    /// </summary>
    public string VerdictDigest { get; }
    /// <summary>
    /// Evaluation timestamp (UTC).
    /// </summary>
    public DateTimeOffset EvaluatedAt { get; }
    /// <summary>
    /// Human-readable rationale (optional).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? Rationale { get; }
    /// <summary>
    /// Creates a ProvenanceVerdictRef from a VerdictPredicate, deriving the
    /// verdict digest from a canonical projection of the predicate's key fields.
    /// </summary>
    /// <exception cref="ArgumentNullException">When <paramref name="predicate"/> is null.</exception>
    public static ProvenanceVerdictRef FromVerdictPredicate(VerdictPredicate predicate)
    {
        ArgumentNullException.ThrowIfNull(predicate);
        // Compute verdict digest from key fields
        // WARNING: the member shape/order of this anonymous object feeds the
        // digest — changing it invalidates previously recorded digests.
        var canonical = new
        {
            tenant_id = predicate.TenantId,
            policy_id = predicate.PolicyId,
            policy_version = predicate.PolicyVersion,
            finding_id = predicate.FindingId,
            status = predicate.Verdict.Status,
            severity = predicate.Verdict.Severity,
            score = predicate.Verdict.Score,
            evaluated_at = predicate.EvaluatedAt.ToUniversalTime().ToString("O")
        };
        var json = JsonSerializer.Serialize(canonical, ProvenanceJsonOptions.Default);
        var hash = SHA256.HashData(Encoding.UTF8.GetBytes(json));
        var verdictDigest = Convert.ToHexStringLower(hash);
        // Get matched rule name from rule chain
        // NOTE(review): assumes the first RuleChain entry is the decisive rule —
        // confirm the chain is ordered with the matching rule first.
        var matchedRule = predicate.RuleChain.FirstOrDefault();
        var matchedRuleName = matchedRule?.RuleId ?? "default";
        return new ProvenanceVerdictRef(
            status: predicate.Verdict.Status,
            severity: predicate.Verdict.Severity,
            matchedRuleName: matchedRuleName,
            matchedRulePriority: 0, // Priority not directly available from predicate
            verdictDigest: verdictDigest,
            evaluatedAt: predicate.EvaluatedAt,
            rationale: predicate.Verdict.Rationale
        );
    }
}
/// <summary>
/// Shared JSON serializer options used for canonical provenance hashing:
/// snake_case property names, compact output, nulls omitted.
/// </summary>
internal static class ProvenanceJsonOptions
{
    private static readonly JsonSerializerOptions s_default = CreateDefault();

    /// <summary>
    /// The single shared options instance (reused so serializer metadata is cached).
    /// </summary>
    public static JsonSerializerOptions Default => s_default;

    private static JsonSerializerOptions CreateDefault()
    {
        return new JsonSerializerOptions
        {
            PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseLower,
            WriteIndented = false,
            DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull
        };
    }
}

View File

@@ -0,0 +1,237 @@
// -----------------------------------------------------------------------------
// ScoringDeterminismVerifier.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-029
// Description: Scoring determinism verification for attestation verification
// -----------------------------------------------------------------------------
using Microsoft.Extensions.Logging;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Attestation;
/// <summary>
/// Outcome of a scoring determinism check.
/// </summary>
public sealed record ScoringVerificationResult
{
    /// <summary>
    /// True when the recalculated score matched the attested one (or the check
    /// was legitimately skipped).
    /// </summary>
    public required bool IsValid { get; init; }
    /// <summary>
    /// The score carried by the attested verdict.
    /// </summary>
    public required int AttestedScore { get; init; }
    /// <summary>
    /// The score recomputed from the proof inputs.
    /// </summary>
    public required int RecalculatedScore { get; init; }
    /// <summary>
    /// Absolute gap between attested and recalculated scores (0 when valid).
    /// </summary>
    public int Difference => Math.Abs(AttestedScore - RecalculatedScore);
    /// <summary>
    /// Failure detail, or null when verification passed or was skipped.
    /// </summary>
    public string? Error { get; init; }
    /// <summary>
    /// Builds a passing result where both scores agree.
    /// </summary>
    public static ScoringVerificationResult Success(int score)
    {
        return new ScoringVerificationResult
        {
            IsValid = true,
            AttestedScore = score,
            RecalculatedScore = score,
            Error = null
        };
    }
    /// <summary>
    /// Builds a failing result for a score disagreement.
    /// </summary>
    public static ScoringVerificationResult ScoreMismatch(int attested, int recalculated)
    {
        var diff = Math.Abs(attested - recalculated);
        return new ScoringVerificationResult
        {
            IsValid = false,
            AttestedScore = attested,
            RecalculatedScore = recalculated,
            Error = $"Score mismatch: attested={attested}, recalculated={recalculated}, diff={diff}"
        };
    }
    /// <summary>
    /// Builds a failing result when the verdict carried no proof to replay.
    /// </summary>
    public static ScoringVerificationResult MissingProof(int attestedScore)
    {
        return new ScoringVerificationResult
        {
            IsValid = false,
            AttestedScore = attestedScore,
            RecalculatedScore = 0,
            Error = "No scoring proof available for verification"
        };
    }
    /// <summary>
    /// Builds a benign result for verdicts that carry no EWS at all.
    /// </summary>
    public static ScoringVerificationResult Skipped()
    {
        return new ScoringVerificationResult
        {
            IsValid = true,
            AttestedScore = 0,
            RecalculatedScore = 0,
            Error = null
        };
    }
}
/// <summary>
/// Interface for scoring determinism verification: recomputes an
/// evidence-weighted score from its attached proof and compares it to the
/// attested value.
/// </summary>
public interface IScoringDeterminismVerifier
{
    /// <summary>
    /// Verifies that the attested score can be reproduced from the proof.
    /// </summary>
    /// <param name="ews">The attested evidence-weighted score; null yields a skipped result.</param>
    /// <returns>Verification result (skipped when no EWS, failed when no proof).</returns>
    ScoringVerificationResult Verify(VerdictEvidenceWeightedScore? ews);
    /// <summary>
    /// Verifies that a verdict predicate's score is deterministically reproducible.
    /// </summary>
    /// <param name="predicate">The verdict predicate to verify; null yields a skipped result.</param>
    /// <returns>Verification result.</returns>
    ScoringVerificationResult VerifyPredicate(VerdictPredicate? predicate);
}
/// <summary>
/// Verifies scoring determinism by rebuilding the calculator inputs, weights,
/// and policy from the attested proof and recalculating the score.
/// </summary>
public sealed class ScoringDeterminismVerifier : IScoringDeterminismVerifier
{
    private readonly IEvidenceWeightedScoreCalculator _calculator;
    private readonly ILogger<ScoringDeterminismVerifier> _logger;
    /// <summary>
    /// Creates a new ScoringDeterminismVerifier.
    /// </summary>
    /// <param name="calculator">Calculator used to replay the score; required.</param>
    /// <param name="logger">Logger; required.</param>
    /// <exception cref="ArgumentNullException">When either dependency is null.</exception>
    public ScoringDeterminismVerifier(
        IEvidenceWeightedScoreCalculator calculator,
        ILogger<ScoringDeterminismVerifier> logger)
    {
        _calculator = calculator ?? throw new ArgumentNullException(nameof(calculator));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }
    /// <inheritdoc />
    public ScoringVerificationResult Verify(VerdictEvidenceWeightedScore? ews)
    {
        // No EWS at all: nothing to verify — treated as a pass (Skipped).
        if (ews is null)
        {
            _logger.LogDebug("No EWS present in verdict, skipping determinism verification");
            return ScoringVerificationResult.Skipped();
        }
        // EWS without a proof cannot be replayed: treated as a failure.
        if (ews.Proof is null)
        {
            _logger.LogWarning(
                "EWS present but no proof available for determinism verification (score={Score})",
                ews.Score);
            return ScoringVerificationResult.MissingProof(ews.Score);
        }
        try
        {
            // Reconstruct inputs from proof
            var input = new EvidenceWeightedScoreInput
            {
                FindingId = "verification", // Placeholder - not used in calculation
                Rch = ews.Proof.Inputs.Reachability,
                Rts = ews.Proof.Inputs.Runtime,
                Bkp = ews.Proof.Inputs.Backport,
                Xpl = ews.Proof.Inputs.Exploit,
                Src = ews.Proof.Inputs.SourceTrust,
                Mit = ews.Proof.Inputs.Mitigation,
            };
            // Reconstruct weights from proof
            var weights = new EvidenceWeights
            {
                Rch = ews.Proof.Weights.Reachability,
                Rts = ews.Proof.Weights.Runtime,
                Bkp = ews.Proof.Weights.Backport,
                Xpl = ews.Proof.Weights.Exploit,
                Src = ews.Proof.Weights.SourceTrust,
                Mit = ews.Proof.Weights.Mitigation,
            };
            // Create policy with the proof weights
            // NOTE(review): Version/Profile are fixed placeholders here;
            // presumably the calculator consumes only Weights — confirm these
            // fields cannot influence the recalculated score.
            var policy = new EvidenceWeightPolicy
            {
                Version = "ews.v1",
                Profile = "verification",
                Weights = weights,
            };
            // Recalculate
            var result = _calculator.Calculate(input, policy);
            // Compare: exact integer equality is required for determinism.
            if (result.Score == ews.Score)
            {
                _logger.LogDebug(
                    "Scoring determinism verified: score={Score}",
                    ews.Score);
                return ScoringVerificationResult.Success(ews.Score);
            }
            else
            {
                _logger.LogWarning(
                    "Scoring determinism failed: attested={Attested}, recalculated={Recalculated}",
                    ews.Score,
                    result.Score);
                return ScoringVerificationResult.ScoreMismatch(ews.Score, result.Score);
            }
        }
        catch (Exception ex)
        {
            // Any replay error counts as a failed verification, never a crash:
            // callers need a result object they can attach to the attestation.
            _logger.LogError(ex, "Error during scoring determinism verification");
            return new ScoringVerificationResult
            {
                IsValid = false,
                AttestedScore = ews.Score,
                RecalculatedScore = 0,
                Error = $"Verification error: {ex.Message}"
            };
        }
    }
    /// <inheritdoc />
    public ScoringVerificationResult VerifyPredicate(VerdictPredicate? predicate)
    {
        if (predicate is null)
        {
            _logger.LogDebug("No predicate provided, skipping determinism verification");
            return ScoringVerificationResult.Skipped();
        }
        // Delegate to Verify with the predicate's embedded EWS (may be null).
        return Verify(predicate.EvidenceWeightedScore);
    }
}
/// <summary>
/// Factory for creating scoring determinism verifiers.
/// </summary>
public static class ScoringDeterminismVerifierFactory
{
    /// <summary>
    /// Builds a verifier wired to the default <see cref="EvidenceWeightedScoreCalculator"/>.
    /// </summary>
    /// <param name="logger">Logger passed through to the verifier.</param>
    public static IScoringDeterminismVerifier Create(ILogger<ScoringDeterminismVerifier> logger)
        => new ScoringDeterminismVerifier(new EvidenceWeightedScoreCalculator(), logger);
}

View File

@@ -0,0 +1,266 @@
// -----------------------------------------------------------------------------
// VerdictBudgetCheck.cs
// Sprint: SPRINT_8200_0001_0006_budget_threshold_attestation
// Tasks: BUDGET-8200-006, BUDGET-8200-007
// Description: Budget check attestation data for verdict predicates
// -----------------------------------------------------------------------------
using System.Collections.Immutable;
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
using System.Text.Json.Serialization;
namespace StellaOps.Policy.Engine.Attestation;
/// <summary>
/// Budget check information for verdict attestation.
/// Captures the budget configuration and evaluation result at decision time.
/// </summary>
public sealed record VerdictBudgetCheck
{
    // Cached: JsonSerializerOptions is designed to be created once and reused —
    // allocating a fresh instance per ComputeConfigHash call discarded the
    // serializer's cached metadata on every hash.
    private static readonly JsonSerializerOptions ConfigHashOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
        WriteIndented = false,
        DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull
    };
    /// <summary>
    /// Creates a new VerdictBudgetCheck.
    /// </summary>
    /// <param name="environment">Required environment name.</param>
    /// <param name="config">Required budget configuration applied.</param>
    /// <param name="actualCounts">Required observed counts.</param>
    /// <param name="result">Required check result (pass, warn, fail).</param>
    /// <param name="configHash">Required configuration hash (sha256:...).</param>
    /// <param name="evaluatedAt">Evaluation timestamp.</param>
    /// <param name="violations">Optional limit violations.</param>
    /// <exception cref="ArgumentNullException">When a required argument is null/blank.</exception>
    public VerdictBudgetCheck(
        string environment,
        VerdictBudgetConfig config,
        VerdictBudgetActualCounts actualCounts,
        string result,
        string configHash,
        DateTimeOffset evaluatedAt,
        IEnumerable<VerdictBudgetViolation>? violations = null)
    {
        Environment = Validation.TrimToNull(environment) ?? throw new ArgumentNullException(nameof(environment));
        Config = config ?? throw new ArgumentNullException(nameof(config));
        ActualCounts = actualCounts ?? throw new ArgumentNullException(nameof(actualCounts));
        Result = Validation.TrimToNull(result) ?? throw new ArgumentNullException(nameof(result));
        ConfigHash = Validation.TrimToNull(configHash) ?? throw new ArgumentNullException(nameof(configHash));
        EvaluatedAt = evaluatedAt;
        Violations = NormalizeViolations(violations);
    }
    /// <summary>
    /// Environment for which the budget was evaluated.
    /// </summary>
    public string Environment { get; }
    /// <summary>
    /// Budget configuration that was applied.
    /// </summary>
    public VerdictBudgetConfig Config { get; }
    /// <summary>
    /// Actual counts observed at evaluation time.
    /// </summary>
    public VerdictBudgetActualCounts ActualCounts { get; }
    /// <summary>
    /// Budget check result: pass, warn, fail.
    /// </summary>
    public string Result { get; }
    /// <summary>
    /// SHA-256 hash of budget configuration for determinism proof.
    /// Format: sha256:{64 hex characters}
    /// </summary>
    public string ConfigHash { get; }
    /// <summary>
    /// Timestamp when the budget was evaluated.
    /// </summary>
    public DateTimeOffset EvaluatedAt { get; }
    /// <summary>
    /// Violations if any limits were exceeded; sorted by type then reason.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<VerdictBudgetViolation> Violations { get; }
    /// <summary>
    /// Computes a deterministic hash of a budget configuration.
    /// </summary>
    /// <exception cref="ArgumentNullException">When <paramref name="config"/> is null.</exception>
    public static string ComputeConfigHash(VerdictBudgetConfig config)
    {
        ArgumentNullException.ThrowIfNull(config);
        // Canonical serialization (camelCase, compact, nulls omitted) keeps the
        // hash stable across processes; options are cached above.
        var json = JsonSerializer.Serialize(config, ConfigHashOptions);
        var bytes = Encoding.UTF8.GetBytes(json);
        var hash = SHA256.HashData(bytes);
        // ToHexStringLower produces the same text as ToHexString + ToLowerInvariant,
        // and matches the digest formatting used by the provenance chain types.
        return $"sha256:{Convert.ToHexStringLower(hash)}";
    }
    // Drops null entries and sorts (type, reason) ordinally so violation order
    // is deterministic regardless of evaluation order.
    private static ImmutableArray<VerdictBudgetViolation> NormalizeViolations(
        IEnumerable<VerdictBudgetViolation>? violations)
    {
        if (violations is null)
        {
            return [];
        }
        return violations
            .Where(static v => v is not null)
            .OrderBy(static v => v.Type, StringComparer.Ordinal)
            .ThenBy(static v => v.Reason ?? string.Empty, StringComparer.Ordinal)
            .ToImmutableArray();
    }
}
/// <summary>
/// Budget configuration that was applied during evaluation.
/// </summary>
public sealed record VerdictBudgetConfig
{
    /// <summary>
    /// Creates a new VerdictBudgetConfig.
    /// </summary>
    /// <param name="maxUnknownCount">Maximum number of unknowns allowed.</param>
    /// <param name="maxCumulativeUncertainty">Maximum cumulative uncertainty allowed.</param>
    /// <param name="action">Action when exceeded; blank falls back to "warn".</param>
    /// <param name="reasonLimits">Optional per-reason-code limits.</param>
    public VerdictBudgetConfig(
        int maxUnknownCount,
        double maxCumulativeUncertainty,
        string action,
        IReadOnlyDictionary<string, int>? reasonLimits = null)
    {
        MaxUnknownCount = maxUnknownCount;
        MaxCumulativeUncertainty = maxCumulativeUncertainty;
        // A missing or blank action degrades to the non-blocking "warn" mode.
        Action = Validation.TrimToNull(action) ?? "warn";
        ReasonLimits = NormalizeReasonLimits(reasonLimits);
    }
    /// <summary>
    /// Maximum number of unknowns allowed.
    /// </summary>
    public int MaxUnknownCount { get; }
    /// <summary>
    /// Maximum cumulative uncertainty score allowed.
    /// </summary>
    public double MaxCumulativeUncertainty { get; }
    /// <summary>
    /// Action to take when budget is exceeded: warn, block.
    /// </summary>
    public string Action { get; }
    /// <summary>
    /// Per-reason code limits (keys trimmed, ordinal-sorted).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableSortedDictionary<string, int> ReasonLimits { get; }
    // Trims keys, drops blank ones, and sorts ordinally for determinism.
    private static ImmutableSortedDictionary<string, int> NormalizeReasonLimits(
        IReadOnlyDictionary<string, int>? limits)
    {
        if (limits is not { Count: > 0 })
        {
            return ImmutableSortedDictionary<string, int>.Empty;
        }
        var builder = ImmutableSortedDictionary.CreateBuilder<string, int>(StringComparer.Ordinal);
        foreach (var (key, value) in limits)
        {
            if (!string.IsNullOrWhiteSpace(key))
            {
                builder.Add(key.Trim(), value);
            }
        }
        return builder.ToImmutable();
    }
}
/// <summary>
/// Actual counts observed at evaluation time.
/// </summary>
public sealed record VerdictBudgetActualCounts
{
    /// <summary>
    /// Creates a new VerdictBudgetActualCounts.
    /// </summary>
    /// <param name="total">Total number of unknowns.</param>
    /// <param name="cumulativeUncertainty">Cumulative uncertainty score.</param>
    /// <param name="byReason">Optional breakdown by reason code.</param>
    public VerdictBudgetActualCounts(
        int total,
        double cumulativeUncertainty,
        IReadOnlyDictionary<string, int>? byReason = null)
    {
        Total = total;
        CumulativeUncertainty = cumulativeUncertainty;
        ByReason = NormalizeByReason(byReason);
    }
    /// <summary>
    /// Total number of unknowns.
    /// </summary>
    public int Total { get; }
    /// <summary>
    /// Cumulative uncertainty score across all unknowns.
    /// </summary>
    public double CumulativeUncertainty { get; }
    /// <summary>
    /// Breakdown by reason code (keys trimmed, ordinal-sorted).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableSortedDictionary<string, int> ByReason { get; }
    // Trims keys, drops blank ones, and sorts ordinally for determinism.
    private static ImmutableSortedDictionary<string, int> NormalizeByReason(
        IReadOnlyDictionary<string, int>? byReason)
    {
        if (byReason is not { Count: > 0 })
        {
            return ImmutableSortedDictionary<string, int>.Empty;
        }
        var builder = ImmutableSortedDictionary.CreateBuilder<string, int>(StringComparer.Ordinal);
        foreach (var (key, value) in byReason)
        {
            if (!string.IsNullOrWhiteSpace(key))
            {
                builder.Add(key.Trim(), value);
            }
        }
        return builder.ToImmutable();
    }
}
/// <summary>
/// Represents a budget limit violation.
/// </summary>
public sealed record VerdictBudgetViolation
{
    /// <summary>
    /// Creates a new VerdictBudgetViolation.
    /// </summary>
    /// <param name="type">Violation category: total, cumulative, or reason.</param>
    /// <param name="limit">The configured limit that was exceeded.</param>
    /// <param name="actual">The observed value that exceeded the limit.</param>
    /// <param name="reason">Optional reason code for per-reason violations.</param>
    /// <exception cref="ArgumentNullException">When <paramref name="type"/> trims to nothing.</exception>
    public VerdictBudgetViolation(
        string type,
        int limit,
        int actual,
        string? reason = null)
    {
        var normalizedType = Validation.TrimToNull(type);
        Type = normalizedType ?? throw new ArgumentNullException(nameof(type));
        Limit = limit;
        Actual = actual;
        Reason = Validation.TrimToNull(reason);
    }
    /// <summary>
    /// Type of violation: total, cumulative, reason.
    /// </summary>
    public string Type { get; }
    /// <summary>
    /// The limit that was exceeded.
    /// </summary>
    public int Limit { get; }
    /// <summary>
    /// The actual value that exceeded the limit.
    /// </summary>
    public int Actual { get; }
    /// <summary>
    /// Reason code, if this is a per-reason violation.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? Reason { get; }
}

View File

@@ -0,0 +1,521 @@
// -----------------------------------------------------------------------------
// VerdictEvidenceWeightedScore.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-025, PINT-8200-028
// Description: Serializable EWS decomposition and ScoringProof for verdict attestation
// -----------------------------------------------------------------------------
using System.Collections.Immutable;
using System.Text.Json.Serialization;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Attestation;
/// <summary>
/// Evidence-Weighted Score (EWS) decomposition for verdict serialization.
/// Includes score, bucket, dimension breakdown, flags, and calculation metadata.
/// </summary>
public sealed record VerdictEvidenceWeightedScore
{
    /// <summary>
    /// Creates a new VerdictEvidenceWeightedScore from its components.
    /// </summary>
    /// <param name="score">Final score in [0, 100].</param>
    /// <param name="bucket">Triage bucket name (e.g. "ActNow").</param>
    /// <param name="breakdown">Optional per-dimension contributions.</param>
    /// <param name="flags">Optional badge flags.</param>
    /// <param name="explanations">Optional human-readable explanations.</param>
    /// <param name="policyDigest">Optional policy digest for determinism verification.</param>
    /// <param name="calculatedAt">Optional calculation timestamp (UTC).</param>
    /// <param name="guardrails">Optional applied-guardrails record.</param>
    /// <param name="proof">Optional scoring proof for reproducibility.</param>
    /// <exception cref="ArgumentOutOfRangeException">When <paramref name="score"/> is outside [0, 100].</exception>
    /// <exception cref="ArgumentNullException">When <paramref name="bucket"/> trims to nothing.</exception>
    public VerdictEvidenceWeightedScore(
        int score,
        string bucket,
        IEnumerable<VerdictDimensionContribution>? breakdown = null,
        IEnumerable<string>? flags = null,
        IEnumerable<string>? explanations = null,
        string? policyDigest = null,
        DateTimeOffset? calculatedAt = null,
        VerdictAppliedGuardrails? guardrails = null,
        VerdictScoringProof? proof = null)
    {
        Score = score is < 0 or > 100
            ? throw new ArgumentOutOfRangeException(nameof(score), score, "Score must be between 0 and 100.")
            : score;
        Bucket = Validation.TrimToNull(bucket) ?? throw new ArgumentNullException(nameof(bucket));
        Breakdown = NormalizeBreakdown(breakdown);
        Flags = NormalizeFlags(flags);
        Explanations = NormalizeExplanations(explanations);
        PolicyDigest = Validation.TrimToNull(policyDigest);
        CalculatedAt = calculatedAt;
        Guardrails = guardrails;
        Proof = proof;
    }
    /// <summary>
    /// Final score [0, 100]. Higher = more evidence of real risk.
    /// </summary>
    public int Score { get; }
    /// <summary>
    /// Score bucket for quick triage (ActNow, ScheduleNext, Investigate, Watchlist).
    /// </summary>
    public string Bucket { get; }
    /// <summary>
    /// Per-dimension score contributions (breakdown), ordered by contribution magnitude.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<VerdictDimensionContribution> Breakdown { get; }
    /// <summary>
    /// Active flags for badges (e.g., "live-signal", "proven-path", "vendor-na").
    /// Trimmed, de-duplicated case-insensitively, and ordinally sorted.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<string> Flags { get; }
    /// <summary>
    /// Human-readable explanations of top contributing factors (input order preserved).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public ImmutableArray<string> Explanations { get; }
    /// <summary>
    /// Policy digest for determinism verification.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? PolicyDigest { get; }
    /// <summary>
    /// Calculation timestamp (UTC ISO-8601).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public DateTimeOffset? CalculatedAt { get; }
    /// <summary>
    /// Applied guardrails (caps/floors) during calculation.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public VerdictAppliedGuardrails? Guardrails { get; }
    /// <summary>
    /// Scoring proof for reproducibility verification.
    /// Contains raw inputs and weights to allow deterministic recalculation.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public VerdictScoringProof? Proof { get; }
    /// <summary>
    /// Creates a VerdictEvidenceWeightedScore from an EvidenceWeightedScoreResult.
    /// Returns null when <paramref name="ewsResult"/> is null.
    /// </summary>
    public static VerdictEvidenceWeightedScore? FromEwsResult(EvidenceWeightedScoreResult? ewsResult)
    {
        if (ewsResult is null)
        {
            return null;
        }
        return new VerdictEvidenceWeightedScore(
            score: ewsResult.Score,
            bucket: ewsResult.Bucket.ToString(),
            breakdown: ewsResult.Breakdown.Select(d => VerdictDimensionContribution.FromDimensionContribution(d)),
            flags: ewsResult.Flags,
            explanations: ewsResult.Explanations,
            policyDigest: ewsResult.PolicyDigest,
            calculatedAt: ewsResult.CalculatedAt,
            guardrails: VerdictAppliedGuardrails.FromAppliedGuardrails(ewsResult.Caps),
            proof: VerdictScoringProof.FromEwsResult(ewsResult)
        );
    }
    // Largest absolute contributions first; LINQ OrderByDescending is stable,
    // so ties keep their input order and the result is deterministic.
    private static ImmutableArray<VerdictDimensionContribution> NormalizeBreakdown(
        IEnumerable<VerdictDimensionContribution>? breakdown)
    {
        if (breakdown is null)
        {
            return [];
        }
        return breakdown
            .Where(static b => b is not null)
            .OrderByDescending(static b => Math.Abs(b.Contribution))
            .ToImmutableArray();
    }
    // Trim, drop empties, sort ordinally, then de-duplicate case-insensitively
    // keeping the first (ordinally smallest) casing. The previous implementation
    // applied Enumerable.Distinct AFTER OrderBy, but Distinct's result ordering
    // is documented as unspecified — an explicit first-wins pass guarantees the
    // sorted, deterministic output this attestation format requires.
    private static ImmutableArray<string> NormalizeFlags(IEnumerable<string>? flags)
    {
        if (flags is null)
        {
            return [];
        }
        var seen = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
        var builder = ImmutableArray.CreateBuilder<string>();
        foreach (var flag in flags
            .Select(static f => f?.Trim())
            .Where(static f => !string.IsNullOrEmpty(f))
            .Select(static f => f!)
            .OrderBy(static f => f, StringComparer.Ordinal))
        {
            if (seen.Add(flag))
            {
                builder.Add(flag);
            }
        }
        return builder.ToImmutable();
    }
    // Explanations keep their input order (they are ranked upstream); only
    // trimming and empty-removal are applied.
    private static ImmutableArray<string> NormalizeExplanations(IEnumerable<string>? explanations)
    {
        if (explanations is null)
        {
            return [];
        }
        return explanations
            .Select(static e => e?.Trim())
            .Where(static e => !string.IsNullOrEmpty(e))
            .Select(static e => e!)
            .ToImmutableArray();
    }
}
/// <summary>
/// Per-dimension contribution to the evidence-weighted score.
/// </summary>
public sealed record VerdictDimensionContribution
{
    /// <summary>
    /// Creates a new VerdictDimensionContribution.
    /// </summary>
    /// <param name="dimension">Human-readable dimension name (e.g., "Reachability").</param>
    /// <param name="symbol">Short dimension symbol (RCH, RTS, BKP, XPL, SRC, MIT).</param>
    /// <param name="inputValue">Normalized input value in [0, 1].</param>
    /// <param name="weight">Weight applied to this dimension.</param>
    /// <param name="contribution">Signed contribution to the raw score.</param>
    /// <param name="isSubtractive">True for subtractive dimensions such as MIT.</param>
    /// <exception cref="ArgumentNullException">When dimension or symbol trims to nothing.</exception>
    public VerdictDimensionContribution(
        string dimension,
        string symbol,
        double inputValue,
        double weight,
        double contribution,
        bool isSubtractive = false)
    {
        var normalizedDimension = Validation.TrimToNull(dimension);
        Dimension = normalizedDimension ?? throw new ArgumentNullException(nameof(dimension));
        var normalizedSymbol = Validation.TrimToNull(symbol);
        Symbol = normalizedSymbol ?? throw new ArgumentNullException(nameof(symbol));
        InputValue = inputValue;
        Weight = weight;
        Contribution = contribution;
        IsSubtractive = isSubtractive;
    }
    /// <summary>
    /// Dimension name (e.g., "Reachability", "Runtime").
    /// </summary>
    public string Dimension { get; }
    /// <summary>
    /// Symbol (RCH, RTS, BKP, XPL, SRC, MIT).
    /// </summary>
    public string Symbol { get; }
    /// <summary>
    /// Normalized input value [0, 1].
    /// </summary>
    public double InputValue { get; }
    /// <summary>
    /// Weight applied to this dimension.
    /// </summary>
    public double Weight { get; }
    /// <summary>
    /// Contribution to raw score (weight * input, or negative for MIT).
    /// </summary>
    public double Contribution { get; }
    /// <summary>
    /// Whether this is a subtractive dimension (like MIT).
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public bool IsSubtractive { get; }
    /// <summary>
    /// Creates a VerdictDimensionContribution from a DimensionContribution.
    /// </summary>
    public static VerdictDimensionContribution FromDimensionContribution(DimensionContribution contribution)
    {
        ArgumentNullException.ThrowIfNull(contribution);
        return new VerdictDimensionContribution(
            contribution.Dimension,
            contribution.Symbol,
            contribution.InputValue,
            contribution.Weight,
            contribution.Contribution,
            contribution.IsSubtractive);
    }
}
/// <summary>
/// Record of applied guardrails during EWS calculation.
/// </summary>
public sealed record VerdictAppliedGuardrails
{
    /// <summary>
    /// Creates a new VerdictAppliedGuardrails.
    /// </summary>
    /// <param name="speculativeCap">Whether the speculative cap fired.</param>
    /// <param name="notAffectedCap">Whether the not-affected cap fired.</param>
    /// <param name="runtimeFloor">Whether the runtime floor fired.</param>
    /// <param name="originalScore">Score before guardrails were applied.</param>
    /// <param name="adjustedScore">Score after guardrails were applied.</param>
    public VerdictAppliedGuardrails(
        bool speculativeCap,
        bool notAffectedCap,
        bool runtimeFloor,
        int originalScore,
        int adjustedScore)
    {
        SpeculativeCap = speculativeCap;
        NotAffectedCap = notAffectedCap;
        RuntimeFloor = runtimeFloor;
        OriginalScore = originalScore;
        AdjustedScore = adjustedScore;
    }
    /// <summary>
    /// Whether the speculative cap was applied.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public bool SpeculativeCap { get; }
    /// <summary>
    /// Whether the not-affected cap was applied.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public bool NotAffectedCap { get; }
    /// <summary>
    /// Whether the runtime floor was applied.
    /// </summary>
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
    public bool RuntimeFloor { get; }
    /// <summary>
    /// Original score before guardrails.
    /// </summary>
    public int OriginalScore { get; }
    /// <summary>
    /// Score after guardrails.
    /// </summary>
    public int AdjustedScore { get; }
    /// <summary>
    /// Check if any guardrail was applied.
    /// </summary>
    [JsonIgnore]
    public bool AnyApplied => SpeculativeCap || NotAffectedCap || RuntimeFloor;
    /// <summary>
    /// Creates a VerdictAppliedGuardrails from an AppliedGuardrails.
    /// Returns null when the source is null or no guardrail actually fired,
    /// so the serialized verdict omits a no-op guardrails section.
    /// </summary>
    public static VerdictAppliedGuardrails? FromAppliedGuardrails(AppliedGuardrails? guardrails)
    {
        if (guardrails is not { AnyApplied: true })
        {
            return null;
        }
        return new VerdictAppliedGuardrails(
            guardrails.SpeculativeCap,
            guardrails.NotAffectedCap,
            guardrails.RuntimeFloor,
            guardrails.OriginalScore,
            guardrails.AdjustedScore);
    }
}
/// <summary>
/// Scoring proof for deterministic reproducibility verification.
/// Contains all inputs needed to recalculate and verify the score.
/// </summary>
public sealed record VerdictScoringProof
{
    /// <summary>
    /// Creates a new VerdictScoringProof.
    /// </summary>
    /// <param name="inputs">Normalized input values for each dimension.</param>
    /// <param name="weights">Weight values used for scoring.</param>
    /// <param name="policyDigest">SHA256 digest of the policy used.</param>
    /// <param name="calculatorVersion">Version string of the calculator.</param>
    /// <param name="calculatedAt">Calculation timestamp (UTC).</param>
    /// <exception cref="ArgumentNullException">When any required argument is null or blank.</exception>
    public VerdictScoringProof(
        VerdictEvidenceInputs inputs,
        VerdictEvidenceWeights weights,
        string policyDigest,
        string calculatorVersion,
        DateTimeOffset calculatedAt)
    {
        ArgumentNullException.ThrowIfNull(inputs);
        ArgumentNullException.ThrowIfNull(weights);
        Inputs = inputs;
        Weights = weights;
        PolicyDigest = Validation.TrimToNull(policyDigest) ?? throw new ArgumentNullException(nameof(policyDigest));
        CalculatorVersion = Validation.TrimToNull(calculatorVersion) ?? throw new ArgumentNullException(nameof(calculatorVersion));
        CalculatedAt = calculatedAt;
    }
    /// <summary>
    /// Normalized input values [0, 1] for each dimension.
    /// </summary>
    public VerdictEvidenceInputs Inputs { get; }
    /// <summary>
    /// Weight values used for scoring.
    /// </summary>
    public VerdictEvidenceWeights Weights { get; }
    /// <summary>
    /// Policy digest (SHA256) used for calculation.
    /// </summary>
    public string PolicyDigest { get; }
    /// <summary>
    /// Calculator version string for reproducibility.
    /// </summary>
    public string CalculatorVersion { get; }
    /// <summary>
    /// Calculation timestamp (UTC).
    /// </summary>
    public DateTimeOffset CalculatedAt { get; }
    /// <summary>
    /// Creates a VerdictScoringProof from an EvidenceWeightedScoreResult.
    /// Returns null when <paramref name="ewsResult"/> is null.
    /// </summary>
    public static VerdictScoringProof? FromEwsResult(EvidenceWeightedScoreResult? ewsResult)
        => ewsResult is null
            ? null
            : new VerdictScoringProof(
                VerdictEvidenceInputs.FromEvidenceInputValues(ewsResult.Inputs),
                VerdictEvidenceWeights.FromEvidenceWeights(ewsResult.Weights),
                ewsResult.PolicyDigest,
                "1.0.0", // TODO: Get from calculator metadata
                ewsResult.CalculatedAt);
}
/// <summary>
/// Normalized input values for scoring.
/// Serialized under the short dimension symbols (rch, rts, bkp, xpl, src, mit).
/// </summary>
public sealed record VerdictEvidenceInputs
{
    /// <summary>
    /// Creates a new VerdictEvidenceInputs.
    /// </summary>
    /// <param name="reachability">Reachability input [0, 1].</param>
    /// <param name="runtime">Runtime signal input [0, 1].</param>
    /// <param name="backport">Backport analysis input [0, 1].</param>
    /// <param name="exploit">Exploit evidence input [0, 1].</param>
    /// <param name="sourceTrust">Source trust input [0, 1].</param>
    /// <param name="mitigation">Mitigation factor input [0, 1].</param>
    public VerdictEvidenceInputs(
        double reachability,
        double runtime,
        double backport,
        double exploit,
        double sourceTrust,
        double mitigation)
    {
        Reachability = reachability;
        Runtime = runtime;
        Backport = backport;
        Exploit = exploit;
        SourceTrust = sourceTrust;
        Mitigation = mitigation;
    }
    /// <summary>Reachability input [0, 1].</summary>
    [JsonPropertyName("rch")]
    public double Reachability { get; }
    /// <summary>Runtime signal input [0, 1].</summary>
    [JsonPropertyName("rts")]
    public double Runtime { get; }
    /// <summary>Backport analysis input [0, 1].</summary>
    [JsonPropertyName("bkp")]
    public double Backport { get; }
    /// <summary>Exploit evidence input [0, 1].</summary>
    [JsonPropertyName("xpl")]
    public double Exploit { get; }
    /// <summary>Source trust input [0, 1].</summary>
    [JsonPropertyName("src")]
    public double SourceTrust { get; }
    /// <summary>Mitigation factor input [0, 1].</summary>
    [JsonPropertyName("mit")]
    public double Mitigation { get; }
    /// <summary>
    /// Creates from an EvidenceInputValues.
    /// </summary>
    public static VerdictEvidenceInputs FromEvidenceInputValues(EvidenceInputValues inputs)
    {
        ArgumentNullException.ThrowIfNull(inputs);
        return new VerdictEvidenceInputs(
            inputs.Rch,
            inputs.Rts,
            inputs.Bkp,
            inputs.Xpl,
            inputs.Src,
            inputs.Mit);
    }
}
/// <summary>
/// Weight values for scoring dimensions.
/// Serialized under the short dimension symbols (rch, rts, bkp, xpl, src, mit).
/// </summary>
public sealed record VerdictEvidenceWeights
{
    /// <summary>
    /// Creates a new VerdictEvidenceWeights.
    /// </summary>
    /// <param name="reachability">Reachability weight [0, 1].</param>
    /// <param name="runtime">Runtime signal weight [0, 1].</param>
    /// <param name="backport">Backport analysis weight [0, 1].</param>
    /// <param name="exploit">Exploit evidence weight [0, 1].</param>
    /// <param name="sourceTrust">Source trust weight [0, 1].</param>
    /// <param name="mitigation">Mitigation factor weight [0, 1].</param>
    public VerdictEvidenceWeights(
        double reachability,
        double runtime,
        double backport,
        double exploit,
        double sourceTrust,
        double mitigation)
    {
        Reachability = reachability;
        Runtime = runtime;
        Backport = backport;
        Exploit = exploit;
        SourceTrust = sourceTrust;
        Mitigation = mitigation;
    }
    /// <summary>Reachability weight [0, 1].</summary>
    [JsonPropertyName("rch")]
    public double Reachability { get; }
    /// <summary>Runtime signal weight [0, 1].</summary>
    [JsonPropertyName("rts")]
    public double Runtime { get; }
    /// <summary>Backport analysis weight [0, 1].</summary>
    [JsonPropertyName("bkp")]
    public double Backport { get; }
    /// <summary>Exploit evidence weight [0, 1].</summary>
    [JsonPropertyName("xpl")]
    public double Exploit { get; }
    /// <summary>Source trust weight [0, 1].</summary>
    [JsonPropertyName("src")]
    public double SourceTrust { get; }
    /// <summary>Mitigation factor weight [0, 1].</summary>
    [JsonPropertyName("mit")]
    public double Mitigation { get; }
    /// <summary>
    /// Creates from an EvidenceWeights.
    /// </summary>
    public static VerdictEvidenceWeights FromEvidenceWeights(EvidenceWeights weights)
    {
        ArgumentNullException.ThrowIfNull(weights);
        return new VerdictEvidenceWeights(
            weights.Rch,
            weights.Rts,
            weights.Bkp,
            weights.Xpl,
            weights.Src,
            weights.Mit);
    }
}

View File

@@ -23,6 +23,8 @@ public sealed record VerdictPredicate
IEnumerable<VerdictEvidence>? evidence = null,
IEnumerable<VerdictVexImpact>? vexImpacts = null,
VerdictReachability? reachability = null,
VerdictEvidenceWeightedScore? evidenceWeightedScore = null,
VerdictBudgetCheck? budgetCheck = null,
ImmutableSortedDictionary<string, string>? metadata = null)
{
Type = PredicateType;
@@ -47,6 +49,8 @@ public sealed record VerdictPredicate
Evidence = NormalizeEvidence(evidence);
VexImpacts = NormalizeVexImpacts(vexImpacts);
Reachability = reachability;
EvidenceWeightedScore = evidenceWeightedScore;
BudgetCheck = budgetCheck;
Metadata = NormalizeMetadata(metadata);
}
@@ -77,6 +81,19 @@ public sealed record VerdictPredicate
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public VerdictReachability? Reachability { get; }
/// <summary>
/// Evidence-weighted score decomposition for scoring transparency.
/// </summary>
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public VerdictEvidenceWeightedScore? EvidenceWeightedScore { get; }
/// <summary>
/// Budget check information for unknown budget enforcement.
/// Captures the budget configuration and result at decision time.
/// </summary>
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public VerdictBudgetCheck? BudgetCheck { get; }
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
public ImmutableSortedDictionary<string, string> Metadata { get; }

View File

@@ -76,6 +76,9 @@ public sealed class VerdictPredicateBuilder
// Extract reachability (if present in metadata)
var reachability = ExtractReachability(trace);
// Extract evidence-weighted score (if present)
var evidenceWeightedScore = VerdictEvidenceWeightedScore.FromEwsResult(trace.EvidenceWeightedScore);
// Build metadata with determinism hash
var metadata = BuildMetadata(trace, evidence);
@@ -91,6 +94,7 @@ public sealed class VerdictPredicateBuilder
evidence: evidence,
vexImpacts: vexImpacts,
reachability: reachability,
evidenceWeightedScore: evidenceWeightedScore,
metadata: metadata
);
}
@@ -249,6 +253,8 @@ public sealed class VerdictPredicateBuilder
evidence: evidence,
vexImpacts: null,
reachability: null,
evidenceWeightedScore: null,
budgetCheck: null,
metadata: null
);

View File

@@ -7,6 +7,7 @@ using StellaOps.Policy.Confidence.Models;
using StellaOps.Policy.Exceptions.Models;
using StellaOps.Policy.Unknowns.Models;
using StellaOps.PolicyDsl;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Evaluation;
@@ -128,7 +129,8 @@ internal sealed record PolicyEvaluationResult(
ConfidenceScore? Confidence,
PolicyFailureReason? FailureReason = null,
string? FailureMessage = null,
BudgetStatusSummary? UnknownBudgetStatus = null)
BudgetStatusSummary? UnknownBudgetStatus = null,
EvidenceWeightedScoreResult? EvidenceWeightedScore = null)
{
public static PolicyEvaluationResult CreateDefault(string? severity) => new(
Matched: false,
@@ -139,7 +141,8 @@ internal sealed record PolicyEvaluationResult(
Annotations: ImmutableDictionary<string, string>.Empty,
Warnings: ImmutableArray<string>.Empty,
AppliedException: null,
Confidence: null);
Confidence: null,
EvidenceWeightedScore: null);
}
internal enum PolicyFailureReason

View File

@@ -10,10 +10,15 @@ using StellaOps.Policy;
using StellaOps.Policy.Confidence.Configuration;
using StellaOps.Policy.Confidence.Models;
using StellaOps.Policy.Confidence.Services;
using StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
using StellaOps.Policy.Unknowns.Models;
using StellaOps.Policy.Unknowns.Services;
using StellaOps.PolicyDsl;
// Alias Confidence types to avoid ambiguity with EWS types
using ConfidenceReachabilityState = StellaOps.Policy.Confidence.Models.ReachabilityState;
using ConfidenceRuntimePosture = StellaOps.Policy.Confidence.Models.RuntimePosture;
namespace StellaOps.Policy.Engine.Evaluation;
/// <summary>
@@ -23,15 +28,18 @@ internal sealed class PolicyEvaluator
{
private readonly IConfidenceCalculator _confidenceCalculator;
private readonly IUnknownBudgetService? _budgetService;
private readonly IFindingScoreEnricher? _scoreEnricher;
public PolicyEvaluator(
IConfidenceCalculator? confidenceCalculator = null,
IUnknownBudgetService? budgetService = null)
IUnknownBudgetService? budgetService = null,
IFindingScoreEnricher? scoreEnricher = null)
{
_confidenceCalculator = confidenceCalculator
?? new ConfidenceCalculator(
new StaticOptionsMonitor<ConfidenceWeightOptions>(new ConfidenceWeightOptions()));
_budgetService = budgetService;
_scoreEnricher = scoreEnricher;
}
public PolicyEvaluationResult Evaluate(PolicyEvaluationRequest request)
@@ -46,7 +54,10 @@ internal sealed class PolicyEvaluator
throw new ArgumentNullException(nameof(request.Document));
}
var evaluator = new PolicyExpressionEvaluator(request.Context);
// Pre-compute EWS so it's available during rule evaluation for score-based rules
var precomputedScore = PrecomputeEvidenceWeightedScore(request.Context);
var evaluator = new PolicyExpressionEvaluator(request.Context, precomputedScore);
var orderedRules = request.Document.Rules
.Select(static (rule, index) => new { rule, index })
.OrderBy(x => x.rule.Priority)
@@ -85,13 +96,15 @@ internal sealed class PolicyEvaluator
var result = ApplyExceptions(request, baseResult);
var budgeted = ApplyUnknownBudget(request.Context, result);
return ApplyConfidence(request.Context, budgeted);
var withConfidence = ApplyConfidence(request.Context, budgeted);
return ApplyEvidenceWeightedScore(request.Context, withConfidence, precomputedScore);
}
var defaultResult = PolicyEvaluationResult.CreateDefault(request.Context.Severity.Normalized);
var defaultWithExceptions = ApplyExceptions(request, defaultResult);
var budgetedDefault = ApplyUnknownBudget(request.Context, defaultWithExceptions);
return ApplyConfidence(request.Context, budgetedDefault);
var defaultWithConfidence = ApplyConfidence(request.Context, budgetedDefault);
return ApplyEvidenceWeightedScore(request.Context, defaultWithConfidence, precomputedScore);
}
private static void ApplyAction(
@@ -513,6 +526,139 @@ internal sealed class PolicyEvaluator
return baseResult with { Confidence = confidence };
}
/// <summary>
/// Pre-computes the Evidence-Weighted Score before rule evaluation so it's available
/// for score-based policy rules (e.g., "when score >= 80 then block").
/// Returns null when no enricher is configured, it is disabled, enrichment reports
/// failure, or any exception occurs — scoring must never break evaluation.
/// </summary>
private global::StellaOps.Signals.EvidenceWeightedScore.EvidenceWeightedScoreResult? PrecomputeEvidenceWeightedScore(
    PolicyEvaluationContext context)
{
    // Skip if no enricher configured
    if (_scoreEnricher is null || !_scoreEnricher.IsEnabled)
    {
        return null;
    }
    try
    {
        // Generate finding ID from context
        var findingId = GenerateFindingIdFromContext(context);
        // Extract evidence from context. Metadata values are machine-written, so
        // they are parsed with the invariant culture: the previous culture-sensitive
        // TryParse calls could misread "0.97" on hosts with a ',' decimal separator.
        var evidence = context.ExtractEwsEvidence(
            findingId,
            epssScore: TryParseMetadataDouble(context, "epss.score"),
            epssPercentile: TryParseMetadataDouble(context, "epss.percentile"),
            isInKev: context.Advisory.Metadata.TryGetValue("kev.status", out var kevStatus)
                && kevStatus.Equals("true", StringComparison.OrdinalIgnoreCase),
            kevAddedDate: TryParseMetadataTimestamp(context, "kev.added"));
        // Calculate score synchronously
        var enrichmentResult = _scoreEnricher.Enrich(evidence);
        return enrichmentResult.IsSuccess ? enrichmentResult.Score : null;
    }
    catch
    {
        // Pre-computation should not fail the evaluation
        return null;
    }
}
/// <summary>
/// Reads an advisory metadata value and parses it as an invariant-culture double.
/// Returns null when the key is absent or the value does not parse.
/// </summary>
private static double? TryParseMetadataDouble(PolicyEvaluationContext context, string key)
    => context.Advisory.Metadata.TryGetValue(key, out var raw)
        && double.TryParse(raw, NumberStyles.Float, CultureInfo.InvariantCulture, out var value)
        ? value
        : null;
/// <summary>
/// Reads an advisory metadata value and parses it as an invariant-culture timestamp.
/// Returns null when the key is absent or the value does not parse.
/// </summary>
private static DateTimeOffset? TryParseMetadataTimestamp(PolicyEvaluationContext context, string key)
    => context.Advisory.Metadata.TryGetValue(key, out var raw)
        && DateTimeOffset.TryParse(raw, CultureInfo.InvariantCulture, DateTimeStyles.None, out var value)
        ? value
        : null;
/// <summary>
/// Generates a deterministic finding ID from context (without requiring result).
/// Prefers the advisory's CVE identifier from metadata; otherwise derives a
/// truncated SHA-256 hash of source, severity, and the evaluation timestamp.
/// </summary>
private static string GenerateFindingIdFromContext(PolicyEvaluationContext context)
{
// Missing source/severity fall back to "unknown" so the ID is always well-formed.
var source = context.Advisory.Source ?? "unknown";
var severity = context.Severity.Normalized ?? "unknown";
// Use advisory metadata CVE ID if available
if (context.Advisory.Metadata.TryGetValue("cve", out var cve) && !string.IsNullOrEmpty(cve))
{
return $"finding:{cve}:{source}";
}
// Fall back to deterministic hash
// NOTE(review): the hash input includes context.Now, so this fallback ID is only
// stable for evaluations that share the same timestamp — confirm that is intended
// given the "deterministic" contract stated in the summary.
var input = $"{source}|{severity}|{context.Now:O}";
// SHA-256 digest is 32 bytes; stackalloc avoids a heap allocation.
Span<byte> hash = stackalloc byte[32];
SHA256.HashData(Encoding.UTF8.GetBytes(input), hash);
// Lowercase hex, truncated to the first 16 chars (64 bits) for brevity.
return $"finding:sha256:{Convert.ToHexString(hash).ToLowerInvariant()[..16]}";
}
/// <summary>
/// Applies Evidence-Weighted Score enrichment if the enricher is available and enabled.
/// Uses pre-computed score if available to avoid recalculation.
/// On success, the score and bucket are also surfaced as "ews.score" / "ews.bucket"
/// annotations; on any failure the base result is returned unchanged.
/// </summary>
/// <param name="context">Evaluation context, used only when the score must be computed here.</param>
/// <param name="baseResult">Result to enrich.</param>
/// <param name="precomputedScore">Score computed before rule evaluation, if any.</param>
private PolicyEvaluationResult ApplyEvidenceWeightedScore(
PolicyEvaluationContext context,
PolicyEvaluationResult baseResult,
global::StellaOps.Signals.EvidenceWeightedScore.EvidenceWeightedScoreResult? precomputedScore = null)
{
// Use precomputed score if available
var score = precomputedScore;
// If no precomputed score and enricher is enabled, compute now
if (score is null && _scoreEnricher is not null && _scoreEnricher.IsEnabled)
{
score = PrecomputeEvidenceWeightedScore(context);
}
// Skip if no score available
if (score is null)
{
return baseResult;
}
try
{
// Add score to annotations for DSL access
// (invariant culture keeps the annotation value locale-independent)
var annotations = baseResult.Annotations.ToBuilder();
annotations["ews.score"] = score.Score.ToString("F2", CultureInfo.InvariantCulture);
annotations["ews.bucket"] = score.Bucket.ToString();
return baseResult with
{
EvidenceWeightedScore = score,
Annotations = annotations.ToImmutable()
};
}
catch
{
// Score enrichment should not fail the evaluation
// Return base result unchanged
return baseResult;
}
}
/// <summary>
/// Generates a deterministic finding ID from evaluation context.
/// Prefers the advisory's CVE identifier from metadata; otherwise derives a
/// truncated SHA-256 hash of source, severity, matched rule name, and timestamp.
/// </summary>
private static string GenerateFindingId(PolicyEvaluationContext context, PolicyEvaluationResult result)
{
// Missing values fall back to fixed defaults so the ID is always well-formed.
var source = context.Advisory.Source ?? "unknown";
var severity = context.Severity.Normalized ?? "unknown";
var ruleName = result.RuleName ?? "default";
// Use advisory metadata CVE ID if available
if (context.Advisory.Metadata.TryGetValue("cve", out var cve) && !string.IsNullOrEmpty(cve))
{
return $"finding:{cve}:{source}";
}
// Fall back to deterministic hash
// NOTE(review): the hash input includes context.Now, so this fallback ID is only
// stable for evaluations sharing the same timestamp — confirm this is intended.
var input = $"{source}|{severity}|{ruleName}|{context.Now:O}";
// SHA-256 digest is 32 bytes; stackalloc avoids a heap allocation.
Span<byte> hash = stackalloc byte[32];
SHA256.HashData(Encoding.UTF8.GetBytes(input), hash);
// Lowercase hex, truncated to the first 16 chars (64 bits) for brevity.
return $"finding:sha256:{Convert.ToHexString(hash).ToLowerInvariant()[..16]}";
}
private static ConfidenceInput BuildConfidenceInput(PolicyEvaluationContext context, PolicyEvaluationResult result)
{
return new ConfidenceInput
@@ -535,10 +681,10 @@ internal sealed class PolicyEvaluator
}
var state = reachability.IsReachable
? (reachability.HasRuntimeEvidence ? ReachabilityState.ConfirmedReachable : ReachabilityState.StaticReachable)
? (reachability.HasRuntimeEvidence ? ConfidenceReachabilityState.ConfirmedReachable : ConfidenceReachabilityState.StaticReachable)
: reachability.IsUnreachable
? (reachability.HasRuntimeEvidence ? ReachabilityState.ConfirmedUnreachable : ReachabilityState.StaticUnreachable)
: ReachabilityState.Unknown;
? (reachability.HasRuntimeEvidence ? ConfidenceReachabilityState.ConfirmedUnreachable : ConfidenceReachabilityState.StaticUnreachable)
: ConfidenceReachabilityState.Unknown;
var digests = string.IsNullOrWhiteSpace(reachability.EvidenceRef)
? Array.Empty<string>()
@@ -560,8 +706,8 @@ internal sealed class PolicyEvaluator
}
var posture = context.Reachability.IsReachable || context.Reachability.IsUnreachable
? RuntimePosture.Supports
: RuntimePosture.Unknown;
? ConfidenceRuntimePosture.Supports
: ConfidenceRuntimePosture.Unknown;
return new RuntimeEvidence
{

View File

@@ -4,6 +4,7 @@ using System.Collections.Immutable;
using System.Globalization;
using System.Linq;
using StellaOps.PolicyDsl;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Evaluation;
@@ -23,10 +24,14 @@ internal sealed class PolicyExpressionEvaluator
};
private readonly PolicyEvaluationContext context;
private readonly EvidenceWeightedScoreResult? _evidenceWeightedScore;
public PolicyExpressionEvaluator(PolicyEvaluationContext context)
public PolicyExpressionEvaluator(
PolicyEvaluationContext context,
EvidenceWeightedScoreResult? evidenceWeightedScore = null)
{
this.context = context ?? throw new ArgumentNullException(nameof(context));
_evidenceWeightedScore = evidenceWeightedScore;
}
public EvaluationValue Evaluate(PolicyExpression expression, EvaluationScope? scope = null)
@@ -65,6 +70,9 @@ internal sealed class PolicyExpressionEvaluator
"sbom" => new EvaluationValue(new SbomScope(context.Sbom)),
"reachability" => new EvaluationValue(new ReachabilityScope(context.Reachability)),
"entropy" => new EvaluationValue(new EntropyScope(context.Entropy)),
"score" => _evidenceWeightedScore is not null
? new EvaluationValue(new ScoreScope(_evidenceWeightedScore))
: EvaluationValue.Null,
"now" => new EvaluationValue(context.Now),
"true" => EvaluationValue.True,
"false" => EvaluationValue.False,
@@ -111,6 +119,11 @@ internal sealed class PolicyExpressionEvaluator
return entropy.Get(member.Member);
}
if (raw is ScoreScope scoreScope)
{
return scoreScope.Get(member.Member);
}
if (raw is ComponentScope componentScope)
{
return componentScope.Get(member.Member);
@@ -202,6 +215,22 @@ internal sealed class PolicyExpressionEvaluator
{
return advisoryScope.Invoke(member.Member, invocation.Arguments, scope, this);
}
if (root.Name == "score" && targetRaw is ScoreScope scoreScope)
{
return member.Member.ToLowerInvariant() switch
{
"has_flag" or "hasflag" => invocation.Arguments.Length > 0
? scoreScope.HasFlag(Evaluate(invocation.Arguments[0], scope).AsString() ?? "")
: EvaluationValue.False,
"between" => invocation.Arguments.Length >= 2
? scoreScope.Between(
Evaluate(invocation.Arguments[0], scope).AsDecimal() ?? 0m,
Evaluate(invocation.Arguments[1], scope).AsDecimal() ?? 100m)
: EvaluationValue.False,
_ => EvaluationValue.Null,
};
}
}
}
@@ -915,6 +944,94 @@ internal sealed class PolicyExpressionEvaluator
};
}
/// <summary>
/// SPL scope for Evidence-Weighted Score predicates.
/// Provides access to score value, bucket, flags, and individual dimensions.
/// </summary>
/// <example>
/// SPL predicates supported:
/// - score >= 80
/// - score.value >= 80
/// - score.bucket == "ActNow"
/// - score.is_act_now == true
/// - score.rch > 0.8
/// - score.runt > 0.5
/// - score.has_flag("live-signal")
/// - score.flags contains "kev"
/// </example>
private sealed class ScoreScope
{
private readonly EvidenceWeightedScoreResult score;
public ScoreScope(EvidenceWeightedScoreResult score)
{
this.score = score;
}
public EvaluationValue Get(string member) => member.ToLowerInvariant() switch
{
// Core score value (allows direct comparison: score >= 80)
"value" => new EvaluationValue(score.Score),
// Bucket access
"bucket" => new EvaluationValue(score.Bucket.ToString()),
"is_act_now" or "isactnow" => new EvaluationValue(score.Bucket == ScoreBucket.ActNow),
"is_schedule_next" or "isschedulenext" => new EvaluationValue(score.Bucket == ScoreBucket.ScheduleNext),
"is_investigate" or "isinvestigate" => new EvaluationValue(score.Bucket == ScoreBucket.Investigate),
"is_watchlist" or "iswatchlist" => new EvaluationValue(score.Bucket == ScoreBucket.Watchlist),
// Individual dimension scores (0-1 normalized) - using Breakdown
"rch" or "reachability" => new EvaluationValue(GetDimensionInput("RCH")),
"rts" or "runtime" => new EvaluationValue(GetDimensionInput("RTS")),
"bkp" or "backport" => new EvaluationValue(GetDimensionInput("BKP")),
"xpl" or "exploit" => new EvaluationValue(GetDimensionInput("XPL")),
"src" or "source_trust" => new EvaluationValue(GetDimensionInput("SRC")),
"mit" or "mitigation" => new EvaluationValue(GetDimensionInput("MIT")),
// Flags as array
"flags" => new EvaluationValue(score.Flags.Select(f => (object?)f).ToImmutableArray()),
// Policy info
"policy_digest" or "policydigest" => new EvaluationValue(score.PolicyDigest),
// Calculation metadata
"calculated_at" or "calculatedat" => new EvaluationValue(score.CalculatedAt),
// Explanations
"explanations" => new EvaluationValue(score.Explanations.Select(e => (object?)e).ToImmutableArray()),
_ => EvaluationValue.Null,
};
private double GetDimensionInput(string symbol)
{
var contribution = score.Breakdown.FirstOrDefault(c =>
c.Symbol.Equals(symbol, StringComparison.OrdinalIgnoreCase));
return contribution?.InputValue ?? 0.0;
}
/// <summary>
/// Check if score has a specific flag.
/// </summary>
public EvaluationValue HasFlag(string flagName)
{
if (string.IsNullOrWhiteSpace(flagName))
{
return EvaluationValue.False;
}
return new EvaluationValue(score.Flags.Contains(flagName, StringComparer.OrdinalIgnoreCase));
}
/// <summary>
/// Tests whether the score lies within the inclusive range [min, max].
/// </summary>
public EvaluationValue Between(decimal min, decimal max)
{
    var inRange = min <= score.Score && score.Score <= max;
    return new EvaluationValue(inRange);
}
}
/// <summary>
/// SPL scope for macOS component predicates.
/// Provides access to bundle signing, entitlements, sandboxing, and package receipt information.

View File

@@ -0,0 +1,323 @@
// -----------------------------------------------------------------------------
// VerdictSummary.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-024
// Description: VerdictSummary extension for including EWS bucket and top factors
// -----------------------------------------------------------------------------
using System.Collections.Immutable;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Evaluation;
/// <summary>
/// Condensed view of a single policy evaluation: verdict status, matched rule,
/// the evidence-weighted score bucket, and the strongest score contributors.
/// Intended for fast triage rendering rather than full audit detail.
/// </summary>
public sealed record VerdictSummary
{
    /// <summary>Overall verdict status (e.g., "affected", "not_affected").</summary>
    public required string Status { get; init; }

    /// <summary>Severity label (Critical, High, Medium, Low, Info), when known.</summary>
    public string? Severity { get; init; }

    /// <summary>True when a policy rule matched this finding.</summary>
    public bool RuleMatched { get; init; }

    /// <summary>Name of the rule that matched, if any.</summary>
    public string? RuleName { get; init; }

    /// <summary>Priority of the matched rule, when applicable.</summary>
    public int? Priority { get; init; }

    /// <summary>Evidence-weighted score bucket used for triage ordering.</summary>
    public string? ScoreBucket { get; init; }

    /// <summary>Evidence-weighted score on the 0-100 scale.</summary>
    public int? Score { get; init; }

    /// <summary>
    /// Highest-magnitude contributors from the EWS breakdown, strongest first.
    /// </summary>
    public ImmutableArray<VerdictFactor> TopFactors { get; init; } = ImmutableArray<VerdictFactor>.Empty;

    /// <summary>Flags raised by EWS (e.g., "live-signal", "kev", "vendor-na").</summary>
    public ImmutableArray<string> Flags { get; init; } = ImmutableArray<string>.Empty;

    /// <summary>Human-readable notes explaining how the score was produced.</summary>
    public ImmutableArray<string> Explanations { get; init; } = ImmutableArray<string>.Empty;

    /// <summary>True when score guardrails (caps or floors) adjusted the raw score.</summary>
    public bool GuardrailsApplied { get; init; }

    /// <summary>Warnings collected while evaluating the policy.</summary>
    public ImmutableArray<string> Warnings { get; init; } = ImmutableArray<string>.Empty;

    /// <summary>True when an exception record was applied to this finding.</summary>
    public bool ExceptionApplied { get; init; }

    /// <summary>Legacy confidence score value, when the legacy system supplied one.</summary>
    public decimal? ConfidenceScore { get; init; }

    /// <summary>Legacy confidence band label, when available.</summary>
    public string? ConfidenceBand { get; init; }
}
/// <summary>
/// A single contributing factor to the evidence-weighted score.
/// One entry corresponds to one EWS dimension from the score breakdown.
/// </summary>
public sealed record VerdictFactor
{
    /// <summary>Full dimension name (e.g., "Reachability", "Runtime Signal").</summary>
    public required string Dimension { get; init; }

    /// <summary>Short symbol (e.g., "RCH", "RTS", "XPL").</summary>
    public required string Symbol { get; init; }

    /// <summary>Contribution to the score (positive for additive, negative for subtractive).</summary>
    public required double Contribution { get; init; }

    /// <summary>Weight applied to this dimension.</summary>
    public required double Weight { get; init; }

    /// <summary>Normalized input value [0, 1].</summary>
    public required double InputValue { get; init; }

    /// <summary>Whether this is a subtractive factor (like Mitigation).</summary>
    public bool IsSubtractive { get; init; }
}
/// <summary>
/// Helpers that project a <see cref="PolicyEvaluationResult"/> into a
/// <see cref="VerdictSummary"/> and render summaries for triage display.
/// </summary>
internal static class VerdictSummaryExtensions
{
    /// <summary>Upper bound on factors surfaced in a summary.</summary>
    private const int MaxTopFactors = 5;

    /// <summary>
    /// Builds a full <see cref="VerdictSummary"/> including EWS bucket, flags,
    /// explanations, and the strongest contributing factors.
    /// </summary>
    /// <param name="result">The policy evaluation result to summarize.</param>
    /// <returns>The populated summary.</returns>
    internal static VerdictSummary ToSummary(this PolicyEvaluationResult result)
    {
        ArgumentNullException.ThrowIfNull(result);

        var ews = result.EvidenceWeightedScore;

        return new VerdictSummary
        {
            Status = result.Status,
            Severity = result.Severity,
            RuleMatched = result.Matched,
            RuleName = result.RuleName,
            Priority = result.Priority,
            ScoreBucket = ews?.Bucket.ToString(),
            Score = ews?.Score,
            TopFactors = ExtractTopFactors(ews),
            Flags = ews?.Flags.ToImmutableArray() ?? ImmutableArray<string>.Empty,
            Explanations = ews?.Explanations.ToImmutableArray() ?? ImmutableArray<string>.Empty,
            GuardrailsApplied = ews is not null && ews.Caps.AnyApplied,
            Warnings = result.Warnings,
            ExceptionApplied = result.AppliedException is not null,
            ConfidenceScore = result.Confidence?.Value,
            ConfidenceBand = result.Confidence?.Tier.ToString(),
        };
    }

    /// <summary>
    /// Builds a lightweight summary carrying only status/rule/score fields.
    /// Prefer this when EWS factor detail is not needed for serialization.
    /// </summary>
    /// <param name="result">The policy evaluation result to summarize.</param>
    /// <returns>The minimal summary.</returns>
    internal static VerdictSummary ToMinimalSummary(this PolicyEvaluationResult result)
    {
        ArgumentNullException.ThrowIfNull(result);

        var ews = result.EvidenceWeightedScore;

        return new VerdictSummary
        {
            Status = result.Status,
            Severity = result.Severity,
            RuleMatched = result.Matched,
            RuleName = result.RuleName,
            Priority = result.Priority,
            ScoreBucket = ews?.Bucket.ToString(),
            Score = ews?.Score,
            Warnings = result.Warnings,
            ExceptionApplied = result.AppliedException is not null,
        };
    }

    /// <summary>
    /// Projects the EWS breakdown into <see cref="VerdictFactor"/>s and keeps the
    /// <see cref="MaxTopFactors"/> entries with the largest absolute contribution.
    /// </summary>
    private static ImmutableArray<VerdictFactor> ExtractTopFactors(EvidenceWeightedScoreResult? ews)
    {
        if (ews is not { Breakdown.Count: > 0 })
        {
            return ImmutableArray<VerdictFactor>.Empty;
        }

        // OrderByDescending is stable, so projecting first does not change which
        // entries survive the Take() or their relative order.
        return ews.Breakdown
            .Select(static d => new VerdictFactor
            {
                Dimension = d.Dimension,
                Symbol = d.Symbol,
                Contribution = d.Contribution,
                Weight = d.Weight,
                InputValue = d.InputValue,
                IsSubtractive = d.IsSubtractive,
            })
            .OrderByDescending(static f => Math.Abs(f.Contribution))
            .Take(MaxTopFactors)
            .ToImmutableArray();
    }

    /// <summary>
    /// Returns the single strongest factor (by absolute contribution) from the
    /// EWS breakdown, or null when no breakdown is available.
    /// </summary>
    /// <param name="ews">The evidence-weighted score result.</param>
    /// <returns>The highest-contributing factor, or null.</returns>
    public static VerdictFactor? GetPrimaryFactor(this EvidenceWeightedScoreResult? ews)
    {
        if (ews is not { Breakdown.Count: > 0 })
        {
            return null;
        }

        // MaxBy returns the first element with the maximal key, matching the
        // stable OrderByDescending + First behavior.
        var primary = ews.Breakdown.MaxBy(static d => Math.Abs(d.Contribution));
        if (primary is null)
        {
            return null;
        }

        return new VerdictFactor
        {
            Dimension = primary.Dimension,
            Symbol = primary.Symbol,
            Contribution = primary.Contribution,
            Weight = primary.Weight,
            InputValue = primary.InputValue,
            IsSubtractive = primary.IsSubtractive,
        };
    }

    /// <summary>
    /// Renders the summary as a one-line triage string.
    /// Example: "[ActNow 92] CVE-2024-1234: RCH(+35), XPL(+28), RTS(+20) | live-signal"
    /// </summary>
    /// <param name="summary">The verdict summary.</param>
    /// <param name="findingId">Optional finding ID to include.</param>
    /// <returns>A formatted triage string.</returns>
    public static string FormatTriageLine(this VerdictSummary summary, string? findingId = null)
    {
        ArgumentNullException.ThrowIfNull(summary);

        var segments = new List<string>();

        // Leading "[Bucket Score]" marker, only when a score exists.
        if (summary.Score is int scoreValue)
        {
            segments.Add($"[{summary.ScoreBucket ?? "?"} {scoreValue}]");
        }

        if (!string.IsNullOrEmpty(findingId))
        {
            segments.Add($"{findingId}:");
        }

        // Up to three strongest factors, each rendered as SYM(+NN) / SYM(-NN).
        if (summary.TopFactors.Length > 0)
        {
            var rendered = summary.TopFactors
                .Take(3)
                .Select(static f =>
                {
                    var sign = f.Contribution >= 0 ? "+" : "";
                    return $"{f.Symbol}({sign}{f.Contribution:F0})";
                });
            segments.Add(string.Join(", ", rendered));
        }

        // Trailing flag list, capped at three.
        if (summary.Flags.Length > 0)
        {
            segments.Add($"| {string.Join(", ", summary.Flags.Take(3))}");
        }

        return string.Join(" ", segments);
    }

    /// <summary>
    /// Produces a short human-readable explanation of the score bucket,
    /// augmented with primary-factor and flag context where available.
    /// </summary>
    /// <param name="summary">The verdict summary.</param>
    /// <returns>A human-readable explanation.</returns>
    public static string GetBucketExplanation(this VerdictSummary summary)
    {
        ArgumentNullException.ThrowIfNull(summary);

        if (summary.Score is not int score)
        {
            return "No evidence-weighted score available.";
        }

        var explanation = summary.ScoreBucket switch
        {
            "ActNow" => $"Score {score}/100: Strong evidence of exploitable risk. Immediate action recommended.",
            "ScheduleNext" => $"Score {score}/100: Likely real risk. Schedule remediation for next sprint.",
            "Investigate" => $"Score {score}/100: Moderate evidence. Investigate when working on this component.",
            "Watchlist" => $"Score {score}/100: Insufficient evidence. Monitor for changes.",
            _ => $"Score {score}/100."
        };

        // Append a sentence naming the dominant dimension, when recognized.
        if (summary.TopFactors.Length > 0)
        {
            var factorContext = summary.TopFactors[0].Symbol switch
            {
                "RCH" => "Reachability analysis is the primary driver.",
                "RTS" => "Runtime signals detected exploitation activity.",
                "XPL" => "Known exploit evidence is significant.",
                "BKP" => "Backport information affects the score.",
                "SRC" => "Source trust levels impact the assessment.",
                "MIT" => "Mitigations reduce the effective risk.",
                _ => null
            };

            if (factorContext is not null)
            {
                explanation = $"{explanation} {factorContext}";
            }
        }

        // Flag context, in priority order: live-signal beats kev beats vendor-na.
        if (summary.Flags.Contains("live-signal"))
        {
            explanation = $"{explanation} ALERT: Live exploitation signal detected!";
        }
        else if (summary.Flags.Contains("kev"))
        {
            explanation = $"{explanation} This is a Known Exploited Vulnerability (KEV).";
        }
        else if (summary.Flags.Contains("vendor-na"))
        {
            explanation = $"{explanation} Vendor has confirmed not affected.";
        }

        return explanation;
    }
}

View File

@@ -1,6 +1,7 @@
using System;
using System.Collections.Immutable;
using StellaOps.Policy;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Materialization;
@@ -60,6 +61,11 @@ public sealed record PolicyExplainTrace
/// </summary>
public ImmutableArray<PolicyExplainVexImpact> VexImpacts { get; init; } = ImmutableArray<PolicyExplainVexImpact>.Empty;
/// <summary>
/// Evidence-weighted score result (if calculated).
/// </summary>
public EvidenceWeightedScoreResult? EvidenceWeightedScore { get; init; }
/// <summary>
/// Additional metadata (component PURL, SBOM ID, trace ID, reachability status, etc.).
/// </summary>

View File

@@ -0,0 +1,446 @@
// -----------------------------------------------------------------------------
// ConfidenceToEwsAdapter.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-032
// Description: Adapter to translate legacy Confidence scores to EWS format
// -----------------------------------------------------------------------------
using StellaOps.Policy.Confidence.Models;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
/// <summary>
/// Outcome of translating a legacy Confidence score into EWS form:
/// the adapted result, the source score, and how the mapping was performed.
/// </summary>
public sealed record ConfidenceToEwsAdaptationResult
{
    /// <summary>
    /// Creates a new ConfidenceToEwsAdaptationResult. All arguments are required.
    /// </summary>
    public ConfidenceToEwsAdaptationResult(
        EvidenceWeightedScoreResult ewsResult,
        ConfidenceScore originalConfidence,
        AdaptationDetails details)
    {
        ArgumentNullException.ThrowIfNull(ewsResult);
        ArgumentNullException.ThrowIfNull(originalConfidence);
        ArgumentNullException.ThrowIfNull(details);

        EwsResult = ewsResult;
        OriginalConfidence = originalConfidence;
        Details = details;
    }

    /// <summary>The EWS result produced by the adaptation.</summary>
    public EvidenceWeightedScoreResult EwsResult { get; }

    /// <summary>The Confidence score the adaptation started from.</summary>
    public ConfidenceScore OriginalConfidence { get; }

    /// <summary>Mapping strategy, per-dimension values, and warnings.</summary>
    public AdaptationDetails Details { get; }
}
/// <summary>
/// Describes how a Confidence-to-EWS adaptation was carried out:
/// which dimension values were produced, the strategy name, and any caveats.
/// </summary>
public sealed record AdaptationDetails
{
    /// <summary>
    /// Creates new AdaptationDetails. All arguments are required.
    /// </summary>
    public AdaptationDetails(
        IReadOnlyDictionary<string, double> dimensionMappings,
        string mappingStrategy,
        IReadOnlyList<string> warnings)
    {
        ArgumentNullException.ThrowIfNull(dimensionMappings);
        ArgumentNullException.ThrowIfNull(mappingStrategy);
        ArgumentNullException.ThrowIfNull(warnings);

        DimensionMappings = dimensionMappings;
        MappingStrategy = mappingStrategy;
        Warnings = warnings;
    }

    /// <summary>Per-dimension mapped values keyed by mapping name.</summary>
    public IReadOnlyDictionary<string, double> DimensionMappings { get; }

    /// <summary>Name of the mapping strategy (e.g., "direct", "interpolated").</summary>
    public string MappingStrategy { get; }

    /// <summary>Caveats recorded while adapting (e.g., defaulted dimensions).</summary>
    public IReadOnlyList<string> Warnings { get; }
}
/// <summary>
/// Adapter to translate legacy Confidence scores to Evidence-Weighted Scores.
/// </summary>
/// <remarks>
/// <para>
/// The Confidence system uses a 0.0-1.0 scale where higher = more confidence in NOT being affected.
/// The EWS system uses a 0-100 scale where higher = more evidence of real risk.
/// </para>
/// <para>
/// Key differences:
/// - Confidence: High = likely not affected = lower risk
/// - EWS: High = likely affected = higher risk
/// </para>
/// <para>
/// Mapping strategy:
/// - Invert Confidence factors that measure "safety" to measure "risk"
/// - Map Confidence factors to closest EWS dimensions
/// - Apply EWS scaling (0-100 instead of 0.0-1.0)
/// </para>
/// </remarks>
public sealed class ConfidenceToEwsAdapter
{
    // Calculator used to turn the mapped input into a scored result; swappable for tests.
    private readonly IEvidenceWeightedScoreCalculator _calculator;

    /// <summary>
    /// Creates a new ConfidenceToEwsAdapter.
    /// </summary>
    /// <param name="calculator">Optional calculator override; a default calculator is used when null.</param>
    public ConfidenceToEwsAdapter(IEvidenceWeightedScoreCalculator? calculator = null)
    {
        _calculator = calculator ?? new EvidenceWeightedScoreCalculator();
    }

    /// <summary>
    /// Adapts a Confidence score to an EWS result.
    /// </summary>
    /// <param name="confidence">The Confidence score to adapt.</param>
    /// <param name="findingId">The finding ID for the EWS result.</param>
    /// <returns>The adapted EWS result with details.</returns>
    /// <exception cref="ArgumentNullException">When <paramref name="confidence"/> is null.</exception>
    /// <exception cref="ArgumentException">When <paramref name="findingId"/> is null or whitespace.</exception>
    public ConfidenceToEwsAdaptationResult Adapt(ConfidenceScore confidence, string findingId)
    {
        ArgumentNullException.ThrowIfNull(confidence);
        ArgumentException.ThrowIfNullOrWhiteSpace(findingId);
        var (input, mappings, warnings) = MapConfidenceToEwsInput(confidence, findingId);
        // NOTE(review): always scores with the DefaultProduction weight policy — confirm
        // adapted scores should not follow a tenant/policy-specific weight set.
        var ewsResult = _calculator.Calculate(input, EvidenceWeightPolicy.DefaultProduction);
        var details = new AdaptationDetails(
            dimensionMappings: mappings,
            mappingStrategy: "inverted-factor-mapping",
            warnings: warnings
        );
        return new ConfidenceToEwsAdaptationResult(
            ewsResult: ewsResult,
            originalConfidence: confidence,
            details: details
        );
    }

    /// <summary>
    /// Compares a Confidence score with an EWS result to assess alignment.
    /// </summary>
    /// <param name="confidence">The Confidence score.</param>
    /// <param name="ewsResult">The EWS result.</param>
    /// <returns>Comparison result with alignment details.</returns>
    public ConfidenceEwsComparison Compare(ConfidenceScore confidence, EvidenceWeightedScoreResult ewsResult)
    {
        ArgumentNullException.ThrowIfNull(confidence);
        ArgumentNullException.ThrowIfNull(ewsResult);
        // Adapt Confidence to EWS for comparison
        var adapted = Adapt(confidence, ewsResult.FindingId);
        // Calculate alignment
        var scoreDifference = Math.Abs(adapted.EwsResult.Score - ewsResult.Score);
        var bucketMatch = adapted.EwsResult.Bucket == ewsResult.Bucket;
        // Thresholds mirror the AlignmentLevel enum documentation (<5, <10, <20, <30, else).
        var alignment = scoreDifference switch
        {
            < 5 => AlignmentLevel.Excellent,
            < 10 => AlignmentLevel.Good,
            < 20 => AlignmentLevel.Moderate,
            < 30 => AlignmentLevel.Poor,
            _ => AlignmentLevel.Divergent
        };
        return new ConfidenceEwsComparison(
            originalConfidence: confidence,
            originalEws: ewsResult,
            adaptedEws: adapted.EwsResult,
            scoreDifference: scoreDifference,
            bucketMatch: bucketMatch,
            alignment: alignment
        );
    }

    /// <summary>
    /// Builds an EWS input from a Confidence score, recording every per-dimension
    /// mapping and any warnings about approximate or defaulted dimensions.
    /// Mapping-dictionary insertion order and warning order are part of the
    /// observable output; do not reorder the steps below.
    /// </summary>
    private static (EvidenceWeightedScoreInput Input, Dictionary<string, double> Mappings, List<string> Warnings)
        MapConfidenceToEwsInput(ConfidenceScore confidence, string findingId)
    {
        var mappings = new Dictionary<string, double>(StringComparer.OrdinalIgnoreCase);
        var warnings = new List<string>();
        // Find factors by type
        var reachabilityFactor = confidence.Factors.FirstOrDefault(f => f.Type == ConfidenceFactorType.Reachability);
        var runtimeFactor = confidence.Factors.FirstOrDefault(f => f.Type == ConfidenceFactorType.Runtime);
        var vexFactor = confidence.Factors.FirstOrDefault(f => f.Type == ConfidenceFactorType.Vex);
        var provenanceFactor = confidence.Factors.FirstOrDefault(f => f.Type == ConfidenceFactorType.Provenance);
        var advisoryFactor = confidence.Factors.FirstOrDefault(f => f.Type == ConfidenceFactorType.Advisory);
        // Map Reachability (Confidence) → RCH (EWS)
        // Confidence: high = unreachable (safe) → EWS: invert so high = reachable (risky)
        var rch = InvertConfidenceFactor(reachabilityFactor, "Reachability", mappings, warnings);
        // Map Runtime (Confidence) → RTS (EWS)
        // Confidence: high = runtime contradicts (safe) → EWS: invert so high = runtime confirms (risky)
        var rts = InvertConfidenceFactor(runtimeFactor, "Runtime", mappings, warnings);
        // Map VEX (Confidence) → BKP (EWS)
        // VEX not_affected with high trust → BKP high means vendor confirmed safe
        // Note: This is a loose mapping since VEX and Backport are different concepts
        var bkp = MapVexToBackport(vexFactor, mappings, warnings);
        // Map Provenance/Advisory → SRC (EWS)
        // Provenance quality affects source trust
        var src = MapProvenanceToSourceTrust(provenanceFactor, advisoryFactor, mappings, warnings);
        // XPL (Exploit) - no direct Confidence equivalent
        // Default to neutral (0.5) as Confidence doesn't track exploit intelligence
        var xpl = 0.5;
        mappings["xpl"] = xpl;
        warnings.Add("No exploit factor in Confidence; defaulting XPL to 0.5");
        // MIT (Mitigation) - no direct Confidence equivalent
        // Default to 0 (no mitigation assumed)
        var mit = 0.0;
        mappings["mit"] = mit;
        warnings.Add("No mitigation factor in Confidence; defaulting MIT to 0.0");
        var input = new EvidenceWeightedScoreInput
        {
            FindingId = findingId,
            Rch = rch,
            Rts = rts,
            Bkp = bkp,
            Xpl = xpl,
            Src = src,
            Mit = mit
        };
        return (input, mappings, warnings);
    }

    /// <summary>
    /// Inverts a "safety" Confidence factor into a "risk" EWS value (1 - raw),
    /// defaulting to 0.5 with a warning when the factor is absent.
    /// </summary>
    private static double InvertConfidenceFactor(
        ConfidenceFactor? factor,
        string name,
        Dictionary<string, double> mappings,
        List<string> warnings)
    {
        if (factor is null)
        {
            var defaultValue = 0.5;
            mappings[$"{name.ToLowerInvariant()}_to_ews"] = defaultValue;
            warnings.Add($"No {name} factor in Confidence; defaulting to {defaultValue}");
            return defaultValue;
        }
        // Invert: high confidence (safe) → low EWS (safe)
        // Low confidence (risky) → high EWS (risky)
        var inverted = 1.0 - (double)factor.RawValue;
        mappings[$"{name.ToLowerInvariant()}_to_ews"] = inverted;
        return inverted;
    }

    /// <summary>
    /// Approximates BKP from the VEX trust factor; always records a warning because
    /// the two concepts have different semantics. Defaults to 0.5 when absent.
    /// </summary>
    private static double MapVexToBackport(
        ConfidenceFactor? vexFactor,
        Dictionary<string, double> mappings,
        List<string> warnings)
    {
        if (vexFactor is null)
        {
            var defaultValue = 0.5;
            mappings["vex_to_bkp"] = defaultValue;
            warnings.Add("No VEX factor in Confidence; defaulting BKP to 0.5");
            return defaultValue;
        }
        // VEX high trust (not affected) → BKP high (backport confirms safe)
        // This is an approximation - VEX and backport serve different purposes
        // VEX says "vendor says not affected"
        // BKP says "version comparison shows patched"
        // We treat high VEX trust as evidence of being "handled" similarly to backport
        var bkp = (double)vexFactor.RawValue;
        mappings["vex_to_bkp"] = bkp;
        warnings.Add("VEX factor mapped to BKP (approximation - different semantic meanings)");
        return bkp;
    }

    /// <summary>
    /// Derives SRC as the mean of the provenance and advisory factors, each
    /// defaulting to 0.5 when missing; warns only when both are absent.
    /// </summary>
    private static double MapProvenanceToSourceTrust(
        ConfidenceFactor? provenanceFactor,
        ConfidenceFactor? advisoryFactor,
        Dictionary<string, double> mappings,
        List<string> warnings)
    {
        double provenanceValue = provenanceFactor is not null ? (double)provenanceFactor.RawValue : 0.5;
        double advisoryValue = advisoryFactor is not null ? (double)advisoryFactor.RawValue : 0.5;
        // Average provenance and advisory factors for source trust
        // High provenance quality + fresh advisory = high source trust
        var src = (provenanceValue + advisoryValue) / 2.0;
        mappings["provenance_to_src"] = provenanceValue;
        mappings["advisory_to_src"] = advisoryValue;
        mappings["src_combined"] = src;
        if (provenanceFactor is null && advisoryFactor is null)
        {
            warnings.Add("No Provenance or Advisory factors; defaulting SRC to 0.5");
        }
        return src;
    }
}
/// <summary>
/// Side-by-side comparison of a legacy Confidence score with an EWS result,
/// including the Confidence-derived EWS and an overall alignment verdict.
/// </summary>
public sealed record ConfidenceEwsComparison
{
    /// <summary>
    /// Creates a new ConfidenceEwsComparison.
    /// </summary>
    public ConfidenceEwsComparison(
        ConfidenceScore originalConfidence,
        EvidenceWeightedScoreResult originalEws,
        EvidenceWeightedScoreResult adaptedEws,
        int scoreDifference,
        bool bucketMatch,
        AlignmentLevel alignment)
    {
        OriginalConfidence = originalConfidence;
        OriginalEws = originalEws;
        AdaptedEws = adaptedEws;
        ScoreDifference = scoreDifference;
        BucketMatch = bucketMatch;
        Alignment = alignment;
    }

    /// <summary>The legacy Confidence score being compared.</summary>
    public ConfidenceScore OriginalConfidence { get; }

    /// <summary>The directly-calculated EWS result.</summary>
    public EvidenceWeightedScoreResult OriginalEws { get; }

    /// <summary>The EWS result derived from the Confidence score.</summary>
    public EvidenceWeightedScoreResult AdaptedEws { get; }

    /// <summary>Absolute point difference between original and adapted EWS scores.</summary>
    public int ScoreDifference { get; }

    /// <summary>True when both results land in the same bucket.</summary>
    public bool BucketMatch { get; }

    /// <summary>Overall alignment classification.</summary>
    public AlignmentLevel Alignment { get; }

    /// <summary>True for Moderate alignment or better.</summary>
    public bool IsAligned => Alignment switch
    {
        AlignmentLevel.Excellent or AlignmentLevel.Good or AlignmentLevel.Moderate => true,
        _ => false,
    };

    /// <summary>
    /// Renders a one-line summary of the comparison.
    /// </summary>
    public string GetSummary()
        => $"Confidence {OriginalConfidence.Value:P0} ({OriginalConfidence.Tier}) ↔ " +
           $"EWS {OriginalEws.Score} ({OriginalEws.Bucket}) | " +
           $"Adapted EWS {AdaptedEws.Score} ({AdaptedEws.Bucket}) | " +
           $"Diff={ScoreDifference}, Alignment={Alignment}";
}
/// <summary>
/// How closely a Confidence score and an EWS result agree,
/// bucketed by the absolute score difference in points.
/// </summary>
public enum AlignmentLevel
{
    /// <summary>Difference below 5 points.</summary>
    Excellent = 0,

    /// <summary>Difference below 10 points.</summary>
    Good = 1,

    /// <summary>Difference below 20 points.</summary>
    Moderate = 2,

    /// <summary>Difference below 30 points.</summary>
    Poor = 3,

    /// <summary>Difference of 30 points or more.</summary>
    Divergent = 4
}
/// <summary>
/// Quick, calculator-free approximations for translating Confidence values
/// into EWS scores and buckets.
/// </summary>
public static class ConfidenceToEwsExtensions
{
    /// <summary>
    /// Approximates an EWS score (0-100) by inverting the Confidence value.
    /// </summary>
    /// <remarks>
    /// For an accurate dimension-by-dimension mapping, use ConfidenceToEwsAdapter.Adapt().
    /// </remarks>
    public static int ToApproximateEwsScore(this ConfidenceScore confidence)
    {
        // Confidence measures safety (1.0 = safe) while EWS measures risk
        // (100 = high risk), so invert before scaling.
        return (int)Math.Round((1.0m - confidence.Value) * 100m);
    }

    /// <summary>
    /// Approximates the EWS bucket for a Confidence score via the inverted score.
    /// </summary>
    public static ScoreBucket ToApproximateEwsBucket(this ConfidenceScore confidence)
    {
        var approx = confidence.ToApproximateEwsScore();
        if (approx >= 90)
        {
            return ScoreBucket.ActNow;
        }
        if (approx >= 70)
        {
            return ScoreBucket.ScheduleNext;
        }
        if (approx >= 40)
        {
            return ScoreBucket.Investigate;
        }
        return ScoreBucket.Watchlist;
    }

    /// <summary>
    /// Maps a ConfidenceTier to its approximate EWS ScoreBucket
    /// (inverted: high confidence means low-priority bucket).
    /// </summary>
    public static ScoreBucket ToApproximateEwsBucket(this ConfidenceTier tier) => tier switch
    {
        ConfidenceTier.VeryHigh => ScoreBucket.Watchlist,
        ConfidenceTier.High => ScoreBucket.Watchlist,
        ConfidenceTier.Medium => ScoreBucket.Investigate,
        ConfidenceTier.Low => ScoreBucket.ScheduleNext,
        ConfidenceTier.VeryLow => ScoreBucket.ActNow,
        _ => ScoreBucket.Investigate
    };
}

View File

@@ -0,0 +1,390 @@
// -----------------------------------------------------------------------------
// DualEmitVerdictEnricher.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-033
// Description: Dual-emit mode for Confidence and EWS scores in verdicts
// -----------------------------------------------------------------------------
using System.Diagnostics.Metrics;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Policy.Confidence.Models;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
/// <summary>
/// Outcome of dual-emit enrichment: the legacy Confidence score, the new EWS
/// result, and a comparison when both were available.
/// </summary>
public sealed record DualEmitResult
{
    /// <summary>
    /// Creates a new DualEmitResult. Any argument may be null.
    /// </summary>
    public DualEmitResult(
        ConfidenceScore? confidence,
        EvidenceWeightedScoreResult? evidenceWeightedScore,
        DualEmitComparison? comparison)
    {
        Confidence = confidence;
        EvidenceWeightedScore = evidenceWeightedScore;
        Comparison = comparison;
    }

    /// <summary>Legacy Confidence score, when one was computed.</summary>
    public ConfidenceScore? Confidence { get; }

    /// <summary>New Evidence-Weighted Score, when one was computed.</summary>
    public EvidenceWeightedScoreResult? EvidenceWeightedScore { get; }

    /// <summary>Comparison of the two scores; null unless both were present and compared.</summary>
    public DualEmitComparison? Comparison { get; }

    /// <summary>True when both scoring systems produced a result.</summary>
    public bool HasBothScores => (Confidence, EvidenceWeightedScore) is (not null, not null);

    /// <summary>True when no comparison exists or the comparison reports alignment.</summary>
    public bool IsAligned => Comparison is null || Comparison.IsAligned;
}
/// <summary>
/// Point-in-time comparison of a Confidence score against an EWS result,
/// including a mapped score difference and a semantic tier/bucket match.
/// </summary>
public sealed record DualEmitComparison
{
    // Tier/bucket pairs considered semantically compatible. The two scales are
    // inverted: high Confidence (safe) pairs with low-priority buckets.
    private static readonly HashSet<(Confidence.Models.ConfidenceTier Tier, ScoreBucket Bucket)> CompatiblePairs = new()
    {
        (Confidence.Models.ConfidenceTier.VeryHigh, ScoreBucket.Watchlist),
        (Confidence.Models.ConfidenceTier.High, ScoreBucket.Watchlist),
        (Confidence.Models.ConfidenceTier.High, ScoreBucket.Investigate),
        (Confidence.Models.ConfidenceTier.Medium, ScoreBucket.Investigate),
        (Confidence.Models.ConfidenceTier.Medium, ScoreBucket.ScheduleNext),
        (Confidence.Models.ConfidenceTier.Low, ScoreBucket.ScheduleNext),
        (Confidence.Models.ConfidenceTier.Low, ScoreBucket.ActNow),
        (Confidence.Models.ConfidenceTier.VeryLow, ScoreBucket.ActNow),
    };

    /// <summary>
    /// Creates a new DualEmitComparison.
    /// </summary>
    public DualEmitComparison(
        decimal confidenceValue,
        int ewsScore,
        string confidenceTier,
        string ewsBucket,
        int scoreDifference,
        bool tierBucketMatch,
        bool isAligned)
    {
        ConfidenceValue = confidenceValue;
        EwsScore = ewsScore;
        ConfidenceTier = confidenceTier;
        EwsBucket = ewsBucket;
        ScoreDifference = scoreDifference;
        TierBucketMatch = tierBucketMatch;
        IsAligned = isAligned;
    }

    /// <summary>Confidence value in [0, 1].</summary>
    public decimal ConfidenceValue { get; }

    /// <summary>EWS score in [0, 100].</summary>
    public int EwsScore { get; }

    /// <summary>Confidence tier label (VeryHigh, High, Medium, Low, VeryLow).</summary>
    public string ConfidenceTier { get; }

    /// <summary>EWS bucket label (ActNow, ScheduleNext, Investigate, Watchlist).</summary>
    public string EwsBucket { get; }

    /// <summary>Absolute difference after mapping Confidence onto the 0-100 scale.</summary>
    public int ScoreDifference { get; }

    /// <summary>True when tier and bucket are semantically compatible (inverted scales).</summary>
    public bool TierBucketMatch { get; }

    /// <summary>True when the difference is below 20 points and the tier matches.</summary>
    public bool IsAligned { get; }

    /// <summary>
    /// Builds a comparison from a Confidence score and an EWS result.
    /// </summary>
    public static DualEmitComparison Create(ConfidenceScore confidence, EvidenceWeightedScoreResult ews)
    {
        ArgumentNullException.ThrowIfNull(confidence);
        ArgumentNullException.ThrowIfNull(ews);

        // Project Confidence onto the 0-100 risk scale (inverted: safe → 0).
        var confidenceOn100 = (int)Math.Round((1.0m - confidence.Value) * 100m);
        var diff = Math.Abs(confidenceOn100 - ews.Score);
        var tierBucketMatch = IsTierBucketMatch(confidence.Tier, ews.Bucket);

        return new DualEmitComparison(
            confidenceValue: confidence.Value,
            ewsScore: ews.Score,
            confidenceTier: confidence.Tier.ToString(),
            ewsBucket: ews.Bucket.ToString(),
            scoreDifference: diff,
            tierBucketMatch: tierBucketMatch,
            isAligned: diff < 20 && tierBucketMatch
        );
    }

    private static bool IsTierBucketMatch(Confidence.Models.ConfidenceTier tier, ScoreBucket bucket)
        => CompatiblePairs.Contains((tier, bucket));
}
/// <summary>
/// Service for dual-emit mode that enriches verdicts with both Confidence and EWS scores,
/// optionally producing a comparison between the two.
/// </summary>
public interface IDualEmitVerdictEnricher
{
    /// <summary>
    /// Whether dual-emit mode is currently enabled (may change at runtime with configuration).
    /// </summary>
    bool IsEnabled { get; }

    /// <summary>
    /// Enriches a verdict with both Confidence and EWS scores.
    /// </summary>
    /// <param name="confidence">The Confidence score (may be null).</param>
    /// <param name="ewsScore">The EWS score (may be null).</param>
    /// <returns>The dual-emit result; includes a comparison only when enabled and both scores are present.</returns>
    DualEmitResult Enrich(ConfidenceScore? confidence, EvidenceWeightedScoreResult? ewsScore);
}
/// <summary>
/// Default <see cref="IDualEmitVerdictEnricher"/>: compares Confidence and EWS
/// scores when dual-emit mode is on, and reports alignment telemetry.
/// </summary>
public sealed class DualEmitVerdictEnricher : IDualEmitVerdictEnricher
{
    private readonly IOptionsMonitor<PolicyEvidenceWeightedScoreOptions> _options;
    private readonly ILogger<DualEmitVerdictEnricher> _logger;
    private readonly Counter<long> _dualEmitCounter;
    private readonly Counter<long> _alignmentCounter;
    private readonly Histogram<double> _scoreDifferenceHistogram;

    /// <summary>
    /// Creates a new DualEmitVerdictEnricher.
    /// </summary>
    /// <param name="options">Live options controlling dual-emit behavior.</param>
    /// <param name="logger">Logger for misalignment diagnostics.</param>
    /// <param name="meterFactory">Optional meter factory; a standalone Meter is created when null.</param>
    public DualEmitVerdictEnricher(
        IOptionsMonitor<PolicyEvidenceWeightedScoreOptions> options,
        ILogger<DualEmitVerdictEnricher> logger,
        IMeterFactory? meterFactory = null)
    {
        ArgumentNullException.ThrowIfNull(options);
        ArgumentNullException.ThrowIfNull(logger);
        _options = options;
        _logger = logger;

        var telemetryMeter = meterFactory is null
            ? new Meter("StellaOps.Policy.DualEmit")
            : meterFactory.Create("StellaOps.Policy.DualEmit");

        _dualEmitCounter = telemetryMeter.CreateCounter<long>(
            "stellaops.policy.dual_emit.verdicts",
            "verdicts",
            "Number of verdicts processed in dual-emit mode");
        _alignmentCounter = telemetryMeter.CreateCounter<long>(
            "stellaops.policy.dual_emit.alignment",
            "verdicts",
            "Number of aligned/misaligned verdicts in dual-emit mode");
        _scoreDifferenceHistogram = telemetryMeter.CreateHistogram<double>(
            "stellaops.policy.dual_emit.score_difference",
            "points",
            "Distribution of score differences between Confidence and EWS");
    }

    /// <inheritdoc />
    public bool IsEnabled => _options.CurrentValue is { Enabled: true, DualEmitMode: true };

    /// <inheritdoc />
    public DualEmitResult Enrich(ConfidenceScore? confidence, EvidenceWeightedScoreResult? ewsScore)
    {
        DualEmitComparison? comparison = null;

        // A comparison is only produced when dual-emit is on and both scores exist;
        // otherwise the scores pass through untouched.
        if (IsEnabled && confidence is not null && ewsScore is not null)
        {
            comparison = DualEmitComparison.Create(confidence, ewsScore);
            EmitTelemetry(comparison);
        }

        return new DualEmitResult(confidence, ewsScore, comparison);
    }

    /// <summary>
    /// Records counters, the score-difference histogram, and a debug log for
    /// misalignments. Failures are swallowed — telemetry must never break enrichment.
    /// </summary>
    private void EmitTelemetry(DualEmitComparison comparison)
    {
        if (!_options.CurrentValue.EmitComparisonTelemetry)
        {
            return;
        }

        try
        {
            var hasBothTag = new KeyValuePair<string, object?>("has_both", true);
            _dualEmitCounter.Add(1, hasBothTag);

            var statusTag = new KeyValuePair<string, object?>(
                "status", comparison.IsAligned ? "aligned" : "misaligned");
            _alignmentCounter.Add(1, statusTag);

            _scoreDifferenceHistogram.Record(comparison.ScoreDifference);

            if (!comparison.IsAligned)
            {
                _logger.LogDebug(
                    "Dual-emit score misalignment: Confidence={ConfidenceValue:P0} ({ConfidenceTier}) ↔ EWS={EwsScore} ({EwsBucket}), diff={ScoreDiff}",
                    comparison.ConfidenceValue,
                    comparison.ConfidenceTier,
                    comparison.EwsScore,
                    comparison.EwsBucket,
                    comparison.ScoreDifference);
            }
        }
        catch (Exception ex)
        {
            _logger.LogWarning(ex, "Failed to emit dual-emit telemetry");
        }
    }
}
/// <summary>
/// Extension methods for working with <see cref="DualEmitResult"/> during migration.
/// </summary>
public static class DualEmitExtensions
{
    /// <summary>
    /// Resolves the primary score on a 0-100 scale.
    /// </summary>
    /// <param name="result">The dual-emit result.</param>
    /// <param name="useEwsAsPrimary">Whether EWS is the primary score (otherwise Confidence).</param>
    /// <returns>
    /// The EWS score when requested and available; otherwise the inverted Confidence
    /// (high confidence maps to a low score); 50 (neutral) when neither is present.
    /// </returns>
    public static int GetPrimaryScore(this DualEmitResult result, bool useEwsAsPrimary)
    {
        if (useEwsAsPrimary && result.EvidenceWeightedScore is { } ews)
        {
            return ews.Score;
        }

        return result.Confidence is { } confidence
            ? (int)Math.Round((1.0m - confidence.Value) * 100m)
            : 50;
    }

    /// <summary>
    /// Resolves the primary bucket/tier name.
    /// </summary>
    /// <param name="result">The dual-emit result.</param>
    /// <param name="useEwsAsPrimary">Whether EWS is the primary score.</param>
    /// <returns>The bucket name, defaulting to "Investigate" when no score is present.</returns>
    public static string GetPrimaryBucket(this DualEmitResult result, bool useEwsAsPrimary)
    {
        if (useEwsAsPrimary && result.EvidenceWeightedScore is { } ews)
        {
            return ews.Bucket.ToString();
        }

        if (result.Confidence is not { } confidence)
        {
            return "Investigate";
        }

        // Inverted mapping: higher confidence means less urgent action.
        return confidence.Tier switch
        {
            ConfidenceTier.VeryHigh or ConfidenceTier.High => "Watchlist",
            ConfidenceTier.Medium => "Investigate",
            ConfidenceTier.Low => "ScheduleNext",
            ConfidenceTier.VeryLow => "ActNow",
            _ => "Investigate"
        };
    }

    /// <summary>
    /// Formats a one-line human-readable summary of whichever scores are present.
    /// </summary>
    public static string GetSummary(this DualEmitResult result)
    {
        var parts = new List<string>(3);

        if (result.Confidence is { } confidence)
        {
            parts.Add($"Confidence={confidence.Value:P0}({confidence.Tier})");
        }

        if (result.EvidenceWeightedScore is { } ews)
        {
            parts.Add($"EWS={ews.Score}({ews.Bucket})");
        }

        if (result.Comparison is { } comparison)
        {
            parts.Add($"Aligned={comparison.IsAligned}(diff={comparison.ScoreDifference})");
        }

        return string.Join(" | ", parts);
    }
}
/// <summary>
/// Registration helper for dual-emit mode.
/// Note: Actual DI registration will be handled by the host assembly
/// that has access to Microsoft.Extensions.DependencyInjection.
/// </summary>
internal static class DualEmitServiceCollectionHelpers
{
    /// <summary>
    /// Returns the service/implementation type pair for the dual-emit enricher registration.
    /// </summary>
    public static (Type Service, Type Implementation) GetDualEmitServices()
        => (typeof(IDualEmitVerdictEnricher), typeof(DualEmitVerdictEnricher));
}

View File

@@ -0,0 +1,304 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-004 - Implement EvidenceWeightedScoreEnricher
using System.Collections.Concurrent;
using System.Diagnostics;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
/// <summary>
/// Enriches findings with Evidence-Weighted Scores by calling the normalizer aggregator and calculator.
/// </summary>
/// <remarks>
/// Per finding: optional cache lookup → aggregate evidence → resolve policy (with option
/// overrides) → calculate score → optionally cache. Exceptions are caught and surfaced as
/// <see cref="ScoreEnrichmentResult.Failure"/>; this type never throws from Enrich.
/// </remarks>
public sealed class EvidenceWeightedScoreEnricher : IFindingScoreEnricher
{
    private readonly INormalizerAggregator _aggregator;
    private readonly IEvidenceWeightedScoreCalculator _calculator;
    private readonly IEvidenceWeightPolicyProvider _policyProvider;
    private readonly IOptionsMonitor<PolicyEvidenceWeightedScoreOptions> _options;
    private readonly ILogger<EvidenceWeightedScoreEnricher>? _logger;  // optional; null disables logging
    private readonly IScoreEnrichmentCache? _cache;                    // optional; null disables caching
    /// <summary>
    /// Creates a new enricher. <paramref name="logger"/> and <paramref name="cache"/> are optional.
    /// </summary>
    public EvidenceWeightedScoreEnricher(
        INormalizerAggregator aggregator,
        IEvidenceWeightedScoreCalculator calculator,
        IEvidenceWeightPolicyProvider policyProvider,
        IOptionsMonitor<PolicyEvidenceWeightedScoreOptions> options,
        ILogger<EvidenceWeightedScoreEnricher>? logger = null,
        IScoreEnrichmentCache? cache = null)
    {
        _aggregator = aggregator ?? throw new ArgumentNullException(nameof(aggregator));
        _calculator = calculator ?? throw new ArgumentNullException(nameof(calculator));
        _policyProvider = policyProvider ?? throw new ArgumentNullException(nameof(policyProvider));
        _options = options ?? throw new ArgumentNullException(nameof(options));
        _logger = logger;
        _cache = cache;
    }
    /// <inheritdoc />
    public bool IsEnabled => _options.CurrentValue.Enabled;
    /// <inheritdoc />
    public ValueTask<ScoreEnrichmentResult> EnrichAsync(
        FindingEvidence evidence,
        CancellationToken cancellationToken = default)
    {
        // For now, the implementation is synchronous - async is for future when
        // we might need to fetch additional evidence asynchronously
        return ValueTask.FromResult(Enrich(evidence));
    }
    /// <inheritdoc />
    public ScoreEnrichmentResult Enrich(FindingEvidence evidence)
    {
        ArgumentNullException.ThrowIfNull(evidence);
        // Snapshot options once so enablement/caching decisions are consistent within this call.
        var options = _options.CurrentValue;
        // Check if feature is enabled
        if (!options.Enabled)
        {
            return ScoreEnrichmentResult.Skipped(evidence.FindingId);
        }
        // Check cache first if enabled.
        // NOTE(review): the cache key is FindingId only; cached scores are not invalidated
        // when weight/threshold overrides change via options reload — confirm the cache is
        // scoped to a single evaluation context where options are stable.
        if (options.EnableCaching && _cache is not null)
        {
            if (_cache.TryGet(evidence.FindingId, out var cachedScore) && cachedScore is not null)
            {
                _logger?.LogDebug(
                    "Cache hit for EWS: FindingId={FindingId}, Score={Score}",
                    evidence.FindingId, cachedScore.Score);
                return ScoreEnrichmentResult.Success(
                    evidence.FindingId,
                    cachedScore,
                    fromCache: true);
            }
        }
        try
        {
            var stopwatch = Stopwatch.StartNew();
            // Aggregate evidence into normalized input
            var input = _aggregator.Aggregate(evidence);
            // Get policy (use configured digest or default)
            var policy = GetPolicy(options);
            // Calculate score
            var score = _calculator.Calculate(input, policy);
            stopwatch.Stop();
            // Cache the result if enabled; the size check is best-effort (racy under
            // concurrency) and bounds the cache at roughly MaxCachedScoresPerContext.
            if (options.EnableCaching && _cache is not null && _cache.Count < options.MaxCachedScoresPerContext)
            {
                _cache.Set(evidence.FindingId, score);
            }
            _logger?.LogDebug(
                "Calculated EWS: FindingId={FindingId}, Score={Score}, Bucket={Bucket}, Duration={Duration}ms",
                evidence.FindingId, score.Score, score.Bucket, stopwatch.ElapsedMilliseconds);
            return ScoreEnrichmentResult.Success(
                evidence.FindingId,
                score,
                fromCache: false,
                duration: stopwatch.Elapsed);
        }
        catch (Exception ex)
        {
            // Deliberate catch-all: a scoring failure degrades to a Failure result
            // rather than aborting policy evaluation.
            _logger?.LogWarning(
                ex,
                "Failed to calculate EWS for FindingId={FindingId}: {Error}",
                evidence.FindingId, ex.Message);
            return ScoreEnrichmentResult.Failure(evidence.FindingId, ex.Message);
        }
    }
    /// <inheritdoc />
    public async IAsyncEnumerable<ScoreEnrichmentResult> EnrichBatchAsync(
        IEnumerable<FindingEvidence> evidenceList,
        [System.Runtime.CompilerServices.EnumeratorCancellation] CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(evidenceList);
        foreach (var evidence in evidenceList)
        {
            // Graceful stop: end the stream instead of throwing on cancellation.
            if (cancellationToken.IsCancellationRequested)
            {
                yield break;
            }
            yield return await EnrichAsync(evidence, cancellationToken);
        }
    }
    /// <summary>
    /// Resolves the effective weight policy: the provider's default with option overrides applied.
    /// </summary>
    private EvidenceWeightPolicy GetPolicy(PolicyEvidenceWeightedScoreOptions options)
    {
        // Get default policy synchronously (blocking call) - use cached policy in production
        // The async API is available but for the sync Enrich method we need sync access
        // NOTE(review): sync-over-async here can deadlock on synchronization-context hosts
        // and ties up a thread-pool thread — confirm the provider completes synchronously
        // (e.g. from an in-memory cache) in production.
        var defaultPolicy = _policyProvider
            .GetDefaultPolicyAsync("default", CancellationToken.None)
            .GetAwaiter()
            .GetResult();
        return ApplyWeightOverrides(defaultPolicy, options);
    }
    /// <summary>
    /// Layers configured weight and bucket-threshold overrides onto a base policy.
    /// Returns the policy unchanged when no overrides are configured.
    /// </summary>
    private static EvidenceWeightPolicy ApplyWeightOverrides(
        EvidenceWeightPolicy policy,
        PolicyEvidenceWeightedScoreOptions options)
    {
        // Apply weight overrides if configured
        if (options.Weights is not null)
        {
            var newWeights = options.Weights.ToWeights(policy.Weights);
            policy = policy with { Weights = newWeights };
        }
        // Apply bucket threshold overrides if configured
        if (options.BucketThresholds is not null)
        {
            var newThresholds = options.BucketThresholds.ToThresholds(policy.Buckets);
            policy = policy with { Buckets = newThresholds };
        }
        return policy;
    }
}
/// <summary>
/// In-memory <see cref="IScoreEnrichmentCache"/> scoped to an evaluation context.
/// Safe for concurrent readers and writers; hit/miss/set counters are tracked lock-free.
/// </summary>
public sealed class InMemoryScoreEnrichmentCache : IScoreEnrichmentCache
{
    // Finding identifiers are matched case-insensitively.
    private readonly ConcurrentDictionary<string, EvidenceWeightedScoreResult> _entries =
        new(StringComparer.OrdinalIgnoreCase);

    // Telemetry counters, updated via Interlocked.
    private long _hits;
    private long _misses;
    private long _sets;

    /// <inheritdoc />
    public int Count => _entries.Count;

    /// <inheritdoc />
    public bool TryGet(string findingId, out EvidenceWeightedScoreResult? score)
    {
        ArgumentException.ThrowIfNullOrEmpty(findingId);

        if (!_entries.TryGetValue(findingId, out var entry))
        {
            Interlocked.Increment(ref _misses);
            score = null;
            return false;
        }

        Interlocked.Increment(ref _hits);
        score = entry;
        return true;
    }

    /// <inheritdoc />
    public void Set(string findingId, EvidenceWeightedScoreResult score)
    {
        ArgumentException.ThrowIfNullOrEmpty(findingId);
        ArgumentNullException.ThrowIfNull(score);

        _entries[findingId] = score;
        Interlocked.Increment(ref _sets);
    }

    /// <inheritdoc />
    public void Clear() => _entries.Clear();

    /// <summary>
    /// Number of cache hits.
    /// </summary>
    public long Hits => Interlocked.Read(ref _hits);

    /// <summary>
    /// Number of cache misses.
    /// </summary>
    public long Misses => Interlocked.Read(ref _misses);

    /// <summary>
    /// Number of cache sets.
    /// </summary>
    public long Sets => Interlocked.Read(ref _sets);

    /// <summary>
    /// Cache hit rate (0-1); zero when no lookups have occurred.
    /// </summary>
    public double HitRate
    {
        get
        {
            var hits = Hits;
            var total = hits + Misses;
            return total > 0 ? hits / (double)total : 0.0;
        }
    }

    /// <summary>
    /// Gets a snapshot of the current counters for telemetry.
    /// </summary>
    public CacheStatistics GetStatistics() => new(Count, Hits, Misses, Sets, HitRate);

    /// <summary>
    /// Resets telemetry counters (cached entries are untouched).
    /// </summary>
    public void ResetStatistics()
    {
        Interlocked.Exchange(ref _hits, 0);
        Interlocked.Exchange(ref _misses, 0);
        Interlocked.Exchange(ref _sets, 0);
    }
}
/// <summary>
/// Immutable snapshot of cache statistics for telemetry.
/// </summary>
/// <param name="Count">Number of entries currently in the cache.</param>
/// <param name="Hits">Cumulative lookup hits since the last counter reset.</param>
/// <param name="Misses">Cumulative lookup misses since the last counter reset.</param>
/// <param name="Sets">Cumulative writes since the last counter reset.</param>
/// <param name="HitRate">Hits divided by total lookups, in [0, 1]; 0 when no lookups.</param>
public readonly record struct CacheStatistics(
    int Count,
    long Hits,
    long Misses,
    long Sets,
    double HitRate);
/// <summary>
/// Factory for creating score enrichment caches, one per evaluation context.
/// </summary>
public interface IScoreEnrichmentCacheFactory
{
    /// <summary>
    /// Creates a new, empty cache for an evaluation context.
    /// </summary>
    IScoreEnrichmentCache Create();
}
/// <summary>
/// Default factory producing a fresh <see cref="InMemoryScoreEnrichmentCache"/> per call.
/// </summary>
public sealed class InMemoryScoreEnrichmentCacheFactory : IScoreEnrichmentCacheFactory
{
    /// <inheritdoc />
    public IScoreEnrichmentCache Create()
    {
        return new InMemoryScoreEnrichmentCache();
    }
}

View File

@@ -0,0 +1,130 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-037 - Extend AddPolicyEngine() to include EWS services
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Options;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
/// <summary>
/// Extension methods for registering Evidence-Weighted Score services in the Policy Engine.
/// </summary>
public static class EvidenceWeightedScoreServiceCollectionExtensions
{
    /// <summary>
    /// Adds Evidence-Weighted Score services to the Policy Engine.
    /// </summary>
    /// <remarks>
    /// Registers:
    /// - <see cref="PolicyEvidenceWeightedScoreOptions"/> via configuration binding
    /// - <see cref="IFindingScoreEnricher"/> for score calculation during policy evaluation
    /// - <see cref="IScoreEnrichmentCache"/> for caching (when enabled)
    /// - <see cref="IDualEmitVerdictEnricher"/> for dual-emit mode
    /// - <see cref="IMigrationTelemetryService"/> for migration metrics
    /// - <see cref="ConfidenceToEwsAdapter"/> for legacy score translation
    /// All registrations use TryAdd, so callers may pre-register replacements.
    /// </remarks>
    /// <param name="services">Service collection.</param>
    /// <returns>The service collection for chaining.</returns>
    public static IServiceCollection AddEvidenceWeightedScore(this IServiceCollection services)
    {
        // Options binding
        services.AddOptions<PolicyEvidenceWeightedScoreOptions>()
            .BindConfiguration(PolicyEvidenceWeightedScoreOptions.SectionName);
        // Core calculator from Signals library (if not already registered)
        services.TryAddSingleton<IEvidenceWeightedScoreCalculator, EvidenceWeightedScoreCalculator>();
        // Score enricher (invokes calculator during policy evaluation)
        services.TryAddSingleton<IFindingScoreEnricher, EvidenceWeightedScoreEnricher>();
        // Cache for scores within evaluation context
        services.TryAddSingleton<IScoreEnrichmentCache, InMemoryScoreEnrichmentCache>();
        // Dual-emit enricher for migration
        services.TryAddSingleton<IDualEmitVerdictEnricher, DualEmitVerdictEnricher>();
        // Migration telemetry
        services.TryAddSingleton<IMigrationTelemetryService, MigrationTelemetryService>();
        // Confidence adapter for legacy comparison
        services.TryAddSingleton<ConfidenceToEwsAdapter>();
        return services;
    }

    /// <summary>
    /// Adds Evidence-Weighted Score services with custom configuration.
    /// </summary>
    /// <param name="services">Service collection.</param>
    /// <param name="configure">Configuration action applied on top of bound options.</param>
    /// <returns>The service collection for chaining.</returns>
    public static IServiceCollection AddEvidenceWeightedScore(
        this IServiceCollection services,
        Action<PolicyEvidenceWeightedScoreOptions> configure)
    {
        services.Configure(configure);
        return services.AddEvidenceWeightedScore();
    }

    /// <summary>
    /// Conditionally adds Evidence-Weighted Score services based on configuration.
    /// </summary>
    /// <remarks>
    /// This method reads the configuration at registration time and only registers
    /// services if <see cref="PolicyEvidenceWeightedScoreOptions.Enabled"/> is true.
    /// Use this when you want zero overhead when EWS is disabled. Note that later
    /// configuration reloads do not change which set of services was registered.
    /// </remarks>
    /// <param name="services">Service collection.</param>
    /// <param name="configuration">Configuration root for reading options.</param>
    /// <returns>The service collection for chaining.</returns>
    public static IServiceCollection AddEvidenceWeightedScoreIfEnabled(
        this IServiceCollection services,
        Microsoft.Extensions.Configuration.IConfiguration configuration)
    {
        var options = configuration
            .GetSection(PolicyEvidenceWeightedScoreOptions.SectionName)
            .Get<PolicyEvidenceWeightedScoreOptions>();
        if (options?.Enabled == true)
        {
            services.AddEvidenceWeightedScore();
        }
        else
        {
            // Register the no-op enricher when disabled.
            // FIX: NullFindingScoreEnricher only has a private constructor, so the
            // TryAddSingleton<TService, TImplementation>() overload would fail at
            // resolve time (the container cannot find a public constructor).
            // Register the existing singleton instance instead.
            services.TryAddSingleton<IFindingScoreEnricher>(NullFindingScoreEnricher.Instance);
        }
        return services;
    }

    /// <summary>
    /// Adds only the migration support services (telemetry, adapter) without full EWS.
    /// </summary>
    /// <remarks>
    /// Use this during Phase 1 (feature flag) when you want to prepare for migration
    /// but not yet enable EWS calculation.
    /// </remarks>
    /// <param name="services">Service collection.</param>
    /// <returns>The service collection for chaining.</returns>
    public static IServiceCollection AddEvidenceWeightedScoreMigrationSupport(
        this IServiceCollection services)
    {
        // Options binding
        services.AddOptions<PolicyEvidenceWeightedScoreOptions>()
            .BindConfiguration(PolicyEvidenceWeightedScoreOptions.SectionName);
        // Migration services only
        services.TryAddSingleton<IMigrationTelemetryService, MigrationTelemetryService>();
        services.TryAddSingleton<ConfidenceToEwsAdapter>();
        // No actual EWS calculation: register the no-op enricher.
        // FIX: use the singleton instance — the type's constructor is private, so the
        // container cannot activate it via the open-generic TryAddSingleton overload.
        services.TryAddSingleton<IFindingScoreEnricher>(NullFindingScoreEnricher.Instance);
        return services;
    }
}

View File

@@ -0,0 +1,197 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-003 - Create IFindingScoreEnricher interface
using StellaOps.Signals.EvidenceWeightedScore;
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
// Use FindingEvidence from the Normalizers namespace
// StellaOps.Signals.EvidenceWeightedScore.Normalizers.FindingEvidence
/// <summary>
/// Outcome of attempting to enrich a single finding with an Evidence-Weighted Score.
/// </summary>
public sealed record ScoreEnrichmentResult
{
    /// <summary>Finding identifier.</summary>
    public required string FindingId { get; init; }

    /// <summary>
    /// The calculated Evidence-Weighted Score result.
    /// Null when scoring did not run (feature disabled, skipped, or failed).
    /// </summary>
    public EvidenceWeightedScoreResult? Score { get; init; }

    /// <summary>
    /// True when a score was produced.
    /// </summary>
    public bool IsSuccess => Score is not null;

    /// <summary>
    /// Error message when scoring failed; otherwise null.
    /// </summary>
    public string? Error { get; init; }

    /// <summary>
    /// True when the score was served from the enrichment cache.
    /// </summary>
    public bool FromCache { get; init; }

    /// <summary>
    /// How long the calculation took; null for cached or skipped results.
    /// </summary>
    public TimeSpan? CalculationDuration { get; init; }

    /// <summary>
    /// Builds a successful result carrying the calculated score.
    /// </summary>
    public static ScoreEnrichmentResult Success(
        string findingId,
        EvidenceWeightedScoreResult score,
        bool fromCache = false,
        TimeSpan? duration = null)
    {
        return new ScoreEnrichmentResult
        {
            FindingId = findingId,
            Score = score,
            FromCache = fromCache,
            CalculationDuration = duration
        };
    }

    /// <summary>
    /// Builds a failed result carrying an error message.
    /// </summary>
    public static ScoreEnrichmentResult Failure(string findingId, string error)
    {
        return new ScoreEnrichmentResult
        {
            FindingId = findingId,
            Error = error
        };
    }

    /// <summary>
    /// Builds a result for a finding that was not scored (feature disabled).
    /// </summary>
    public static ScoreEnrichmentResult Skipped(string findingId)
    {
        return new ScoreEnrichmentResult
        {
            FindingId = findingId
        };
    }
}
/// <summary>
/// Interface for enriching findings with Evidence-Weighted Scores during policy evaluation.
/// </summary>
public interface IFindingScoreEnricher
{
    /// <summary>
    /// Enriches a finding with an Evidence-Weighted Score.
    /// </summary>
    /// <param name="evidence">Evidence collected for the finding.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <returns>
    /// Score enrichment result; implementations in this file report errors via
    /// <see cref="ScoreEnrichmentResult.Failure"/> rather than throwing.
    /// </returns>
    ValueTask<ScoreEnrichmentResult> EnrichAsync(
        FindingEvidence evidence,
        CancellationToken cancellationToken = default);
    /// <summary>
    /// Enriches a finding synchronously (for pipeline integration).
    /// </summary>
    /// <param name="evidence">Evidence collected for the finding.</param>
    /// <returns>Score enrichment result.</returns>
    ScoreEnrichmentResult Enrich(FindingEvidence evidence);
    /// <summary>
    /// Enriches multiple findings in batch, yielding one result per input in order.
    /// </summary>
    /// <param name="evidenceList">List of evidence for findings.</param>
    /// <param name="cancellationToken">Cancellation token; implementations here end the stream on cancellation.</param>
    /// <returns>Enumerable of score enrichment results.</returns>
    IAsyncEnumerable<ScoreEnrichmentResult> EnrichBatchAsync(
        IEnumerable<FindingEvidence> evidenceList,
        CancellationToken cancellationToken = default);
    /// <summary>
    /// Whether EWS enrichment is enabled. When false, results are "skipped".
    /// </summary>
    bool IsEnabled { get; }
}
/// <summary>
/// Cache for EWS scores within an evaluation context.
/// Implementations must be thread-safe for concurrent access.
/// </summary>
public interface IScoreEnrichmentCache
{
    /// <summary>
    /// Tries to get a cached score for a finding.
    /// </summary>
    /// <param name="findingId">Finding identifier.</param>
    /// <param name="score">Cached score if found; null otherwise. Callers in this
    /// file still null-check the value even on a true return.</param>
    /// <returns>True if found in cache.</returns>
    bool TryGet(string findingId, out EvidenceWeightedScoreResult? score);
    /// <summary>
    /// Caches a score for a finding, overwriting any existing entry for the same id.
    /// </summary>
    /// <param name="findingId">Finding identifier.</param>
    /// <param name="score">Score to cache.</param>
    void Set(string findingId, EvidenceWeightedScoreResult score);
    /// <summary>
    /// Current number of cached entries.
    /// </summary>
    int Count { get; }
    /// <summary>
    /// Removes all cached entries.
    /// </summary>
    void Clear();
}
/// <summary>
/// No-op <see cref="IFindingScoreEnricher"/> used when EWS is disabled.
/// Every call yields a "skipped" result; access it via <see cref="Instance"/>.
/// </summary>
public sealed class NullFindingScoreEnricher : IFindingScoreEnricher
{
    /// <summary>
    /// Singleton instance.
    /// </summary>
    public static NullFindingScoreEnricher Instance { get; } = new();

    private NullFindingScoreEnricher() { }

    /// <inheritdoc />
    public bool IsEnabled => false;

    /// <inheritdoc />
    public ValueTask<ScoreEnrichmentResult> EnrichAsync(
        FindingEvidence evidence,
        CancellationToken cancellationToken = default)
        => ValueTask.FromResult(Enrich(evidence));

    /// <inheritdoc />
    public ScoreEnrichmentResult Enrich(FindingEvidence evidence)
        => ScoreEnrichmentResult.Skipped(evidence.FindingId);

    /// <inheritdoc />
    public async IAsyncEnumerable<ScoreEnrichmentResult> EnrichBatchAsync(
        IEnumerable<FindingEvidence> evidenceList,
        [System.Runtime.CompilerServices.EnumeratorCancellation] CancellationToken cancellationToken = default)
    {
        // Awaiting a completed task satisfies the async-iterator contract and
        // continues synchronously — no scheduling occurs.
        await Task.CompletedTask;

        foreach (var item in evidenceList)
        {
            if (cancellationToken.IsCancellationRequested)
            {
                yield break;
            }

            yield return ScoreEnrichmentResult.Skipped(item.FindingId);
        }
    }
}

View File

@@ -0,0 +1,468 @@
// -----------------------------------------------------------------------------
// MigrationTelemetryService.cs
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-034
// Description: Migration telemetry comparing Confidence vs EWS rankings
// -----------------------------------------------------------------------------
using System.Collections.Concurrent;
using System.Diagnostics.Metrics;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using StellaOps.Policy.Confidence.Models;
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
/// <summary>
/// Aggregated statistics for migration telemetry (Confidence vs EWS comparison).
/// </summary>
public sealed record MigrationTelemetryStats
{
    /// <summary>
    /// Total verdicts processed.
    /// </summary>
    public long TotalVerdicts { get; init; }
    /// <summary>
    /// Verdicts with both Confidence and EWS scores.
    /// </summary>
    public long DualScoredVerdicts { get; init; }
    /// <summary>
    /// Verdicts where scores are aligned (absolute difference &lt; 20 points).
    /// </summary>
    public long AlignedVerdicts { get; init; }
    /// <summary>
    /// Verdicts where tier/bucket match semantically.
    /// </summary>
    public long TierMatchVerdicts { get; init; }
    /// <summary>
    /// Alignment rate (0-1); zero when no dual-scored verdicts exist.
    /// </summary>
    public double AlignmentRate => DualScoredVerdicts > 0
        ? (double)AlignedVerdicts / DualScoredVerdicts
        : 0;
    /// <summary>
    /// Tier match rate (0-1); zero when no dual-scored verdicts exist.
    /// </summary>
    public double TierMatchRate => DualScoredVerdicts > 0
        ? (double)TierMatchVerdicts / DualScoredVerdicts
        : 0;
    /// <summary>
    /// Average absolute score difference across dual-scored verdicts.
    /// </summary>
    public double AverageScoreDifference { get; init; }
    /// <summary>
    /// Distribution of score differences by range label (e.g. "0-5", "30+").
    /// </summary>
    public IReadOnlyDictionary<string, long> ScoreDifferenceDistribution { get; init; }
        = new Dictionary<string, long>();
    /// <summary>
    /// Comparison counts keyed by Confidence tier name.
    /// </summary>
    public IReadOnlyDictionary<string, long> ByConfidenceTier { get; init; }
        = new Dictionary<string, long>();
    /// <summary>
    /// Comparison counts keyed by EWS bucket name.
    /// </summary>
    public IReadOnlyDictionary<string, long> ByEwsBucket { get; init; }
        = new Dictionary<string, long>();
    /// <summary>
    /// Timestamp when stats were captured.
    /// </summary>
    public DateTimeOffset CapturedAt { get; init; } = DateTimeOffset.UtcNow;
}
/// <summary>
/// Individual ranking comparison sample for detailed analysis.
/// </summary>
public sealed record RankingComparisonSample
{
    /// <summary>
    /// Creates a new RankingComparisonSample.
    /// </summary>
    /// <param name="findingId">Finding identifier the comparison was recorded for.</param>
    /// <param name="confidenceValue">Legacy Confidence value (treated as a [0, 1] fraction by the recorder).</param>
    /// <param name="ewsScore">Evidence-Weighted Score on a 0-100 scale.</param>
    /// <param name="confidenceTier">Tier of the Confidence score.</param>
    /// <param name="ewsBucket">Bucket of the EWS score.</param>
    /// <param name="scoreDifference">Absolute difference between the inverted Confidence (0-100) and the EWS score.</param>
    /// <param name="isAligned">Whether the difference was below the alignment threshold at recording time.</param>
    /// <param name="tierBucketMatch">Whether the tier and bucket were considered a semantic match.</param>
    /// <param name="timestamp">When the comparison was recorded.</param>
    public RankingComparisonSample(
        string findingId,
        decimal confidenceValue,
        int ewsScore,
        ConfidenceTier confidenceTier,
        ScoreBucket ewsBucket,
        int scoreDifference,
        bool isAligned,
        bool tierBucketMatch,
        DateTimeOffset timestamp)
    {
        FindingId = findingId;
        ConfidenceValue = confidenceValue;
        EwsScore = ewsScore;
        ConfidenceTier = confidenceTier;
        EwsBucket = ewsBucket;
        ScoreDifference = scoreDifference;
        IsAligned = isAligned;
        TierBucketMatch = tierBucketMatch;
        Timestamp = timestamp;
    }
    /// <summary>Finding identifier the comparison was recorded for.</summary>
    public string FindingId { get; }
    /// <summary>Legacy Confidence value as recorded (presumably in [0, 1] — the recorder inverts it via (1 - value) * 100).</summary>
    public decimal ConfidenceValue { get; }
    /// <summary>Evidence-Weighted Score (0-100).</summary>
    public int EwsScore { get; }
    /// <summary>Tier of the Confidence score.</summary>
    public ConfidenceTier ConfidenceTier { get; }
    /// <summary>Bucket of the EWS score.</summary>
    public ScoreBucket EwsBucket { get; }
    /// <summary>Absolute difference between the inverted Confidence (0-100) and the EWS score.</summary>
    public int ScoreDifference { get; }
    /// <summary>True when the score difference was below the alignment threshold (20 points at recording time).</summary>
    public bool IsAligned { get; }
    /// <summary>True when the Confidence tier and EWS bucket were considered a semantic match.</summary>
    public bool TierBucketMatch { get; }
    /// <summary>When the comparison was recorded (UTC).</summary>
    public DateTimeOffset Timestamp { get; }
}
/// <summary>
/// Service for tracking migration telemetry comparing Confidence vs EWS.
/// </summary>
public interface IMigrationTelemetryService
{
    /// <summary>
    /// Whether migration telemetry is enabled (EWS on, dual-emit on, comparison telemetry on).
    /// </summary>
    bool IsEnabled { get; }
    /// <summary>
    /// Records a comparison between Confidence and EWS scores for one finding.
    /// No-op when <see cref="IsEnabled"/> is false.
    /// </summary>
    void RecordComparison(
        string findingId,
        ConfidenceScore confidence,
        EvidenceWeightedScoreResult ewsScore);
    /// <summary>
    /// Gets a snapshot of the current aggregated statistics.
    /// </summary>
    MigrationTelemetryStats GetStats();
    /// <summary>
    /// Gets recent comparison samples (for debugging).
    /// </summary>
    IReadOnlyList<RankingComparisonSample> GetRecentSamples(int count = 100);
    /// <summary>
    /// Resets all telemetry counters, distributions, and stored samples.
    /// </summary>
    void Reset();
}
/// <summary>
/// Implementation of migration telemetry service.
/// </summary>
public sealed class MigrationTelemetryService : IMigrationTelemetryService
{
private readonly IOptionsMonitor<PolicyEvidenceWeightedScoreOptions> _options;
private readonly ILogger<MigrationTelemetryService> _logger;
// Counters
private long _totalVerdicts;
private long _dualScoredVerdicts;
private long _alignedVerdicts;
private long _tierMatchVerdicts;
private long _totalScoreDifference;
// Distribution counters
private readonly ConcurrentDictionary<string, long> _scoreDiffDistribution = new();
private readonly ConcurrentDictionary<string, long> _byConfidenceTier = new();
private readonly ConcurrentDictionary<string, long> _byEwsBucket = new();
// Recent samples (circular buffer)
private readonly ConcurrentQueue<RankingComparisonSample> _recentSamples = new();
private const int MaxSamples = 1000;
// Metrics
private readonly Counter<long> _comparisonCounter;
private readonly Counter<long> _alignmentCounter;
private readonly Counter<long> _tierMatchCounter;
private readonly Histogram<double> _scoreDiffHistogram;
/// <summary>
/// Creates a new MigrationTelemetryService.
/// </summary>
public MigrationTelemetryService(
IOptionsMonitor<PolicyEvidenceWeightedScoreOptions> options,
ILogger<MigrationTelemetryService> logger,
IMeterFactory? meterFactory = null)
{
_options = options ?? throw new ArgumentNullException(nameof(options));
_logger = logger ?? throw new ArgumentNullException(nameof(logger));
var meter = meterFactory?.Create("StellaOps.Policy.Migration")
?? new Meter("StellaOps.Policy.Migration");
_comparisonCounter = meter.CreateCounter<long>(
"stellaops.policy.migration.comparisons",
"comparisons",
"Total Confidence vs EWS comparisons");
_alignmentCounter = meter.CreateCounter<long>(
"stellaops.policy.migration.aligned",
"verdicts",
"Aligned verdict count");
_tierMatchCounter = meter.CreateCounter<long>(
"stellaops.policy.migration.tier_match",
"verdicts",
"Tier/bucket match count");
_scoreDiffHistogram = meter.CreateHistogram<double>(
"stellaops.policy.migration.score_diff",
"points",
"Score difference distribution");
// Initialize distribution buckets
foreach (var range in new[] { "0-5", "5-10", "10-20", "20-30", "30+" })
{
_scoreDiffDistribution[range] = 0;
}
foreach (var tier in Enum.GetNames<ConfidenceTier>())
{
_byConfidenceTier[tier] = 0;
}
foreach (var bucket in Enum.GetNames<ScoreBucket>())
{
_byEwsBucket[bucket] = 0;
}
}
/// <inheritdoc />
public bool IsEnabled => _options.CurrentValue.Enabled
&& _options.CurrentValue.DualEmitMode
&& _options.CurrentValue.EmitComparisonTelemetry;
/// <inheritdoc />
public void RecordComparison(
string findingId,
ConfidenceScore confidence,
EvidenceWeightedScoreResult ewsScore)
{
if (!IsEnabled)
{
return;
}
ArgumentException.ThrowIfNullOrWhiteSpace(findingId);
ArgumentNullException.ThrowIfNull(confidence);
ArgumentNullException.ThrowIfNull(ewsScore);
try
{
Interlocked.Increment(ref _totalVerdicts);
Interlocked.Increment(ref _dualScoredVerdicts);
// Calculate comparison metrics
var confidenceAs100 = (int)Math.Round((1.0m - confidence.Value) * 100m);
var scoreDiff = Math.Abs(confidenceAs100 - ewsScore.Score);
var isAligned = scoreDiff < 20;
var tierMatch = IsTierBucketMatch(confidence.Tier, ewsScore.Bucket);
// Update counters
if (isAligned)
{
Interlocked.Increment(ref _alignedVerdicts);
_alignmentCounter.Add(1);
}
if (tierMatch)
{
Interlocked.Increment(ref _tierMatchVerdicts);
_tierMatchCounter.Add(1);
}
Interlocked.Add(ref _totalScoreDifference, scoreDiff);
// Update distributions
var diffRange = scoreDiff switch
{
< 5 => "0-5",
< 10 => "5-10",
< 20 => "10-20",
< 30 => "20-30",
_ => "30+"
};
_scoreDiffDistribution.AddOrUpdate(diffRange, 1, (_, v) => v + 1);
_byConfidenceTier.AddOrUpdate(confidence.Tier.ToString(), 1, (_, v) => v + 1);
_byEwsBucket.AddOrUpdate(ewsScore.Bucket.ToString(), 1, (_, v) => v + 1);
// Record metrics
_comparisonCounter.Add(1, new KeyValuePair<string, object?>("aligned", isAligned));
_scoreDiffHistogram.Record(scoreDiff);
// Store sample
var sample = new RankingComparisonSample(
findingId: findingId,
confidenceValue: confidence.Value,
ewsScore: ewsScore.Score,
confidenceTier: confidence.Tier,
ewsBucket: ewsScore.Bucket,
scoreDifference: scoreDiff,
isAligned: isAligned,
tierBucketMatch: tierMatch,
timestamp: DateTimeOffset.UtcNow
);
_recentSamples.Enqueue(sample);
// Trim samples if needed
while (_recentSamples.Count > MaxSamples)
{
_recentSamples.TryDequeue(out _);
}
// Log significant misalignments
if (!isAligned && scoreDiff >= 30)
{
_logger.LogDebug(
"Significant score misalignment for {FindingId}: Confidence={ConfidenceValue:P0} ({Tier}) vs EWS={EwsScore} ({Bucket}), diff={Diff}",
findingId,
confidence.Value,
confidence.Tier,
ewsScore.Score,
ewsScore.Bucket,
scoreDiff);
}
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Failed to record migration telemetry for {FindingId}", findingId);
}
}
/// <inheritdoc />
public MigrationTelemetryStats GetStats()
{
    // Snapshot the dual-scored count first so the average below is computed
    // against the same denominator that is reported.
    var dualScored = Interlocked.Read(ref _dualScoredVerdicts);
    var cumulativeDiff = Interlocked.Read(ref _totalScoreDifference);

    return new MigrationTelemetryStats
    {
        TotalVerdicts = Interlocked.Read(ref _totalVerdicts),
        DualScoredVerdicts = dualScored,
        AlignedVerdicts = Interlocked.Read(ref _alignedVerdicts),
        TierMatchVerdicts = Interlocked.Read(ref _tierMatchVerdicts),
        // Guard against division by zero when nothing has been dual-scored yet.
        AverageScoreDifference = dualScored == 0 ? 0 : (double)cumulativeDiff / dualScored,
        // Copy the concurrent maps so callers get a stable, detached snapshot.
        ScoreDifferenceDistribution = new Dictionary<string, long>(_scoreDiffDistribution),
        ByConfidenceTier = new Dictionary<string, long>(_byConfidenceTier),
        ByEwsBucket = new Dictionary<string, long>(_byEwsBucket),
        CapturedAt = DateTimeOffset.UtcNow
    };
}
/// <inheritdoc />
public IReadOnlyList<RankingComparisonSample> GetRecentSamples(int count = 100)
{
    // Never hand back more than the retention window holds; take the newest entries.
    var take = Math.Min(count, MaxSamples);
    return _recentSamples.TakeLast(take).ToList();
}
/// <inheritdoc />
public void Reset()
{
    // Zero the scalar counters, then clear the per-key distributions,
    // and finally drain any buffered comparison samples.
    Interlocked.Exchange(ref _totalVerdicts, 0);
    Interlocked.Exchange(ref _dualScoredVerdicts, 0);
    Interlocked.Exchange(ref _alignedVerdicts, 0);
    Interlocked.Exchange(ref _tierMatchVerdicts, 0);
    Interlocked.Exchange(ref _totalScoreDifference, 0);

    _scoreDiffDistribution.Clear();
    _byConfidenceTier.Clear();
    _byEwsBucket.Clear();

    while (_recentSamples.TryDequeue(out _))
    {
        // Intentionally empty: dequeue until the queue is exhausted.
    }

    _logger.LogInformation("Migration telemetry reset");
}
/// <summary>
/// Returns true when the given legacy confidence tier and EWS bucket form one
/// of the accepted tier/bucket pairings (each tier accepts at most two
/// adjacent buckets).
/// </summary>
private static bool IsTierBucketMatch(ConfidenceTier tier, ScoreBucket bucket)
{
    return tier switch
    {
        ConfidenceTier.VeryHigh => bucket == ScoreBucket.Watchlist,
        ConfidenceTier.High => bucket is ScoreBucket.Watchlist or ScoreBucket.Investigate,
        ConfidenceTier.Medium => bucket is ScoreBucket.Investigate or ScoreBucket.ScheduleNext,
        ConfidenceTier.Low => bucket is ScoreBucket.ScheduleNext or ScoreBucket.ActNow,
        ConfidenceTier.VeryLow => bucket == ScoreBucket.ActNow,
        _ => false
    };
}
}
/// <summary>
/// Extension methods for migration telemetry reporting.
/// </summary>
public static class MigrationTelemetryExtensions
{
    // Canonical display order for the score-difference histogram ranges.
    // Plain alphabetical ordering would interleave them incorrectly
    // ("10-20", "20-30", "30+" would all sort before "5-10").
    private static readonly string[] DiffRangeOrder = { "0-5", "5-10", "10-20", "20-30", "30+" };

    /// <summary>
    /// Generates a human-readable, multi-section report from migration stats.
    /// </summary>
    /// <param name="stats">The captured telemetry snapshot.</param>
    /// <returns>A newline-joined report string.</returns>
    public static string ToReport(this MigrationTelemetryStats stats)
    {
        ArgumentNullException.ThrowIfNull(stats);

        var lines = new List<string>
        {
            "=== Migration Telemetry Report ===",
            $"Captured: {stats.CapturedAt:O}",
            "",
            "--- Summary ---",
            $"Total Verdicts: {stats.TotalVerdicts:N0}",
            $"Dual-Scored: {stats.DualScoredVerdicts:N0}",
            $"Aligned: {stats.AlignedVerdicts:N0} ({stats.AlignmentRate:P1})",
            $"Tier Match: {stats.TierMatchVerdicts:N0} ({stats.TierMatchRate:P1})",
            $"Avg Score Diff: {stats.AverageScoreDifference:F1}",
            "",
            "--- Score Difference Distribution ---"
        };

        // Order ranges numerically via the canonical list; any unknown keys
        // sort last, alphabetically, so nothing is ever dropped.
        foreach (var (range, count) in stats.ScoreDifferenceDistribution
                     .OrderBy(kv => RangeSortKey(kv.Key))
                     .ThenBy(kv => kv.Key, StringComparer.Ordinal))
        {
            // Percentage is relative to dual-scored verdicts; avoid div-by-zero.
            var pct = stats.DualScoredVerdicts > 0 ? (double)count / stats.DualScoredVerdicts : 0;
            lines.Add($"  {range}: {count:N0} ({pct:P1})");
        }

        lines.Add("");
        lines.Add("--- By Confidence Tier ---");
        foreach (var (tier, count) in stats.ByConfidenceTier.OrderBy(kv => kv.Key))
        {
            lines.Add($"  {tier}: {count:N0}");
        }

        lines.Add("");
        lines.Add("--- By EWS Bucket ---");
        foreach (var (bucket, count) in stats.ByEwsBucket.OrderBy(kv => kv.Key))
        {
            lines.Add($"  {bucket}: {count:N0}");
        }

        return string.Join(Environment.NewLine, lines);
    }

    /// <summary>
    /// Gets a compact single-line summary of the stats.
    /// </summary>
    /// <param name="stats">The captured telemetry snapshot.</param>
    /// <returns>A one-line summary suitable for log output.</returns>
    public static string ToSummaryLine(this MigrationTelemetryStats stats)
    {
        ArgumentNullException.ThrowIfNull(stats);

        return $"Migration: {stats.DualScoredVerdicts:N0} dual-scored, " +
               $"{stats.AlignmentRate:P0} aligned, " +
               $"{stats.TierMatchRate:P0} tier match, " +
               $"avg diff {stats.AverageScoreDifference:F1}";
    }

    /// <summary>
    /// Maps a known range label to its numeric position; unknown labels sort last.
    /// </summary>
    private static int RangeSortKey(string range)
    {
        var index = Array.IndexOf(DiffRangeOrder, range);
        return index >= 0 ? index : int.MaxValue;
    }
}

View File

@@ -0,0 +1,314 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-005, PINT-8200-006 - Integrate enricher into PolicyEvaluator pipeline
using StellaOps.Signals.EvidenceWeightedScore.Normalizers;
// Type aliases to avoid conflicts with types in StellaOps.Policy.Engine.Scoring
using EwsReachabilityInput = StellaOps.Signals.EvidenceWeightedScore.ReachabilityInput;
using EwsReachabilityState = StellaOps.Signals.EvidenceWeightedScore.ReachabilityState;
using EwsRuntimeInput = StellaOps.Signals.EvidenceWeightedScore.RuntimeInput;
using EwsRuntimePosture = StellaOps.Signals.EvidenceWeightedScore.RuntimePosture;
using EwsBackportInput = StellaOps.Signals.EvidenceWeightedScore.BackportInput;
using EwsBackportStatus = StellaOps.Signals.EvidenceWeightedScore.BackportStatus;
using EwsBackportEvidenceTier = StellaOps.Signals.EvidenceWeightedScore.BackportEvidenceTier;
using EwsExploitInput = StellaOps.Signals.EvidenceWeightedScore.ExploitInput;
using EwsKevStatus = StellaOps.Signals.EvidenceWeightedScore.KevStatus;
using EwsSourceTrustInput = StellaOps.Signals.EvidenceWeightedScore.SourceTrustInput;
using EwsIssuerType = StellaOps.Signals.EvidenceWeightedScore.IssuerType;
using EwsMitigationInput = StellaOps.Signals.EvidenceWeightedScore.MitigationInput;
using EwsActiveMitigation = StellaOps.Signals.EvidenceWeightedScore.ActiveMitigation;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
/// <summary>
/// Extends PolicyEvaluationContext with EWS evidence extraction.
/// Internal because PolicyEvaluationContext is internal.
/// </summary>
internal static class PolicyEvaluationContextEwsExtensions
{
    // Marker substrings used to classify an advisory source into an issuer
    // type. Checks run in priority order: vendor, distribution,
    // CNA/government, security researcher, then community as the fallback.
    private static readonly string[] VendorMarkers =
    {
        "vendor", "red hat", "redhat", "microsoft", "google",
        "oracle", "vmware", "cisco", "apache"
    };

    private static readonly string[] DistributionMarkers =
    {
        "distro", "ubuntu", "debian", "alpine", "fedora",
        "centos", "suse", "canonical"
    };

    private static readonly string[] CnaMarkers =
    {
        "nvd", "cve", "nist", "cisa", "mitre"
    };

    private static readonly string[] ResearcherMarkers =
    {
        "research", "security", "vuln", "snyk", "qualys"
    };

    /// <summary>
    /// Extracts FindingEvidence from a policy evaluation context for EWS calculation.
    /// Maps existing context data to the normalizer input format.
    /// </summary>
    /// <param name="context">The policy evaluation context.</param>
    /// <param name="findingId">The finding identifier.</param>
    /// <param name="epssScore">EPSS score if available (0-1).</param>
    /// <param name="epssPercentile">EPSS percentile if available (0-100).</param>
    /// <param name="isInKev">Whether the CVE is in the KEV catalog.</param>
    /// <param name="kevAddedDate">When the CVE was added to KEV.</param>
    /// <returns>FindingEvidence for EWS calculation.</returns>
    /// <exception cref="ArgumentNullException">When <paramref name="context"/> is null.</exception>
    /// <exception cref="ArgumentException">When <paramref name="findingId"/> is null or empty.</exception>
    public static FindingEvidence ExtractEwsEvidence(
        this Evaluation.PolicyEvaluationContext context,
        string findingId,
        double? epssScore = null,
        double? epssPercentile = null,
        bool isInKev = false,
        DateTimeOffset? kevAddedDate = null)
    {
        ArgumentNullException.ThrowIfNull(context);
        ArgumentException.ThrowIfNullOrEmpty(findingId);

        return new FindingEvidence
        {
            FindingId = findingId,
            Reachability = ExtractReachability(context),
            Runtime = ExtractRuntime(context),
            Backport = ExtractBackport(context),
            Exploit = ExtractExploit(epssScore, epssPercentile, isInKev, kevAddedDate),
            SourceTrust = ExtractSourceTrust(context),
            Mitigations = ExtractMitigations(context)
        };
    }

    /// <summary>
    /// Maps the context's reachability facts to the EWS reachability input.
    /// Returns null when the state is unknown and confidence is zero,
    /// i.e. there is no reachability evidence at all.
    /// </summary>
    private static EwsReachabilityInput? ExtractReachability(Evaluation.PolicyEvaluationContext context)
    {
        var reachability = context.Reachability;

        // Map the context state string to the ReachabilityState enum.
        // A "reachable" verdict is upgraded to DynamicReachable when the
        // context also carries runtime evidence.
        var state = reachability.State?.ToLowerInvariant() switch
        {
            "reachable" => reachability.HasRuntimeEvidence
                ? EwsReachabilityState.DynamicReachable
                : EwsReachabilityState.StaticReachable,
            "unreachable" => EwsReachabilityState.NotReachable,
            "conditional" => EwsReachabilityState.PotentiallyReachable,
            "under_investigation" => EwsReachabilityState.Unknown,
            "live_exploit" => EwsReachabilityState.LiveExploitPath,
            _ => EwsReachabilityState.Unknown
        };

        // Unknown with no confidence means no evidence: omit the dimension.
        if (state == EwsReachabilityState.Unknown && reachability.Confidence == 0)
        {
            return null;
        }

        return new EwsReachabilityInput
        {
            State = state,
            Confidence = (double)reachability.Confidence,
            HasTaintTracking = reachability.HasRuntimeEvidence,
            HopCount = 0, // Not available in current context
            EvidenceSource = reachability.Source
        };
    }

    /// <summary>
    /// Builds the runtime-signal input. Returns null when the context carries
    /// no runtime evidence.
    /// </summary>
    private static EwsRuntimeInput? ExtractRuntime(Evaluation.PolicyEvaluationContext context)
    {
        // Only create runtime input if there's runtime evidence.
        if (!context.Reachability.HasRuntimeEvidence)
        {
            return null;
        }

        // The context does not expose observation timestamps, so any runtime
        // evidence is treated as fully recent (factor 1.0).
        // TODO(review): derive decay from real observation ages when available.
        const double recencyFactor = 1.0;

        return new EwsRuntimeInput
        {
            Posture = EwsRuntimePosture.ActiveTracing,
            ObservationCount = 1, // Default to 1 if we have evidence
            LastObservation = context.Now,
            RecencyFactor = recencyFactor
        };
    }

    /// <summary>
    /// Derives backport evidence from VEX statements: the first statement whose
    /// status is "fixed" or "not_affected" supplies the evidence. Returns null
    /// when no such statement exists.
    /// </summary>
    private static EwsBackportInput? ExtractBackport(Evaluation.PolicyEvaluationContext context)
    {
        var vexStatements = context.Vex.Statements;
        if (vexStatements.IsDefaultOrEmpty)
        {
            return null;
        }

        static bool IsBackportStatus(string status) =>
            status.Equals("fixed", StringComparison.OrdinalIgnoreCase) ||
            status.Equals("not_affected", StringComparison.OrdinalIgnoreCase);

        // Single pass over the statements (previously scanned twice via
        // Any(...) followed by FirstOrDefault(...) with the same predicate).
        var statement = vexStatements.FirstOrDefault(s => IsBackportStatus(s.Status));
        if (statement is null)
        {
            return null;
        }

        var status = statement.Status.Equals("fixed", StringComparison.OrdinalIgnoreCase)
            ? EwsBackportStatus.Fixed
            : EwsBackportStatus.NotAffected;

        return new EwsBackportInput
        {
            Status = status,
            EvidenceTier = EwsBackportEvidenceTier.VendorVex, // VEX-based evidence
            EvidenceSource = context.Advisory.Source ?? "unknown",
            Confidence = 0.8, // VEX statements have high confidence
            ProofId = statement.StatementId
        };
    }

    /// <summary>
    /// Builds the exploit-likelihood input from EPSS and KEV data. Returns null
    /// when neither an EPSS score nor KEV membership is available.
    /// </summary>
    private static EwsExploitInput? ExtractExploit(
        double? epssScore,
        double? epssPercentile,
        bool isInKev,
        DateTimeOffset? kevAddedDate)
    {
        // Only create exploit input if we have some data.
        if (!epssScore.HasValue && !isInKev)
        {
            return null;
        }

        return new EwsExploitInput
        {
            EpssScore = epssScore ?? 0.0,
            EpssPercentile = epssPercentile ?? 0.0,
            KevStatus = isInKev ? EwsKevStatus.InKev : EwsKevStatus.NotInKev,
            KevAddedDate = kevAddedDate,
            PublicExploitAvailable = false // Would need additional data source
        };
    }

    /// <summary>
    /// Builds the source-trust input from the advisory source plus VEX coverage
    /// and provenance attestation flags. Returns null when no source is known.
    /// </summary>
    private static EwsSourceTrustInput? ExtractSourceTrust(Evaluation.PolicyEvaluationContext context)
    {
        var source = context.Advisory.Source;
        if (string.IsNullOrEmpty(source))
        {
            return null;
        }

        var issuerType = MapSourceToIssuerType(source);

        // Heuristic constants: VEX coverage raises completeness; a provenance
        // attestation raises both trust and replayability.
        var vexCoverage = context.Vex.Statements.IsDefaultOrEmpty ? 0.3 : 0.7;
        var provenanceScore = context.ProvenanceAttested == true ? 0.8 : 0.4;
        var replayability = context.ProvenanceAttested == true ? 0.9 : 0.5;

        return new EwsSourceTrustInput
        {
            IssuerType = issuerType,
            ProvenanceTrust = provenanceScore,
            CoverageCompleteness = vexCoverage,
            Replayability = replayability,
            IsCryptographicallyAttested = context.ProvenanceAttested == true
        };
    }

    /// <summary>
    /// Heuristically classifies an advisory source name into an issuer type by
    /// case-insensitive substring matching; falls back to Community.
    /// </summary>
    private static EwsIssuerType MapSourceToIssuerType(string source)
    {
        var sourceLower = source.ToLowerInvariant();

        // Check order matters: e.g. "ubuntu security" should classify as a
        // distribution, not a security researcher.
        if (VendorMarkers.Any(m => sourceLower.Contains(m)))
        {
            return EwsIssuerType.Vendor;
        }

        if (DistributionMarkers.Any(m => sourceLower.Contains(m)))
        {
            return EwsIssuerType.Distribution;
        }

        if (CnaMarkers.Any(m => sourceLower.Contains(m)))
        {
            return EwsIssuerType.Cna;
        }

        if (ResearcherMarkers.Any(m => sourceLower.Contains(m)))
        {
            return EwsIssuerType.SecurityResearcher;
        }

        return EwsIssuerType.Community;
    }

    /// <summary>
    /// Builds the mitigation input. Currently no mitigation sources are wired
    /// up, so this always returns null; the aggregation path below is in place
    /// for when detection is implemented.
    /// </summary>
    private static EwsMitigationInput? ExtractMitigations(Evaluation.PolicyEvaluationContext context)
    {
        var mitigations = new List<EwsActiveMitigation>();

        // TODO: In a full implementation, this would check context for:
        // - Network isolation flags
        // - Feature flags
        // - Seccomp/AppArmor profiles
        // - Runtime protections
        if (mitigations.Count == 0)
        {
            return null;
        }

        return new EwsMitigationInput
        {
            ActiveMitigations = mitigations,
            CombinedEffectiveness = CalculateCombinedEffectiveness(mitigations)
        };
    }

    /// <summary>
    /// Combines mitigation effectiveness values with diminishing returns:
    /// each mitigation only applies to the risk the stronger ones left behind.
    /// Result is clamped to [0, 1].
    /// </summary>
    private static double CalculateCombinedEffectiveness(IReadOnlyList<EwsActiveMitigation> mitigations)
    {
        if (mitigations.Count == 0)
        {
            return 0.0;
        }

        var combined = 0.0;
        var remaining = 1.0;
        foreach (var mitigation in mitigations.OrderByDescending(m => m.Effectiveness))
        {
            combined += mitigation.Effectiveness * remaining;
            remaining *= (1.0 - mitigation.Effectiveness);
        }

        return Math.Clamp(combined, 0.0, 1.0);
    }
}

View File

@@ -0,0 +1,232 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright © 2025 StellaOps
// Sprint: SPRINT_8200_0012_0003_policy_engine_integration
// Task: PINT-8200-001 - Create PolicyEvidenceWeightedScoreOptions
using StellaOps.Signals.EvidenceWeightedScore;
namespace StellaOps.Policy.Engine.Scoring.EvidenceWeightedScore;
/// <summary>
/// Settings that control how Evidence-Weighted Score (EWS) participates in
/// the Policy Engine: rollout flags, dual-emit migration behavior, caching
/// limits, and per-dimension overrides.
/// </summary>
public sealed class PolicyEvidenceWeightedScoreOptions
{
    /// <summary>
    /// Configuration section name.
    /// </summary>
    public const string SectionName = "PolicyEngine:EvidenceWeightedScore";

    /// <summary>
    /// Whether EWS is enabled in the policy engine (default: false for safe rollout).
    /// When false, only legacy Confidence scoring is used.
    /// </summary>
    public bool Enabled { get; set; }

    /// <summary>
    /// Whether both Confidence and EWS scores are emitted during migration,
    /// enabling A/B comparison and gradual rollout.
    /// Only applies when <see cref="Enabled"/> is true.
    /// </summary>
    public bool DualEmitMode { get; set; } = true;

    /// <summary>
    /// Whether EWS drives verdict status decisions as the primary score.
    /// When false (default), EWS is still calculated but Confidence decides.
    /// Only applies when <see cref="Enabled"/> is true.
    /// </summary>
    public bool UseAsPrimaryScore { get; set; }

    /// <summary>
    /// Whether comparison telemetry between EWS and Confidence scores is emitted.
    /// Only applies when <see cref="DualEmitMode"/> is true.
    /// </summary>
    public bool EmitComparisonTelemetry { get; set; } = true;

    /// <summary>
    /// Whether EWS results are cached within a single evaluation context.
    /// Default: true for performance.
    /// </summary>
    public bool EnableCaching { get; set; } = true;

    /// <summary>
    /// Upper bound on cached scores per evaluation context, preventing
    /// unbounded memory usage during large batch evaluations.
    /// </summary>
    public int MaxCachedScoresPerContext { get; set; } = 10_000;

    /// <summary>
    /// Policy version/digest to use. When null, the default policy from
    /// options applies. Can be overridden per-tenant via tenant configuration.
    /// </summary>
    public string? PolicyDigest { get; set; }

    /// <summary>
    /// Per-dimension weight overrides. When null, the underlying calculator's
    /// default weights are used.
    /// </summary>
    public EvidenceWeightsConfiguration? Weights { get; set; }

    /// <summary>
    /// Bucket threshold overrides. When null, default bucket thresholds apply.
    /// </summary>
    public BucketThresholdsConfiguration? BucketThresholds { get; set; }

    /// <summary>
    /// Whether the full EWS breakdown is embedded in verdicts.
    /// Disabling shrinks the verdict payload at the cost of explainability.
    /// </summary>
    public bool IncludeBreakdownInVerdict { get; set; } = true;

    /// <summary>
    /// Whether score attestation proofs are embedded in verdicts.
    /// Required for audit trails and reproducibility verification.
    /// </summary>
    public bool IncludeScoringProof { get; set; } = true;

    /// <summary>
    /// Validates the options, including any nested weight and threshold
    /// overrides. Throws <see cref="InvalidOperationException"/> on the first
    /// out-of-range setting.
    /// </summary>
    public void Validate()
    {
        switch (MaxCachedScoresPerContext)
        {
            case < 100:
                throw new InvalidOperationException(
                    $"{nameof(MaxCachedScoresPerContext)} must be at least 100, got {MaxCachedScoresPerContext}");
            case > 1_000_000:
                throw new InvalidOperationException(
                    $"{nameof(MaxCachedScoresPerContext)} must not exceed 1,000,000, got {MaxCachedScoresPerContext}");
        }

        Weights?.Validate();
        BucketThresholds?.Validate();
    }
}
/// <summary>
/// Optional per-dimension weight overrides for the EWS calculator.
/// Each value, when set, must lie in [0, 1]; unset values fall back to the
/// calculator defaults.
/// </summary>
public sealed class EvidenceWeightsConfiguration
{
    /// <summary>Reachability weight (0-1).</summary>
    public double? Rch { get; set; }

    /// <summary>Runtime signal weight (0-1).</summary>
    public double? Rts { get; set; }

    /// <summary>Backport evidence weight (0-1).</summary>
    public double? Bkp { get; set; }

    /// <summary>Exploit likelihood weight (0-1).</summary>
    public double? Xpl { get; set; }

    /// <summary>Source trust weight (0-1).</summary>
    public double? Src { get; set; }

    /// <summary>Mitigation weight (0-1, subtractive).</summary>
    public double? Mit { get; set; }

    /// <summary>
    /// Converts to <see cref="EvidenceWeights"/>, overlaying only the
    /// dimensions that were explicitly configured.
    /// </summary>
    public EvidenceWeights ToWeights(EvidenceWeights defaults)
    {
        var result = defaults;
        if (Rch is { } rch) result = result with { Rch = rch };
        if (Rts is { } rts) result = result with { Rts = rts };
        if (Bkp is { } bkp) result = result with { Bkp = bkp };
        if (Xpl is { } xpl) result = result with { Xpl = xpl };
        if (Src is { } src) result = result with { Src = src };
        if (Mit is { } mit) result = result with { Mit = mit };
        return result;
    }

    /// <summary>
    /// Validates that every configured weight lies in [0, 1]; throws
    /// <see cref="InvalidOperationException"/> on the first violation.
    /// </summary>
    public void Validate()
    {
        // Check in declaration order so the first failing dimension matches
        // the order of the properties above.
        foreach (var (name, value) in EnumerateWeights())
        {
            if (value is < 0.0 or > 1.0)
            {
                throw new InvalidOperationException(
                    $"Weight '{name}' must be between 0 and 1, got {value.Value}");
            }
        }
    }

    // Yields each dimension as a (name, value) pair for uniform validation.
    private IEnumerable<(string Name, double? Value)> EnumerateWeights()
    {
        yield return (nameof(Rch), Rch);
        yield return (nameof(Rts), Rts);
        yield return (nameof(Bkp), Bkp);
        yield return (nameof(Xpl), Xpl);
        yield return (nameof(Src), Src);
        yield return (nameof(Mit), Mit);
    }
}
/// <summary>
/// Optional overrides for the EWS bucket cut-off scores. Thresholds must be
/// ordered ActNow &gt;= ScheduleNext &gt;= Investigate and lie within [0, 100].
/// </summary>
public sealed class BucketThresholdsConfiguration
{
    /// <summary>Minimum score for ActNow bucket (default: 90).</summary>
    public int? ActNowMin { get; set; }

    /// <summary>Minimum score for ScheduleNext bucket (default: 70).</summary>
    public int? ScheduleNextMin { get; set; }

    /// <summary>Minimum score for Investigate bucket (default: 40).</summary>
    public int? InvestigateMin { get; set; }

    /// <summary>
    /// Converts to <see cref="BucketThresholds"/>, overlaying only the
    /// thresholds that were explicitly configured.
    /// </summary>
    public BucketThresholds ToThresholds(BucketThresholds defaults)
    {
        var result = defaults;
        if (ActNowMin is { } actNow) result = result with { ActNowMin = actNow };
        if (ScheduleNextMin is { } next) result = result with { ScheduleNextMin = next };
        if (InvestigateMin is { } investigate) result = result with { InvestigateMin = investigate };
        return result;
    }

    /// <summary>
    /// Validates ordering and range of the effective thresholds (configured
    /// value or documented default); throws
    /// <see cref="InvalidOperationException"/> on the first violation.
    /// </summary>
    public void Validate()
    {
        // Validate against the effective values so a partially-configured set
        // is still checked as a whole.
        var effectiveActNow = ActNowMin ?? 90;
        var effectiveScheduleNext = ScheduleNextMin ?? 70;
        var effectiveInvestigate = InvestigateMin ?? 40;

        if (effectiveActNow < effectiveScheduleNext)
        {
            throw new InvalidOperationException(
                $"ActNowMin threshold ({effectiveActNow}) must be >= ScheduleNextMin threshold ({effectiveScheduleNext})");
        }

        if (effectiveScheduleNext < effectiveInvestigate)
        {
            throw new InvalidOperationException(
                $"ScheduleNextMin threshold ({effectiveScheduleNext}) must be >= InvestigateMin threshold ({effectiveInvestigate})");
        }

        if (effectiveInvestigate < 0)
        {
            throw new InvalidOperationException(
                $"InvestigateMin threshold ({effectiveInvestigate}) must be >= 0");
        }

        if (effectiveActNow > 100)
        {
            throw new InvalidOperationException(
                $"ActNowMin threshold ({effectiveActNow}) must be <= 100");
        }
    }
}