feat: add security sink detection patterns for JavaScript/TypeScript

- Introduced `sink-detect.js` with various security sink detection patterns categorized by type (e.g., command injection, SQL injection, file operations).
- Implemented functions to build a lookup map for fast sink detection and to match sink calls against known patterns.
- Added `package-lock.json` for dependency management.
This commit is contained in:
StellaOps Bot
2025-12-22 23:21:21 +02:00
parent 3ba7157b00
commit 5146204f1b
529 changed files with 73579 additions and 5985 deletions

View File

@@ -0,0 +1,226 @@
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright (c) StellaOps
using StellaOps.Scanner.Explainability.Assumptions;
using StellaOps.Scanner.Explainability.Falsifiability;
namespace StellaOps.Scanner.Explainability.Confidence;
/// <summary>
/// Evidence factors that contribute to confidence scoring.
/// </summary>
/// <summary>
/// Evidence factors that contribute to confidence scoring.
/// Each factor is an input to <see cref="IEvidenceDensityScorer.Calculate"/>;
/// absent/false factors simply contribute zero weight rather than penalizing the score.
/// </summary>
public sealed record EvidenceFactors
{
    /// <summary>Assumption set for the finding; null when assumption tracking was not performed.</summary>
    public AssumptionSet? Assumptions { get; init; }

    /// <summary>Falsifiability criteria for the finding; null when none were generated.</summary>
    public FalsifiabilityCriteria? Falsifiability { get; init; }

    /// <summary>Whether static reachability analysis was performed.</summary>
    public bool HasStaticReachability { get; init; }

    /// <summary>Whether runtime observations are available.</summary>
    public bool HasRuntimeObservations { get; init; }

    /// <summary>Whether SBOM lineage is tracked.</summary>
    public bool HasSbomLineage { get; init; }

    /// <summary>
    /// Number of corroborating vulnerability sources. Defaults to 1 (the originating source);
    /// the scorer's multiple-source credit saturates at 3.
    /// </summary>
    public int SourceCount { get; init; } = 1;

    /// <summary>Whether a VEX assessment is available.</summary>
    public bool HasVexAssessment { get; init; }

    /// <summary>Whether exploit code is known to exist.</summary>
    public bool HasKnownExploit { get; init; }
}
/// <summary>
/// Result of evidence density scoring.
/// </summary>
/// <summary>
/// Result of evidence density scoring, produced by <see cref="IEvidenceDensityScorer.Calculate"/>.
/// </summary>
public sealed record EvidenceDensityScore
{
    /// <summary>Overall confidence score (0.0 to 1.0).</summary>
    public required double Score { get; init; }

    /// <summary>Confidence level derived from <see cref="Score"/>.</summary>
    public required ConfidenceLevel Level { get; init; }

    /// <summary>Individual factor contributions, keyed by factor name (e.g. "static_reachability").</summary>
    public required IReadOnlyDictionary<string, double> FactorBreakdown { get; init; }

    /// <summary>Human-readable explanation of the score and its strongest contributing factors.</summary>
    public required string Explanation { get; init; }

    /// <summary>Actionable recommendations to improve confidence; empty when no gaps were found.</summary>
    public required IReadOnlyList<string> ImprovementRecommendations { get; init; }
}
/// <summary>
/// Calculates confidence scores based on evidence density.
/// More evidence types and validation = higher confidence in the finding accuracy.
/// </summary>
/// <summary>
/// Calculates confidence scores based on evidence density.
/// More evidence types and validation = higher confidence in the finding accuracy.
/// </summary>
public interface IEvidenceDensityScorer
{
    /// <summary>
    /// Calculates an evidence density score for a finding.
    /// </summary>
    /// <param name="factors">The evidence factors gathered for the finding.</param>
    /// <returns>The composite score, derived confidence level, per-factor breakdown, and recommendations.</returns>
    EvidenceDensityScore Calculate(EvidenceFactors factors);
}
/// <summary>
/// Default implementation of <see cref="IEvidenceDensityScorer"/>.
/// </summary>
/// <summary>
/// Default implementation of <see cref="IEvidenceDensityScorer"/>.
/// Computes a weighted sum over eight evidence factors; the weights sum to 1.0,
/// so the resulting score always lies in [0.0, 1.0].
/// </summary>
public sealed class EvidenceDensityScorer : IEvidenceDensityScorer
{
    // Weights for different evidence factors. These must sum to exactly 1.0.
    private const double WeightAssumptionValidation = 0.20;
    private const double WeightFalsifiabilityEval = 0.15;
    private const double WeightStaticReachability = 0.15;
    private const double WeightRuntimeObservation = 0.20;
    private const double WeightSbomLineage = 0.05;
    private const double WeightMultipleSources = 0.10;
    private const double WeightVexAssessment = 0.10;
    private const double WeightKnownExploit = 0.05;

    /// <inheritdoc />
    /// <exception cref="ArgumentNullException"><paramref name="factors"/> is null.</exception>
    public EvidenceDensityScore Calculate(EvidenceFactors factors)
    {
        ArgumentNullException.ThrowIfNull(factors);

        var breakdown = new Dictionary<string, double>();
        var recommendations = new List<string>();

        // Factor 1: Assumption validation ratio — credit scales with the
        // fraction of tracked assumptions that have been validated.
        double assumptionScore = 0.0;
        if (factors.Assumptions is not null && factors.Assumptions.Assumptions.Length > 0)
        {
            assumptionScore = factors.Assumptions.ValidationRatio * WeightAssumptionValidation;
            if (factors.Assumptions.ValidationRatio < 0.5)
            {
                recommendations.Add("Validate more assumptions with runtime observations or static analysis");
            }
        }
        else
        {
            recommendations.Add("Add assumption tracking to understand analysis context");
        }
        breakdown["assumption_validation"] = assumptionScore;

        // Factor 2: Falsifiability evaluation — credit scales with the fraction
        // of criteria that were actually evaluated, regardless of outcome
        // (Satisfied or NotSatisfied both count as "evaluated").
        double falsifiabilityScore = 0.0;
        if (factors.Falsifiability is not null)
        {
            var evaluatedCount = factors.Falsifiability.Criteria
                .Count(c => c.Status is CriterionStatus.Satisfied or CriterionStatus.NotSatisfied);
            var totalCount = factors.Falsifiability.Criteria.Length;
            if (totalCount > 0)
            {
                falsifiabilityScore = ((double)evaluatedCount / totalCount) * WeightFalsifiabilityEval;
            }

            if (factors.Falsifiability.Status == FalsifiabilityStatus.PartiallyEvaluated)
            {
                recommendations.Add("Complete evaluation of pending falsifiability criteria");
            }
        }
        else
        {
            recommendations.Add("Generate falsifiability criteria to understand what would disprove this finding");
        }
        breakdown["falsifiability_evaluation"] = falsifiabilityScore;

        // Factor 3: Static reachability (all-or-nothing).
        double staticReachScore = factors.HasStaticReachability ? WeightStaticReachability : 0.0;
        if (!factors.HasStaticReachability)
        {
            recommendations.Add("Perform static reachability analysis to verify code paths");
        }
        breakdown["static_reachability"] = staticReachScore;

        // Factor 4: Runtime observations (all-or-nothing).
        double runtimeScore = factors.HasRuntimeObservations ? WeightRuntimeObservation : 0.0;
        if (!factors.HasRuntimeObservations)
        {
            recommendations.Add("Collect runtime observations to verify actual behavior");
        }
        breakdown["runtime_observations"] = runtimeScore;

        // Factor 5: SBOM lineage (all-or-nothing).
        double lineageScore = factors.HasSbomLineage ? WeightSbomLineage : 0.0;
        if (!factors.HasSbomLineage)
        {
            recommendations.Add("Track SBOM lineage for reproducibility");
        }
        breakdown["sbom_lineage"] = lineageScore;

        // Factor 6: Multiple sources — credit saturates at 3 sources. Clamp the
        // lower bound at 0 so a non-positive SourceCount cannot produce a
        // negative contribution and drag the total score down.
        double sourceScore = Math.Clamp(factors.SourceCount, 0, 3) / 3.0 * WeightMultipleSources;
        if (factors.SourceCount < 2)
        {
            recommendations.Add("Cross-reference with additional vulnerability databases");
        }
        breakdown["multiple_sources"] = sourceScore;

        // Factor 7: VEX assessment (all-or-nothing).
        double vexScore = factors.HasVexAssessment ? WeightVexAssessment : 0.0;
        if (!factors.HasVexAssessment)
        {
            recommendations.Add("Obtain vendor VEX assessment for authoritative status");
        }
        breakdown["vex_assessment"] = vexScore;

        // Factor 8: Known exploit. Not having a known exploit is not a
        // deficiency, so no recommendation is added for its absence.
        double exploitScore = factors.HasKnownExploit ? WeightKnownExploit : 0.0;
        breakdown["known_exploit"] = exploitScore;

        // Round once, then derive Level and Explanation from the ROUNDED value,
        // so that the reported Score, Level, and Explanation can never disagree
        // at a threshold boundary (e.g. raw 0.7496 rounding up to 0.750).
        double totalScore = Math.Round(breakdown.Values.Sum(), 3);
        var level = ScoreToLevel(totalScore);
        var explanation = GenerateExplanation(totalScore, level, breakdown);

        return new EvidenceDensityScore
        {
            Score = totalScore,
            Level = level,
            FactorBreakdown = breakdown,
            Explanation = explanation,
            // Expose a read-only view so callers holding the IReadOnlyList
            // cannot cast back to List<string> and mutate the result.
            ImprovementRecommendations = recommendations.AsReadOnly()
        };
    }

    /// <summary>Maps a numeric score to its discrete confidence level.</summary>
    private static ConfidenceLevel ScoreToLevel(double score) => score switch
    {
        >= 0.75 => ConfidenceLevel.Verified,
        >= 0.50 => ConfidenceLevel.High,
        >= 0.25 => ConfidenceLevel.Medium,
        _ => ConfidenceLevel.Low
    };

    /// <summary>
    /// Builds a human-readable explanation naming the (up to) three strongest
    /// positive factor contributions. The Low branch omits the factor list,
    /// since a low score may have no positive contributions at all.
    /// </summary>
    private static string GenerateExplanation(
        double score,
        ConfidenceLevel level,
        Dictionary<string, double> breakdown)
    {
        var topFactors = breakdown
            .Where(kv => kv.Value > 0)
            .OrderByDescending(kv => kv.Value)
            .Take(3)
            .Select(kv => kv.Key.Replace("_", " "));
        var factorList = string.Join(", ", topFactors);

        return level switch
        {
            ConfidenceLevel.Verified =>
                $"Very high confidence ({score:P0}). Strong evidence from: {factorList}.",
            ConfidenceLevel.High =>
                $"High confidence ({score:P0}). Good evidence from: {factorList}.",
            ConfidenceLevel.Medium =>
                $"Medium confidence ({score:P0}). Some evidence from: {factorList}.",
            _ =>
                $"Low confidence ({score:P0}). Limited evidence available. Consider gathering more data."
        };
    }
}