save progress

This commit is contained in:
StellaOps Bot
2025-12-28 03:08:52 +02:00
parent cec4265a40
commit 3acc0ef0cd
476 changed files with 6765 additions and 1902 deletions

View File

@@ -29,6 +29,8 @@ DEFAULT_EXCLUDE_DIRS = {
"packages",
".nuget",
".cache",
"Fixtures", # Test fixture files should not be in solutions
"TestData", # Test data files should not be in solutions
}
# Default file patterns to exclude (test fixtures, samples, etc.)

View File

@@ -0,0 +1,416 @@
"""
NuGet API v3 client for package version and vulnerability queries.
"""
import logging
import re
from typing import Any
try:
import requests
except ImportError:
requests = None # type: ignore
from .version_utils import parse_version, is_stable
logger = logging.getLogger(__name__)
NUGET_SERVICE_INDEX = "https://api.nuget.org/v3/index.json"
NUGET_VULN_INDEX = "https://api.nuget.org/v3/vulnerabilities/index.json"
class NuGetApiError(Exception):
    """Raised when communication with the NuGet API fails."""
class NuGetApiClient:
    """
    Client for NuGet API v3 operations.

    Provides methods for:
    - Fetching available package versions
    - Fetching vulnerability data
    - Finding non-vulnerable versions

    All network results are cached on the instance, so one client can be
    reused for many packages without re-downloading the vulnerability index.
    """

    def __init__(self, source: str = "https://api.nuget.org/v3"):
        """Create a client for the given NuGet v3 source base URL.

        Raises:
            ImportError: when the optional ``requests`` dependency is missing.
        """
        if requests is None:
            raise ImportError(
                "requests library is required for NuGet API access. "
                "Install with: pip install requests"
            )
        self.source = source.rstrip("/")
        self._session = requests.Session()
        self._session.headers.update(
            {"User-Agent": "StellaOps-NuGetVulnChecker/1.0"}
        )
        # Lazily populated caches — None means "not fetched yet".
        self._service_index: dict | None = None
        self._vuln_cache: dict[str, list[dict]] | None = None
        self._search_url: str | None = None
        self._registration_url: str | None = None

    def _get_service_index(self) -> dict:
        """Fetch and cache the NuGet service index."""
        if self._service_index is not None:
            return self._service_index
        try:
            response = self._session.get(f"{self.source}/index.json", timeout=30)
            response.raise_for_status()
            self._service_index = response.json()
            return self._service_index
        except Exception as e:
            # Any failure (network, HTTP status, bad JSON) is wrapped in the
            # domain error so callers only need to catch NuGetApiError.
            raise NuGetApiError(f"Failed to fetch NuGet service index: {e}")

    def _get_search_url(self) -> str:
        """Get the SearchQueryService URL from service index."""
        if self._search_url:
            return self._search_url
        index = self._get_service_index()
        resources = index.get("resources", [])
        # Look for SearchQueryService
        for resource in resources:
            resource_type = resource.get("@type", "")
            if "SearchQueryService" in resource_type:
                self._search_url = resource.get("@id", "")
                return self._search_url
        raise NuGetApiError("SearchQueryService not found in service index")

    def _get_registration_url(self) -> str:
        """Get the RegistrationsBaseUrl from service index."""
        if self._registration_url:
            return self._registration_url
        index = self._get_service_index()
        resources = index.get("resources", [])
        # Look for RegistrationsBaseUrl
        for resource in resources:
            resource_type = resource.get("@type", "")
            if "RegistrationsBaseUrl" in resource_type:
                self._registration_url = resource.get("@id", "").rstrip("/")
                return self._registration_url
        raise NuGetApiError("RegistrationsBaseUrl not found in service index")

    def get_available_versions(self, package_id: str) -> list[str]:
        """
        Fetch all available versions of a package from NuGet.

        Args:
            package_id: The NuGet package ID

        Returns:
            List of version strings, sorted newest first.  Empty list on
            404 (unknown package) or any fetch failure.
        """
        try:
            # Use registration API for complete version list
            reg_url = self._get_registration_url()
            package_lower = package_id.lower()
            url = f"{reg_url}/{package_lower}/index.json"
            response = self._session.get(url, timeout=30)
            if response.status_code == 404:
                logger.warning(f"Package not found on NuGet: {package_id}")
                return []
            response.raise_for_status()
            data = response.json()
            versions = []
            for page in data.get("items", []):
                # Pages may be inline or require fetching
                if "items" in page:
                    items = page["items"]
                else:
                    # Fetch the page
                    page_url = page.get("@id")
                    if page_url:
                        page_response = self._session.get(page_url, timeout=30)
                        page_response.raise_for_status()
                        items = page_response.json().get("items", [])
                    else:
                        items = []
                for item in items:
                    catalog_entry = item.get("catalogEntry", {})
                    version = catalog_entry.get("version")
                    if version:
                        versions.append(version)

            # Sort by parsed version, newest first.  Unparsable versions get
            # a minimal sentinel key so they sink to the end.
            def sort_key(v: str) -> tuple:
                parsed = parse_version(v)
                if parsed is None:
                    # assumes parse_version returns a tuple-comparable value
                    # (ParsedVersion) — TODO confirm against lib/version_utils
                    return (0, 0, 0, "")
                return parsed

            versions.sort(key=sort_key, reverse=True)
            return versions
        except NuGetApiError:
            raise
        except Exception as e:
            logger.warning(f"Failed to fetch versions for {package_id}: {e}")
            return []

    def get_vulnerability_data(self) -> dict[str, list[dict]]:
        """
        Fetch vulnerability data from NuGet VulnerabilityInfo API.

        Returns:
            Dictionary mapping lowercase package ID to list of vulnerability info dicts.
            Each dict contains: severity, advisory_url, versions (affected range)

        NOTE(review): the failure path returns {} WITHOUT caching it, so a
        later call retries the download; only a success is cached.
        """
        if self._vuln_cache is not None:
            return self._vuln_cache
        try:
            # Fetch vulnerability index
            response = self._session.get(NUGET_VULN_INDEX, timeout=30)
            response.raise_for_status()
            index = response.json()
            vuln_map: dict[str, list[dict]] = {}
            # Fetch each vulnerability page
            for page_info in index:
                page_url = page_info.get("@id")
                if not page_url:
                    continue
                try:
                    page_response = self._session.get(page_url, timeout=60)
                    page_response.raise_for_status()
                    page_data = page_response.json()
                    # Parse vulnerability entries
                    self._merge_vuln_data(vuln_map, page_data)
                except Exception as e:
                    # A single failed page is skipped, not fatal.
                    logger.warning(f"Failed to fetch vulnerability page {page_url}: {e}")
                    continue
            self._vuln_cache = vuln_map
            logger.info(f"Loaded vulnerability data for {len(vuln_map)} packages")
            return vuln_map
        except Exception as e:
            logger.warning(f"Failed to fetch vulnerability data: {e}")
            return {}

    def _merge_vuln_data(
        self, vuln_map: dict[str, list[dict]], page_data: Any
    ) -> None:
        """Merge vulnerability page data into the vulnerability map."""
        # The vulnerability data format is a dict mapping package ID (lowercase)
        # to list of vulnerability objects
        if not isinstance(page_data, dict):
            return
        for package_id, vulns in page_data.items():
            if package_id.startswith("@"):
                # Skip metadata fields like @context
                continue
            package_lower = package_id.lower()
            if package_lower not in vuln_map:
                vuln_map[package_lower] = []
            if isinstance(vulns, list):
                vuln_map[package_lower].extend(vulns)

    def is_version_vulnerable(
        self, package_id: str, version: str, vuln_data: dict[str, list[dict]] | None = None
    ) -> tuple[bool, list[dict]]:
        """
        Check if a specific package version is vulnerable.

        Args:
            package_id: The package ID
            version: The version to check
            vuln_data: Optional pre-fetched vulnerability data

        Returns:
            Tuple of (is_vulnerable, list of matching vulnerabilities).
            An unparsable version is treated as not vulnerable.
        """
        if vuln_data is None:
            vuln_data = self.get_vulnerability_data()
        package_lower = package_id.lower()
        vulns = vuln_data.get(package_lower, [])
        if not vulns:
            return False, []
        matching = []
        parsed_version = parse_version(version)
        if parsed_version is None:
            return False, []
        for vuln in vulns:
            # Check version range
            version_range = vuln.get("versions", "")
            if self._version_in_range(version, parsed_version, version_range):
                matching.append(vuln)
        return len(matching) > 0, matching

    def _version_in_range(
        self, version: str, parsed: tuple, range_str: str
    ) -> bool:
        """
        Check if a version is in a NuGet version range.

        NuGet range formats:
        - "[1.0.0, 2.0.0)" - >= 1.0.0 and < 2.0.0
        - "(, 1.0.0)" - < 1.0.0
        - "[1.0.0,)" - >= 1.0.0
        - "1.0.0" - exact match

        NOTE(review): NuGet officially treats a bare "1.0.0" as a MINIMUM
        (>= 1.0.0), not an exact match — confirm the vulnerability feed only
        ever emits bracketed ranges.  Also, a bracketed exact version like
        "[1.0.0]" contains no comma and will NOT match the regex below, so
        it is reported as not-in-range — verify against real feed data.
        """
        if not range_str:
            return False
        range_str = range_str.strip()
        # Handle exact version
        if not range_str.startswith(("[", "(")):
            exact_parsed = parse_version(range_str)
            return exact_parsed == parsed if exact_parsed else False
        # Parse range
        match = re.match(r"([\[\(])([^,]*),([^)\]]*)([\)\]])", range_str)
        if not match:
            return False
        left_bracket, left_ver, right_ver, right_bracket = match.groups()
        left_ver = left_ver.strip()
        right_ver = right_ver.strip()
        # Check lower bound (an empty or unparsable bound is unbounded)
        if left_ver:
            left_parsed = parse_version(left_ver)
            if left_parsed:
                if left_bracket == "[":
                    if parsed < left_parsed:
                        return False
                else:  # "("
                    if parsed <= left_parsed:
                        return False
        # Check upper bound
        if right_ver:
            right_parsed = parse_version(right_ver)
            if right_parsed:
                if right_bracket == "]":
                    if parsed > right_parsed:
                        return False
                else:  # ")"
                    if parsed >= right_parsed:
                        return False
        return True

    def find_safe_version(
        self,
        package_id: str,
        current_version: str,
        prefer_upgrade: bool = True,
    ) -> str | None:
        """
        Find the closest non-vulnerable version.

        Strategy:
        1. Get all available versions
        2. Filter out versions with known vulnerabilities
        3. Prefer: patch upgrade > minor upgrade > major upgrade > downgrade

        Args:
            package_id: The package ID
            current_version: Current (vulnerable) version
            prefer_upgrade: If True, prefer upgrades over downgrades

        Returns:
            Suggested safe version, or None if not found
        """
        available = self.get_available_versions(package_id)
        if not available:
            return None
        vuln_data = self.get_vulnerability_data()
        current_parsed = parse_version(current_version)
        if current_parsed is None:
            return None
        # Find safe versions
        from .version_utils import ParsedVersion
        safe_versions: list[tuple[str, ParsedVersion]] = []
        for version in available:
            # Skip prereleases unless current is prerelease
            if not is_stable(version) and is_stable(current_version):
                continue
            parsed = parse_version(version)
            if parsed is None:
                continue
            is_vuln, _ = self.is_version_vulnerable(package_id, version, vuln_data)
            if not is_vuln:
                safe_versions.append((version, parsed))
        if not safe_versions:
            return None

        # Sort by preference: closest upgrade first
        def sort_key(item: tuple[str, ParsedVersion]) -> tuple:
            version, parsed = item
            major_diff = parsed.major - current_parsed.major
            minor_diff = parsed.minor - current_parsed.minor
            patch_diff = parsed.patch - current_parsed.patch
            # Prefer upgrades (positive diff) over downgrades
            # Within upgrades, prefer smaller changes
            if prefer_upgrade:
                if major_diff > 0 or (major_diff == 0 and minor_diff > 0) or \
                    (major_diff == 0 and minor_diff == 0 and patch_diff > 0):
                    # Upgrade: prefer smaller version jumps
                    return (0, major_diff, minor_diff, patch_diff)
                elif major_diff == 0 and minor_diff == 0 and patch_diff == 0:
                    # Same version (shouldn't happen if vulnerable)
                    return (1, 0, 0, 0)
                else:
                    # Downgrade: prefer smaller version drops
                    return (2, -major_diff, -minor_diff, -patch_diff)
            else:
                # Just prefer closest version
                return (abs(major_diff), abs(minor_diff), abs(patch_diff))

        safe_versions.sort(key=sort_key)
        return safe_versions[0][0] if safe_versions else None

    def get_fix_risk(
        self, current_version: str, suggested_version: str
    ) -> str:
        """
        Estimate the risk of upgrading to a suggested version.

        Returns: "low", "medium", or "high" ("unknown" when either version
        cannot be parsed).

        NOTE(review): a DOWNGRADE (suggested major below current) falls into
        the "low"/"medium" branches here, which likely understates its risk —
        confirm whether find_safe_version's downgrade results need a
        dedicated rating.
        """
        current = parse_version(current_version)
        suggested = parse_version(suggested_version)
        if current is None or suggested is None:
            return "unknown"
        if suggested.major > current.major:
            return "high"  # Major version change
        elif suggested.minor > current.minor:
            return "medium"  # Minor version change
        else:
            return "low"  # Patch or no change

View File

@@ -0,0 +1,123 @@
"""
Data models for NuGet vulnerability checking.
"""
from dataclasses import dataclass, field
from pathlib import Path
@dataclass
class VulnerabilityDetail:
    """Details about a specific vulnerability affecting one package."""
    severity: str  # one of: low, moderate, high, critical (as reported by the feed)
    advisory_url: str  # link to the published security advisory
@dataclass
class VulnerablePackage:
    """A package with known vulnerabilities."""
    package_id: str
    resolved_version: str
    requested_version: str
    vulnerabilities: list[VulnerabilityDetail] = field(default_factory=list)
    affected_projects: list[Path] = field(default_factory=list)
    suggested_version: str | None = None
    fix_risk: str = "unknown"  # low, medium, high

    @property
    def highest_severity(self) -> str:
        """Severity label of the most severe vulnerability ("unknown" when empty)."""
        if not self.vulnerabilities:
            return "unknown"
        ranking = {"low": 1, "moderate": 2, "high": 3, "critical": 4}
        worst = max(
            self.vulnerabilities,
            key=lambda detail: ranking.get(detail.severity.lower(), 0),
        )
        return worst.severity

    @property
    def advisory_urls(self) -> list[str]:
        """Advisory URLs collected from every vulnerability."""
        return [detail.advisory_url for detail in self.vulnerabilities]
@dataclass
class SuggestedFix:
    """Suggested fix for a vulnerable package."""
    version: str
    is_major_upgrade: bool
    is_minor_upgrade: bool
    is_patch_upgrade: bool
    breaking_change_risk: str  # low, medium, high

    @classmethod
    def from_versions(
        cls, current: str, suggested: str, current_parsed: tuple, suggested_parsed: tuple
    ) -> "SuggestedFix":
        """Create a SuggestedFix from (major, minor, patch, ...) version tuples.

        Args:
            current: Current version string (kept for parity with callers).
            suggested: Suggested version string; stored on the result.
            current_parsed: Parsed current version, indexable as [major, minor, patch].
            suggested_parsed: Parsed suggested version, same shape.

        Bug fix: the previous implementation compared minor/patch components
        without requiring the higher components to be EQUAL, so a major
        downgrade such as 2.0.0 -> 1.5.0 was classified as a "minor upgrade"
        with only "medium" risk.  Each flag now requires the more significant
        components to match, and any major change (up or down) rates "high".
        """
        major_changed = suggested_parsed[0] != current_parsed[0]
        is_major = suggested_parsed[0] > current_parsed[0]
        is_minor = not major_changed and suggested_parsed[1] > current_parsed[1]
        is_patch = (
            not major_changed
            and suggested_parsed[1] == current_parsed[1]
            and suggested_parsed[2] > current_parsed[2]
        )
        # Estimate breaking change risk: any major change (including a
        # downgrade) is high risk; a minor change within the same major is
        # medium; patch-level or no change is low.
        if major_changed:
            risk = "high"
        elif suggested_parsed[1] != current_parsed[1]:
            risk = "medium"
        else:
            risk = "low"
        return cls(
            version=suggested,
            is_major_upgrade=is_major,
            is_minor_upgrade=is_minor,
            is_patch_upgrade=is_patch,
            breaking_change_risk=risk,
        )
@dataclass
class VulnerabilityReport:
    """Complete vulnerability scan report."""
    solution: Path
    min_severity: str
    total_packages: int
    vulnerabilities: list[VulnerablePackage] = field(default_factory=list)
    unfixable: list[tuple[str, str]] = field(default_factory=list)  # (package, reason)

    @property
    def vulnerable_count(self) -> int:
        """Number of vulnerable packages found."""
        return len(self.vulnerabilities)

    @property
    def fixable_count(self) -> int:
        """Number of vulnerable packages that have a suggested fix version."""
        return sum(bool(pkg.suggested_version) for pkg in self.vulnerabilities)

    @property
    def unfixable_count(self) -> int:
        """Number of packages without a fix (pre-recorded plus unsuggested)."""
        without_suggestion = sum(
            1 for pkg in self.vulnerabilities if not pkg.suggested_version
        )
        return len(self.unfixable) + without_suggestion
# Severity level mapping for comparisons
SEVERITY_LEVELS = {
    "low": 1,
    "moderate": 2,
    "high": 3,
    "critical": 4,
}


def meets_severity_threshold(vuln_severity: str, min_severity: str) -> bool:
    """Return True when *vuln_severity* ranks at least *min_severity*.

    Comparison is case-insensitive; names not in SEVERITY_LEVELS rank
    below "low" (level 0).
    """
    rank = SEVERITY_LEVELS.get
    return rank(vuln_severity.lower(), 0) >= rank(min_severity.lower(), 0)

View File

@@ -0,0 +1,620 @@
#!/usr/bin/env python3
"""
StellaOps NuGet Vulnerability Checker.
Scans NuGet packages for security vulnerabilities and suggests/applies fixes.
Usage:
python nuget_vuln_checker.py [OPTIONS]
Options:
--solution PATH Path to .sln file (default: src/StellaOps.sln)
--min-severity LEVEL Minimum severity: low|moderate|high|critical (default: high)
--fix Auto-fix by updating to non-vulnerable versions
--dry-run Show what would be fixed without modifying files
--report PATH Write JSON report to file
--include-transitive Include transitive dependency vulnerabilities
--exclude PACKAGE Exclude package from checks (repeatable)
-v, --verbose Verbose output
Exit Codes:
0 - No vulnerabilities found (or all below threshold)
1 - Vulnerabilities found above threshold
2 - Error during execution
"""
import argparse
import json
import logging
import re
import shutil
import subprocess
import sys
from datetime import datetime, timezone
from pathlib import Path
from lib.nuget_api import NuGetApiClient, NuGetApiError
from lib.vulnerability_models import (
SEVERITY_LEVELS,
VulnerabilityDetail,
VulnerabilityReport,
VulnerablePackage,
meets_severity_threshold,
)
from lib.version_utils import parse_version
logger = logging.getLogger(__name__)
def setup_logging(verbose: bool) -> None:
    """Configure root logging: DEBUG when verbose, INFO otherwise."""
    logging.basicConfig(
        format="%(levelname)s: %(message)s",
        level=logging.DEBUG if verbose else logging.INFO,
    )
def check_dotnet_available() -> bool:
    """Return True when the ``dotnet`` CLI can be found on PATH."""
    return bool(shutil.which("dotnet"))
def run_vulnerability_check(
    solution_path: Path, include_transitive: bool
) -> dict | None:
    """
    Run ``dotnet list package --vulnerable`` and parse its JSON output.

    Args:
        solution_path: Path to the .sln file to scan.
        include_transitive: Also report transitive dependency vulnerabilities.

    Returns:
        The parsed JSON document, an empty skeleton when dotnet printed
        nothing, or None on any failure (non-zero exit, timeout, bad JSON).
    """
    cmd = [
        "dotnet", "list", str(solution_path), "package",
        "--vulnerable", "--format", "json", "--output-version", "1",
    ]
    if include_transitive:
        cmd.append("--include-transitive")
    logger.info(f"Running: {' '.join(cmd)}")
    try:
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=600,  # 10 minute timeout for large solutions
        )
    except subprocess.TimeoutExpired:
        logger.error("dotnet command timed out")
        return None
    except Exception as e:
        logger.error(f"Error running dotnet command: {e}")
        return None
    # A non-zero exit indicates the command itself failed (restore errors,
    # bad solution path), not the presence of vulnerabilities.
    if result.returncode != 0:
        logger.error(f"dotnet command failed: {result.stderr}")
        return None
    if not result.stdout.strip():
        logger.warning("Empty output from dotnet list package")
        return {"version": 1, "projects": []}
    try:
        return json.loads(result.stdout)
    except json.JSONDecodeError as e:
        logger.error(f"Failed to parse dotnet output as JSON: {e}")
        logger.debug(f"Output was: {result.stdout[:500]}...")
        return None
def parse_vulnerability_output(
    data: dict, min_severity: str, exclude_packages: set[str]
) -> list[VulnerablePackage]:
    """
    Parse ``dotnet list package --vulnerable`` JSON output.

    Args:
        data: Parsed JSON document from dotnet (version 1 schema).
        min_severity: Minimum severity name a vulnerability must meet.
        exclude_packages: Package IDs to skip (case-insensitive).

    Returns:
        List of VulnerablePackage objects that meet the severity threshold.

    Fixes over the previous version:
    - the lowercased exclusion set was rebuilt inside the innermost loop for
      EVERY package; it is now computed once up front;
    - the same project path was appended to ``affected_projects`` once per
      target framework (and once per package list), producing duplicate
      entries in reports; duplicates are now skipped.
    """
    excluded = {p.lower() for p in exclude_packages}
    vulnerable_packages: dict[str, VulnerablePackage] = {}
    for project in data.get("projects", []):
        project_path = Path(project.get("path", "unknown"))
        for framework in project.get("frameworks", []):
            # Check both topLevelPackages and transitivePackages
            for package_list_key in ("topLevelPackages", "transitivePackages"):
                for package in framework.get(package_list_key, []):
                    package_id = package.get("id", "")
                    if package_id.lower() in excluded:
                        logger.debug(f"Skipping excluded package: {package_id}")
                        continue
                    vulns = package.get("vulnerabilities", [])
                    if not vulns:
                        continue
                    # Keep only vulnerabilities that meet the threshold.
                    matching_vulns = [
                        VulnerabilityDetail(
                            severity=vuln.get("severity", "unknown"),
                            advisory_url=vuln.get("advisoryurl", ""),
                        )
                        for vuln in vulns
                        if meets_severity_threshold(
                            vuln.get("severity", "unknown"), min_severity
                        )
                    ]
                    if not matching_vulns:
                        continue
                    # One entry per (package, resolved version) pair.
                    key = f"{package_id}@{package.get('resolvedVersion', '')}"
                    entry = vulnerable_packages.get(key)
                    if entry is None:
                        entry = VulnerablePackage(
                            package_id=package_id,
                            resolved_version=package.get("resolvedVersion", ""),
                            requested_version=package.get("requestedVersion", ""),
                            vulnerabilities=matching_vulns,
                        )
                        vulnerable_packages[key] = entry
                    # Avoid duplicate project entries across frameworks/lists.
                    if project_path not in entry.affected_projects:
                        entry.affected_projects.append(project_path)
    return list(vulnerable_packages.values())
def find_suggested_fixes(
    vulnerable_packages: list[VulnerablePackage],
    api_client: NuGetApiClient | None,
) -> None:
    """
    For each vulnerable package, find a suggested non-vulnerable version.

    Modifies the packages in-place, filling in ``suggested_version`` and
    ``fix_risk``.  A missing API client or per-package API failure is logged
    and skipped, never raised.
    """
    if api_client is None:
        logger.warning("NuGet API client not available, cannot suggest fixes")
        return
    for pkg in vulnerable_packages:
        logger.debug(f"Finding safe version for {pkg.package_id} {pkg.resolved_version}")
        try:
            safe_version = api_client.find_safe_version(
                pkg.package_id, pkg.resolved_version
            )
        except NuGetApiError as e:
            logger.warning(f"Failed to query NuGet API for {pkg.package_id}: {e}")
            continue
        if not safe_version:
            logger.warning(
                f"No safe version found for {pkg.package_id} {pkg.resolved_version}"
            )
            continue
        pkg.suggested_version = safe_version
        pkg.fix_risk = api_client.get_fix_risk(pkg.resolved_version, safe_version)
        logger.info(
            f"Found safe version for {pkg.package_id}: "
            f"{pkg.resolved_version} -> {safe_version} (risk: {pkg.fix_risk})"
        )
def has_direct_package_reference(content: str, package_id: str) -> bool:
    """Return True when the csproj text contains a direct PackageReference
    whose Include attribute exactly names *package_id* (case-insensitive)."""
    needle = re.compile(
        rf'<PackageReference\s+[^>]*Include\s*=\s*"{re.escape(package_id)}"',
        re.IGNORECASE,
    )
    return bool(needle.search(content))
def add_package_reference(content: str, package_id: str, version: str) -> str:
    """
    Add a new PackageReference to a csproj file.

    Inserts into the ItemGroup that already holds PackageReferences, else
    into any existing ItemGroup, else into a new ItemGroup before
    ``</Project>``.  Returns the modified text (unchanged on malformed input).

    Bug fix: the previous pattern used a lazy ``.*?`` with DOTALL between the
    ItemGroup open tag and the first ``<PackageReference``, which can cross a
    ``</ItemGroup>`` boundary — so the reference was inserted into the FIRST
    ItemGroup in the file (e.g. one holding only Compile items) even when the
    PackageReferences live in a later group.  The tempered dot below stops
    the scan at the group's closing tag.
    """
    new_ref = f'\n    <PackageReference Include="{package_id}" Version="{version}" />'
    # Find an ItemGroup that DIRECTLY contains a PackageReference.
    itemgroup_pattern = re.compile(
        r'(<ItemGroup[^>]*>)((?:(?!</ItemGroup>).)*?<PackageReference\s)',
        re.IGNORECASE | re.DOTALL,
    )
    match = itemgroup_pattern.search(content)
    if match is None:
        # No ItemGroup holds PackageReferences yet; reuse any ItemGroup.
        match = re.search(r'(<ItemGroup[^>]*>)', content, re.IGNORECASE)
    if match is not None:
        # Insert immediately after the opening ItemGroup tag.
        insert_pos = match.end(1)
        return content[:insert_pos] + new_ref + content[insert_pos:]
    # No ItemGroup at all: create one just before the closing Project tag.
    project_close = content.rfind('</Project>')
    if project_close > 0:
        new_itemgroup = (
            f'\n  <ItemGroup>\n    <PackageReference Include="{package_id}" '
            f'Version="{version}" />\n  </ItemGroup>\n'
        )
        return content[:project_close] + new_itemgroup + content[project_close:]
    # Fallback - shouldn't happen for valid csproj.
    return content
def apply_fixes(
    vulnerable_packages: list[VulnerablePackage],
    dry_run: bool = False,
) -> int:
    """
    Apply suggested fixes to csproj files.

    For direct dependencies: updates the version in place.
    For transitive dependencies: adds an explicit PackageReference to override.

    Args:
        vulnerable_packages: Packages (with suggested_version filled in) to fix.
        dry_run: When True, only log what would change; files are untouched.

    Returns number of files modified (or that WOULD be modified in dry-run).
    """
    files_modified: set[Path] = set()
    for pkg in vulnerable_packages:
        # Packages without a suggested version cannot be fixed automatically.
        if not pkg.suggested_version:
            continue
        for project_path in pkg.affected_projects:
            if not project_path.exists():
                logger.warning(f"Project file not found: {project_path}")
                continue
            try:
                content = project_path.read_text(encoding="utf-8")
                # Check if this is a direct or transitive dependency
                is_direct = has_direct_package_reference(content, pkg.package_id)
                if is_direct:
                    # Direct dependency - update version in place
                    if dry_run:
                        logger.info(
                            f"Would update {pkg.package_id} in {project_path.name}: "
                            f"{pkg.resolved_version} -> {pkg.suggested_version}"
                        )
                        files_modified.add(project_path)
                        continue
                    # Pattern to match PackageReference for this package
                    # (attribute order: Include before Version).
                    pattern = re.compile(
                        rf'(<PackageReference\s+[^>]*Include\s*=\s*"{re.escape(pkg.package_id)}"'
                        rf'[^>]*Version\s*=\s*"){re.escape(pkg.resolved_version)}(")',
                        re.IGNORECASE,
                    )
                    new_content, count = pattern.subn(
                        rf"\g<1>{pkg.suggested_version}\g<2>",
                        content,
                    )
                    if count > 0:
                        project_path.write_text(new_content, encoding="utf-8")
                        files_modified.add(project_path)
                        logger.info(
                            f"Updated {pkg.package_id} in {project_path.name}: "
                            f"{pkg.resolved_version} -> {pkg.suggested_version}"
                        )
                    else:
                        # Try alternative pattern (Version before Include)
                        pattern_alt = re.compile(
                            rf'(<PackageReference\s+[^>]*Version\s*=\s*"){re.escape(pkg.resolved_version)}"'
                            rf'([^>]*Include\s*=\s*"{re.escape(pkg.package_id)}")',
                            re.IGNORECASE,
                        )
                        new_content, count = pattern_alt.subn(
                            rf'\g<1>{pkg.suggested_version}"\g<2>',
                            content,
                        )
                        if count > 0:
                            project_path.write_text(new_content, encoding="utf-8")
                            files_modified.add(project_path)
                            logger.info(
                                f"Updated {pkg.package_id} in {project_path.name}: "
                                f"{pkg.resolved_version} -> {pkg.suggested_version}"
                            )
                        else:
                            # Reference exists (has_direct_package_reference
                            # matched) but the version string didn't — e.g. a
                            # version range or MSBuild property.
                            logger.warning(
                                f"Could not find {pkg.package_id} {pkg.resolved_version} "
                                f"in {project_path}"
                            )
                else:
                    # Transitive dependency - add explicit PackageReference to override
                    if dry_run:
                        logger.info(
                            f"Would add explicit PackageReference for transitive dependency "
                            f"{pkg.package_id} {pkg.suggested_version} in {project_path.name} "
                            f"(overrides vulnerable {pkg.resolved_version})"
                        )
                        files_modified.add(project_path)
                        continue
                    new_content = add_package_reference(
                        content, pkg.package_id, pkg.suggested_version
                    )
                    # add_package_reference returns the input unchanged when
                    # it could not find an insertion point.
                    if new_content != content:
                        project_path.write_text(new_content, encoding="utf-8")
                        files_modified.add(project_path)
                        logger.info(
                            f"Added explicit PackageReference for {pkg.package_id} "
                            f"{pkg.suggested_version} in {project_path.name} "
                            f"(overrides vulnerable transitive {pkg.resolved_version})"
                        )
                    else:
                        logger.warning(
                            f"Failed to add PackageReference for {pkg.package_id} "
                            f"in {project_path}"
                        )
            except Exception as e:
                # One bad project file must not abort the rest of the run.
                logger.error(f"Failed to update {project_path}: {e}")
    return len(files_modified)
def generate_report(
    solution: Path,
    min_severity: str,
    total_packages: int,
    vulnerable_packages: list[VulnerablePackage],
) -> dict:
    """Build the JSON-serializable report of the vulnerability scan.

    Includes a UTC timestamp, summary counts, one entry per vulnerable
    package, and a separate list of packages with no available fix.
    """
    with_fix = [pkg for pkg in vulnerable_packages if pkg.suggested_version]
    without_fix = [pkg for pkg in vulnerable_packages if not pkg.suggested_version]

    entries = []
    for pkg in vulnerable_packages:
        suggested = None
        if pkg.suggested_version:
            suggested = {"version": pkg.suggested_version, "risk": pkg.fix_risk}
        entries.append(
            {
                "package": pkg.package_id,
                "current_version": pkg.resolved_version,
                "severity": pkg.highest_severity,
                "advisory_urls": pkg.advisory_urls,
                "affected_projects": [str(p) for p in pkg.affected_projects],
                "suggested_fix": suggested,
            }
        )

    return {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "solution": str(solution),
        "min_severity": min_severity,
        "summary": {
            "total_packages_scanned": total_packages,
            "vulnerable_packages": len(vulnerable_packages),
            "fixable_packages": len(with_fix),
            "unfixable_packages": len(without_fix),
        },
        "vulnerabilities": entries,
        "unfixable": [
            {
                "package": pkg.package_id,
                "version": pkg.resolved_version,
                "reason": "No non-vulnerable version available",
            }
            for pkg in without_fix
        ],
    }
def print_summary(
    vulnerable_packages: list[VulnerablePackage],
    min_severity: str,
    dry_run: bool,
    fix_mode: bool,
) -> None:
    """Print a human-readable summary of findings to stdout."""
    banner = "=" * 70
    print("\n" + banner)
    print("NuGet Vulnerability Scan Results")
    print(banner)
    if not vulnerable_packages:
        print(f"\nNo vulnerabilities found at or above '{min_severity}' severity.")
        return
    print(f"\nFound {len(vulnerable_packages)} vulnerable package(s):\n")

    def order(p: VulnerablePackage) -> tuple:
        # Most severe first, then alphabetically by package id.
        return (-SEVERITY_LEVELS.get(p.highest_severity.lower(), 0), p.package_id)

    for pkg in sorted(vulnerable_packages, key=order):
        print(f" [{pkg.highest_severity.upper()}] {pkg.package_id} {pkg.resolved_version}")
        for vuln in pkg.vulnerabilities:
            print(f" Advisory: {vuln.advisory_url}")
        if pkg.suggested_version:
            risk_str = f" (risk: {pkg.fix_risk})" if pkg.fix_risk != "unknown" else ""
            print(f" Suggested fix: {pkg.suggested_version}{risk_str}")
        else:
            print(" No fix available")
        print(f" Affected projects: {len(pkg.affected_projects)}")
        for proj in pkg.affected_projects[:3]:  # Show first 3
            print(f" - {proj.name}")
        hidden = len(pkg.affected_projects) - 3
        if hidden > 0:
            print(f" - ... and {hidden} more")
        print()
    fixable = sum(1 for p in vulnerable_packages if p.suggested_version)
    unfixable = len(vulnerable_packages) - fixable
    print("-" * 70)
    print(f"Summary: {len(vulnerable_packages)} vulnerable, {fixable} fixable, {unfixable} unfixable")
    if dry_run:
        print("\n[DRY RUN - No files were modified]")
    elif not fix_mode:
        print("\nRun with --fix to apply suggested fixes, or --dry-run to preview changes")
def main() -> int:
    """Main entry point.

    Returns the process exit code: 0 when no vulnerabilities meet the
    threshold, 1 when vulnerabilities were found, 2 on execution errors.
    """
    parser = argparse.ArgumentParser(
        description="Check NuGet packages for security vulnerabilities",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,
    )
    parser.add_argument(
        "--solution",
        type=Path,
        default=Path("src/StellaOps.sln"),
        help="Path to .sln file (default: src/StellaOps.sln)",
    )
    parser.add_argument(
        "--min-severity",
        choices=["low", "moderate", "high", "critical"],
        default="high",
        help="Minimum severity to report (default: high)",
    )
    parser.add_argument(
        "--fix",
        action="store_true",
        help="Auto-fix by updating to non-vulnerable versions",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Show what would be fixed without modifying files",
    )
    parser.add_argument(
        "--report",
        type=Path,
        help="Write JSON report to file",
    )
    parser.add_argument(
        "--include-transitive",
        action="store_true",
        help="Include transitive dependency vulnerabilities",
    )
    parser.add_argument(
        "--exclude",
        action="append",
        dest="exclude_packages",
        default=[],
        help="Exclude package from checks (repeatable)",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Verbose output",
    )
    args = parser.parse_args()
    setup_logging(args.verbose)
    # Validate solution path
    solution_path = args.solution.resolve()
    if not solution_path.exists():
        logger.error(f"Solution file not found: {solution_path}")
        return 2
    # Check dotnet is available
    if not check_dotnet_available():
        logger.error("dotnet CLI not found. Please install .NET SDK.")
        return 2
    logger.info(f"Scanning solution: {solution_path}")
    logger.info(f"Minimum severity: {args.min_severity}")
    # Run vulnerability check (None signals the command itself failed)
    vuln_data = run_vulnerability_check(solution_path, args.include_transitive)
    if vuln_data is None:
        logger.error("Failed to run vulnerability check")
        return 2
    # Count total packages for reporting
    total_packages = 0
    for project in vuln_data.get("projects", []):
        for framework in project.get("frameworks", []):
            total_packages += len(framework.get("topLevelPackages", []))
            if args.include_transitive:
                total_packages += len(framework.get("transitivePackages", []))
    # Parse vulnerabilities
    exclude_set = set(args.exclude_packages)
    vulnerable_packages = parse_vulnerability_output(
        vuln_data, args.min_severity, exclude_set
    )
    logger.info(f"Found {len(vulnerable_packages)} vulnerable package(s)")
    # Try to find suggested fixes via NuGet API.  Fix suggestion is
    # best-effort: if the API (or requests) is unavailable, we still report.
    api_client = None
    try:
        api_client = NuGetApiClient()
        find_suggested_fixes(vulnerable_packages, api_client)
    except ImportError:
        logger.warning(
            "requests library not available, cannot suggest fixes. "
            "Install with: pip install requests"
        )
    except Exception as e:
        logger.warning(f"NuGet API initialization failed: {e}")
    # Generate report
    report = generate_report(
        solution_path, args.min_severity, total_packages, vulnerable_packages
    )
    # Write report if requested (failure to write is non-fatal)
    if args.report:
        try:
            args.report.write_text(
                json.dumps(report, indent=2, default=str),
                encoding="utf-8",
            )
            logger.info(f"Report written to: {args.report}")
        except Exception as e:
            logger.error(f"Failed to write report: {e}")
    # Print summary
    print_summary(vulnerable_packages, args.min_severity, args.dry_run, args.fix)
    # Apply fixes if requested (--dry-run implies a preview-only pass)
    if args.fix or args.dry_run:
        files_modified = apply_fixes(vulnerable_packages, dry_run=args.dry_run)
        if not args.dry_run:
            print(f"\nModified {files_modified} file(s)")
    # Exit with appropriate code
    if vulnerable_packages:
        return 1
    return 0
if __name__ == "__main__":
sys.exit(main())