---
# Sprint: Testing Enhancement Advisory - Phase 3.1
# Competitor parity benchmarks with expanded 50+ image corpus
# Compares StellaOps against Trivy, Grype, and Syft
name: competitor-parity

on:
  schedule:
    # Run weekly on Sundays at 03:00 UTC
    - cron: '0 3 * * 0'
  push:
    branches: [main]
    paths:
      - 'src/__Tests/parity/**'
      - 'src/Scanner/__Libraries/**'
  pull_request:
    branches: [main, develop]
    paths:
      - 'src/__Tests/parity/**'
  workflow_dispatch:
    inputs:
      run_full_corpus:
        description: 'Run against full 50+ image corpus'
        type: boolean
        default: false
      # NOTE(review): defined but not yet referenced by any job — presumably
      # reserved for a later phase; confirm before removing.
      ground_truth_mode:
        description: 'Enable ground truth validation'
        type: boolean
        default: false

# Least-privilege token: no job writes to the repository or calls the API.
permissions:
  contents: read

concurrency:
  group: competitor-parity-${{ github.ref }}
  cancel-in-progress: true

env:
  # Quoted: env var values are strings; unquoted `true` is a YAML boolean.
  DOTNET_SKIP_FIRST_TIME_EXPERIENCE: "true"
  DOTNET_CLI_TELEMETRY_OPTOUT: "true"

jobs:
  # ==========================================================================
  # Install Competitor Tools
  # ==========================================================================
  # Sanity-gate only: each runner is ephemeral, so the binaries and vuln DBs
  # installed here do NOT carry over to downstream jobs — they reinstall.
  # This job exists to fail fast if any installer or DB mirror is down.
  setup-tools:
    name: Setup Scanner Tools
    runs-on: ubuntu-latest
    outputs:
      tools_installed: ${{ steps.check.outputs.installed }}
    steps:
      # SECURITY(review): curl | sh from upstream `main` branch scripts is a
      # supply-chain risk; tool versions are pinned but the installers are not.
      - name: Install Syft
        run: |
          curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin v1.9.0
          syft --version

      - name: Install Grype
        run: |
          curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin v0.79.3
          grype --version
          grype db update

      - name: Install Trivy
        run: |
          curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin v0.54.1
          trivy --version
          trivy image --download-db-only

      - name: Check tools
        id: check
        run: |
          syft --version && grype --version && trivy --version
          echo "installed=true" >> "$GITHUB_OUTPUT"

  # ==========================================================================
  # Quick Parity Check (PR Gate)
  # ==========================================================================
  quick-parity:
    name: Quick Parity Check
    runs-on: ubuntu-latest
    needs: setup-tools
    if: github.event_name == 'pull_request'
    timeout-minutes: 30
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup .NET
        uses: actions/setup-dotnet@v4
        with:
          dotnet-version: "10.0.100"

      # Fresh runner: reinstall tools and vulnerability DBs (see setup-tools).
      - name: Install scanner tools
        run: |
          curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin v1.9.0
          curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin v0.79.3
          curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin v0.54.1
          grype db update
          trivy image --download-db-only

      - name: Build parity tests
        run: dotnet build src/__Tests/parity/StellaOps.Parity.Tests/StellaOps.Parity.Tests.csproj --configuration Release

      # PR gate runs only the fast BaseImages subset of the parity suite.
      - name: Run quick parity tests
        run: |
          dotnet test src/__Tests/parity/StellaOps.Parity.Tests \
            --filter "Category=CompetitorParity&FullyQualifiedName~BaseImages" \
            --configuration Release \
            --no-build \
            --logger "trx;LogFileName=parity-quick.trx" \
            --results-directory ./TestResults
        timeout-minutes: 20

      - name: Upload results
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: quick-parity-results
          path: TestResults/**/*.trx

  # ==========================================================================
  # Full Corpus Benchmark (Scheduled)
  # ==========================================================================
  full-corpus-benchmark:
    name: Full Corpus Benchmark
    runs-on: ubuntu-latest
    needs: setup-tools
    if: github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && github.event.inputs.run_full_corpus == 'true')
    timeout-minutes: 180
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup .NET
        uses: actions/setup-dotnet@v4
        with:
          dotnet-version: "10.0.100"

      # Fresh runner: reinstall tools and vulnerability DBs (see setup-tools).
      - name: Install scanner tools
        run: |
          curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin v1.9.0
          curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin v0.79.3
          curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin v0.54.1
          grype db update
          trivy image --download-db-only

      - name: Build parity tests
        run: dotnet build src/__Tests/parity/StellaOps.Parity.Tests/StellaOps.Parity.Tests.csproj --configuration Release

      # Pre-pull corpus images in parallel so scan timings exclude pull time.
      - name: Pull corpus images
        run: |
          echo "Pulling base images..."
          docker pull alpine:3.18 &
          docker pull alpine:3.19 &
          docker pull alpine:3.20 &
          docker pull debian:bullseye-slim &
          docker pull debian:bookworm-slim &
          docker pull ubuntu:20.04 &
          docker pull ubuntu:22.04 &
          docker pull ubuntu:24.04 &
          wait
          echo "Pulling language runtimes..."
          docker pull node:18-alpine &
          docker pull node:20-alpine &
          docker pull python:3.11-alpine &
          docker pull python:3.12-slim &
          docker pull golang:1.22-bookworm &
          docker pull rust:1.75-bookworm &
          wait

      # Benchmark categories run independently; continue-on-error keeps later
      # categories (and the report step) running if one category fails.
      - name: Run base image benchmarks
        run: |
          dotnet test src/__Tests/parity/StellaOps.Parity.Tests \
            --filter "Category=CompetitorParity&FullyQualifiedName~BaseImages" \
            --configuration Release \
            --no-build \
            --logger "trx;LogFileName=benchmark-base.trx" \
            --results-directory ./TestResults/base
        timeout-minutes: 45
        continue-on-error: true

      - name: Run language runtime benchmarks
        run: |
          dotnet test src/__Tests/parity/StellaOps.Parity.Tests \
            --filter "Category=CompetitorParity&FullyQualifiedName~LanguageRuntime" \
            --configuration Release \
            --no-build \
            --logger "trx;LogFileName=benchmark-runtimes.trx" \
            --results-directory ./TestResults/runtimes
        timeout-minutes: 60
        continue-on-error: true

      - name: Run vulnerable image benchmarks
        run: |
          dotnet test src/__Tests/parity/StellaOps.Parity.Tests \
            --filter "Category=CompetitorParity&FullyQualifiedName~Vulnerable" \
            --configuration Release \
            --no-build \
            --logger "trx;LogFileName=benchmark-vulnerable.trx" \
            --results-directory ./TestResults/vulnerable
        timeout-minutes: 30
        continue-on-error: true

      - name: Generate benchmark report
        if: always()
        run: |
          echo "# Competitor Parity Benchmark Report" > ./TestResults/report.md
          echo "" >> ./TestResults/report.md
          echo "**Date:** $(date -u '+%Y-%m-%d %H:%M:%S UTC')" >> ./TestResults/report.md
          echo "**Corpus:** Expanded (50+ images)" >> ./TestResults/report.md
          echo "" >> ./TestResults/report.md
          echo "## Tool Versions" >> ./TestResults/report.md
          echo "- Syft: $(syft --version | head -1)" >> ./TestResults/report.md
          echo "- Grype: $(grype --version | head -1)" >> ./TestResults/report.md
          echo "- Trivy: $(trivy --version | head -1)" >> ./TestResults/report.md
          echo "" >> ./TestResults/report.md
          echo "## Test Results" >> ./TestResults/report.md
          find ./TestResults -name "*.trx" -exec basename {} \; | while read f; do
            echo "- $f" >> ./TestResults/report.md
          done

      - name: Upload benchmark results
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: full-corpus-benchmark-results
          path: TestResults/**

  # ==========================================================================
  # Corpus Validation
  # ==========================================================================
  corpus-validation:
    name: Corpus Validation
    runs-on: ubuntu-latest
    if: github.event_name != 'schedule'
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup .NET
        uses: actions/setup-dotnet@v4
        with:
          dotnet-version: "10.0.100"

      - name: Build tests
        run: dotnet build src/__Tests/parity/StellaOps.Parity.Tests/StellaOps.Parity.Tests.csproj --configuration Release

      - name: Validate corpus coverage
        run: |
          dotnet test src/__Tests/parity/StellaOps.Parity.Tests \
            --filter "FullyQualifiedName~ExpandedCorpus" \
            --configuration Release \
            --no-build \
            --logger "trx;LogFileName=corpus-validation.trx" \
            --results-directory ./TestResults

      - name: Upload validation results
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: corpus-validation-results
          path: TestResults/**/*.trx

  # ==========================================================================
  # Metrics Summary
  # ==========================================================================
  metrics-summary:
    name: Metrics Summary
    runs-on: ubuntu-latest
    needs: [full-corpus-benchmark]
    # always() so the summary is written even when benchmark steps failed
    # (benchmark categories use continue-on-error and still upload artifacts).
    if: always() && (github.event_name == 'schedule' || github.event.inputs.run_full_corpus == 'true')
    steps:
      - name: Download results
        uses: actions/download-artifact@v4
        with:
          name: full-corpus-benchmark-results
          path: ./Results

      - name: Generate summary
        run: |
          echo "## Competitor Parity Summary" >> "$GITHUB_STEP_SUMMARY"
          echo "" >> "$GITHUB_STEP_SUMMARY"
          echo "Full corpus benchmark completed." >> "$GITHUB_STEP_SUMMARY"
          echo "" >> "$GITHUB_STEP_SUMMARY"
          echo "### Categories Tested" >> "$GITHUB_STEP_SUMMARY"
          echo "- Base OS images (Alpine, Debian, Ubuntu, Rocky)" >> "$GITHUB_STEP_SUMMARY"
          echo "- Language runtimes (Node, Python, Go, Java, Rust, .NET)" >> "$GITHUB_STEP_SUMMARY"
          echo "- Application stacks (Postgres, Redis, nginx, etc.)" >> "$GITHUB_STEP_SUMMARY"
          echo "- Enterprise images (WordPress, Prometheus, Jenkins)" >> "$GITHUB_STEP_SUMMARY"
          echo "" >> "$GITHUB_STEP_SUMMARY"
          echo "### Scanners Compared" >> "$GITHUB_STEP_SUMMARY"
          echo "- Syft v1.9.0 (SBOM generation)" >> "$GITHUB_STEP_SUMMARY"
          echo "- Grype v0.79.3 (Vulnerability scanning)" >> "$GITHUB_STEP_SUMMARY"
          echo "- Trivy v0.54.1 (Vulnerability scanning)" >> "$GITHUB_STEP_SUMMARY"