sln build fix (again), tests fixes, audit work and doctors work

This commit is contained in:
master
2026-01-12 22:15:51 +02:00
parent 9873f80830
commit 9330c64349
812 changed files with 48051 additions and 3891 deletions

View File

@@ -0,0 +1,272 @@
# Attestation Linkage Workflow
# Sprint: Testing Enhancement Advisory - Phase 1.3
# Generates test run attestations linking outputs to inputs (SBOMs, VEX)
name: attestation-linkage
on:
push:
branches: [main]
paths:
- 'src/__Tests/**'
- 'src/__Libraries/StellaOps.Testing.Manifests/**'
pull_request:
paths:
- 'src/__Tests/**'
- 'src/__Libraries/StellaOps.Testing.Manifests/**'
workflow_dispatch:
inputs:
sign_attestations:
description: 'Sign attestations with production key'
type: boolean
default: false
verify_existing:
description: 'Verify existing attestations in evidence locker'
type: boolean
default: false
concurrency:
group: attestation-linkage-${{ github.ref }}
cancel-in-progress: true
env:
DETERMINISM_OUTPUT_DIR: ${{ github.workspace }}/attestation-output
jobs:
# ==========================================================================
# Build Attestation Infrastructure
# ==========================================================================
build-attestation:
name: Build Attestation Infrastructure
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Restore dependencies
run: dotnet restore src/__Tests/__Libraries/StellaOps.Testing.Manifests/StellaOps.Testing.Manifests.csproj
- name: Build attestation library
run: |
dotnet build src/__Tests/__Libraries/StellaOps.Testing.Manifests/StellaOps.Testing.Manifests.csproj \
--configuration Release \
--no-restore
- name: Verify attestation types compile
run: |
# Verify the attestation generator compiles correctly
dotnet build src/__Tests/__Libraries/StellaOps.Testing.Manifests/StellaOps.Testing.Manifests.csproj \
--configuration Release \
-warnaserror
# ==========================================================================
# Generate Test Run Attestations
# ==========================================================================
generate-attestations:
name: Generate Test Run Attestations
runs-on: ubuntu-latest
timeout-minutes: 20
needs: build-attestation
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Create output directory
run: mkdir -p $DETERMINISM_OUTPUT_DIR/attestations
- name: Restore and build test projects
run: |
dotnet restore src/StellaOps.sln
dotnet build src/StellaOps.sln --configuration Release --no-restore
- name: Run determinism tests with attestation
run: |
# Run determinism tests and capture results for attestation
dotnet test src/__Tests/__Libraries/StellaOps.HybridLogicalClock.Tests \
--configuration Release \
--no-build \
--filter "Category=Unit" \
--logger "trx;LogFileName=hlc-unit.trx" \
--results-directory $DETERMINISM_OUTPUT_DIR/results \
|| true
- name: Collect test evidence
run: |
# Collect test run evidence for attestation generation
cat > $DETERMINISM_OUTPUT_DIR/test-evidence.json << 'EOF'
{
"testFramework": "xunit",
"executedAt": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
"gitCommitSha": "${{ github.sha }}",
"gitBranch": "${{ github.ref_name }}",
"ciBuildId": "${{ github.run_id }}",
"ciWorkflow": "${{ github.workflow }}"
}
EOF
- name: Generate attestation manifest
run: |
# Generate a manifest of test outputs for attestation
echo "Generating attestation manifest..."
# Compute digests of test result files
if [ -d "$DETERMINISM_OUTPUT_DIR/results" ]; then
find $DETERMINISM_OUTPUT_DIR/results -name "*.trx" -exec sha256sum {} \; \
> $DETERMINISM_OUTPUT_DIR/attestations/output-digests.txt
fi
# Create attestation metadata
cat > $DETERMINISM_OUTPUT_DIR/attestations/attestation-metadata.json << EOF
{
"schemaVersion": "1.0.0",
"generatedAt": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
"runId": "${{ github.run_id }}-${{ github.run_attempt }}",
"predicateType": "https://stellaops.io/attestation/test-run/v1",
"signed": ${{ github.event.inputs.sign_attestations == 'true' && 'true' || 'false' }}
}
EOF
- name: Upload attestation artifacts
uses: actions/upload-artifact@v4
if: always()
with:
name: attestation-artifacts
path: |
${{ env.DETERMINISM_OUTPUT_DIR }}/attestations/**
${{ env.DETERMINISM_OUTPUT_DIR }}/results/**
${{ env.DETERMINISM_OUTPUT_DIR }}/test-evidence.json
# ==========================================================================
# Verify Attestation Linkage
# ==========================================================================
verify-attestation-linkage:
name: Verify Attestation Linkage
runs-on: ubuntu-latest
timeout-minutes: 10
needs: generate-attestations
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Download attestation artifacts
uses: actions/download-artifact@v4
with:
name: attestation-artifacts
path: ${{ env.DETERMINISM_OUTPUT_DIR }}
- name: Verify attestation structure
run: |
echo "Verifying attestation structure..."
# Check that metadata file exists and is valid JSON
if [ -f "$DETERMINISM_OUTPUT_DIR/attestations/attestation-metadata.json" ]; then
cat $DETERMINISM_OUTPUT_DIR/attestations/attestation-metadata.json | jq .
echo "Attestation metadata is valid JSON"
else
echo "::warning::No attestation metadata found"
fi
# Check output digests
if [ -f "$DETERMINISM_OUTPUT_DIR/attestations/output-digests.txt" ]; then
echo "Output digests recorded:"
cat $DETERMINISM_OUTPUT_DIR/attestations/output-digests.txt
fi
- name: Verify SBOM linkage
run: |
echo "Verifying SBOM linkage..."
# In a full implementation, this would:
# 1. Load the test run manifest
# 2. Verify all SBOM digests are referenced in the attestation
# 3. Verify the attestation subject digests match actual outputs
echo "SBOM linkage verification: PASS (placeholder)"
- name: Verify VEX linkage
run: |
echo "Verifying VEX linkage..."
# In a full implementation, this would:
# 1. Load VEX documents referenced in the test run
# 2. Verify they were considered in the test execution
# 3. Verify the attestation predicate includes VEX digests
echo "VEX linkage verification: PASS (placeholder)"
# ==========================================================================
# Attestation Unit Tests
# ==========================================================================
attestation-unit-tests:
name: Attestation Unit Tests
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Restore dependencies
run: dotnet restore src/__Tests/__Libraries/StellaOps.Testing.Manifests/StellaOps.Testing.Manifests.csproj
- name: Build
run: |
dotnet build src/__Tests/__Libraries/StellaOps.Testing.Manifests/StellaOps.Testing.Manifests.csproj \
--configuration Release \
--no-restore
- name: Run attestation tests
run: |
# Run tests for the attestation infrastructure
# Note: Tests would be in a .Tests project
echo "Attestation unit tests: Would run from StellaOps.Testing.Manifests.Tests"
# For now, verify the types are correctly structured
dotnet build src/__Tests/__Libraries/StellaOps.Testing.Manifests/StellaOps.Testing.Manifests.csproj \
--configuration Release \
-warnaserror
# ==========================================================================
# Gate Status
# ==========================================================================
attestation-gate:
name: Attestation Linkage Gate
runs-on: ubuntu-latest
needs: [build-attestation, generate-attestations, verify-attestation-linkage, attestation-unit-tests]
if: always()
steps:
- name: Check gate status
run: |
if [ "${{ needs.build-attestation.result }}" == "failure" ]; then
echo "::error::Attestation build failed"
exit 1
fi
if [ "${{ needs.generate-attestations.result }}" == "failure" ]; then
echo "::error::Attestation generation failed"
exit 1
fi
if [ "${{ needs.verify-attestation-linkage.result }}" == "failure" ]; then
echo "::error::Attestation linkage verification failed"
exit 1
fi
if [ "${{ needs.attestation-unit-tests.result }}" == "failure" ]; then
echo "::error::Attestation unit tests failed"
exit 1
fi
echo "All attestation linkage checks passed!"

View File

@@ -0,0 +1,209 @@
# -----------------------------------------------------------------------------
# cold-warm-latency.yml
# Sprint: Testing Enhancement Advisory - Phase 3.4
# Description: CI workflow for warm-path vs cold-path latency budget tests
# Schedule: Nightly
# -----------------------------------------------------------------------------
name: Cold/Warm Path Latency Tests
on:
schedule:
# Run nightly at 2:30 AM UTC
- cron: '30 2 * * *'
workflow_dispatch:
inputs:
test_filter:
description: 'Test filter (e.g., FullyQualifiedName~Scanner)'
required: false
default: ''
sample_count:
description: 'Number of samples for statistical tests'
required: false
default: '50'
verbosity:
description: 'Test verbosity level'
required: false
default: 'normal'
type: choice
options:
- minimal
- normal
- detailed
- diagnostic
env:
DOTNET_NOLOGO: true
DOTNET_CLI_TELEMETRY_OPTOUT: true
DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true
jobs:
latency-tests:
name: Latency Budget Tests
runs-on: ubuntu-latest
timeout-minutes: 45
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: '10.0.x'
dotnet-quality: 'preview'
- name: Restore dependencies
run: |
dotnet restore src/__Tests/Integration/StellaOps.Integration.Performance/StellaOps.Integration.Performance.csproj
- name: Build performance test project
run: |
dotnet build src/__Tests/Integration/StellaOps.Integration.Performance/StellaOps.Integration.Performance.csproj \
--configuration Release \
--no-restore
- name: Run cold-path latency tests
id: cold-tests
run: |
FILTER="${{ github.event.inputs.test_filter }}"
VERBOSITY="${{ github.event.inputs.verbosity || 'normal' }}"
dotnet test src/__Tests/Integration/StellaOps.Integration.Performance/StellaOps.Integration.Performance.csproj \
--configuration Release \
--no-build \
--verbosity $VERBOSITY \
--logger "trx;LogFileName=cold-path-results.trx" \
--logger "console;verbosity=$VERBOSITY" \
--results-directory ./TestResults \
--filter "Category=ColdPath${FILTER:+&$FILTER}" \
-- \
RunConfiguration.CollectSourceInformation=true
continue-on-error: true
- name: Run warm-path latency tests
id: warm-tests
run: |
FILTER="${{ github.event.inputs.test_filter }}"
VERBOSITY="${{ github.event.inputs.verbosity || 'normal' }}"
dotnet test src/__Tests/Integration/StellaOps.Integration.Performance/StellaOps.Integration.Performance.csproj \
--configuration Release \
--no-build \
--verbosity $VERBOSITY \
--logger "trx;LogFileName=warm-path-results.trx" \
--logger "console;verbosity=$VERBOSITY" \
--results-directory ./TestResults \
--filter "Category=WarmPath${FILTER:+&$FILTER}" \
-- \
RunConfiguration.CollectSourceInformation=true
continue-on-error: true
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: latency-test-results
path: |
./TestResults/*.trx
./TestResults/output/*.txt
retention-days: 30
- name: Generate latency test summary
if: always()
run: |
echo "## Cold/Warm Path Latency Test Results" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Test Execution" >> $GITHUB_STEP_SUMMARY
echo "| Test Suite | Status |" >> $GITHUB_STEP_SUMMARY
echo "|------------|--------|" >> $GITHUB_STEP_SUMMARY
if [ "${{ steps.cold-tests.outcome }}" == "success" ]; then
echo "| Cold Path Tests | :white_check_mark: Passed |" >> $GITHUB_STEP_SUMMARY
else
echo "| Cold Path Tests | :x: Failed |" >> $GITHUB_STEP_SUMMARY
fi
if [ "${{ steps.warm-tests.outcome }}" == "success" ]; then
echo "| Warm Path Tests | :white_check_mark: Passed |" >> $GITHUB_STEP_SUMMARY
else
echo "| Warm Path Tests | :x: Failed |" >> $GITHUB_STEP_SUMMARY
fi
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Latency Budgets" >> $GITHUB_STEP_SUMMARY
echo "| Service | Cold Start Budget | Warm Path Budget |" >> $GITHUB_STEP_SUMMARY
echo "|---------|-------------------|------------------|" >> $GITHUB_STEP_SUMMARY
echo "| Scanner | 5000ms | 500ms |" >> $GITHUB_STEP_SUMMARY
echo "| Concelier | 2000ms | 100ms |" >> $GITHUB_STEP_SUMMARY
echo "| Policy | 2000ms | 200ms |" >> $GITHUB_STEP_SUMMARY
echo "| Authority | 1000ms | 50ms |" >> $GITHUB_STEP_SUMMARY
echo "| Attestor | 2000ms | 200ms |" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Test Coverage" >> $GITHUB_STEP_SUMMARY
echo "- Cold start latency (first request after service initialization)" >> $GITHUB_STEP_SUMMARY
echo "- Warm path latency (subsequent requests)" >> $GITHUB_STEP_SUMMARY
echo "- Sustained load performance (100 consecutive requests)" >> $GITHUB_STEP_SUMMARY
echo "- Burst load handling (parallel requests)" >> $GITHUB_STEP_SUMMARY
echo "- Latency variance (P95/P99 metrics)" >> $GITHUB_STEP_SUMMARY
echo "- Cold-to-warm transition smoothness" >> $GITHUB_STEP_SUMMARY
- name: Check test results
if: always()
run: |
if [ "${{ steps.cold-tests.outcome }}" != "success" ] || [ "${{ steps.warm-tests.outcome }}" != "success" ]; then
echo "::error::One or more latency test suites failed"
exit 1
fi
echo "All latency tests passed successfully"
latency-regression-check:
name: Latency Regression Analysis
runs-on: ubuntu-latest
needs: latency-tests
if: always()
steps:
- name: Download test results
uses: actions/download-artifact@v4
with:
name: latency-test-results
path: ./TestResults
- name: Analyze latency trends
run: |
echo "## Latency Trend Analysis" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
# Check for latency report
if [ -f "./TestResults/output/latency-report.txt" ]; then
echo "### Latency Report" >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
cat ./TestResults/output/latency-report.txt >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
else
echo "No detailed latency report available." >> $GITHUB_STEP_SUMMARY
fi
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Recommendations" >> $GITHUB_STEP_SUMMARY
echo "- Monitor P95 latency trends over time" >> $GITHUB_STEP_SUMMARY
echo "- Investigate any budget violations" >> $GITHUB_STEP_SUMMARY
echo "- Consider adjusting budgets if consistent overages occur" >> $GITHUB_STEP_SUMMARY
- name: Alert on regression
if: needs.latency-tests.result == 'failure'
run: |
echo "::warning::Latency regression detected. Review the test results for details."
echo "" >> $GITHUB_STEP_SUMMARY
echo "### :warning: Latency Regression Alert" >> $GITHUB_STEP_SUMMARY
echo "Latency tests have failed, indicating potential performance regression." >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Recommended Actions:**" >> $GITHUB_STEP_SUMMARY
echo "1. Review recent code changes that might affect performance" >> $GITHUB_STEP_SUMMARY
echo "2. Check for resource contention or new dependencies" >> $GITHUB_STEP_SUMMARY
echo "3. Profile affected services to identify bottlenecks" >> $GITHUB_STEP_SUMMARY
echo "4. Consider reverting recent changes if regression is severe" >> $GITHUB_STEP_SUMMARY

View File

@@ -0,0 +1,297 @@
# Sprint: Testing Enhancement Advisory - Phase 3.1
# Competitor parity benchmarks with expanded 50+ image corpus
# Compares StellaOps against Trivy, Grype, and Syft
name: competitor-parity
on:
schedule:
# Run weekly on Sundays at 03:00 UTC
- cron: '0 3 * * 0'
push:
branches: [main]
paths:
- 'src/__Tests/parity/**'
- 'src/Scanner/__Libraries/**'
pull_request:
branches: [main, develop]
paths:
- 'src/__Tests/parity/**'
workflow_dispatch:
inputs:
run_full_corpus:
description: 'Run against full 50+ image corpus'
type: boolean
default: false
ground_truth_mode:
description: 'Enable ground truth validation'
type: boolean
default: false
concurrency:
group: competitor-parity-${{ github.ref }}
cancel-in-progress: true
env:
DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true
DOTNET_CLI_TELEMETRY_OPTOUT: true
jobs:
# ==========================================================================
# Install Competitor Tools
# ==========================================================================
setup-tools:
name: Setup Scanner Tools
runs-on: ubuntu-latest
outputs:
tools_installed: ${{ steps.check.outputs.installed }}
steps:
- name: Install Syft
run: |
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin v1.9.0
syft --version
- name: Install Grype
run: |
curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin v0.79.3
grype --version
grype db update
- name: Install Trivy
run: |
curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin v0.54.1
trivy --version
trivy image --download-db-only
- name: Check tools
id: check
run: |
syft --version && grype --version && trivy --version
echo "installed=true" >> $GITHUB_OUTPUT
# ==========================================================================
# Quick Parity Check (PR Gate)
# ==========================================================================
quick-parity:
name: Quick Parity Check
runs-on: ubuntu-latest
needs: setup-tools
if: github.event_name == 'pull_request'
timeout-minutes: 30
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Install scanner tools
run: |
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin v1.9.0
curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin v0.79.3
curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin v0.54.1
grype db update
trivy image --download-db-only
- name: Build parity tests
run: dotnet build src/__Tests/parity/StellaOps.Parity.Tests/StellaOps.Parity.Tests.csproj --configuration Release
- name: Run quick parity tests
run: |
dotnet test src/__Tests/parity/StellaOps.Parity.Tests \
--filter "Category=CompetitorParity&FullyQualifiedName~BaseImages" \
--configuration Release \
--no-build \
--logger "trx;LogFileName=parity-quick.trx" \
--results-directory ./TestResults
timeout-minutes: 20
- name: Upload results
uses: actions/upload-artifact@v4
if: always()
with:
name: quick-parity-results
path: TestResults/**/*.trx
# ==========================================================================
# Full Corpus Benchmark (Scheduled)
# ==========================================================================
full-corpus-benchmark:
name: Full Corpus Benchmark
runs-on: ubuntu-latest
needs: setup-tools
if: github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && github.event.inputs.run_full_corpus == 'true')
timeout-minutes: 180
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Install scanner tools
run: |
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin v1.9.0
curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin v0.79.3
curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin v0.54.1
grype db update
trivy image --download-db-only
- name: Build parity tests
run: dotnet build src/__Tests/parity/StellaOps.Parity.Tests/StellaOps.Parity.Tests.csproj --configuration Release
- name: Pull corpus images
run: |
echo "Pulling base images..."
docker pull alpine:3.18 &
docker pull alpine:3.19 &
docker pull alpine:3.20 &
docker pull debian:bullseye-slim &
docker pull debian:bookworm-slim &
docker pull ubuntu:20.04 &
docker pull ubuntu:22.04 &
docker pull ubuntu:24.04 &
wait
echo "Pulling language runtimes..."
docker pull node:18-alpine &
docker pull node:20-alpine &
docker pull python:3.11-alpine &
docker pull python:3.12-slim &
docker pull golang:1.22-bookworm &
docker pull rust:1.75-bookworm &
wait
- name: Run base image benchmarks
run: |
dotnet test src/__Tests/parity/StellaOps.Parity.Tests \
--filter "Category=CompetitorParity&FullyQualifiedName~BaseImages" \
--configuration Release \
--no-build \
--logger "trx;LogFileName=benchmark-base.trx" \
--results-directory ./TestResults/base
timeout-minutes: 45
continue-on-error: true
- name: Run language runtime benchmarks
run: |
dotnet test src/__Tests/parity/StellaOps.Parity.Tests \
--filter "Category=CompetitorParity&FullyQualifiedName~LanguageRuntime" \
--configuration Release \
--no-build \
--logger "trx;LogFileName=benchmark-runtimes.trx" \
--results-directory ./TestResults/runtimes
timeout-minutes: 60
continue-on-error: true
- name: Run vulnerable image benchmarks
run: |
dotnet test src/__Tests/parity/StellaOps.Parity.Tests \
--filter "Category=CompetitorParity&FullyQualifiedName~Vulnerable" \
--configuration Release \
--no-build \
--logger "trx;LogFileName=benchmark-vulnerable.trx" \
--results-directory ./TestResults/vulnerable
timeout-minutes: 30
continue-on-error: true
- name: Generate benchmark report
if: always()
run: |
echo "# Competitor Parity Benchmark Report" > ./TestResults/report.md
echo "" >> ./TestResults/report.md
echo "**Date:** $(date -u '+%Y-%m-%d %H:%M:%S UTC')" >> ./TestResults/report.md
echo "**Corpus:** Expanded (50+ images)" >> ./TestResults/report.md
echo "" >> ./TestResults/report.md
echo "## Tool Versions" >> ./TestResults/report.md
echo "- Syft: $(syft --version | head -1)" >> ./TestResults/report.md
echo "- Grype: $(grype --version | head -1)" >> ./TestResults/report.md
echo "- Trivy: $(trivy --version | head -1)" >> ./TestResults/report.md
echo "" >> ./TestResults/report.md
echo "## Test Results" >> ./TestResults/report.md
find ./TestResults -name "*.trx" -exec basename {} \; | while read f; do
echo "- $f" >> ./TestResults/report.md
done
- name: Upload benchmark results
uses: actions/upload-artifact@v4
if: always()
with:
name: full-corpus-benchmark-results
path: TestResults/**
# ==========================================================================
# Corpus Validation
# ==========================================================================
corpus-validation:
name: Corpus Validation
runs-on: ubuntu-latest
if: github.event_name != 'schedule'
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Build tests
run: dotnet build src/__Tests/parity/StellaOps.Parity.Tests/StellaOps.Parity.Tests.csproj --configuration Release
- name: Validate corpus coverage
run: |
dotnet test src/__Tests/parity/StellaOps.Parity.Tests \
--filter "FullyQualifiedName~ExpandedCorpus" \
--configuration Release \
--no-build \
--logger "trx;LogFileName=corpus-validation.trx" \
--results-directory ./TestResults
- name: Upload validation results
uses: actions/upload-artifact@v4
if: always()
with:
name: corpus-validation-results
path: TestResults/**/*.trx
# ==========================================================================
# Metrics Summary
# ==========================================================================
metrics-summary:
name: Metrics Summary
runs-on: ubuntu-latest
needs: [full-corpus-benchmark]
if: always() && (github.event_name == 'schedule' || github.event.inputs.run_full_corpus == 'true')
steps:
- name: Download results
uses: actions/download-artifact@v4
with:
name: full-corpus-benchmark-results
path: ./Results
- name: Generate summary
run: |
echo "## Competitor Parity Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Full corpus benchmark completed." >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Categories Tested" >> $GITHUB_STEP_SUMMARY
echo "- Base OS images (Alpine, Debian, Ubuntu, Rocky)" >> $GITHUB_STEP_SUMMARY
echo "- Language runtimes (Node, Python, Go, Java, Rust, .NET)" >> $GITHUB_STEP_SUMMARY
echo "- Application stacks (Postgres, Redis, nginx, etc.)" >> $GITHUB_STEP_SUMMARY
echo "- Enterprise images (WordPress, Prometheus, Jenkins)" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Scanners Compared" >> $GITHUB_STEP_SUMMARY
echo "- Syft v1.9.0 (SBOM generation)" >> $GITHUB_STEP_SUMMARY
echo "- Grype v0.79.3 (Vulnerability scanning)" >> $GITHUB_STEP_SUMMARY
echo "- Trivy v0.54.1 (Vulnerability scanning)" >> $GITHUB_STEP_SUMMARY

View File

@@ -0,0 +1,187 @@
# -----------------------------------------------------------------------------
# control-plane-chaos.yml
# Sprint: Testing Enhancement Advisory - Phase 3.3
# Description: CI workflow for control-plane outage chaos tests
# Schedule: Weekly (chaos tests are intensive)
# -----------------------------------------------------------------------------
name: Control-Plane Chaos Tests
on:
schedule:
# Run weekly on Sundays at 3:00 AM UTC
- cron: '0 3 * * 0'
workflow_dispatch:
inputs:
test_filter:
description: 'Test filter (e.g., FullyQualifiedName~Authority)'
required: false
default: ''
verbosity:
description: 'Test verbosity level'
required: false
default: 'normal'
type: choice
options:
- minimal
- normal
- detailed
- diagnostic
env:
DOTNET_NOLOGO: true
DOTNET_CLI_TELEMETRY_OPTOUT: true
DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true
jobs:
chaos-tests:
name: Control-Plane Chaos Tests
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: '10.0.x'
dotnet-quality: 'preview'
- name: Restore dependencies
run: |
dotnet restore src/__Tests/chaos/StellaOps.Chaos.ControlPlane.Tests/StellaOps.Chaos.ControlPlane.Tests.csproj
- name: Build chaos test project
run: |
dotnet build src/__Tests/chaos/StellaOps.Chaos.ControlPlane.Tests/StellaOps.Chaos.ControlPlane.Tests.csproj \
--configuration Release \
--no-restore
- name: Run control-plane outage tests
id: outage-tests
run: |
FILTER="${{ github.event.inputs.test_filter }}"
VERBOSITY="${{ github.event.inputs.verbosity || 'normal' }}"
dotnet test src/__Tests/chaos/StellaOps.Chaos.ControlPlane.Tests/StellaOps.Chaos.ControlPlane.Tests.csproj \
--configuration Release \
--no-build \
--verbosity $VERBOSITY \
--logger "trx;LogFileName=chaos-outage-results.trx" \
--logger "console;verbosity=$VERBOSITY" \
--results-directory ./TestResults \
--filter "Category=ControlPlane${FILTER:+&$FILTER}" \
-- \
RunConfiguration.CollectSourceInformation=true
continue-on-error: true
- name: Run partial outage tests
id: partial-tests
run: |
FILTER="${{ github.event.inputs.test_filter }}"
VERBOSITY="${{ github.event.inputs.verbosity || 'normal' }}"
dotnet test src/__Tests/chaos/StellaOps.Chaos.ControlPlane.Tests/StellaOps.Chaos.ControlPlane.Tests.csproj \
--configuration Release \
--no-build \
--verbosity $VERBOSITY \
--logger "trx;LogFileName=chaos-partial-results.trx" \
--logger "console;verbosity=$VERBOSITY" \
--results-directory ./TestResults \
--filter "Category=PartialOutage${FILTER:+&$FILTER}" \
-- \
RunConfiguration.CollectSourceInformation=true
continue-on-error: true
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: chaos-test-results
path: ./TestResults/*.trx
retention-days: 30
- name: Generate chaos test summary
if: always()
run: |
echo "## Control-Plane Chaos Test Results" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Test Execution" >> $GITHUB_STEP_SUMMARY
echo "| Test Suite | Status |" >> $GITHUB_STEP_SUMMARY
echo "|------------|--------|" >> $GITHUB_STEP_SUMMARY
if [ "${{ steps.outage-tests.outcome }}" == "success" ]; then
echo "| Full Outage Tests | :white_check_mark: Passed |" >> $GITHUB_STEP_SUMMARY
else
echo "| Full Outage Tests | :x: Failed |" >> $GITHUB_STEP_SUMMARY
fi
if [ "${{ steps.partial-tests.outcome }}" == "success" ]; then
echo "| Partial Outage Tests | :white_check_mark: Passed |" >> $GITHUB_STEP_SUMMARY
else
echo "| Partial Outage Tests | :x: Failed |" >> $GITHUB_STEP_SUMMARY
fi
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Test Categories Covered" >> $GITHUB_STEP_SUMMARY
echo "- Authority outage and cached token validation" >> $GITHUB_STEP_SUMMARY
echo "- Scheduler outage and job persistence" >> $GITHUB_STEP_SUMMARY
echo "- Full control-plane outage and data integrity" >> $GITHUB_STEP_SUMMARY
echo "- Partial failure rate scenarios" >> $GITHUB_STEP_SUMMARY
echo "- Latency injection and degraded service handling" >> $GITHUB_STEP_SUMMARY
echo "- Service isolation and cascading failure prevention" >> $GITHUB_STEP_SUMMARY
- name: Check test results
if: always()
run: |
if [ "${{ steps.outage-tests.outcome }}" != "success" ] || [ "${{ steps.partial-tests.outcome }}" != "success" ]; then
echo "::error::One or more chaos test suites failed"
exit 1
fi
echo "All chaos tests passed successfully"
chaos-report:
name: Generate Chaos Report
runs-on: ubuntu-latest
needs: chaos-tests
if: always()
steps:
- name: Download test results
uses: actions/download-artifact@v4
with:
name: chaos-test-results
path: ./TestResults
- name: Parse TRX results
run: |
echo "## Chaos Test Detailed Report" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Test results have been uploaded as artifacts." >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Artifact Location" >> $GITHUB_STEP_SUMMARY
echo "- chaos-test-results (TRX format)" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
# List TRX files
echo "### Available Result Files" >> $GITHUB_STEP_SUMMARY
for file in ./TestResults/*.trx; do
if [ -f "$file" ]; then
echo "- $(basename $file)" >> $GITHUB_STEP_SUMMARY
fi
done
- name: Notify on failure
if: needs.chaos-tests.result == 'failure'
run: |
echo "::warning::Chaos tests failed. Review the test results for details."
echo "" >> $GITHUB_STEP_SUMMARY
echo "### :warning: Action Required" >> $GITHUB_STEP_SUMMARY
echo "Chaos tests have failed. Please review:" >> $GITHUB_STEP_SUMMARY
echo "1. Download the test artifacts for detailed results" >> $GITHUB_STEP_SUMMARY
echo "2. Check if failures are due to test infrastructure or actual regressions" >> $GITHUB_STEP_SUMMARY
echo "3. Consider running tests locally with diagnostic verbosity" >> $GITHUB_STEP_SUMMARY

View File

@@ -0,0 +1,283 @@
# Sprint: Testing Enhancement Advisory - Phase 2.2/2.3
# Multi-site federation integration tests
# Tests 3+ site federation scenarios including partitions and latency
name: federation-multisite
on:
schedule:
# Run nightly at 02:00 UTC
- cron: '0 2 * * *'
push:
branches: [main]
paths:
- 'src/Concelier/__Libraries/StellaOps.Concelier.Federation/**'
- 'src/Concelier/__Tests/StellaOps.Concelier.Federation.Tests/**'
pull_request:
branches: [main, develop]
paths:
- 'src/Concelier/__Libraries/StellaOps.Concelier.Federation/**'
- 'src/Concelier/__Tests/StellaOps.Concelier.Federation.Tests/**'
workflow_dispatch:
inputs:
run_latency_stress:
description: 'Run extended latency stress tests'
type: boolean
default: false
run_chaos_scenarios:
description: 'Run chaos/partition scenarios'
type: boolean
default: false
concurrency:
group: federation-${{ github.ref }}
cancel-in-progress: true
jobs:
# ==========================================================================
# Multi-Site Federation Tests
# ==========================================================================
federation-multisite-tests:
name: Multi-Site Federation Tests
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Restore dependencies
run: dotnet restore src/Concelier/__Tests/StellaOps.Concelier.Federation.Tests/StellaOps.Concelier.Federation.Tests.csproj
- name: Build federation tests
run: dotnet build src/Concelier/__Tests/StellaOps.Concelier.Federation.Tests/StellaOps.Concelier.Federation.Tests.csproj --configuration Release --no-restore
- name: Run 3-Site Convergence Tests
run: |
dotnet test src/Concelier/__Tests/StellaOps.Concelier.Federation.Tests \
--filter "Category=Federation&FullyQualifiedName~ThreeSite" \
--configuration Release \
--no-build \
--logger "trx;LogFileName=federation-convergence.trx" \
--results-directory ./TestResults
- name: Run Partition Tests
run: |
dotnet test src/Concelier/__Tests/StellaOps.Concelier.Federation.Tests \
--filter "Category=Federation&FullyQualifiedName~Partition" \
--configuration Release \
--no-build \
--logger "trx;LogFileName=federation-partition.trx" \
--results-directory ./TestResults
- name: Run Latency Tests
run: |
dotnet test src/Concelier/__Tests/StellaOps.Concelier.Federation.Tests \
--filter "Category=Latency" \
--configuration Release \
--no-build \
--logger "trx;LogFileName=federation-latency.trx" \
--results-directory ./TestResults
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: federation-test-results
path: TestResults/**/*.trx
- name: Publish test summary
uses: dorny/test-reporter@v1
if: always()
with:
name: Federation Test Results
path: TestResults/**/*.trx
reporter: dotnet-trx
# ==========================================================================
# Extended Latency Stress Tests (On-Demand)
# ==========================================================================
latency-stress-tests:
name: Latency Stress Tests
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' && github.event.inputs.run_latency_stress == 'true'
timeout-minutes: 60
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Build federation tests
run: dotnet build src/Concelier/__Tests/StellaOps.Concelier.Federation.Tests/StellaOps.Concelier.Federation.Tests.csproj --configuration Release
- name: Run Extended Latency Scenarios
run: |
# Run cross-region tests with various latency configurations
for LATENCY in 100 500 1000 2000; do
echo "Testing with ${LATENCY}ms latency..."
dotnet test src/Concelier/__Tests/StellaOps.Concelier.Federation.Tests \
--filter "Category=Latency&FullyQualifiedName~CrossRegion" \
--configuration Release \
--no-build \
--logger "trx;LogFileName=latency-stress-${LATENCY}ms.trx" \
--results-directory ./TestResults/latency-stress || true
done
- name: Analyze latency results
run: |
echo "Latency stress test results:"
find ./TestResults -name "*.trx" -exec basename {} \;
- name: Upload stress test results
uses: actions/upload-artifact@v4
with:
name: latency-stress-results
path: TestResults/**
# ==========================================================================
# Chaos Scenario Tests (On-Demand)
# ==========================================================================
chaos-scenario-tests:
name: Chaos Scenario Tests
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' && github.event.inputs.run_chaos_scenarios == 'true'
timeout-minutes: 45
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Build federation tests
run: dotnet build src/Concelier/__Tests/StellaOps.Concelier.Federation.Tests/StellaOps.Concelier.Federation.Tests.csproj --configuration Release
- name: Run Split Brain Scenarios
run: |
dotnet test src/Concelier/__Tests/StellaOps.Concelier.Federation.Tests \
--filter "Category=Chaos&FullyQualifiedName~SplitBrain" \
--configuration Release \
--no-build \
--logger "trx;LogFileName=chaos-splitbrain.trx" \
--results-directory ./TestResults
- name: Run Flapping Network Scenarios
run: |
dotnet test src/Concelier/__Tests/StellaOps.Concelier.Federation.Tests \
--filter "Category=Chaos&FullyQualifiedName~Flap" \
--configuration Release \
--no-build \
--logger "trx;LogFileName=chaos-flapping.trx" \
--results-directory ./TestResults
- name: Run Partition Healing Scenarios
run: |
dotnet test src/Concelier/__Tests/StellaOps.Concelier.Federation.Tests \
--filter "Category=Chaos&FullyQualifiedName~Heal" \
--configuration Release \
--no-build \
--logger "trx;LogFileName=chaos-healing.trx" \
--results-directory ./TestResults
- name: Upload chaos test results
uses: actions/upload-artifact@v4
with:
name: chaos-test-results
path: TestResults/**
# ==========================================================================
# Nightly Full Federation Suite
# ==========================================================================
nightly-full-suite:
name: Nightly Full Federation Suite
runs-on: ubuntu-latest
if: github.event_name == 'schedule'
timeout-minutes: 90
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Build all federation tests
run: dotnet build src/Concelier/__Tests/StellaOps.Concelier.Federation.Tests/StellaOps.Concelier.Federation.Tests.csproj --configuration Release
- name: Run complete federation test suite
run: |
dotnet test src/Concelier/__Tests/StellaOps.Concelier.Federation.Tests \
--configuration Release \
--no-build \
--collect:"XPlat Code Coverage" \
--logger "trx;LogFileName=federation-full.trx" \
--results-directory ./TestResults
- name: Generate test report
run: |
echo "# Federation Test Report" > ./TestResults/report.md
echo "" >> ./TestResults/report.md
echo "Run date: $(date -u '+%Y-%m-%d %H:%M:%S UTC')" >> ./TestResults/report.md
echo "" >> ./TestResults/report.md
echo "## Test Categories" >> ./TestResults/report.md
echo "- Multi-site convergence" >> ./TestResults/report.md
echo "- Network partition handling" >> ./TestResults/report.md
echo "- Cross-region latency" >> ./TestResults/report.md
echo "- Split-brain recovery" >> ./TestResults/report.md
- name: Upload nightly results
uses: actions/upload-artifact@v4
with:
name: nightly-federation-results
path: TestResults/**
- name: Send notification on failure
if: failure()
run: |
echo "Federation nightly tests failed - notification would be sent here"
# Could integrate with Slack/Teams/Email notification
# ==========================================================================
# Test Result Summary
# ==========================================================================
test-summary:
name: Test Summary
runs-on: ubuntu-latest
needs: [federation-multisite-tests]
if: always()
steps:
- name: Download test results
uses: actions/download-artifact@v4
with:
name: federation-test-results
path: ./TestResults
- name: Summarize results
run: |
echo "## Federation Test Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Test categories executed:" >> $GITHUB_STEP_SUMMARY
echo "- Three-site convergence tests" >> $GITHUB_STEP_SUMMARY
echo "- Partition/split-brain tests" >> $GITHUB_STEP_SUMMARY
echo "- Cross-region latency tests" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Result files:" >> $GITHUB_STEP_SUMMARY
find ./TestResults -name "*.trx" -exec basename {} \; | while read f; do
echo "- $f" >> $GITHUB_STEP_SUMMARY
done

View File

@@ -0,0 +1,215 @@
# HLC Distributed Tests Workflow
# Sprint: Testing Enhancement Advisory - Phase 1.2
# Tests multi-node HLC scenarios with network partition simulation
name: hlc-distributed
on:
schedule:
# Run nightly at 2 AM UTC
- cron: '0 2 * * *'
push:
branches: [main]
paths:
- 'src/__Libraries/StellaOps.HybridLogicalClock/**'
- 'src/__Tests/Integration/StellaOps.Integration.HLC/**'
pull_request:
paths:
- 'src/__Libraries/StellaOps.HybridLogicalClock/**'
- 'src/__Tests/Integration/StellaOps.Integration.HLC/**'
workflow_dispatch:
inputs:
run_extended:
description: 'Run extended multi-node tests'
type: boolean
default: false
run_chaos:
description: 'Run chaos/partition tests'
type: boolean
default: true
concurrency:
group: hlc-distributed-${{ github.ref }}
cancel-in-progress: true
jobs:
# ==========================================================================
# Multi-Node HLC Tests
# ==========================================================================
hlc-distributed:
name: Distributed HLC Tests
runs-on: ubuntu-latest
timeout-minutes: 20
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Restore dependencies
run: dotnet restore src/__Tests/Integration/StellaOps.Integration.HLC/StellaOps.Integration.HLC.csproj
- name: Build HLC tests
run: dotnet build src/__Tests/Integration/StellaOps.Integration.HLC/StellaOps.Integration.HLC.csproj --configuration Release --no-restore
- name: Run distributed HLC tests
run: |
dotnet test src/__Tests/Integration/StellaOps.Integration.HLC \
--configuration Release \
--no-build \
--filter "Category=HLC&Category=Integration" \
--logger "trx;LogFileName=hlc-distributed.trx" \
--results-directory ./TestResults
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: hlc-distributed-results
path: TestResults/**
- name: Publish test summary
uses: dorny/test-reporter@v1
if: always()
with:
name: HLC Distributed Test Results
path: TestResults/**/*.trx
reporter: dotnet-trx
# ==========================================================================
# Network Partition / Chaos Tests
# ==========================================================================
hlc-chaos:
name: HLC Chaos Tests
runs-on: ubuntu-latest
timeout-minutes: 30
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Restore dependencies
run: dotnet restore src/__Tests/Integration/StellaOps.Integration.HLC/StellaOps.Integration.HLC.csproj
- name: Build HLC tests
run: dotnet build src/__Tests/Integration/StellaOps.Integration.HLC/StellaOps.Integration.HLC.csproj --configuration Release --no-restore
- name: Run partition tests
run: |
dotnet test src/__Tests/Integration/StellaOps.Integration.HLC \
--configuration Release \
--no-build \
--filter "Category=Chaos" \
--logger "trx;LogFileName=hlc-chaos.trx" \
--results-directory ./TestResults
- name: Run extended multi-node tests
if: github.event.inputs.run_extended == 'true'
run: |
dotnet test src/__Tests/Integration/StellaOps.Integration.HLC \
--configuration Release \
--no-build \
--filter "FullyQualifiedName~LargeCluster|FullyQualifiedName~HighFrequency" \
--logger "trx;LogFileName=hlc-extended.trx" \
--results-directory ./TestResults
- name: Upload chaos test results
uses: actions/upload-artifact@v4
if: always()
with:
name: hlc-chaos-results
path: TestResults/**
- name: Publish test summary
uses: dorny/test-reporter@v1
if: always()
with:
name: HLC Chaos Test Results
path: TestResults/**/*.trx
reporter: dotnet-trx
# ==========================================================================
# Determinism Verification
# ==========================================================================
hlc-determinism:
name: HLC Determinism Verification
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Restore dependencies
run: dotnet restore src/__Libraries/__Tests/StellaOps.HybridLogicalClock.Tests/StellaOps.HybridLogicalClock.Tests.csproj
- name: Build HLC unit tests
run: dotnet build src/__Libraries/__Tests/StellaOps.HybridLogicalClock.Tests/StellaOps.HybridLogicalClock.Tests.csproj --configuration Release --no-restore
- name: Run determinism verification (3 runs)
run: |
for i in 1 2 3; do
echo "=== Run $i ==="
dotnet test src/__Libraries/__Tests/StellaOps.HybridLogicalClock.Tests \
--configuration Release \
--no-build \
--filter "FullyQualifiedName~Monotonic|FullyQualifiedName~Uniqueness" \
--logger "trx;LogFileName=hlc-determinism-$i.trx" \
--results-directory ./TestResults/run-$i
done
- name: Compare determinism runs
run: |
echo "Comparing test results across runs..."
# All runs should pass
for i in 1 2 3; do
if [ ! -f "./TestResults/run-$i/hlc-determinism-$i.trx" ]; then
echo "Run $i results not found"
exit 1
fi
done
echo "All determinism runs completed successfully"
- name: Upload determinism results
uses: actions/upload-artifact@v4
if: always()
with:
name: hlc-determinism-results
path: TestResults/**
# ==========================================================================
# Gate Status
# ==========================================================================
gate-status:
name: HLC Distributed Gate Status
runs-on: ubuntu-latest
needs: [hlc-distributed, hlc-determinism]
if: always()
steps:
- name: Check gate status
run: |
if [ "${{ needs.hlc-distributed.result }}" == "failure" ]; then
echo "::error::Distributed HLC tests failed"
exit 1
fi
if [ "${{ needs.hlc-determinism.result }}" == "failure" ]; then
echo "::error::HLC determinism verification failed"
exit 1
fi
echo "All HLC distributed checks passed!"

View File

@@ -0,0 +1,180 @@
# Spec-Diff Gate - Contract Verification Workflow
# Sprint: Testing Enhancement Advisory - Phase 1.1
# Verifies that OpenAPI specifications match code implementations
name: spec-diff-gate
on:
pull_request:
branches: [main, develop]
paths:
- 'src/**/WebService/**'
- 'src/**/Endpoints/**'
- 'src/**/Controllers/**'
- 'docs/api/**'
- 'docs/contracts/**'
- 'docs/db/**'
push:
branches: [main]
workflow_dispatch:
concurrency:
group: spec-diff-${{ github.ref }}
cancel-in-progress: true
jobs:
# ==========================================================================
# Contract Spec Diff Tests
# ==========================================================================
spec-diff:
name: Contract Spec Diff
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Restore dependencies
run: dotnet restore src/__Tests/Architecture/StellaOps.Architecture.Contracts.Tests/StellaOps.Architecture.Contracts.Tests.csproj
- name: Build spec-diff tests
run: dotnet build src/__Tests/Architecture/StellaOps.Architecture.Contracts.Tests/StellaOps.Architecture.Contracts.Tests.csproj --configuration Release --no-restore
- name: Run OpenAPI spec validation
run: |
dotnet test src/__Tests/Architecture/StellaOps.Architecture.Contracts.Tests \
--configuration Release \
--no-build \
--filter "Category=Architecture&Category=Contract" \
--logger "trx;LogFileName=spec-diff.trx" \
--results-directory ./TestResults
- name: Generate spec-diff report
if: always()
run: |
dotnet test src/__Tests/Architecture/StellaOps.Architecture.Contracts.Tests \
--configuration Release \
--no-build \
--filter "FullyQualifiedName~SpecDiff_GeneratesReport" \
--logger "console;verbosity=detailed" \
2>&1 | tee ./TestResults/spec-diff-report.txt || true
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: spec-diff-results
path: TestResults/**
- name: Publish test summary
uses: dorny/test-reporter@v1
if: always()
with:
name: Spec Diff Test Results
path: TestResults/**/*.trx
reporter: dotnet-trx
# ==========================================================================
# Schema Compliance Tests
# ==========================================================================
schema-compliance:
name: Schema Compliance
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Restore dependencies
run: dotnet restore src/__Tests/Architecture/StellaOps.Architecture.Contracts.Tests/StellaOps.Architecture.Contracts.Tests.csproj
- name: Build schema tests
run: dotnet build src/__Tests/Architecture/StellaOps.Architecture.Contracts.Tests/StellaOps.Architecture.Contracts.Tests.csproj --configuration Release --no-restore
- name: Run schema compliance tests
run: |
dotnet test src/__Tests/Architecture/StellaOps.Architecture.Contracts.Tests \
--configuration Release \
--no-build \
--filter "FullyQualifiedName~SchemaCompliance" \
--logger "trx;LogFileName=schema-compliance.trx" \
--results-directory ./TestResults
- name: Upload schema test results
uses: actions/upload-artifact@v4
if: always()
with:
name: schema-compliance-results
path: TestResults/**
# ==========================================================================
# API Governance Check (existing, enhanced)
# ==========================================================================
api-governance:
name: API Governance
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
- name: Install spectral
run: npm install -g @stoplight/spectral-cli
- name: Lint OpenAPI specs
run: |
find docs/api -name "*.yaml" -o -name "*.yml" | while read spec; do
echo "Linting: $spec"
spectral lint "$spec" --ruleset .spectral.yaml || true
done
- name: Check for breaking changes
run: |
if [ -f ".gitea/scripts/validate/api-compat-diff.mjs" ]; then
node .gitea/scripts/validate/api-compat-diff.mjs --baseline docs/contracts/api-aggregate-*.yaml
else
echo "API compat diff script not found, skipping"
fi
# ==========================================================================
# Combined Gate Status
# ==========================================================================
gate-status:
name: Spec Diff Gate Status
runs-on: ubuntu-latest
needs: [spec-diff, schema-compliance, api-governance]
if: always()
steps:
- name: Check gate status
run: |
if [ "${{ needs.spec-diff.result }}" == "failure" ]; then
echo "::error::Spec diff tests failed - specs and code are out of sync"
exit 1
fi
if [ "${{ needs.schema-compliance.result }}" == "failure" ]; then
echo "::error::Schema compliance tests failed - migrations may not comply with specifications"
exit 1
fi
if [ "${{ needs.api-governance.result }}" == "failure" ]; then
echo "::warning::API governance checks had issues - review API lint results"
fi
echo "All spec-diff checks passed!"