# -----------------------------------------------------------------------------
# cold-warm-latency.yml
# Sprint: Testing Enhancement Advisory - Phase 3.4
# Description: CI workflow for warm-path vs cold-path latency budget tests
# Schedule: Nightly
# -----------------------------------------------------------------------------

name: Cold/Warm Path Latency Tests

on:
  schedule:
    # Run nightly at 2:30 AM UTC
    - cron: '30 2 * * *'
  workflow_dispatch:
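    # Note: github.event.inputs is empty on scheduled runs, so the run steps
    # below fall back to these defaults via shell/expression fallbacks.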
    inputs:
      test_filter:
        description: 'Test filter (e.g., FullyQualifiedName~Scanner)'
        required: false
        default: ''
      sample_count:
        description: 'Number of samples for statistical tests'
        required: false
        default: '50'
      verbosity:
        description: 'Test verbosity level'
        required: false
        default: 'normal'
        type: choice
        options:
          - minimal
          - normal
          - detailed
          - diagnostic
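
# Keep the .NET CLI quiet in CI: no startup banner, no telemetry upload,
# and no first-run experience.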
env:
  DOTNET_NOLOGO: true
  DOTNET_CLI_TELEMETRY_OPTOUT: true
  DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true

jobs:
  latency-tests:
    name: Latency Budget Tests
    runs-on: ubuntu-latest
    timeout-minutes: 45

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
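          # fetch-depth: 0 fetches the full history rather than a shallow clone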
          fetch-depth: 0
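
      # setup-dotnet installs the requested SDK; 'preview' quality is required
      # for a pre-release 10.0.x build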
      - name: Setup .NET
        uses: actions/setup-dotnet@v4
        with:
          dotnet-version: '10.0.x'
          dotnet-quality: 'preview'

      - name: Restore dependencies
        run: |
          dotnet restore src/__Tests/Integration/StellaOps.Integration.Performance/StellaOps.Integration.Performance.csproj

      - name: Build performance test project
        run: |
          dotnet build src/__Tests/Integration/StellaOps.Integration.Performance/StellaOps.Integration.Performance.csproj \
            --configuration Release \
            --no-restore
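
      # Build once above; both test steps run with --no-build and reuse that
      # output. continue-on-error lets each suite execute even if the other
      # fails; the "Check test results" step at the end restores failure status.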
      - name: Run cold-path latency tests
        id: cold-tests
        run: |
          FILTER="${{ github.event.inputs.test_filter }}"
          VERBOSITY="${{ github.event.inputs.verbosity || 'normal' }}"
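
          # Surface the sample_count input to the test run. NOTE: the variable
          # name LATENCY_SAMPLE_COUNT is an assumption, not a contract the
          # repository confirms; adjust it to whatever the harness reads.
          export LATENCY_SAMPLE_COUNT="${{ github.event.inputs.sample_count || '50' }}"

          # ${FILTER:+&$FILTER} expands to "&<filter>" only when FILTER is
          # non-empty, so scheduled runs use the bare category filter.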
          dotnet test src/__Tests/Integration/StellaOps.Integration.Performance/StellaOps.Integration.Performance.csproj \
            --configuration Release \
            --no-build \
            --verbosity "$VERBOSITY" \
            --logger "trx;LogFileName=cold-path-results.trx" \
            --logger "console;verbosity=$VERBOSITY" \
            --results-directory ./TestResults \
            --filter "Category=ColdPath${FILTER:+&$FILTER}" \
            -- \
            RunConfiguration.CollectSourceInformation=true
        continue-on-error: true

      - name: Run warm-path latency tests
        id: warm-tests
        run: |
          FILTER="${{ github.event.inputs.test_filter }}"
          VERBOSITY="${{ github.event.inputs.verbosity || 'normal' }}"
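
          # Same hypothetical sample_count wiring as in the cold-path step.
          export LATENCY_SAMPLE_COUNT="${{ github.event.inputs.sample_count || '50' }}"
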
          dotnet test src/__Tests/Integration/StellaOps.Integration.Performance/StellaOps.Integration.Performance.csproj \
            --configuration Release \
            --no-build \
            --verbosity "$VERBOSITY" \
            --logger "trx;LogFileName=warm-path-results.trx" \
            --logger "console;verbosity=$VERBOSITY" \
            --results-directory ./TestResults \
            --filter "Category=WarmPath${FILTER:+&$FILTER}" \
            -- \
            RunConfiguration.CollectSourceInformation=true
        continue-on-error: true
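
      # if: always() uploads results even when a test step failed; the
      # regression job below downloads this artifact.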
      - name: Upload test results
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: latency-test-results
          path: |
            ./TestResults/*.trx
            ./TestResults/output/*.txt
          retention-days: 30
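
      # Lines appended to $GITHUB_STEP_SUMMARY render as Markdown on the
      # workflow run page.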
      - name: Generate latency test summary
        if: always()
        run: |
          echo "## Cold/Warm Path Latency Test Results" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Test Execution" >> $GITHUB_STEP_SUMMARY
          echo "| Test Suite | Status |" >> $GITHUB_STEP_SUMMARY
          echo "|------------|--------|" >> $GITHUB_STEP_SUMMARY

          if [ "${{ steps.cold-tests.outcome }}" == "success" ]; then
            echo "| Cold Path Tests | :white_check_mark: Passed |" >> $GITHUB_STEP_SUMMARY
          else
            echo "| Cold Path Tests | :x: Failed |" >> $GITHUB_STEP_SUMMARY
          fi

          if [ "${{ steps.warm-tests.outcome }}" == "success" ]; then
            echo "| Warm Path Tests | :white_check_mark: Passed |" >> $GITHUB_STEP_SUMMARY
          else
            echo "| Warm Path Tests | :x: Failed |" >> $GITHUB_STEP_SUMMARY
          fi

          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Latency Budgets" >> $GITHUB_STEP_SUMMARY
          echo "| Service | Cold Start Budget | Warm Path Budget |" >> $GITHUB_STEP_SUMMARY
          echo "|---------|-------------------|------------------|" >> $GITHUB_STEP_SUMMARY
          echo "| Scanner | 5000ms | 500ms |" >> $GITHUB_STEP_SUMMARY
          echo "| Concelier | 2000ms | 100ms |" >> $GITHUB_STEP_SUMMARY
          echo "| Policy | 2000ms | 200ms |" >> $GITHUB_STEP_SUMMARY
          echo "| Authority | 1000ms | 50ms |" >> $GITHUB_STEP_SUMMARY
          echo "| Attestor | 2000ms | 200ms |" >> $GITHUB_STEP_SUMMARY

          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Test Coverage" >> $GITHUB_STEP_SUMMARY
          echo "- Cold start latency (first request after service initialization)" >> $GITHUB_STEP_SUMMARY
          echo "- Warm path latency (subsequent requests)" >> $GITHUB_STEP_SUMMARY
          echo "- Sustained load performance (100 consecutive requests)" >> $GITHUB_STEP_SUMMARY
          echo "- Burst load handling (parallel requests)" >> $GITHUB_STEP_SUMMARY
          echo "- Latency variance (P95/P99 metrics)" >> $GITHUB_STEP_SUMMARY
          echo "- Cold-to-warm transition smoothness" >> $GITHUB_STEP_SUMMARY

      - name: Check test results
        if: always()
        run: |
          if [ "${{ steps.cold-tests.outcome }}" != "success" ] || [ "${{ steps.warm-tests.outcome }}" != "success" ]; then
            echo "::error::One or more latency test suites failed"
            exit 1
          fi
          echo "All latency tests passed successfully"
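
  # if: always() lets this job run even when latency-tests fails, so the
  # regression alert step below can still fire.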
  latency-regression-check:
    name: Latency Regression Analysis
    runs-on: ubuntu-latest
    needs: latency-tests
    if: always()

    steps:
      - name: Download test results
        uses: actions/download-artifact@v4
        with:
          name: latency-test-results
          path: ./TestResults

      - name: Analyze latency trends
        run: |
          echo "## Latency Trend Analysis" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY

          # Check for latency report
          if [ -f "./TestResults/output/latency-report.txt" ]; then
            echo "### Latency Report" >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY
            cat ./TestResults/output/latency-report.txt >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY
          else
            echo "No detailed latency report available." >> $GITHUB_STEP_SUMMARY
          fi

          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Recommendations" >> $GITHUB_STEP_SUMMARY
          echo "- Monitor P95 latency trends over time" >> $GITHUB_STEP_SUMMARY
          echo "- Investigate any budget violations" >> $GITHUB_STEP_SUMMARY
          echo "- Consider adjusting budgets if consistent overages occur" >> $GITHUB_STEP_SUMMARY

      - name: Alert on regression
        if: needs.latency-tests.result == 'failure'
        run: |
          echo "::warning::Latency regression detected. Review the test results for details."
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### :warning: Latency Regression Alert" >> $GITHUB_STEP_SUMMARY
          echo "Latency tests have failed, indicating a potential performance regression." >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Recommended Actions:**" >> $GITHUB_STEP_SUMMARY
          echo "1. Review recent code changes that might affect performance" >> $GITHUB_STEP_SUMMARY
          echo "2. Check for resource contention or new dependencies" >> $GITHUB_STEP_SUMMARY
          echo "3. Profile affected services to identify bottlenecks" >> $GITHUB_STEP_SUMMARY
          echo "4. Consider reverting recent changes if the regression is severe" >> $GITHUB_STEP_SUMMARY