Files
git.stella-ops.org/.gitea/workflows/build-test-deploy.yml
Workflow config file is invalid. Please check your config file: yaml: line 302: could not find expected ':'
master 2170a58734
Some checks failed
Lighthouse CI / Lighthouse Audit (push) Waiting to run
Lighthouse CI / Axe Accessibility Audit (push) Waiting to run
Manifest Integrity / Validate Schema Integrity (push) Waiting to run
Manifest Integrity / Validate Contract Documents (push) Waiting to run
Manifest Integrity / Validate Pack Fixtures (push) Waiting to run
Manifest Integrity / Audit SHA256SUMS Files (push) Waiting to run
Manifest Integrity / Verify Merkle Roots (push) Waiting to run
Policy Lint & Smoke / policy-lint (push) Waiting to run
Policy Simulation / policy-simulate (push) Waiting to run
Docs CI / lint-and-preview (push) Has been cancelled
Export Center CI / export-ci (push) Has been cancelled
Findings Ledger CI / build-test (push) Has been cancelled
Findings Ledger CI / migration-validation (push) Has been cancelled
Findings Ledger CI / generate-manifest (push) Has been cancelled
Add comprehensive security tests for OWASP A02, A05, A07, and A08 categories
- Implemented tests for Cryptographic Failures (A02) to ensure proper handling of sensitive data, secure algorithms, and key management.
- Added tests for Security Misconfiguration (A05) to validate production configurations, security headers, CORS settings, and feature management.
- Developed tests for Authentication Failures (A07) to enforce strong password policies, rate limiting, session management, and MFA support.
- Created tests for Software and Data Integrity Failures (A08) to verify artifact signatures, SBOM integrity, attestation chains, and feed updates.
2025-12-16 16:40:44 +02:00

1194 lines
46 KiB
YAML
Raw Blame History

This file contains invisible Unicode characters

This file contains invisible Unicode characters that are indistinguishable to humans but may be processed differently by a computer. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

# .gitea/workflows/build-test-deploy.yml
# Unified CI/CD workflow for git.stella-ops.org (Feedser monorepo).
# NOTE(review): the Gitea UI reports a YAML parse failure ("line 302: could
# not find expected ':'") and warns about invisible Unicode characters in
# this file — presumably a zero-width or confusable character is breaking
# the parser; strip non-ASCII control characters from the file to confirm.
name: Build Test Deploy

# Triggers: pushes/PRs that touch source, docs, scripts or workflow files,
# a nightly schedule, and manual dispatch with optional feature flags.
on:
  push:
    branches: [main]
    paths:
      - 'src/**'
      - 'docs/**'
      - 'scripts/**'
      - 'Directory.Build.props'
      - 'Directory.Build.targets'
      - 'global.json'
      - '.gitea/workflows/**'
  pull_request:
    branches: [main, develop]
    paths:
      - 'src/**'
      - 'docs/**'
      - 'scripts/**'
      - '.gitea/workflows/**'
  schedule:
    # Nightly at 05:00 UTC.
    - cron: '0 5 * * *'
  workflow_dispatch:
    inputs:
      force_deploy:
        description: 'Ignore branch checks and run the deploy stage'
        required: false
        default: 'false'
        type: boolean
      excititor_batch:
        description: 'Run Excititor batch-ingest validation suite'
        required: false
        default: 'false'
        type: boolean

# Configuration shared by every job.
env:
  DOTNET_VERSION: '10.0.100'
  BUILD_CONFIGURATION: Release
  CI_CACHE_ROOT: /data/.cache/stella-ops/feedser
  RUNNER_TOOL_CACHE: /toolcache
jobs:
# Lints the Helm chart and renders every shipped values profile so template
# errors are caught before any deployment attempt.
profile-validation:
  runs-on: ubuntu-22.04
  steps:
    - name: Checkout repository
      uses: actions/checkout@v4
    - name: Install Helm
      run: |
        curl -fsSL https://get.helm.sh/helm-v3.16.0-linux-amd64.tar.gz -o /tmp/helm.tgz
        tar -xzf /tmp/helm.tgz -C /tmp
        sudo install -m 0755 /tmp/linux-amd64/helm /usr/local/bin/helm
    - name: Validate Helm chart rendering
      run: |
        set -euo pipefail
        CHART_PATH="deploy/helm/stellaops"
        helm lint "$CHART_PATH"
        # Render every profile; a bad template aborts the job via pipefail.
        for profile in values.yaml values-dev.yaml values-stage.yaml values-prod.yaml values-airgap.yaml values-mirror.yaml; do
          release_name="stellaops-${profile%.*}"
          echo "::group::Helm template ${release_name} (${profile})"
          helm template "$release_name" "$CHART_PATH" -f "$CHART_PATH/$profile" >/dev/null
          echo "::endgroup::"
        done
    - name: Validate deployment profiles
      run: ./deploy/tools/validate-profiles.sh
# Primary build/test job: validates manifests and configuration, then (in the
# steps that follow) builds and tests the CLI, Concelier, Authority and
# scanner components and publishes deployable artifacts.
build-test:
runs-on: ubuntu-22.04
# PRs run against the 'preview' environment, all other events use 'staging'.
environment: ${{ github.event_name == 'pull_request' && 'preview' || 'staging' }}
env:
PUBLISH_DIR: ${{ github.workspace }}/artifacts/publish/webservice
AUTHORITY_PUBLISH_DIR: ${{ github.workspace }}/artifacts/publish/authority
TEST_RESULTS_DIR: ${{ github.workspace }}/artifacts/test-results
# Mongo URI may be supplied as either a repository secret or a variable.
STELLAOPS_TEST_MONGO_URI: ${{ secrets.STELLAOPS_TEST_MONGO_URI || vars.STELLAOPS_TEST_MONGO_URI }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Export OpenSSL 1.1 shim for Mongo2Go
run: scripts/enable-openssl11-shim.sh
- name: Verify binary layout
run: scripts/verify-binaries.sh
# Regenerates the binary manifests and fails if they drift from the
# committed copies (git diff --exit-code is non-zero on any change).
- name: Ensure binary manifests are up to date
run: |
python3 scripts/update-binary-manifests.py
git diff --exit-code local-nugets/manifest.json vendor/manifest.json offline/feeds/manifest.json
# Fail fast with an actionable error before any test needs the URI.
- name: Ensure Mongo test URI configured
run: |
if [ -z "${STELLAOPS_TEST_MONGO_URI:-}" ]; then
echo "::error::STELLAOPS_TEST_MONGO_URI must be provided via repository secrets or variables for Graph Indexer integration tests."
exit 1
fi
- name: Verify policy scope configuration
run: python3 scripts/verify-policy-scopes.py
- name: Validate NuGet restore source ordering
run: python3 ops/devops/validate_restore_sources.py
- name: Validate telemetry storage configuration
run: python3 ops/devops/telemetry/validate_storage_stack.py
- name: Task Pack offline bundle fixtures
  # BUG FIX: run-fixtures-check.sh is a shell script (.sh extension) but was
  # previously invoked with python3, which would fail on the first shell
  # construct. Run it through bash instead.
  run: |
    bash scripts/packs/run-fixtures-check.sh
- name: Telemetry tenant isolation smoke
  env:
    # FIX: use the expression syntax here — `${GITHUB_WORKSPACE}` is shell
    # syntax and is NOT expanded when the runner materialises step env
    # values (the script below only worked because it re-derives a default).
    COMPOSE_DIR: ${{ github.workspace }}/deploy/compose
  run: |
    set -euo pipefail
    ./ops/devops/telemetry/generate_dev_tls.sh
    COMPOSE_DIR="${COMPOSE_DIR:-${GITHUB_WORKSPACE}/deploy/compose}"
    # Always tear both telemetry stacks down, even when the smoke test fails.
    cleanup() {
      set +e
      (cd "$COMPOSE_DIR" && docker compose -f docker-compose.telemetry.yaml down -v --remove-orphans >/dev/null 2>&1)
      (cd "$COMPOSE_DIR" && docker compose -f docker-compose.telemetry-storage.yaml down -v --remove-orphans >/dev/null 2>&1)
    }
    trap cleanup EXIT
    (cd "$COMPOSE_DIR" && docker compose -f docker-compose.telemetry-storage.yaml up -d)
    (cd "$COMPOSE_DIR" && docker compose -f docker-compose.telemetry.yaml up -d)
    # Give the collector a moment to come up before probing it.
    sleep 5
    python3 ops/devops/telemetry/smoke_otel_collector.py --host localhost
    python3 ops/devops/telemetry/tenant_isolation_smoke.py \
      --collector https://localhost:4318/v1 \
      --tempo https://localhost:3200 \
      --loki https://localhost:3100
- name: Setup .NET ${{ env.DOTNET_VERSION }}
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Build CLI multi-runtime binaries
run: |
set -euo pipefail
export DOTNET_SKIP_FIRST_TIME_EXPERIENCE=1
RUNTIMES=(linux-x64 linux-arm64 osx-x64 osx-arm64 win-x64)
rm -rf out/cli-ci
for runtime in "${RUNTIMES[@]}"; do
dotnet publish src/Cli/StellaOps.Cli/StellaOps.Cli.csproj \
--configuration $BUILD_CONFIGURATION \
--runtime "$runtime" \
--self-contained true \
/p:PublishSingleFile=true \
/p:IncludeNativeLibrariesForSelfExtract=true \
/p:EnableCompressionInSingleFile=true \
/p:InvariantGlobalization=true \
--output "out/cli-ci/${runtime}"
done
- name: Run CLI unit tests
run: |
mkdir -p "$TEST_RESULTS_DIR"
dotnet test src/Cli/StellaOps.Cli.Tests/StellaOps.Cli.Tests.csproj \
--configuration $BUILD_CONFIGURATION \
--logger "trx;LogFileName=stellaops-cli-tests.trx" \
--results-directory "$TEST_RESULTS_DIR"
- name: Restore Concelier solution
run: dotnet restore src/Concelier/StellaOps.Concelier.sln
- name: Build Concelier solution (warnings as errors)
run: dotnet build src/Concelier/StellaOps.Concelier.sln --configuration $BUILD_CONFIGURATION --no-restore -warnaserror
- name: Run Concelier unit and integration tests
run: |
mkdir -p "$TEST_RESULTS_DIR"
dotnet test src/Concelier/StellaOps.Concelier.sln \
--configuration $BUILD_CONFIGURATION \
--no-build \
--logger "trx;LogFileName=stellaops-concelier-tests.trx" \
--results-directory "$TEST_RESULTS_DIR"
- name: Run PostgreSQL storage integration tests (Testcontainers)
env:
POSTGRES_TEST_IMAGE: postgres:16-alpine
run: |
set -euo pipefail
mkdir -p "$TEST_RESULTS_DIR"
PROJECTS=(
src/__Libraries/__Tests/StellaOps.Infrastructure.Postgres.Tests/StellaOps.Infrastructure.Postgres.Tests.csproj
src/Authority/__Tests/StellaOps.Authority.Storage.Postgres.Tests/StellaOps.Authority.Storage.Postgres.Tests.csproj
src/Scheduler/__Tests/StellaOps.Scheduler.Storage.Postgres.Tests/StellaOps.Scheduler.Storage.Postgres.Tests.csproj
src/Concelier/__Tests/StellaOps.Concelier.Storage.Postgres.Tests/StellaOps.Concelier.Storage.Postgres.Tests.csproj
src/Excititor/__Tests/StellaOps.Excititor.Storage.Postgres.Tests/StellaOps.Excititor.Storage.Postgres.Tests.csproj
src/Notify/__Tests/StellaOps.Notify.Storage.Postgres.Tests/StellaOps.Notify.Storage.Postgres.Tests.csproj
src/Policy/__Tests/StellaOps.Policy.Storage.Postgres.Tests/StellaOps.Policy.Storage.Postgres.Tests.csproj
)
for project in "${PROJECTS[@]}"; do
name="$(basename "${project%.*}")"
dotnet test "$project" \
--configuration $BUILD_CONFIGURATION \
--logger "trx;LogFileName=${name}.trx" \
--results-directory "$TEST_RESULTS_DIR"
done
- name: Run TimelineIndexer tests (EB1 evidence linkage gate)
run: |
mkdir -p "$TEST_RESULTS_DIR"
dotnet test src/TimelineIndexer/StellaOps.TimelineIndexer/StellaOps.TimelineIndexer.sln \
--configuration $BUILD_CONFIGURATION \
--logger "trx;LogFileName=timelineindexer-tests.trx" \
--results-directory "$TEST_RESULTS_DIR"
- name: Lint policy DSL samples
run: dotnet run --project tools/PolicyDslValidator/PolicyDslValidator.csproj -- --strict docs/examples/policies/*.yaml
- name: Run policy simulation smoke tests (first pass)
run: dotnet run --project tools/PolicySimulationSmoke/PolicySimulationSmoke.csproj -- --scenario-root samples/policy/simulations --output artifacts/policy-simulations/run1
- name: Verify policy simulation determinism
run: |
dotnet run --project tools/PolicySimulationSmoke/PolicySimulationSmoke.csproj -- --scenario-root samples/policy/simulations --output artifacts/policy-simulations/run2
diff -u \
artifacts/policy-simulations/run1/policy-simulation-summary.json \
artifacts/policy-simulations/run2/policy-simulation-summary.json
- name: Upload policy simulation artifacts
uses: actions/upload-artifact@v4
with:
name: policy-simulation-diffs
path: artifacts/policy-simulations
if-no-files-found: error
retention-days: 14
- name: Export policy run schemas
id: export-policy-schemas
run: |
set -euo pipefail
OUTPUT_DIR="artifacts/policy-schemas/${GITHUB_SHA}"
mkdir -p "$OUTPUT_DIR"
scripts/export-policy-schemas.sh "$OUTPUT_DIR"
- name: Detect policy schema changes
id: detect-policy-schema-changes
run: |
set -euo pipefail
OUTPUT_DIR="artifacts/policy-schemas/${GITHUB_SHA}"
FILES=("policy-run-request.schema.json" "policy-run-status.schema.json" "policy-diff-summary.schema.json" "policy-explain-trace.schema.json")
CHANGED=()
DIFF_FILE="$OUTPUT_DIR/policy-schema-diff.patch"
rm -f "$DIFF_FILE"
for file in "${FILES[@]}"; do
if [ ! -f "$OUTPUT_DIR/$file" ]; then
echo "Missing exported schema: $OUTPUT_DIR/$file" >&2
exit 1
fi
if ! cmp -s "docs/schemas/$file" "$OUTPUT_DIR/$file"; then
CHANGED+=("$file")
{
diff -u "docs/schemas/$file" "$OUTPUT_DIR/$file" || true
} >> "$DIFF_FILE"
printf '\n' >> "$DIFF_FILE"
fi
done
if [ ${#CHANGED[@]} -eq 0 ]; then
echo "changed=false" >> "$GITHUB_OUTPUT"
exit 0
fi
JOINED="$(IFS=', '; echo "${CHANGED[*]}")"
echo "changed=true" >> "$GITHUB_OUTPUT"
echo "changed_files=$JOINED" >> "$GITHUB_OUTPUT"
echo "diff_path=artifacts/policy-schemas/${GITHUB_SHA}/policy-schema-diff.patch" >> "$GITHUB_OUTPUT"
- name: Upload policy schema artifacts
uses: actions/upload-artifact@v4
with:
name: policy-schema-exports
path: artifacts/policy-schemas
if-no-files-found: error
retention-days: 14
- name: Notify policy schema changes
if: steps.detect-policy-schema-changes.outputs.changed == 'true'
env:
SLACK_WEBHOOK: ${{ secrets.POLICY_ENGINE_SCHEMA_WEBHOOK || vars.POLICY_ENGINE_SCHEMA_WEBHOOK }}
CHANGED_FILES: ${{ steps.detect-policy-schema-changes.outputs.changed_files }}
run: |
if [ -z "${SLACK_WEBHOOK:-}" ]; then
echo " POLICY_ENGINE_SCHEMA_WEBHOOK not configured; skipping Slack notification."
exit 0
fi
payload="$(python3 - <<'PY'
import json
import os
changed_files = os.environ.get("CHANGED_FILES", "").strip()
repo = os.environ["GITHUB_REPOSITORY"]
sha = os.environ["GITHUB_SHA"]
commit_url = f"{os.environ['GITHUB_SERVER_URL']}/{repo}/commit/{sha}"
run_url = f"{os.environ['GITHUB_SERVER_URL']}/{repo}/actions/runs/{os.environ['GITHUB_RUN_ID']}"
lines = [
":scroll: Policy schema export updated.",
f"*Commit:* <{commit_url}|{sha[:7]}>",
]
if changed_files:
lines.append(f"*Files:* {changed_files}")
lines.append(f"*CI run:* <{run_url}|Artifacts & diff>")
print(json.dumps({"text": "\n".join(lines)}))
PY
)"
curl -sSf -X POST -H 'Content-type: application/json' --data "$payload" "$SLACK_WEBHOOK"
- name: Run release tooling tests
  # Consistency fix: every other step invokes the interpreter as `python3`;
  # a bare `python` is not guaranteed to exist on minimal runner images.
  run: python3 ops/devops/release/test_verify_release.py
- name: Build scanner language analyzer projects
run: |
dotnet restore src/StellaOps.sln
for project in \
src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang/StellaOps.Scanner.Analyzers.Lang.csproj \
src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Java/StellaOps.Scanner.Analyzers.Lang.Java.csproj \
src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node/StellaOps.Scanner.Analyzers.Lang.Node.csproj \
src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Python/StellaOps.Scanner.Analyzers.Lang.Python.csproj \
src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Go/StellaOps.Scanner.Analyzers.Lang.Go.csproj \
src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet/StellaOps.Scanner.Analyzers.Lang.DotNet.csproj \
src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Rust/StellaOps.Scanner.Analyzers.Lang.Rust.csproj
do
dotnet build "$project" --configuration $BUILD_CONFIGURATION --no-restore -warnaserror
done
- name: Run scanner language analyzer tests
run: |
dotnet test src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Tests/StellaOps.Scanner.Analyzers.Lang.Tests.csproj \
--configuration $BUILD_CONFIGURATION \
--no-build \
--logger "trx;LogFileName=stellaops-scanner-lang-tests.trx" \
--results-directory "$TEST_RESULTS_DIR"
- name: Build and test Router components
run: |
set -euo pipefail
ROUTER_PROJECTS=(
src/__Libraries/StellaOps.Router.Common/StellaOps.Router.Common.csproj
src/__Libraries/StellaOps.Router.Config/StellaOps.Router.Config.csproj
src/__Libraries/StellaOps.Router.Transport.InMemory/StellaOps.Router.Transport.InMemory.csproj
src/__Libraries/StellaOps.Router.Transport.Tcp/StellaOps.Router.Transport.Tcp.csproj
src/__Libraries/StellaOps.Router.Transport.Tls/StellaOps.Router.Transport.Tls.csproj
src/__Libraries/StellaOps.Router.Transport.Udp/StellaOps.Router.Transport.Udp.csproj
src/__Libraries/StellaOps.Router.Transport.RabbitMq/StellaOps.Router.Transport.RabbitMq.csproj
src/__Libraries/StellaOps.Microservice/StellaOps.Microservice.csproj
src/__Libraries/StellaOps.Microservice.SourceGen/StellaOps.Microservice.SourceGen.csproj
)
for project in "${ROUTER_PROJECTS[@]}"; do
echo "::group::Build $project"
dotnet build "$project" --configuration $BUILD_CONFIGURATION --no-restore -warnaserror
echo "::endgroup::"
done
- name: Run Router and Microservice tests
run: |
mkdir -p "$TEST_RESULTS_DIR"
ROUTER_TEST_PROJECTS=(
# Core Router libraries
src/__Libraries/__Tests/StellaOps.Router.Common.Tests/StellaOps.Router.Common.Tests.csproj
src/__Libraries/__Tests/StellaOps.Router.Config.Tests/StellaOps.Router.Config.Tests.csproj
# Transport layers
src/__Libraries/__Tests/StellaOps.Router.Transport.InMemory.Tests/StellaOps.Router.Transport.InMemory.Tests.csproj
src/__Libraries/__Tests/StellaOps.Router.Transport.Tcp.Tests/StellaOps.Router.Transport.Tcp.Tests.csproj
src/__Libraries/__Tests/StellaOps.Router.Transport.Tls.Tests/StellaOps.Router.Transport.Tls.Tests.csproj
src/__Libraries/__Tests/StellaOps.Router.Transport.Udp.Tests/StellaOps.Router.Transport.Udp.Tests.csproj
# Microservice SDK
src/__Libraries/__Tests/StellaOps.Microservice.Tests/StellaOps.Microservice.Tests.csproj
src/__Libraries/__Tests/StellaOps.Microservice.SourceGen.Tests/StellaOps.Microservice.SourceGen.Tests.csproj
# Integration tests
src/__Libraries/__Tests/StellaOps.Router.Integration.Tests/StellaOps.Router.Integration.Tests.csproj
# Gateway tests
src/Gateway/__Tests/StellaOps.Gateway.WebService.Tests/StellaOps.Gateway.WebService.Tests.csproj
)
for project in "${ROUTER_TEST_PROJECTS[@]}"; do
name="$(basename "${project%.*}")"
echo "::group::Test $name"
dotnet test "$project" \
--configuration $BUILD_CONFIGURATION \
--logger "trx;LogFileName=${name}.trx" \
--results-directory "$TEST_RESULTS_DIR"
echo "::endgroup::"
done
- name: Run scanner analyzer performance benchmark
env:
PERF_OUTPUT_DIR: ${{ github.workspace }}/artifacts/perf/scanner-analyzers
PERF_ENVIRONMENT: ${{ github.event_name == 'pull_request' && 'preview' || 'staging' }}
run: |
set -euo pipefail
mkdir -p "$PERF_OUTPUT_DIR"
CAPTURED_AT="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
dotnet run \
--project src/Bench/StellaOps.Bench/Scanner.Analyzers/StellaOps.Bench.ScannerAnalyzers/StellaOps.Bench.ScannerAnalyzers.csproj \
--configuration $BUILD_CONFIGURATION \
-- \
--repo-root . \
--baseline src/Bench/StellaOps.Bench/Scanner.Analyzers/baseline.csv \
--out "$PERF_OUTPUT_DIR/latest.csv" \
--json "$PERF_OUTPUT_DIR/report.json" \
--prom "$PERF_OUTPUT_DIR/metrics.prom" \
--commit "${GITHUB_SHA}" \
--environment "$PERF_ENVIRONMENT" \
--captured-at "$CAPTURED_AT"
- name: Upload scanner analyzer benchmark artifacts
uses: actions/upload-artifact@v4
with:
name: scanner-analyzers-benchmark
path: artifacts/perf/scanner-analyzers
if-no-files-found: error
retention-days: 14
- name: Publish BuildX SBOM generator
run: |
dotnet publish src/Scanner/StellaOps.Scanner.Sbomer.BuildXPlugin/StellaOps.Scanner.Sbomer.BuildXPlugin.csproj \
--configuration $BUILD_CONFIGURATION \
--output out/buildx
- name: Verify BuildX descriptor determinism
  run: |
    dotnet out/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin.dll handshake \
      --manifest out/buildx \
      --cas out/cas
    # Minimal CycloneDX stub used as the SBOM input for both descriptor runs.
    cat <<'JSON' > out/buildx-sbom.cdx.json
    {"bomFormat":"CycloneDX","specVersion":"1.5"}
    JSON
    dotnet out/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin.dll descriptor \
      --manifest out/buildx \
      --image sha256:5c2c5bfe0d4d77f1a0f9866fd415dd8da5b62af05d7c3d4b53f28de3ebef0101 \
      --sbom out/buildx-sbom.cdx.json \
      --sbom-name buildx-sbom.cdx.json \
      --artifact-type application/vnd.stellaops.sbom.layer+json \
      --sbom-format cyclonedx-json \
      --sbom-kind inventory \
      --repository ${{ github.repository }} \
      --build-ref ${{ github.sha }} \
      > out/buildx-descriptor.json
    dotnet out/buildx/StellaOps.Scanner.Sbomer.BuildXPlugin.dll descriptor \
      --manifest out/buildx \
      --image sha256:5c2c5bfe0d4d77f1a0f9866fd415dd8da5b62af05d7c3d4b53f28de3ebef0101 \
      --sbom out/buildx-sbom.cdx.json \
      --sbom-name buildx-sbom.cdx.json \
      --artifact-type application/vnd.stellaops.sbom.layer+json \
      --sbom-format cyclonedx-json \
      --sbom-kind inventory \
      --repository ${{ github.repository }} \
      --build-ref ${{ github.sha }} \
      > out/buildx-descriptor-repeat.json
    # Compare the two runs, ignoring the volatile generatedAt timestamp.
    # FIX: invoke as python3 for consistency with the rest of the workflow;
    # bare `python` is not guaranteed on ubuntu-22.04 runner images.
    python3 - <<'PY'
    import json, sys
    from pathlib import Path
    def normalize(path: str) -> dict:
        data = json.loads(Path(path).read_text(encoding='utf-8'))
        data.pop('generatedAt', None)
        return data
    baseline = normalize('out/buildx-descriptor.json')
    repeat = normalize('out/buildx-descriptor-repeat.json')
    if baseline != repeat:
        sys.exit('BuildX descriptor output changed between runs.')
    PY
- name: Upload BuildX determinism artifacts
uses: actions/upload-artifact@v4
with:
name: buildx-determinism
path: |
out/buildx-descriptor.json
out/buildx-descriptor-repeat.json
out/buildx-sbom.cdx.json
if-no-files-found: error
retention-days: 7
- name: Package OS analyzer plug-ins
run: |
if [ ! -d "plugins/scanner/analyzers/os" ]; then
echo "OS analyzer plug-in directory not found" >&2
exit 1
fi
mkdir -p artifacts/plugins/os
tar -czf artifacts/plugins/os/stellaops-scanner-os-analyzers.tar.gz -C plugins/scanner/analyzers/os .
sha256sum artifacts/plugins/os/stellaops-scanner-os-analyzers.tar.gz > artifacts/plugins/os/stellaops-scanner-os-analyzers.tar.gz.sha256
- name: Upload OS analyzer plug-ins
uses: actions/upload-artifact@v4
with:
name: scanner-os-analyzers
path: artifacts/plugins/os
if-no-files-found: error
retention-days: 7
- name: Publish Concelier web service
run: |
mkdir -p "$PUBLISH_DIR"
dotnet publish src/Concelier/StellaOps.Concelier.WebService/StellaOps.Concelier.WebService.csproj \
--configuration $BUILD_CONFIGURATION \
--no-build \
--output "$PUBLISH_DIR"
- name: Upload published artifacts
uses: actions/upload-artifact@v4
with:
name: concelier-publish
path: ${{ env.PUBLISH_DIR }}
if-no-files-found: error
retention-days: 7
- name: Restore Authority solution
run: dotnet restore src/Authority/StellaOps.Authority/StellaOps.Authority.sln
- name: Build Authority solution
run: dotnet build src/Authority/StellaOps.Authority/StellaOps.Authority.sln --configuration $BUILD_CONFIGURATION --no-restore -warnaserror
- name: Run Authority tests
run: |
dotnet test src/__Libraries/__Tests/StellaOps.Configuration.Tests/StellaOps.Configuration.Tests.csproj \
--configuration $BUILD_CONFIGURATION \
--no-build \
--logger "trx;LogFileName=stellaops-authority-tests.trx" \
--results-directory "$TEST_RESULTS_DIR"
- name: Publish Authority web service
run: |
mkdir -p "$AUTHORITY_PUBLISH_DIR"
dotnet publish src/Authority/StellaOps.Authority/StellaOps.Authority/StellaOps.Authority.csproj \
--configuration $BUILD_CONFIGURATION \
--no-build \
--output "$AUTHORITY_PUBLISH_DIR"
- name: Upload Authority artifacts
uses: actions/upload-artifact@v4
with:
name: authority-publish
path: ${{ env.AUTHORITY_PUBLISH_DIR }}
if-no-files-found: error
retention-days: 7
- name: Run console endpoint tests
run: |
mkdir -p "$TEST_RESULTS_DIR"
dotnet test src/Authority/StellaOps.Authority/StellaOps.Authority.Tests/StellaOps.Authority.Tests.csproj \
--configuration $BUILD_CONFIGURATION \
--logger "trx;LogFileName=console-endpoints.trx" \
--results-directory "$TEST_RESULTS_DIR" \
--filter ConsoleEndpointsTests
- name: Upload test results
if: always()
uses: actions/upload-artifact@v4
with:
name: feedser-test-results
path: ${{ env.TEST_RESULTS_DIR }}
if-no-files-found: ignore
retention-days: 7
# ============================================================================
# Quality Gates Foundation (Sprint 0350)
# ============================================================================
quality-gates:
runs-on: ubuntu-22.04
needs: build-test
permissions:
contents: read
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Reachability quality gate
id: reachability
run: |
set -euo pipefail
echo "::group::Computing reachability metrics"
if [ -f scripts/ci/compute-reachability-metrics.sh ]; then
chmod +x scripts/ci/compute-reachability-metrics.sh
METRICS=$(./scripts/ci/compute-reachability-metrics.sh --dry-run 2>/dev/null || echo '{}')
echo "metrics=$METRICS" >> $GITHUB_OUTPUT
echo "Reachability metrics: $METRICS"
else
echo "Reachability script not found, skipping"
fi
echo "::endgroup::"
- name: TTFS regression gate
id: ttfs
run: |
set -euo pipefail
echo "::group::Computing TTFS metrics"
if [ -f scripts/ci/compute-ttfs-metrics.sh ]; then
chmod +x scripts/ci/compute-ttfs-metrics.sh
METRICS=$(./scripts/ci/compute-ttfs-metrics.sh --dry-run 2>/dev/null || echo '{}')
echo "metrics=$METRICS" >> $GITHUB_OUTPUT
echo "TTFS metrics: $METRICS"
else
echo "TTFS script not found, skipping"
fi
echo "::endgroup::"
- name: Performance SLO gate
id: slo
run: |
set -euo pipefail
echo "::group::Enforcing performance SLOs"
if [ -f scripts/ci/enforce-performance-slos.sh ]; then
chmod +x scripts/ci/enforce-performance-slos.sh
./scripts/ci/enforce-performance-slos.sh --warn-only || true
else
echo "Performance SLO script not found, skipping"
fi
echo "::endgroup::"
- name: RLS policy validation
id: rls
run: |
set -euo pipefail
echo "::group::Validating RLS policies"
if [ -f deploy/postgres-validation/001_validate_rls.sql ]; then
echo "RLS validation script found"
# Check that all tenant-scoped schemas have RLS enabled
SCHEMAS=("scheduler" "vex" "authority" "notify" "policy" "findings_ledger")
for schema in "${SCHEMAS[@]}"; do
echo "Checking RLS for schema: $schema"
# Validate migration files exist
if ls src/*/Migrations/*enable_rls*.sql 2>/dev/null | grep -q "$schema"; then
echo " ✓ RLS migration exists for $schema"
fi
done
echo "RLS validation passed (static check)"
else
echo "RLS validation script not found, skipping"
fi
echo "::endgroup::"
- name: Upload quality gate results
uses: actions/upload-artifact@v4
with:
name: quality-gate-results
path: |
scripts/ci/*.json
scripts/ci/*.yaml
if-no-files-found: ignore
retention-days: 14
# OWASP security test suite — runs on pull requests and the nightly schedule.
security-testing:
  runs-on: ubuntu-22.04
  needs: build-test
  if: github.event_name == 'pull_request' || github.event_name == 'schedule'
  permissions:
    contents: read
  env:
    DOTNET_VERSION: '10.0.100'
  steps:
    - name: Checkout repository
      uses: actions/checkout@v4
      with:
        fetch-depth: 0
    - name: Setup .NET
      uses: actions/setup-dotnet@v4
      with:
        dotnet-version: ${{ env.DOTNET_VERSION }}
    - name: Restore dependencies
      run: dotnet restore tests/security/StellaOps.Security.Tests/StellaOps.Security.Tests.csproj
    - name: Run OWASP security tests
      run: |
        set -euo pipefail
        echo "::group::Running security tests"
        dotnet test tests/security/StellaOps.Security.Tests/StellaOps.Security.Tests.csproj \
          --no-restore \
          --logger "trx;LogFileName=security-tests.trx" \
          --results-directory ./security-test-results \
          --filter "Category=Security" \
          --verbosity normal
        echo "::endgroup::"
    # Upload results even when the test step failed.
    - name: Upload security test results
      if: always()
      uses: actions/upload-artifact@v4
      with:
        name: security-test-results
        path: security-test-results/
        if-no-files-found: ignore
        retention-days: 30
# Stryker mutation testing — expensive, so it runs nightly or on PRs that
# carry the `mutation-test` label. Each target is best-effort
# (continue-on-error); the threshold check at the end only annotates.
mutation-testing:
  runs-on: ubuntu-22.04
  needs: build-test
  if: github.event_name == 'schedule' || (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'mutation-test'))
  permissions:
    contents: read
  env:
    DOTNET_VERSION: '10.0.100'
  steps:
    - name: Checkout repository
      uses: actions/checkout@v4
      with:
        fetch-depth: 0
    - name: Setup .NET
      uses: actions/setup-dotnet@v4
      with:
        dotnet-version: ${{ env.DOTNET_VERSION }}
    - name: Restore tools
      run: dotnet tool restore
    - name: Run mutation tests - Scanner.Core
      id: scanner-mutation
      continue-on-error: true
      run: |
        set -euo pipefail
        echo "::group::Mutation testing Scanner.Core"
        cd src/Scanner/__Libraries/StellaOps.Scanner.Core
        dotnet stryker --reporter json --reporter html --output ../../../mutation-results/scanner-core || echo "MUTATION_FAILED=true" >> $GITHUB_ENV
        echo "::endgroup::"
    - name: Run mutation tests - Policy.Engine
      id: policy-mutation
      continue-on-error: true
      run: |
        set -euo pipefail
        echo "::group::Mutation testing Policy.Engine"
        cd src/Policy/__Libraries/StellaOps.Policy
        dotnet stryker --reporter json --reporter html --output ../../../mutation-results/policy-engine || echo "MUTATION_FAILED=true" >> $GITHUB_ENV
        echo "::endgroup::"
    - name: Run mutation tests - Authority.Core
      id: authority-mutation
      continue-on-error: true
      run: |
        set -euo pipefail
        echo "::group::Mutation testing Authority.Core"
        cd src/Authority/StellaOps.Authority
        dotnet stryker --reporter json --reporter html --output ../../mutation-results/authority-core || echo "MUTATION_FAILED=true" >> $GITHUB_ENV
        echo "::endgroup::"
    - name: Upload mutation results
      uses: actions/upload-artifact@v4
      with:
        name: mutation-testing-results
        path: mutation-results/
        if-no-files-found: ignore
        retention-days: 30
    - name: Check mutation thresholds
      run: |
        set -euo pipefail
        echo "Checking mutation score thresholds..."
        # Parse JSON results and check against thresholds.
        if [ -f "mutation-results/scanner-core/mutation-report.json" ]; then
          SCORE=$(jq '.mutationScore // 0' mutation-results/scanner-core/mutation-report.json)
          echo "Scanner.Core mutation score: $SCORE%"
          # FIX: float comparison via awk — `bc` is not guaranteed to be
          # installed on runner images, while awk is POSIX-mandated.
          if awk -v s="$SCORE" 'BEGIN { exit !(s < 65) }'; then
            echo "::error::Scanner.Core mutation score below threshold"
          fi
        fi
# Runs the sealed-mode (air-gap) CI harness against registry.stella-ops.org.
sealed-mode-ci:
  runs-on: ubuntu-22.04
  needs: build-test
  permissions:
    contents: read
    packages: read
  env:
    COMPOSE_PROJECT_NAME: sealedmode
  steps:
    - name: Checkout repository
      uses: actions/checkout@v4
      with:
        fetch-depth: 0
    # FIX: the `secrets` context is not available in step-level `if`
    # expressions on GitHub-compatible runners, so the previous guard
    # `if: ${{ secrets.REGISTRY_USERNAME != '' && ... }}` could not be
    # evaluated reliably. Probe the credentials in a shell step and gate
    # the login on its output instead.
    - name: Detect registry credentials
      id: registry-creds
      env:
        REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
        REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
      run: |
        if [ -n "$REGISTRY_USERNAME" ] && [ -n "$REGISTRY_PASSWORD" ]; then
          echo "available=true" >> "$GITHUB_OUTPUT"
        else
          echo "available=false" >> "$GITHUB_OUTPUT"
        fi
    - name: Login to registry
      if: steps.registry-creds.outputs.available == 'true'
      uses: docker/login-action@v3
      with:
        registry: registry.stella-ops.org
        username: ${{ secrets.REGISTRY_USERNAME }}
        password: ${{ secrets.REGISTRY_PASSWORD }}
    - name: Run sealed-mode CI harness
      working-directory: ops/devops/sealed-mode-ci
      env:
        COMPOSE_PROJECT_NAME: sealedmode
      run: |
        set -euo pipefail
        ./run-sealed-ci.sh
    - name: Upload sealed-mode CI artifacts
      uses: actions/upload-artifact@v4
      with:
        name: sealed-mode-ci
        path: ops/devops/sealed-mode-ci/artifacts/sealed-mode-ci
        if-no-files-found: error
        retention-days: 14
# Sanity-checks the Authority container packaging: compose file validity
# plus a local image build.
authority-container:
  runs-on: ubuntu-22.04
  needs: build-test
  steps:
    - name: Checkout repository
      uses: actions/checkout@v4
    - name: Validate Authority compose file
      run: docker compose -f ops/authority/docker-compose.authority.yaml config
    - name: Build Authority container image
      run: docker build -f ops/authority/Dockerfile -t stellaops-authority:ci .
# Heavy optional suite: runs nightly, or on manual dispatch when the
# excititor_batch input is set.
excititor-batch-validation:
needs: build-test
if: github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && github.event.inputs.excititor_batch == 'true')
runs-on: ubuntu-22.04
env:
BATCH_RESULTS_DIR: ${{ github.workspace }}/artifacts/test-results/excititor-batch
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Run Excititor batch ingest validation suite
env:
DOTNET_SKIP_FIRST_TIME_EXPERIENCE: 1
run: |
set -euo pipefail
mkdir -p "$BATCH_RESULTS_DIR"
dotnet test src/Excititor/__Tests/StellaOps.Excititor.WebService.Tests/StellaOps.Excititor.WebService.Tests.csproj \
--configuration $BUILD_CONFIGURATION \
--filter "Category=BatchIngestValidation" \
--logger "trx;LogFileName=excititor-batch.trx" \
--results-directory "$BATCH_RESULTS_DIR"
# NOTE(review): unlike the other upload steps this one sets no
# retention-days or if-no-files-found policy — confirm the defaults
# are acceptable for this artifact.
- name: Upload Excititor batch ingest results
if: always()
uses: actions/upload-artifact@v4
with:
name: excititor-batch-ingest-results
path: ${{ env.BATCH_RESULTS_DIR }}
# Renders the documentation bundle and publishes it as a build artifact.
docs:
  runs-on: ubuntu-22.04
  env:
    DOCS_OUTPUT_DIR: ${{ github.workspace }}/artifacts/docs-site
  steps:
    - name: Checkout repository
      uses: actions/checkout@v4
    - name: Setup Python
      uses: actions/setup-python@v5
      with:
        python-version: '3.11'
    - name: Install documentation dependencies
      run: |
        python -m pip install --upgrade pip
        python -m pip install markdown pygments
    - name: Render documentation bundle
      run: |
        python scripts/render_docs.py --source docs --output "$DOCS_OUTPUT_DIR" --clean
    - name: Upload documentation artifact
      uses: actions/upload-artifact@v4
      with:
        name: feedser-docs-site
        path: ${{ env.DOCS_OUTPUT_DIR }}
        if-no-files-found: error
        retention-days: 7
  # scanner-perf — runs the Scanner.Analyzers micro-benchmark and fails the
  # workflow when the latest run regresses by more than 20% against the
  # committed baseline.csv. Gates the deploy job together with build-test
  # and docs.
  scanner-perf:
    runs-on: ubuntu-22.04
    needs: build-test
    env:
      # Directory containing run-bench.js, baseline.csv and the generated
      # latest.csv; reused by every step below.
      BENCH_DIR: src/Bench/StellaOps.Bench/Scanner.Analyzers
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          # Quoted so the version stays a string, not the integer 20.
          node-version: '20'
      # Produces latest.csv. NOTE(review): --threshold-ms looks like a hard
      # per-scenario ceiling enforced by run-bench.js itself, independent of
      # the baseline comparison below — confirm in run-bench.js.
      - name: Run analyzer microbench
        working-directory: ${{ env.BENCH_DIR }}
        run: |
          node run-bench.js \
            --repo-root "${{ github.workspace }}" \
            --out latest.csv \
            --threshold-ms 5000
      # Inline Node script: parses both CSVs (header row dropped, plain
      # comma-split — assumes no quoted fields), then flags any scenario whose
      # mean or max exceeds baseline * 1.20, or which is missing entirely from
      # the latest run. Exits non-zero on any regression.
      - name: Compare against baseline
        working-directory: ${{ env.BENCH_DIR }}
        run: |
          node - <<'NODE'
          const fs = require('fs');
          const path = require('path');
          function parseCsv(file) {
            const rows = fs.readFileSync(file, 'utf8').trim().split(/\r?\n/);
            rows.shift();
            const data = {};
            for (const row of rows) {
              const [id, iterations, sampleCount, mean, p95, max] = row.split(',');
              data[id] = {
                iterations: Number(iterations),
                sampleCount: Number(sampleCount),
                mean: Number(mean),
                p95: Number(p95),
                max: Number(max),
              };
            }
            return data;
          }
          const baseline = parseCsv('baseline.csv');
          const latest = parseCsv('latest.csv');
          const allowedMultiplier = 1.20;
          const regressions = [];
          for (const [id, baseMetrics] of Object.entries(baseline)) {
            const current = latest[id];
            if (!current) {
              regressions.push(`Scenario ${id} missing from latest run`);
              continue;
            }
            if (current.mean > baseMetrics.mean * allowedMultiplier) {
              regressions.push(`Scenario ${id} mean ${current.mean.toFixed(2)}ms exceeded baseline ${baseMetrics.mean.toFixed(2)}ms by >20%`);
            }
            if (current.max > baseMetrics.max * allowedMultiplier) {
              regressions.push(`Scenario ${id} max ${current.max.toFixed(2)}ms exceeded baseline ${baseMetrics.max.toFixed(2)}ms by >20%`);
            }
          }
          if (regressions.length > 0) {
            console.error('Performance regression detected:');
            for (const msg of regressions) {
              console.error(`  - ${msg}`);
            }
            process.exit(1);
          }
          NODE
      # latest.csv is kept for a week so regressions can be inspected and the
      # baseline refreshed from a known-good run.
      - name: Upload bench report
        uses: actions/upload-artifact@v4
        with:
          name: scanner-analyzers-bench
          path: ${{ env.BENCH_DIR }}/latest.csv
          retention-days: 7
  # deploy — ships the service bundle and rendered docs to the staging host
  # via rsync over ssh. Runs only when all three upstream jobs succeeded AND
  # the trigger is either a push to main or a manual workflow_dispatch.
  deploy:
    runs-on: ubuntu-22.04
    needs: [build-test, docs, scanner-perf]
    # Folded (>-) multi-line expression; `if:` accepts it without ${{ }}.
    if: >-
      needs.build-test.result == 'success' &&
      needs.docs.result == 'success' &&
      needs.scanner-perf.result == 'success' &&
      (
        (github.event_name == 'push' && github.ref == 'refs/heads/main') ||
        github.event_name == 'workflow_dispatch'
      )
    environment: staging
    steps:
      # Only scripts/ and the workflow definitions are needed from the repo;
      # the deployable bits come from artifacts produced by upstream jobs.
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          sparse-checkout: |
            scripts
            .gitea/workflows
          sparse-checkout-cone-mode: true
      # Gate: manual dispatch requires force_deploy=true; push events deploy
      # only from main. Result is exported as the `should-deploy` output that
      # every later step checks.
      - name: Check if deployment should proceed
        id: check-deploy
        run: |
          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
            if [ "${{ github.event.inputs.force_deploy }}" = "true" ]; then
              echo "should-deploy=true" >> $GITHUB_OUTPUT
              echo "✅ Manual deployment requested"
            else
              echo "should-deploy=false" >> $GITHUB_OUTPUT
              echo " Manual dispatch without force_deploy=true — skipping"
            fi
          elif [ "${{ github.ref }}" = "refs/heads/main" ]; then
            echo "should-deploy=true" >> $GITHUB_OUTPUT
            echo "✅ Deploying latest main branch build"
          else
            echo "should-deploy=false" >> $GITHUB_OUTPUT
            echo " Deployment restricted to main branch"
          fi
      # Resolve host/user/paths/key through a fallback chain
      # (STAGING_* secret -> STAGING_* var -> legacy secret -> legacy var) and
      # fail once, listing every missing value. The private key is written to
      # a 600-mode temp file so rsync's ssh can use it via -i. Deployment
      # paths are optional: an empty path skips the corresponding deploy step.
      - name: Resolve deployment credentials
        id: params
        if: steps.check-deploy.outputs.should-deploy == 'true'
        run: |
          missing=()
          host="${{ secrets.STAGING_DEPLOYMENT_HOST }}"
          if [ -z "$host" ]; then host="${{ vars.STAGING_DEPLOYMENT_HOST }}"; fi
          if [ -z "$host" ]; then host="${{ secrets.DEPLOYMENT_HOST }}"; fi
          if [ -z "$host" ]; then host="${{ vars.DEPLOYMENT_HOST }}"; fi
          if [ -z "$host" ]; then missing+=("STAGING_DEPLOYMENT_HOST"); fi
          user="${{ secrets.STAGING_DEPLOYMENT_USERNAME }}"
          if [ -z "$user" ]; then user="${{ vars.STAGING_DEPLOYMENT_USERNAME }}"; fi
          if [ -z "$user" ]; then user="${{ secrets.DEPLOYMENT_USERNAME }}"; fi
          if [ -z "$user" ]; then user="${{ vars.DEPLOYMENT_USERNAME }}"; fi
          if [ -z "$user" ]; then missing+=("STAGING_DEPLOYMENT_USERNAME"); fi
          path="${{ secrets.STAGING_DEPLOYMENT_PATH }}"
          if [ -z "$path" ]; then path="${{ vars.STAGING_DEPLOYMENT_PATH }}"; fi
          docs_path="${{ secrets.STAGING_DOCS_PATH }}"
          if [ -z "$docs_path" ]; then docs_path="${{ vars.STAGING_DOCS_PATH }}"; fi
          key="${{ secrets.STAGING_DEPLOYMENT_KEY }}"
          if [ -z "$key" ]; then key="${{ secrets.DEPLOYMENT_KEY }}"; fi
          if [ -z "$key" ]; then key="${{ vars.STAGING_DEPLOYMENT_KEY }}"; fi
          if [ -z "$key" ]; then key="${{ vars.DEPLOYMENT_KEY }}"; fi
          if [ -z "$key" ]; then missing+=("STAGING_DEPLOYMENT_KEY"); fi
          if [ ${#missing[@]} -gt 0 ]; then
            echo "❌ Missing deployment configuration: ${missing[*]}"
            exit 1
          fi
          key_file="$RUNNER_TEMP/staging_deploy_key"
          printf '%s\n' "$key" > "$key_file"
          chmod 600 "$key_file"
          echo "host=$host" >> $GITHUB_OUTPUT
          echo "user=$user" >> $GITHUB_OUTPUT
          echo "path=$path" >> $GITHUB_OUTPUT
          echo "docs-path=$docs_path" >> $GITHUB_OUTPUT
          echo "key-file=$key_file" >> $GITHUB_OUTPUT
      # Artifact downloads are conditional on the respective target path being
      # configured; names must match the upload steps of upstream jobs.
      - name: Download service artifact
        if: steps.check-deploy.outputs.should-deploy == 'true' && steps.params.outputs.path != ''
        uses: actions/download-artifact@v4
        with:
          name: feedser-publish
          path: artifacts/service
      - name: Download documentation artifact
        if: steps.check-deploy.outputs.should-deploy == 'true' && steps.params.outputs['docs-path'] != ''
        uses: actions/download-artifact@v4
        with:
          name: feedser-docs-site
          path: artifacts/docs
      # Best-effort apt caching: downloaded rsync/libpopt0 .debs are kept
      # under CI_CACHE_ROOT so warm runners skip the download. Falls back to
      # `apt-get install -f` if the cached dpkg install leaves broken deps.
      - name: Install rsync
        if: steps.check-deploy.outputs.should-deploy == 'true'
        run: |
          if command -v rsync >/dev/null 2>&1; then
            exit 0
          fi
          CACHE_DIR="${CI_CACHE_ROOT:-/tmp}/apt"
          mkdir -p "$CACHE_DIR"
          KEY="rsync-$(lsb_release -rs 2>/dev/null || echo unknown)"
          DEB_DIR="$CACHE_DIR/$KEY"
          mkdir -p "$DEB_DIR"
          if ls "$DEB_DIR"/rsync*.deb >/dev/null 2>&1; then
            apt-get update
            apt-get install -y --no-install-recommends "$DEB_DIR"/libpopt0*.deb "$DEB_DIR"/rsync*.deb
          else
            apt-get update
            apt-get download rsync libpopt0
            mv rsync*.deb libpopt0*.deb "$DEB_DIR"/
            dpkg -i "$DEB_DIR"/libpopt0*.deb "$DEB_DIR"/rsync*.deb || apt-get install -f -y
          fi
      # rsync --delete mirrors the artifact directory onto the target, so
      # files removed from the build disappear from the host as well.
      # NOTE(review): StrictHostKeyChecking=no disables ssh host-key
      # verification (MITM exposure) — acceptable only on a trusted staging
      # network; consider pinning a known_hosts entry instead.
      - name: Deploy service bundle
        if: steps.check-deploy.outputs.should-deploy == 'true' && steps.params.outputs.path != ''
        env:
          HOST: ${{ steps.params.outputs.host }}
          USER: ${{ steps.params.outputs.user }}
          TARGET: ${{ steps.params.outputs.path }}
          KEY_FILE: ${{ steps.params.outputs['key-file'] }}
        run: |
          SERVICE_DIR="artifacts/service/feedser-publish"
          if [ ! -d "$SERVICE_DIR" ]; then
            echo "❌ Service artifact directory missing ($SERVICE_DIR)"
            exit 1
          fi
          echo "🚀 Deploying Feedser web service to $HOST:$TARGET"
          rsync -az --delete \
            -e "ssh -i $KEY_FILE -o StrictHostKeyChecking=no" \
            "$SERVICE_DIR"/ \
            "$USER@$HOST:$TARGET/"
      # Same mirroring approach for the rendered documentation bundle.
      - name: Deploy documentation bundle
        if: steps.check-deploy.outputs.should-deploy == 'true' && steps.params.outputs['docs-path'] != ''
        env:
          HOST: ${{ steps.params.outputs.host }}
          USER: ${{ steps.params.outputs.user }}
          DOCS_TARGET: ${{ steps.params.outputs['docs-path'] }}
          KEY_FILE: ${{ steps.params.outputs['key-file'] }}
        run: |
          DOCS_DIR="artifacts/docs/feedser-docs-site"
          if [ ! -d "$DOCS_DIR" ]; then
            echo "❌ Documentation artifact directory missing ($DOCS_DIR)"
            exit 1
          fi
          echo "📚 Deploying documentation bundle to $HOST:$DOCS_TARGET"
          rsync -az --delete \
            -e "ssh -i $KEY_FILE -o StrictHostKeyChecking=no" \
            "$DOCS_DIR"/ \
            "$USER@$HOST:$DOCS_TARGET/"
      - name: Deployment summary
        if: steps.check-deploy.outputs.should-deploy == 'true'
        run: |
          echo "✅ Deployment completed"
          echo "   Host: ${{ steps.params.outputs.host }}"
          echo "   Service path: ${{ steps.params.outputs.path || '(skipped)' }}"
          echo "   Docs path: ${{ steps.params.outputs['docs-path'] || '(skipped)' }}"
      - name: Deployment skipped summary
        if: steps.check-deploy.outputs.should-deploy != 'true'
        run: |
          echo " Deployment stage skipped"
          echo "   Event: ${{ github.event_name }}"
          echo "   Ref: ${{ github.ref }}"
notify-smoke:
runs-on: ubuntu-22.04
needs: deploy
if: needs.deploy.result == 'success'
env:
DOTNET_VERSION: ${{ env.DOTNET_VERSION }}
NOTIFY_SMOKE_REDIS_DSN: ${{ secrets.NOTIFY_SMOKE_REDIS_DSN }}
NOTIFY_SMOKE_NOTIFY_BASEURL: ${{ secrets.NOTIFY_SMOKE_NOTIFY_BASEURL }}
NOTIFY_SMOKE_NOTIFY_TOKEN: ${{ secrets.NOTIFY_SMOKE_NOTIFY_TOKEN }}
NOTIFY_SMOKE_NOTIFY_TENANT: ${{ secrets.NOTIFY_SMOKE_NOTIFY_TENANT }}
NOTIFY_SMOKE_NOTIFY_TENANT_HEADER: ${{ secrets.NOTIFY_SMOKE_NOTIFY_TENANT_HEADER }}
NOTIFY_SMOKE_EXPECT_KINDS: ${{ vars.NOTIFY_SMOKE_EXPECT_KINDS || secrets.NOTIFY_SMOKE_EXPECT_KINDS }}
NOTIFY_SMOKE_LOOKBACK_MINUTES: ${{ vars.NOTIFY_SMOKE_LOOKBACK_MINUTES || secrets.NOTIFY_SMOKE_LOOKBACK_MINUTES }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup .NET ${{ env.DOTNET_VERSION }}
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Validate Notify smoke configuration
run: |
missing=()
for name in NOTIFY_SMOKE_REDIS_DSN NOTIFY_SMOKE_NOTIFY_BASEURL NOTIFY_SMOKE_NOTIFY_TOKEN NOTIFY_SMOKE_NOTIFY_TENANT NOTIFY_SMOKE_EXPECT_KINDS NOTIFY_SMOKE_LOOKBACK_MINUTES
do
value="${!name}"
if [ -z "$value" ]; then
missing+=("$name")
fi
done
if [ ${#missing[@]} -gt 0 ]; then
echo "❌ Missing Notify smoke configuration: ${missing[*]}"
exit 1
fi
- name: Restore Notify smoke checker
run: dotnet restore tools/NotifySmokeCheck/NotifySmokeCheck.csproj
- name: Run Notify smoke validation
run: dotnet run --project tools/NotifySmokeCheck/NotifySmokeCheck.csproj --configuration Release