Compare commits

1 Commit

Commit: 89543de7f1 (master)
feat: Implement vulnerability token signing and verification utilities
- Added VulnTokenSigner for signing JWT tokens with specified algorithms and keys.
- Introduced VulnTokenUtilities for resolving tenant and subject claims, and sanitizing context dictionaries.
- Created VulnTokenVerificationUtilities for parsing tokens, verifying signatures, and deserializing payloads.
- Developed VulnWorkflowAntiForgeryTokenIssuer for issuing anti-forgery tokens with configurable options.
- Implemented VulnWorkflowAntiForgeryTokenVerifier for verifying anti-forgery tokens and validating payloads.
- Added AuthorityVulnerabilityExplorerOptions to manage configuration for vulnerability explorer features.
- Included tests for FilesystemPackRunDispatcher to ensure proper job handling under egress policy restrictions.
Date: 2025-11-03 10:02:29 +02:00
9681 changed files with 94506 additions and 4123151 deletions
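The commit message names the new token utilities but none of their sources appear in the hunks below, which only cover deleted CI configuration. As a rough, hypothetical C# sketch of the issue/verify flow such utilities typically implement, the following fragment signs and checks a compact HS256 anti-forgery token. Every name in it (AntiForgeryTokenSketch, Issue, Verify, the claim layout) is illustrative, not the actual VulnWorkflowAntiForgeryTokenIssuer/Verifier API.

using System;
using System.Security.Cryptography;
using System.Text;

// Hypothetical sketch only: the real VulnWorkflowAntiForgeryTokenIssuer /
// VulnWorkflowAntiForgeryTokenVerifier from this commit are not shown here.
internal static class AntiForgeryTokenSketch
{
    private static string Base64Url(byte[] data) =>
        Convert.ToBase64String(data).TrimEnd('=').Replace('+', '-').Replace('/', '_');

    // Issue a compact HS256 token that binds tenant and subject claims.
    internal static string Issue(byte[] key, string tenant, string subject, TimeSpan lifetime)
    {
        string header = Base64Url(Encoding.UTF8.GetBytes("{\"alg\":\"HS256\",\"typ\":\"JWT\"}"));
        long exp = DateTimeOffset.UtcNow.Add(lifetime).ToUnixTimeSeconds();
        string payload = Base64Url(Encoding.UTF8.GetBytes(
            $"{{\"tenant\":\"{tenant}\",\"sub\":\"{subject}\",\"exp\":{exp}}}"));
        using var hmac = new HMACSHA256(key);
        string signature = Base64Url(hmac.ComputeHash(Encoding.UTF8.GetBytes(header + "." + payload)));
        return header + "." + payload + "." + signature;
    }

    // Recompute the MAC and compare in constant time; a real verifier would
    // then deserialize the payload and validate exp/tenant/subject claims.
    internal static bool Verify(byte[] key, string token)
    {
        string[] parts = token.Split('.');
        if (parts.Length != 3) return false;
        using var hmac = new HMACSHA256(key);
        string expected = Base64Url(hmac.ComputeHash(Encoding.UTF8.GetBytes(parts[0] + "." + parts[1])));
        return CryptographicOperations.FixedTimeEquals(
            Encoding.UTF8.GetBytes(expected), Encoding.UTF8.GetBytes(parts[2]));
    }

    private static void Main()
    {
        byte[] key = RandomNumberGenerator.GetBytes(32);
        string token = Issue(key, "tenant-a", "workflow-run-42", TimeSpan.FromMinutes(10));
        Console.WriteLine($"token:    {token}");
        Console.WriteLine($"valid:    {Verify(key, token)}");
        Console.WriteLine($"tampered: {Verify(key, token + "x")}");
    }
}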

@@ -1,11 +0,0 @@
{
"permissions": {
"allow": [
"Bash(wc:*)",
"Bash(sort:*)"
],
"deny": [],
"ask": []
},
"outputStyle": "default"
}

@@ -1,5 +0,0 @@
[src/Scanner/StellaOps.Scanner.Analyzers.Native/**.cs]
dotnet_diagnostic.CA2022.severity = none
[src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Native.Tests/**.cs]
dotnet_diagnostic.CA2022.severity = none

@@ -1,28 +0,0 @@
name: Airgap Sealed CI Smoke
on:
push:
branches: [ main ]
paths:
- 'ops/devops/airgap/**'
- '.gitea/workflows/airgap-sealed-ci.yml'
pull_request:
branches: [ main, develop ]
paths:
- 'ops/devops/airgap/**'
- '.gitea/workflows/airgap-sealed-ci.yml'
jobs:
sealed-smoke:
runs-on: ubuntu-22.04
permissions:
contents: read
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Install dnslib
run: pip install dnslib
- name: Run sealed-mode smoke
run: sudo ops/devops/airgap/sealed-ci-smoke.sh

@@ -1,115 +0,0 @@
name: AOC Guard CI
on:
push:
branches: [ main ]
paths:
- 'src/Aoc/**'
- 'src/Concelier/**'
- 'src/Authority/**'
- 'src/Excititor/**'
- 'ops/devops/aoc/**'
- '.gitea/workflows/aoc-guard.yml'
pull_request:
branches: [ main, develop ]
paths:
- 'src/Aoc/**'
- 'src/Concelier/**'
- 'src/Authority/**'
- 'src/Excititor/**'
- 'ops/devops/aoc/**'
- '.gitea/workflows/aoc-guard.yml'
jobs:
aoc-guard:
runs-on: ubuntu-22.04
env:
DOTNET_VERSION: '10.0.100-rc.1.25451.107'
ARTIFACT_DIR: ${{ github.workspace }}/.artifacts
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Export OpenSSL 1.1 shim for Mongo2Go
run: scripts/enable-openssl11-shim.sh
- name: Set up .NET SDK
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Restore analyzers
run: dotnet restore src/Aoc/__Analyzers/StellaOps.Aoc.Analyzers/StellaOps.Aoc.Analyzers.csproj
- name: Build analyzers
run: dotnet build src/Aoc/__Analyzers/StellaOps.Aoc.Analyzers/StellaOps.Aoc.Analyzers.csproj -c Release
- name: Run analyzers against ingestion projects
run: |
dotnet build src/Concelier/StellaOps.Concelier.Ingestion/StellaOps.Concelier.Ingestion.csproj -c Release /p:RunAnalyzers=true /p:TreatWarningsAsErrors=true
dotnet build src/Authority/StellaOps.Authority.Ingestion/StellaOps.Authority.Ingestion.csproj -c Release /p:RunAnalyzers=true /p:TreatWarningsAsErrors=true
dotnet build src/Excititor/StellaOps.Excititor.Ingestion/StellaOps.Excititor.Ingestion.csproj -c Release /p:RunAnalyzers=true /p:TreatWarningsAsErrors=true
- name: Run analyzer tests
run: |
mkdir -p $ARTIFACT_DIR
dotnet test src/Aoc/__Tests/StellaOps.Aoc.Analyzers.Tests/StellaOps.Aoc.Analyzers.Tests.csproj -c Release --logger "trx;LogFileName=aoc-tests.trx" --results-directory $ARTIFACT_DIR
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: aoc-guard-artifacts
path: ${{ env.ARTIFACT_DIR }}
aoc-verify:
needs: aoc-guard
runs-on: ubuntu-22.04
if: github.event_name != 'schedule'
env:
DOTNET_VERSION: '10.0.100-rc.1.25451.107'
ARTIFACT_DIR: ${{ github.workspace }}/.artifacts
AOC_VERIFY_SINCE: ${{ github.event.pull_request.base.sha || 'HEAD~1' }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Export OpenSSL 1.1 shim for Mongo2Go
run: scripts/enable-openssl11-shim.sh
- name: Set up .NET SDK
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Run AOC verify
env:
STAGING_MONGO_URI: ${{ secrets.STAGING_MONGO_URI || vars.STAGING_MONGO_URI }}
run: |
if [ -z "${STAGING_MONGO_URI:-}" ]; then
echo "::warning::STAGING_MONGO_URI not set; skipping aoc verify"
exit 0
fi
mkdir -p $ARTIFACT_DIR
dotnet run --project src/Aoc/StellaOps.Aoc.Cli -- verify --since "$AOC_VERIFY_SINCE" --mongo "$STAGING_MONGO_URI" --output "$ARTIFACT_DIR/aoc-verify.json" --ndjson "$ARTIFACT_DIR/aoc-verify.ndjson" || VERIFY_EXIT=$?
if [ -n "${VERIFY_EXIT:-}" ] && [ "${VERIFY_EXIT}" -ne 0 ]; then
echo "::error::AOC verify reported violations"; exit ${VERIFY_EXIT}
fi
- name: Upload verify artifacts
if: always()
uses: actions/upload-artifact@v4
with:
name: aoc-verify-artifacts
path: ${{ env.ARTIFACT_DIR }}

@@ -1,51 +0,0 @@
name: api-governance
on:
push:
paths:
- "src/Api/**"
- ".spectral.yaml"
- "package.json"
pull_request:
paths:
- "src/Api/**"
- ".spectral.yaml"
- "package.json"
jobs:
spectral-lint:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "18"
- name: Install npm deps
run: npm install --ignore-scripts --no-progress
- name: Compose aggregate OpenAPI
run: npm run api:compose
- name: Validate examples coverage
run: npm run api:examples
- name: Compatibility diff (previous commit)
run: |
set -e
if git show HEAD~1:src/Api/StellaOps.Api.OpenApi/stella.yaml > /tmp/stella-prev.yaml 2>/dev/null; then
node scripts/api-compat-diff.mjs /tmp/stella-prev.yaml src/Api/StellaOps.Api.OpenApi/stella.yaml --output text --fail-on-breaking
else
echo "[api:compat] previous stella.yaml not found; skipping"
fi
- name: Compatibility diff (baseline)
run: |
set -e
if [ -f src/Api/StellaOps.Api.OpenApi/baselines/stella-baseline.yaml ]; then
node scripts/api-compat-diff.mjs src/Api/StellaOps.Api.OpenApi/baselines/stella-baseline.yaml src/Api/StellaOps.Api.OpenApi/stella.yaml --output text
else
echo "[api:compat] baseline file missing; skipping"
fi
- name: Generate changelog
run: npm run api:changelog
- name: Spectral lint (fail on warning+)
run: npm run api:lint

@@ -1,29 +0,0 @@
name: attestation-bundle
on:
workflow_dispatch:
inputs:
attest_dir:
description: "Directory containing attestation artefacts"
required: true
default: "out/attest"
jobs:
bundle:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Build bundle
run: |
chmod +x scripts/attest/build-attestation-bundle.sh
scripts/attest/build-attestation-bundle.sh "${{ github.event.inputs.attest_dir }}"
- name: Upload bundle
uses: actions/upload-artifact@v4
with:
name: attestation-bundle
path: out/attest-bundles/**

@@ -58,9 +58,6 @@ jobs:
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Resolve Authority configuration
id: config
run: |

@@ -1,30 +0,0 @@
name: bench-determinism
on:
workflow_dispatch: {}
jobs:
bench-determinism:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Run determinism bench
env:
BENCH_DETERMINISM_THRESHOLD: "0.95"
run: |
chmod +x scripts/bench/determinism-run.sh
scripts/bench/determinism-run.sh
- name: Upload determinism artifacts
uses: actions/upload-artifact@v4
with:
name: bench-determinism
path: out/bench-determinism/**

@@ -21,8 +21,6 @@ on:
- 'docs/**'
- 'scripts/**'
- '.gitea/workflows/**'
schedule:
- cron: '0 5 * * *'
workflow_dispatch:
inputs:
force_deploy:
@@ -30,11 +28,6 @@ on:
required: false
default: 'false'
type: boolean
excititor_batch:
description: 'Run Excititor batch-ingest validation suite'
required: false
default: 'false'
type: boolean
env:
DOTNET_VERSION: '10.0.100-rc.1.25451.107'
@@ -55,18 +48,6 @@ jobs:
tar -xzf /tmp/helm.tgz -C /tmp
sudo install -m 0755 /tmp/linux-amd64/helm /usr/local/bin/helm
- name: Validate Helm chart rendering
run: |
set -euo pipefail
CHART_PATH="deploy/helm/stellaops"
helm lint "$CHART_PATH"
for values in values.yaml values-dev.yaml values-stage.yaml values-prod.yaml values-airgap.yaml values-mirror.yaml; do
release="stellaops-${values%.*}"
echo "::group::Helm template ${release} (${values})"
helm template "$release" "$CHART_PATH" -f "$CHART_PATH/$values" >/dev/null
echo "::endgroup::"
done
- name: Validate deployment profiles
run: ./deploy/tools/validate-profiles.sh
@@ -77,31 +58,12 @@ jobs:
PUBLISH_DIR: ${{ github.workspace }}/artifacts/publish/webservice
AUTHORITY_PUBLISH_DIR: ${{ github.workspace }}/artifacts/publish/authority
TEST_RESULTS_DIR: ${{ github.workspace }}/artifacts/test-results
STELLAOPS_TEST_MONGO_URI: ${{ secrets.STELLAOPS_TEST_MONGO_URI || vars.STELLAOPS_TEST_MONGO_URI }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Export OpenSSL 1.1 shim for Mongo2Go
run: scripts/enable-openssl11-shim.sh
- name: Verify binary layout
run: scripts/verify-binaries.sh
- name: Ensure binary manifests are up to date
run: |
python3 scripts/update-binary-manifests.py
git diff --exit-code local-nugets/manifest.json vendor/manifest.json offline/feeds/manifest.json
- name: Ensure Mongo test URI configured
run: |
if [ -z "${STELLAOPS_TEST_MONGO_URI:-}" ]; then
echo "::error::STELLAOPS_TEST_MONGO_URI must be provided via repository secrets or variables for Graph Indexer integration tests."
exit 1
fi
- name: Verify policy scope configuration
run: python3 scripts/verify-policy-scopes.py
@@ -111,64 +73,12 @@ jobs:
- name: Validate telemetry storage configuration
run: python3 ops/devops/telemetry/validate_storage_stack.py
- name: Task Pack offline bundle fixtures
run: |
python3 scripts/packs/run-fixtures-check.sh
- name: Telemetry tenant isolation smoke
env:
COMPOSE_DIR: ${{ github.workspace }}/deploy/compose
run: |
set -euo pipefail
./ops/devops/telemetry/generate_dev_tls.sh
COMPOSE_DIR="${COMPOSE_DIR:-${GITHUB_WORKSPACE}/deploy/compose}"
cleanup() {
set +e
(cd "$COMPOSE_DIR" && docker compose -f docker-compose.telemetry.yaml down -v --remove-orphans >/dev/null 2>&1)
(cd "$COMPOSE_DIR" && docker compose -f docker-compose.telemetry-storage.yaml down -v --remove-orphans >/dev/null 2>&1)
}
trap cleanup EXIT
(cd "$COMPOSE_DIR" && docker compose -f docker-compose.telemetry-storage.yaml up -d)
(cd "$COMPOSE_DIR" && docker compose -f docker-compose.telemetry.yaml up -d)
sleep 5
python3 ops/devops/telemetry/smoke_otel_collector.py --host localhost
python3 ops/devops/telemetry/tenant_isolation_smoke.py \
--collector https://localhost:4318/v1 \
--tempo https://localhost:3200 \
--loki https://localhost:3100
- name: Setup .NET ${{ env.DOTNET_VERSION }}
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Build CLI multi-runtime binaries
run: |
set -euo pipefail
export DOTNET_SKIP_FIRST_TIME_EXPERIENCE=1
RUNTIMES=(linux-x64 linux-arm64 osx-x64 osx-arm64 win-x64)
rm -rf out/cli-ci
for runtime in "${RUNTIMES[@]}"; do
dotnet publish src/Cli/StellaOps.Cli/StellaOps.Cli.csproj \
--configuration $BUILD_CONFIGURATION \
--runtime "$runtime" \
--self-contained true \
/p:PublishSingleFile=true \
/p:IncludeNativeLibrariesForSelfExtract=true \
/p:EnableCompressionInSingleFile=true \
/p:InvariantGlobalization=true \
--output "out/cli-ci/${runtime}"
done
- name: Run CLI unit tests
run: |
mkdir -p "$TEST_RESULTS_DIR"
dotnet test src/Cli/StellaOps.Cli.Tests/StellaOps.Cli.Tests.csproj \
--configuration $BUILD_CONFIGURATION \
--logger "trx;LogFileName=stellaops-cli-tests.trx" \
--results-directory "$TEST_RESULTS_DIR"
- name: Restore Concelier solution
run: dotnet restore src/Concelier/StellaOps.Concelier.sln
@@ -184,37 +94,6 @@ jobs:
--logger "trx;LogFileName=stellaops-concelier-tests.trx" \
--results-directory "$TEST_RESULTS_DIR"
- name: Run PostgreSQL storage integration tests (Testcontainers)
env:
POSTGRES_TEST_IMAGE: postgres:16-alpine
run: |
set -euo pipefail
mkdir -p "$TEST_RESULTS_DIR"
PROJECTS=(
src/__Libraries/__Tests/StellaOps.Infrastructure.Postgres.Tests/StellaOps.Infrastructure.Postgres.Tests.csproj
src/Authority/__Tests/StellaOps.Authority.Storage.Postgres.Tests/StellaOps.Authority.Storage.Postgres.Tests.csproj
src/Scheduler/__Tests/StellaOps.Scheduler.Storage.Postgres.Tests/StellaOps.Scheduler.Storage.Postgres.Tests.csproj
src/Concelier/__Tests/StellaOps.Concelier.Storage.Postgres.Tests/StellaOps.Concelier.Storage.Postgres.Tests.csproj
src/Excititor/__Tests/StellaOps.Excititor.Storage.Postgres.Tests/StellaOps.Excititor.Storage.Postgres.Tests.csproj
src/Notify/__Tests/StellaOps.Notify.Storage.Postgres.Tests/StellaOps.Notify.Storage.Postgres.Tests.csproj
src/Policy/__Tests/StellaOps.Policy.Storage.Postgres.Tests/StellaOps.Policy.Storage.Postgres.Tests.csproj
)
for project in "${PROJECTS[@]}"; do
name="$(basename "${project%.*}")"
dotnet test "$project" \
--configuration $BUILD_CONFIGURATION \
--logger "trx;LogFileName=${name}.trx" \
--results-directory "$TEST_RESULTS_DIR"
done
- name: Run TimelineIndexer tests (EB1 evidence linkage gate)
run: |
mkdir -p "$TEST_RESULTS_DIR"
dotnet test src/TimelineIndexer/StellaOps.TimelineIndexer/StellaOps.TimelineIndexer.sln \
--configuration $BUILD_CONFIGURATION \
--logger "trx;LogFileName=timelineindexer-tests.trx" \
--results-directory "$TEST_RESULTS_DIR"
- name: Lint policy DSL samples
run: dotnet run --project tools/PolicyDslValidator/PolicyDslValidator.csproj -- --strict docs/examples/policies/*.yaml
@@ -345,56 +224,6 @@ PY
--logger "trx;LogFileName=stellaops-scanner-lang-tests.trx" \
--results-directory "$TEST_RESULTS_DIR"
- name: Build and test Router components
run: |
set -euo pipefail
ROUTER_PROJECTS=(
src/__Libraries/StellaOps.Router.Common/StellaOps.Router.Common.csproj
src/__Libraries/StellaOps.Router.Config/StellaOps.Router.Config.csproj
src/__Libraries/StellaOps.Router.Transport.InMemory/StellaOps.Router.Transport.InMemory.csproj
src/__Libraries/StellaOps.Router.Transport.Tcp/StellaOps.Router.Transport.Tcp.csproj
src/__Libraries/StellaOps.Router.Transport.Tls/StellaOps.Router.Transport.Tls.csproj
src/__Libraries/StellaOps.Router.Transport.Udp/StellaOps.Router.Transport.Udp.csproj
src/__Libraries/StellaOps.Router.Transport.RabbitMq/StellaOps.Router.Transport.RabbitMq.csproj
src/__Libraries/StellaOps.Microservice/StellaOps.Microservice.csproj
src/__Libraries/StellaOps.Microservice.SourceGen/StellaOps.Microservice.SourceGen.csproj
)
for project in "${ROUTER_PROJECTS[@]}"; do
echo "::group::Build $project"
dotnet build "$project" --configuration $BUILD_CONFIGURATION --no-restore -warnaserror
echo "::endgroup::"
done
- name: Run Router and Microservice tests
run: |
mkdir -p "$TEST_RESULTS_DIR"
ROUTER_TEST_PROJECTS=(
# Core Router libraries
src/__Libraries/__Tests/StellaOps.Router.Common.Tests/StellaOps.Router.Common.Tests.csproj
src/__Libraries/__Tests/StellaOps.Router.Config.Tests/StellaOps.Router.Config.Tests.csproj
# Transport layers
src/__Libraries/__Tests/StellaOps.Router.Transport.InMemory.Tests/StellaOps.Router.Transport.InMemory.Tests.csproj
src/__Libraries/__Tests/StellaOps.Router.Transport.Tcp.Tests/StellaOps.Router.Transport.Tcp.Tests.csproj
src/__Libraries/__Tests/StellaOps.Router.Transport.Tls.Tests/StellaOps.Router.Transport.Tls.Tests.csproj
src/__Libraries/__Tests/StellaOps.Router.Transport.Udp.Tests/StellaOps.Router.Transport.Udp.Tests.csproj
# Microservice SDK
src/__Libraries/__Tests/StellaOps.Microservice.Tests/StellaOps.Microservice.Tests.csproj
src/__Libraries/__Tests/StellaOps.Microservice.SourceGen.Tests/StellaOps.Microservice.SourceGen.Tests.csproj
# Integration tests
src/__Libraries/__Tests/StellaOps.Router.Integration.Tests/StellaOps.Router.Integration.Tests.csproj
# Gateway tests
src/Gateway/__Tests/StellaOps.Gateway.WebService.Tests/StellaOps.Gateway.WebService.Tests.csproj
)
for project in "${ROUTER_TEST_PROJECTS[@]}"; do
name="$(basename "${project%.*}")"
echo "::group::Test $name"
dotnet test "$project" \
--configuration $BUILD_CONFIGURATION \
--logger "trx;LogFileName=${name}.trx" \
--results-directory "$TEST_RESULTS_DIR"
echo "::endgroup::"
done
- name: Run scanner analyzer performance benchmark
env:
PERF_OUTPUT_DIR: ${{ github.workspace }}/artifacts/perf/scanner-analyzers
@@ -557,15 +386,6 @@ PY
if-no-files-found: error
retention-days: 7
- name: Run console endpoint tests
run: |
mkdir -p "$TEST_RESULTS_DIR"
dotnet test src/Authority/StellaOps.Authority/StellaOps.Authority.Tests/StellaOps.Authority.Tests.csproj \
--configuration $BUILD_CONFIGURATION \
--logger "trx;LogFileName=console-endpoints.trx" \
--results-directory "$TEST_RESULTS_DIR" \
--filter ConsoleEndpointsTests
- name: Upload test results
if: always()
uses: actions/upload-artifact@v4
@@ -575,44 +395,6 @@ PY
if-no-files-found: ignore
retention-days: 7
sealed-mode-ci:
runs-on: ubuntu-22.04
needs: build-test
permissions:
contents: read
packages: read
env:
COMPOSE_PROJECT_NAME: sealedmode
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Login to registry
if: ${{ secrets.REGISTRY_USERNAME != '' && secrets.REGISTRY_PASSWORD != '' }}
uses: docker/login-action@v3
with:
registry: registry.stella-ops.org
username: ${{ secrets.REGISTRY_USERNAME }}
password: ${{ secrets.REGISTRY_PASSWORD }}
- name: Run sealed-mode CI harness
working-directory: ops/devops/sealed-mode-ci
env:
COMPOSE_PROJECT_NAME: sealedmode
run: |
set -euo pipefail
./run-sealed-ci.sh
- name: Upload sealed-mode CI artifacts
uses: actions/upload-artifact@v4
with:
name: sealed-mode-ci
path: ops/devops/sealed-mode-ci/artifacts/sealed-mode-ci
if-no-files-found: error
retention-days: 14
authority-container:
runs-on: ubuntu-22.04
needs: build-test
@@ -626,41 +408,6 @@ PY
- name: Build Authority container image
run: docker build -f ops/authority/Dockerfile -t stellaops-authority:ci .
excititor-batch-validation:
needs: build-test
if: github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && github.event.inputs.excititor_batch == 'true')
runs-on: ubuntu-22.04
env:
BATCH_RESULTS_DIR: ${{ github.workspace }}/artifacts/test-results/excititor-batch
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Run Excititor batch ingest validation suite
env:
DOTNET_SKIP_FIRST_TIME_EXPERIENCE: 1
run: |
set -euo pipefail
mkdir -p "$BATCH_RESULTS_DIR"
dotnet test src/Excititor/__Tests/StellaOps.Excititor.WebService.Tests/StellaOps.Excititor.WebService.Tests.csproj \
--configuration $BUILD_CONFIGURATION \
--filter "Category=BatchIngestValidation" \
--logger "trx;LogFileName=excititor-batch.trx" \
--results-directory "$BATCH_RESULTS_DIR"
- name: Upload Excititor batch ingest results
if: always()
uses: actions/upload-artifact@v4
with:
name: excititor-batch-ingest-results
path: ${{ env.BATCH_RESULTS_DIR }}
docs:
runs-on: ubuntu-22.04
env:

@@ -1,48 +0,0 @@
name: cli-build
on:
workflow_dispatch:
inputs:
rids:
description: "Comma-separated RIDs (e.g., linux-x64,win-x64,osx-arm64)"
required: false
default: "linux-x64,win-x64,osx-arm64"
config:
description: "Build configuration"
required: false
default: "Release"
sign:
description: "Enable cosign signing (requires COSIGN_KEY)"
required: false
default: "false"
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100-rc.2.25502.107"
- name: Install syft (SBOM)
uses: anchore/sbom-action/download-syft@v0
- name: Build CLI artifacts
run: |
chmod +x scripts/cli/build-cli.sh
RIDS="${{ github.event.inputs.rids }}" CONFIG="${{ github.event.inputs.config }}" SBOM_TOOL=syft SIGN="${{ github.event.inputs.sign }}" COSIGN_KEY="${{ secrets.COSIGN_KEY }}" scripts/cli/build-cli.sh
- name: List artifacts
run: find out/cli -maxdepth 3 -type f -print
- name: Upload CLI artifacts
uses: actions/upload-artifact@v4
with:
name: stella-cli
path: out/cli/**

@@ -1,47 +0,0 @@
name: cli-chaos-parity
on:
workflow_dispatch:
inputs:
chaos:
description: "Run chaos smoke (true/false)"
required: false
default: "true"
parity:
description: "Run parity diff (true/false)"
required: false
default: "true"
jobs:
cli-checks:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100-rc.2.25502.107"
- name: Chaos smoke
if: ${{ github.event.inputs.chaos == 'true' }}
run: |
chmod +x scripts/cli/chaos-smoke.sh
scripts/cli/chaos-smoke.sh
- name: Parity diff
if: ${{ github.event.inputs.parity == 'true' }}
run: |
chmod +x scripts/cli/parity-diff.sh
scripts/cli/parity-diff.sh
- name: Upload evidence
uses: actions/upload-artifact@v4
with:
name: cli-chaos-parity
path: |
out/cli-chaos/**
out/cli-goldens/**

@@ -1,47 +0,0 @@
name: Concelier Attestation Tests
on:
push:
paths:
- 'src/Concelier/**'
- '.gitea/workflows/concelier-attestation-tests.yml'
pull_request:
paths:
- 'src/Concelier/**'
- '.gitea/workflows/concelier-attestation-tests.yml'
jobs:
attestation-tests:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup .NET 10 preview
uses: actions/setup-dotnet@v4
with:
dotnet-version: '10.0.100-rc.2.25502.107'
- name: Restore Concelier solution
run: dotnet restore src/Concelier/StellaOps.Concelier.sln
- name: Build WebService Tests (no analyzers)
run: dotnet build src/Concelier/__Tests/StellaOps.Concelier.WebService.Tests/StellaOps.Concelier.WebService.Tests.csproj -c Release -p:DisableAnalyzers=true
- name: Run WebService attestation test
run: dotnet test src/Concelier/__Tests/StellaOps.Concelier.WebService.Tests/StellaOps.Concelier.WebService.Tests.csproj -c Release --filter InternalAttestationVerify --no-build --logger trx --results-directory TestResults
- name: Build Core Tests (no analyzers)
run: dotnet build src/Concelier/__Tests/StellaOps.Concelier.Core.Tests/StellaOps.Concelier.Core.Tests.csproj -c Release -p:DisableAnalyzers=true
- name: Run Core attestation builder tests
run: dotnet test src/Concelier/__Tests/StellaOps.Concelier.Core.Tests/StellaOps.Concelier.Core.Tests.csproj -c Release --filter EvidenceBundleAttestationBuilderTests --no-build --logger trx --results-directory TestResults
- name: Upload TRX results
uses: actions/upload-artifact@v4
with:
name: concelier-attestation-tests-trx
path: '**/TestResults/*.trx'

@@ -1,86 +0,0 @@
name: Console CI
on:
push:
branches: [ main ]
paths:
- 'src/UI/**'
- '.gitea/workflows/console-ci.yml'
- 'docs/modules/devops/console-ci-contract.md'
pull_request:
branches: [ main, develop ]
paths:
- 'src/UI/**'
- '.gitea/workflows/console-ci.yml'
- 'docs/modules/devops/console-ci-contract.md'
jobs:
console-ci:
runs-on: ubuntu-22.04
env:
PNPM_HOME: ~/.pnpm
PLAYWRIGHT_BROWSERS_PATH: ./.playwright
SOURCE_DATE_EPOCH: ${{ github.run_id }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Set up Node.js 20
uses: actions/setup-node@v4
with:
node-version: '20'
- name: Enable pnpm
run: |
corepack enable
corepack prepare pnpm@9 --activate
- name: Cache pnpm store & node_modules
uses: actions/cache@v4
with:
path: |
~/.pnpm-store
node_modules
./.pnpm-store
./.playwright
key: console-${{ runner.os }}-${{ hashFiles('pnpm-lock.yaml') }}
- name: Install dependencies (offline-first)
env:
PNPM_FETCH_RETRIES: 0
PNPM_OFFLINE: 1
run: |
pnpm install --frozen-lockfile || PNPM_OFFLINE=0 pnpm install --frozen-lockfile --prefer-offline
- name: Lint / Types
run: pnpm lint && pnpm format:check && pnpm typecheck
- name: Unit tests
run: pnpm test -- --runInBand --reporter=junit --outputFile=.artifacts/junit.xml
- name: Storybook a11y
run: |
pnpm storybook:build
pnpm storybook:a11y --ci --output .artifacts/storybook-a11y.json
- name: Playwright smoke
env:
PLAYWRIGHT_JUNIT_OUTPUT_NAME: .artifacts/playwright.xml
run: pnpm playwright test --config=playwright.config.ci.ts --reporter=list,junit
- name: Lighthouse (CI budgets)
run: |
pnpm serve --port 4173 &
pnpm lhci autorun --config=lighthouserc.ci.js --upload.target=filesystem --upload.outputDir=.artifacts/lhci
- name: SBOM
run: pnpm exec syft packages dir:dist --output=spdx-json=.artifacts/console.spdx.json
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: console-ci-artifacts
path: .artifacts

@@ -1,89 +0,0 @@
name: containers-multiarch
on:
workflow_dispatch:
inputs:
image:
description: "Image tag (e.g., ghcr.io/stella-ops/example:edge)"
required: true
context:
description: "Build context directory"
required: true
default: "."
platforms:
description: "Platforms (comma-separated)"
required: false
default: "linux/amd64,linux/arm64"
push:
description: "Push to registry"
required: false
default: "false"
jobs:
build-multiarch:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
install: true
- name: Install syft (SBOM)
uses: anchore/sbom-action/download-syft@v0
- name: Login to ghcr (optional)
if: ${{ github.event.inputs.push == 'true' && secrets.GHCR_TOKEN != '' }}
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GHCR_TOKEN }}
- name: Run multi-arch build
env:
COSIGN_EXPERIMENTAL: "1"
run: |
chmod +x scripts/buildx/build-multiarch.sh
extra=""
if [[ "${{ github.event.inputs.push }}" == "true" ]]; then extra="--push"; fi
scripts/buildx/build-multiarch.sh \
"${{ github.event.inputs.image }}" \
"${{ github.event.inputs.context }}" \
--platform "${{ github.event.inputs.platforms }}" \
--sbom syft ${extra}
- name: Build air-gap bundle
run: |
chmod +x scripts/buildx/build-airgap-bundle.sh
scripts/buildx/build-airgap-bundle.sh "${{ github.event.inputs.image }}"
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: buildx-${{ github.event.inputs.image }}
path: out/buildx/**
- name: Inspect built image archive
run: |
set -e
ls -lh out/buildx/
find out/buildx -name "image.oci" -print -exec sh -c 'tar -tf "$1" | head' _ {} \;
- name: Upload air-gap bundle
uses: actions/upload-artifact@v4
with:
name: bundle-${{ github.event.inputs.image }}
path: out/bundles/**
- name: Inspect remote image (if pushed)
if: ${{ github.event.inputs.push == 'true' }}
run: |
docker buildx imagetools inspect "${{ github.event.inputs.image }}"

@@ -1,40 +0,0 @@
name: cryptopro-optin
on:
workflow_dispatch:
inputs:
configuration:
description: Build configuration
default: Release
run_tests:
description: Run CryptoPro signer tests (requires CSP installed on runner)
default: true
jobs:
cryptopro:
runs-on: windows-latest
env:
STELLAOPS_CRYPTO_PRO_ENABLED: "1"
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup .NET 10 (preview)
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100-rc.2.25502.107
- name: Build CryptoPro plugin
run: |
dotnet build src/__Libraries/StellaOps.Cryptography.Plugin.CryptoPro/StellaOps.Cryptography.Plugin.CryptoPro.csproj -c ${{ github.event.inputs.configuration || 'Release' }}
- name: Run CryptoPro signer tests (requires CSP pre-installed)
if: ${{ github.event.inputs.run_tests != 'false' }}
run: |
powershell -File scripts/crypto/run-cryptopro-tests.ps1 -Configuration ${{ github.event.inputs.configuration || 'Release' }}
# NOTE: This workflow assumes the windows runner already has CryptoPro CSP installed and licensed.
# Leave it opt-in to avoid breaking default CI lanes.

@@ -1,32 +0,0 @@
name: devportal-offline
on:
schedule:
- cron: "0 5 * * *"
workflow_dispatch: {}
jobs:
build-offline:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup Node (corepack/pnpm)
uses: actions/setup-node@v4
with:
node-version: "18"
cache: "pnpm"
- name: Build devportal (offline bundle)
run: |
chmod +x scripts/devportal/build-devportal.sh
scripts/devportal/build-devportal.sh
- name: Upload bundle
uses: actions/upload-artifact@v4
with:
name: devportal-offline
path: out/devportal/**.tgz

@@ -29,12 +29,6 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Export OpenSSL 1.1 shim for Mongo2Go
run: scripts/enable-openssl11-shim.sh
- name: Setup Node.js
uses: actions/setup-node@v4
with:

@@ -1,86 +0,0 @@
name: evidence-locker
on:
workflow_dispatch:
inputs:
retention_target:
description: "Retention days target"
required: false
default: "180"
jobs:
check-evidence-locker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Emit retention summary
env:
RETENTION_TARGET: ${{ github.event.inputs.retention_target }}
run: |
mkdir -p out/evidence-locker
echo "target_retention_days=${RETENTION_TARGET}" > out/evidence-locker/summary.txt
- name: Upload evidence locker summary
uses: actions/upload-artifact@v4
with:
name: evidence-locker
path: out/evidence-locker/**
push-zastava-evidence:
runs-on: ubuntu-latest
needs: check-evidence-locker
env:
STAGED_DIR: evidence-locker/zastava/2025-12-02
MODULE_ROOT: docs/modules/zastava
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Package staged Zastava artefacts
run: |
test -d "$MODULE_ROOT" || { echo "missing $MODULE_ROOT" >&2; exit 1; }
tmpdir=$(mktemp -d)
rsync -a --relative \
"$MODULE_ROOT/SHA256SUMS" \
"$MODULE_ROOT/schemas/" \
"$MODULE_ROOT/exports/" \
"$MODULE_ROOT/thresholds.yaml" \
"$MODULE_ROOT/thresholds.yaml.dsse" \
"$MODULE_ROOT/kit/verify.sh" \
"$MODULE_ROOT/kit/README.md" \
"$MODULE_ROOT/kit/ed25519.pub" \
"$MODULE_ROOT/kit/zastava-kit.tzst" \
"$MODULE_ROOT/kit/zastava-kit.tzst.dsse" \
"$MODULE_ROOT/evidence/README.md" \
"$tmpdir/"
(cd "$tmpdir/docs/modules/zastava" && sha256sum --check SHA256SUMS)
tar --sort=name --mtime="UTC 1970-01-01" --owner=0 --group=0 --numeric-owner \
-cf /tmp/zastava-evidence.tar -C "$tmpdir/docs/modules/zastava" .
sha256sum /tmp/zastava-evidence.tar
- name: Upload staged artefacts (fallback)
uses: actions/upload-artifact@v4
with:
name: zastava-evidence-locker-2025-12-02
path: /tmp/zastava-evidence.tar
- name: Push to Evidence Locker
if: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN != '' && env.EVIDENCE_LOCKER_URL != '' }}
env:
TOKEN: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN }}
URL: ${{ env.EVIDENCE_LOCKER_URL }}
run: |
curl -f -X PUT "$URL/zastava/2025-12-02/zastava-evidence.tar" \
-H "Authorization: Bearer $TOKEN" \
--data-binary @/tmp/zastava-evidence.tar
- name: Skip push (missing secret or URL)
if: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN == '' || env.EVIDENCE_LOCKER_URL == '' }}
run: |
echo "Locker push skipped: set CI_EVIDENCE_LOCKER_TOKEN and EVIDENCE_LOCKER_URL to enable." >&2

@@ -1,85 +0,0 @@
name: Export Center CI
on:
push:
branches: [ main ]
paths:
- 'src/ExportCenter/**'
- 'ops/devops/export/**'
- '.gitea/workflows/export-ci.yml'
- 'docs/modules/devops/export-ci-contract.md'
pull_request:
branches: [ main, develop ]
paths:
- 'src/ExportCenter/**'
- 'ops/devops/export/**'
- '.gitea/workflows/export-ci.yml'
- 'docs/modules/devops/export-ci-contract.md'
jobs:
export-ci:
runs-on: ubuntu-22.04
env:
DOTNET_VERSION: '10.0.100-rc.1.25451.107'
MINIO_ACCESS_KEY: exportci
MINIO_SECRET_KEY: exportci123
BUCKET: export-ci
ARTIFACT_DIR: ${{ github.workspace }}/.artifacts
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Export OpenSSL 1.1 shim for Mongo2Go
run: scripts/enable-openssl11-shim.sh
- name: Set up .NET SDK
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Restore
run: dotnet restore src/ExportCenter/StellaOps.ExportCenter.WebService/StellaOps.ExportCenter.WebService.csproj
- name: Bring up MinIO
run: |
docker compose -f ops/devops/export/minio-compose.yml up -d
sleep 5
MINIO_ENDPOINT=http://localhost:9000 ops/devops/export/seed-minio.sh
- name: Build
run: dotnet build src/ExportCenter/StellaOps.ExportCenter.WebService/StellaOps.ExportCenter.WebService.csproj -c Release /p:ContinuousIntegrationBuild=true
- name: Test
run: |
mkdir -p $ARTIFACT_DIR
dotnet test src/ExportCenter/__Tests/StellaOps.ExportCenter.Tests/StellaOps.ExportCenter.Tests.csproj -c Release --logger "trx;LogFileName=export-tests.trx" --results-directory $ARTIFACT_DIR
- name: Trivy/OCI smoke
run: ops/devops/export/trivy-smoke.sh
- name: Schema lint
run: |
python -m json.tool docs/modules/export-center/schemas/export-profile.schema.json >/dev/null
python -m json.tool docs/modules/export-center/schemas/export-manifest.schema.json >/dev/null
- name: Offline kit verify (fixtures)
run: bash docs/modules/export-center/operations/verify-export-kit.sh src/ExportCenter/__fixtures/export-kit
- name: SBOM
run: syft dir:src/ExportCenter -o spdx-json=$ARTIFACT_DIR/exportcenter.spdx.json
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: export-ci-artifacts
path: ${{ env.ARTIFACT_DIR }}
- name: Teardown MinIO
if: always()
run: docker compose -f ops/devops/export/minio-compose.yml down -v

@@ -1,41 +0,0 @@
name: export-compat
on:
workflow_dispatch:
inputs:
image:
description: "Exporter image ref"
required: true
default: "ghcr.io/stella-ops/exporter:edge"
jobs:
compat:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup Trivy
uses: aquasecurity/trivy-action@v0.24.0
with:
version: latest
- name: Setup Cosign
uses: sigstore/cosign-installer@v3.6.0
- name: Run compatibility checks
env:
IMAGE: ${{ github.event.inputs.image }}
run: |
chmod +x scripts/export/trivy-compat.sh
chmod +x scripts/export/oci-verify.sh
scripts/export/trivy-compat.sh
scripts/export/oci-verify.sh
- name: Upload reports
uses: actions/upload-artifact@v4
with:
name: export-compat
path: out/export-compat/**

@@ -1,42 +0,0 @@
name: graph-load
on:
workflow_dispatch:
inputs:
target:
description: "Graph API base URL"
required: true
default: "http://localhost:5000"
users:
description: "Virtual users"
required: false
default: "8"
duration:
description: "Duration seconds"
required: false
default: "60"
jobs:
load-test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Install k6
run: |
sudo apt-get update -qq
sudo apt-get install -y k6
- name: Run graph load test
run: |
chmod +x scripts/graph/load-test.sh
TARGET="${{ github.event.inputs.target }}" USERS="${{ github.event.inputs.users }}" DURATION="${{ github.event.inputs.duration }}" scripts/graph/load-test.sh
- name: Upload results
uses: actions/upload-artifact@v4
with:
name: graph-load-summary
path: out/graph-load/**

@@ -1,57 +0,0 @@
name: graph-ui-sim
on:
workflow_dispatch:
inputs:
graph_api:
description: "Graph API base URL"
required: true
default: "http://localhost:5000"
graph_ui:
description: "Graph UI base URL"
required: true
default: "http://localhost:4200"
perf_budget_ms:
description: "Perf budget in ms"
required: false
default: "3000"
jobs:
ui-and-sim:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: "18"
- name: Install Playwright deps
run: npx playwright install --with-deps chromium
- name: Run UI perf probe
env:
GRAPH_UI_BASE: ${{ github.event.inputs.graph_ui }}
GRAPH_UI_BUDGET_MS: ${{ github.event.inputs.perf_budget_ms }}
OUT: out/graph-ui-perf
run: |
npx ts-node scripts/graph/ui-perf.ts
- name: Run simulation smoke
env:
TARGET: ${{ github.event.inputs.graph_api }}
run: |
chmod +x scripts/graph/simulation-smoke.sh
scripts/graph/simulation-smoke.sh
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: graph-ui-sim
path: |
out/graph-ui-perf/**
out/graph-sim/**

@@ -1,64 +0,0 @@
name: LNM Backfill CI
on:
workflow_dispatch:
inputs:
mongo_uri:
description: 'Staging Mongo URI (read-only snapshot)'
required: true
type: string
since_commit:
description: 'Git commit to compare (default HEAD)'
required: false
type: string
dry_run:
description: 'Dry run (no writes)'
required: false
default: true
type: boolean
jobs:
lnm-backfill:
runs-on: ubuntu-22.04
env:
DOTNET_VERSION: '10.0.100-rc.1.25451.107'
ARTIFACT_DIR: ${{ github.workspace }}/.artifacts
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Set up .NET SDK
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Restore
run: dotnet restore src/Concelier/StellaOps.Concelier.Backfill/StellaOps.Concelier.Backfill.csproj
- name: Run backfill (dry-run supported)
env:
STAGING_MONGO_URI: ${{ inputs.mongo_uri }}
run: |
mkdir -p $ARTIFACT_DIR
EXTRA=()
if [ "${{ inputs.dry_run }}" = "true" ]; then EXTRA+=("--dry-run"); fi
dotnet run --project src/Concelier/StellaOps.Concelier.Backfill/StellaOps.Concelier.Backfill.csproj -- --mode=observations --batch-size=500 --max-conflicts=0 --mongo "$STAGING_MONGO_URI" "${EXTRA[@]}" | tee $ARTIFACT_DIR/backfill-observations.log
dotnet run --project src/Concelier/StellaOps.Concelier.Backfill/StellaOps.Concelier.Backfill.csproj -- --mode=linksets --batch-size=500 --max-conflicts=0 --mongo "$STAGING_MONGO_URI" "${EXTRA[@]}" | tee $ARTIFACT_DIR/backfill-linksets.log
- name: Validate counts
env:
STAGING_MONGO_URI: ${{ inputs.mongo_uri }}
run: |
STAGING_MONGO_URI="$STAGING_MONGO_URI" ops/devops/lnm/backfill-validation.sh
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: lnm-backfill-artifacts
path: ${{ env.ARTIFACT_DIR }}

@@ -1,63 +0,0 @@
name: LNM VEX Backfill
on:
workflow_dispatch:
inputs:
mongo_uri:
description: 'Staging Mongo URI'
required: true
type: string
nats_url:
description: 'NATS URL'
required: true
type: string
redis_url:
description: 'Redis URL'
required: true
type: string
dry_run:
description: 'Dry run (no writes)'
required: false
default: true
type: boolean
jobs:
vex-backfill:
runs-on: ubuntu-22.04
env:
DOTNET_VERSION: '10.0.100-rc.1.25451.107'
ARTIFACT_DIR: ${{ github.workspace }}/.artifacts
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Set up .NET SDK
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Restore
run: dotnet restore src/Concelier/StellaOps.Concelier.Backfill/StellaOps.Concelier.Backfill.csproj
- name: Run VEX backfill
env:
STAGING_MONGO_URI: ${{ inputs.mongo_uri }}
NATS_URL: ${{ inputs.nats_url }}
REDIS_URL: ${{ inputs.redis_url }}
run: |
mkdir -p $ARTIFACT_DIR
EXTRA=()
if [ "${{ inputs.dry_run }}" = "true" ]; then EXTRA+=("--dry-run"); fi
dotnet run --project src/Concelier/StellaOps.Concelier.Backfill/StellaOps.Concelier.Backfill.csproj -- --mode=vex --batch-size=500 --max-conflicts=0 --mongo "$STAGING_MONGO_URI" --nats "$NATS_URL" --redis "$REDIS_URL" "${EXTRA[@]}" | tee $ARTIFACT_DIR/vex-backfill.log
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: lnm-vex-backfill-artifacts
path: ${{ env.ARTIFACT_DIR }}

@@ -1,53 +0,0 @@
name: Mirror Thin Bundle Sign & Verify
on:
workflow_dispatch:
schedule:
- cron: '0 6 * * *'
jobs:
mirror-sign:
runs-on: ubuntu-22.04
env:
MIRROR_SIGN_KEY_B64: ${{ secrets.MIRROR_SIGN_KEY_B64 }}
REQUIRE_PROD_SIGNING: 1
OCI: 1
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100-rc.2.25502.107
include-prerelease: true
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Verify signing prerequisites
run: scripts/mirror/check_signing_prereqs.sh
- name: Run mirror signing
run: |
scripts/mirror/ci-sign.sh
- name: Verify signed bundle
run: |
scripts/mirror/verify_thin_bundle.py out/mirror/thin/mirror-thin-v1.tar.gz
- name: Upload signed artifacts
uses: actions/upload-artifact@v4
with:
name: mirror-thin-v1-signed
path: |
out/mirror/thin/mirror-thin-v1.tar.gz
out/mirror/thin/mirror-thin-v1.manifest.json
out/mirror/thin/mirror-thin-v1.manifest.dsse.json
out/mirror/thin/tuf/
out/mirror/thin/oci/
out/mirror/thin/milestone.json
if-no-files-found: error
retention-days: 14

@@ -1,59 +0,0 @@
name: oas-ci
on:
push:
paths:
- "src/Api/**"
- "scripts/api-*.mjs"
- "package.json"
- "package-lock.json"
pull_request:
paths:
- "src/Api/**"
- "scripts/api-*.mjs"
- "package.json"
- "package-lock.json"
jobs:
oas-validate:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "18"
- name: Install deps
run: npm install --ignore-scripts --no-progress
- name: Compose aggregate OpenAPI
run: npm run api:compose
- name: Lint (spectral)
run: npm run api:lint
- name: Validate examples coverage
run: npm run api:examples
- name: Compat diff (previous commit)
run: |
set -e
if git show HEAD~1:src/Api/StellaOps.Api.OpenApi/stella.yaml > /tmp/stella-prev.yaml 2>/dev/null; then
node scripts/api-compat-diff.mjs /tmp/stella-prev.yaml src/Api/StellaOps.Api.OpenApi/stella.yaml --output text --fail-on-breaking
else
echo "[oas-ci] previous stella.yaml not found; skipping"
fi
- name: Contract tests
run: npm run api:compat:test
- name: Upload aggregate spec
uses: actions/upload-artifact@v4
with:
name: stella-openapi
path: src/Api/StellaOps.Api.OpenApi/stella.yaml

@@ -1,46 +0,0 @@
name: obs-slo
on:
workflow_dispatch:
inputs:
prom_url:
description: "Prometheus base URL"
required: true
default: "http://localhost:9090"
jobs:
slo-eval:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup Python (telemetry schema checks)
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Install telemetry schema deps
run: python -m pip install --upgrade pip jsonschema
- name: Run SLO evaluator
env:
PROM_URL: ${{ github.event.inputs.prom_url }}
run: |
chmod +x scripts/observability/slo-evaluator.sh
scripts/observability/slo-evaluator.sh
- name: Telemetry schema/bundle checks
env:
TELEMETRY_BUNDLE_SCHEMA: docs/modules/telemetry/schemas/telemetry-bundle.schema.json
run: |
chmod +x ops/devops/telemetry/tests/ci-run.sh
ops/devops/telemetry/tests/ci-run.sh
- name: Upload SLO results
uses: actions/upload-artifact@v4
with:
name: obs-slo
path: out/obs-slo/**

@@ -1,37 +0,0 @@
name: obs-stream
on:
workflow_dispatch:
inputs:
nats_url:
description: "NATS server URL"
required: false
default: "nats://localhost:4222"
jobs:
stream-validate:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Install nats CLI
run: |
curl -sSL https://github.com/nats-io/natscli/releases/download/v0.1.4/nats-0.1.4-linux-amd64.tar.gz -o /tmp/natscli.tgz
tar -C /tmp -xzf /tmp/natscli.tgz
sudo mv /tmp/nats /usr/local/bin/nats
- name: Validate streaming knobs
env:
NATS_URL: ${{ github.event.inputs.nats_url }}
run: |
chmod +x scripts/observability/streaming-validate.sh
scripts/observability/streaming-validate.sh
- name: Upload stream validation
uses: actions/upload-artifact@v4
with:
name: obs-stream
path: out/obs-stream/**

@@ -1,70 +0,0 @@
name: Policy Lint & Smoke
on:
pull_request:
paths:
- 'docs/policy/**'
- 'docs/examples/policies/**'
- 'src/Cli/**'
- '.gitea/workflows/policy-lint.yml'
push:
branches: [ main ]
paths:
- 'docs/policy/**'
- 'docs/examples/policies/**'
- 'src/Cli/**'
- '.gitea/workflows/policy-lint.yml'
jobs:
policy-lint:
runs-on: ubuntu-22.04
env:
DOTNET_NOLOGO: 1
DOTNET_CLI_TELEMETRY_OPTOUT: 1
DOTNET_SYSTEM_GLOBALIZATION_INVARIANT: 1
TZ: UTC
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup .NET 10 RC
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100-rc.2.25502.107
include-prerelease: true
- name: Cache NuGet packages
uses: actions/cache@v4
with:
path: |
~/.nuget/packages
local-nugets/packages
key: policy-lint-nuget-${{ runner.os }}-${{ hashFiles('**/*.csproj') }}
- name: Restore CLI
run: |
dotnet restore src/Cli/StellaOps.Cli/StellaOps.Cli.csproj --configfile nuget.config
- name: Lint policies (deterministic)
run: |
mkdir -p out/policy-lint
dotnet run --project src/Cli/StellaOps.Cli/StellaOps.Cli.csproj -- \
policy lint docs/examples/policies/*.stella \
--format json --no-color \
> out/policy-lint/lint.json
- name: Smoke simulate entrypoint
run: |
dotnet run --project src/Cli/StellaOps.Cli/StellaOps.Cli.csproj -- policy simulate --help > out/policy-lint/simulate-help.txt
- name: Upload lint artifacts
uses: actions/upload-artifact@v4
with:
name: policy-lint
path: out/policy-lint
retention-days: 7

@@ -1,89 +0,0 @@
name: Policy Simulation
on:
pull_request:
paths:
- 'docs/policy/**'
- 'docs/examples/policies/**'
- 'scripts/policy/**'
- '.gitea/workflows/policy-simulate.yml'
push:
branches: [ main ]
paths:
- 'docs/policy/**'
- 'docs/examples/policies/**'
- 'scripts/policy/**'
- '.gitea/workflows/policy-simulate.yml'
jobs:
policy-simulate:
runs-on: ubuntu-22.04
env:
DOTNET_NOLOGO: 1
DOTNET_CLI_TELEMETRY_OPTOUT: 1
DOTNET_SYSTEM_GLOBALIZATION_INVARIANT: 1
TZ: UTC
THRESHOLD: 0
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup .NET 10 RC
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100-rc.2.25502.107
include-prerelease: true
- name: Install Cosign
uses: sigstore/cosign-installer@v3.4.0
- name: Cache NuGet packages
uses: actions/cache@v4
with:
path: |
~/.nuget/packages
local-nugets/packages
key: policy-sim-nuget-${{ runner.os }}-${{ hashFiles('**/*.csproj') }}
- name: Restore CLI
run: |
dotnet restore src/Cli/StellaOps.Cli/StellaOps.Cli.csproj --configfile nuget.config
- name: Generate policy signing key (ephemeral)
run: |
OUT_DIR=out/policy-sign/keys PREFIX=ci-policy COSIGN_PASSWORD= scripts/policy/rotate-key.sh
- name: Sign sample policy blob
run: |
export COSIGN_KEY_B64=$(base64 -w0 out/policy-sign/keys/ci-policy-cosign.key)
COSIGN_PASSWORD= \
scripts/policy/sign-policy.sh --file docs/examples/policies/baseline.stella --out-dir out/policy-sign
- name: Attest and verify sample policy blob
run: |
export COSIGN_KEY_B64=$(base64 -w0 out/policy-sign/keys/ci-policy-cosign.key)
COSIGN_PASSWORD= \
scripts/policy/attest-verify.sh --file docs/examples/policies/baseline.stella --out-dir out/policy-sign
- name: Run batch policy simulation
run: |
scripts/policy/batch-simulate.sh
- name: Upload simulation artifacts
uses: actions/upload-artifact@v4
with:
name: policy-simulation
path: out/policy-sim
retention-days: 7
- name: Upload signing artifacts
uses: actions/upload-artifact@v4
with:
name: policy-signing
path: out/policy-sign
retention-days: 7

@@ -22,16 +22,13 @@ jobs:
runs-on: ubuntu-22.04
environment: production
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Resolve staging credentials
id: staging
run: |
missing=()
- name: Checkout repository
uses: actions/checkout@v4
- name: Resolve staging credentials
id: staging
run: |
missing=()
host="${{ secrets.STAGING_DEPLOYMENT_HOST }}"
if [ -z "$host" ]; then host="${{ vars.STAGING_DEPLOYMENT_HOST }}"; fi

@@ -1,24 +0,0 @@
name: provenance-check
on:
workflow_dispatch: {}
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Emit provenance summary
run: |
mkdir -p out/provenance
echo "run_at=$(date -u +"%Y-%m-%dT%H:%M:%SZ")" > out/provenance/summary.txt
- name: Upload provenance summary
uses: actions/upload-artifact@v4
with:
name: provenance-summary
path: out/provenance/**

@@ -44,9 +44,6 @@ jobs:
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Validate NuGet restore source ordering
run: python3 ops/devops/validate_restore_sources.py
@@ -242,10 +239,3 @@ jobs:
name: stellaops-release-${{ steps.meta.outputs.version }}
path: out/release
if-no-files-found: error
- name: Upload debug artefacts (build-id store)
uses: actions/upload-artifact@v4
with:
name: stellaops-debug-${{ steps.meta.outputs.version }}
path: out/release/debug
if-no-files-found: error

@@ -1,41 +0,0 @@
name: scanner-analyzers-release
on:
workflow_dispatch:
inputs:
rid:
description: "RID (e.g., linux-x64)"
required: false
default: "linux-x64"
jobs:
build-analyzers:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100-rc.2.25502.107"
- name: Install syft (SBOM)
uses: anchore/sbom-action/download-syft@v0
- name: Package PHP analyzer
run: |
chmod +x scripts/scanner/package-analyzer.sh
RID="${{ github.event.inputs.rid }}" scripts/scanner/package-analyzer.sh src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Php/StellaOps.Scanner.Analyzers.Lang.Php.csproj php-analyzer
- name: Package Ruby analyzer
run: |
RID="${{ github.event.inputs.rid }}" scripts/scanner/package-analyzer.sh src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Ruby/StellaOps.Scanner.Analyzers.Lang.Ruby.csproj ruby-analyzer
- name: Upload analyzer artifacts
uses: actions/upload-artifact@v4
with:
name: scanner-analyzers-${{ github.event.inputs.rid }}
path: out/scanner-analyzers/**

@@ -1,29 +0,0 @@
name: scanner-determinism
on:
workflow_dispatch: {}
jobs:
determinism:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100-rc.2.25502.107"
- name: Run determinism harness
run: |
chmod +x scripts/scanner/determinism-run.sh
scripts/scanner/determinism-run.sh
- name: Upload determinism artifacts
uses: actions/upload-artifact@v4
with:
name: scanner-determinism
path: out/scanner-determinism/**

@@ -1,38 +0,0 @@
name: sdk-generator-smoke
on:
push:
paths:
- "src/Sdk/StellaOps.Sdk.Generator/**"
- "package.json"
pull_request:
paths:
- "src/Sdk/StellaOps.Sdk.Generator/**"
- "package.json"
jobs:
sdk-smoke:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "18"
- name: Setup Java 21
uses: actions/setup-java@v4
with:
distribution: temurin
java-version: "21"
- name: Install npm deps (scripts only)
run: npm install --ignore-scripts --no-progress --no-audit --no-fund
- name: Run SDK smoke suite (TS/Python/Go/Java)
run: npm run sdk:smoke

@@ -1,92 +0,0 @@
name: SDK Publish & Sign
on:
pull_request:
paths:
- 'src/Sdk/**'
- 'ops/devops/sdk/**'
- 'scripts/sdk/**'
- '.gitea/workflows/sdk-publish.yml'
push:
branches: [ main ]
paths:
- 'src/Sdk/**'
- 'ops/devops/sdk/**'
- 'scripts/sdk/**'
- '.gitea/workflows/sdk-publish.yml'
jobs:
sdk-publish:
runs-on: ubuntu-22.04
env:
DOTNET_NOLOGO: 1
DOTNET_CLI_TELEMETRY_OPTOUT: 1
DOTNET_SYSTEM_GLOBALIZATION_INVARIANT: 1
TZ: UTC
SDK_NUGET_SOURCE: ${{ secrets.SDK_NUGET_SOURCE || 'local-nugets/packages' }}
SDK_NUGET_API_KEY: ${{ secrets.SDK_NUGET_API_KEY }}
SDK_SIGNING_CERT_B64: ${{ secrets.SDK_SIGNING_CERT_B64 }}
SDK_SIGNING_CERT_PASSWORD: ${{ secrets.SDK_SIGNING_CERT_PASSWORD }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup .NET 10 RC
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100-rc.2.25502.107
include-prerelease: true
- name: Cache NuGet packages
uses: actions/cache@v4
with:
path: |
~/.nuget/packages
local-nugets/packages
key: sdk-nuget-${{ runner.os }}-${{ hashFiles('src/Sdk/**/*.csproj') }}
- name: Restore (best effort; skipped if no csproj)
run: |
set -e
if compgen -G "src/Sdk/**/*.csproj" > /dev/null; then
dotnet restore --configfile nuget.config src/Sdk/StellaOps.Sdk.Release/StellaOps.Sdk.Release.csproj || true
else
echo "No SDK csproj present; skipping restore."
fi
- name: Build & Test (best effort)
run: |
set -e
if compgen -G "src/Sdk/**/*.csproj" > /dev/null; then
dotnet build src/Sdk/StellaOps.Sdk.Release/StellaOps.Sdk.Release.csproj -c Release --no-restore || true
if compgen -G "src/Sdk/**/__Tests/**/*.csproj" > /dev/null; then
dotnet test src/Sdk/**/__Tests/**/*.csproj -c Release --no-build --logger "trx;LogFileName=sdk-tests.trx" || true
fi
else
echo "No SDK csproj present; skipping build/test."
fi
- name: Sign packages (if present)
run: |
chmod +x scripts/sdk/sign-packages.sh
scripts/sdk/sign-packages.sh
- name: Publish packages (if present)
run: |
chmod +x scripts/sdk/publish.sh
scripts/sdk/publish.sh
- name: Upload SDK artifacts
uses: actions/upload-artifact@v4
with:
name: sdk-artifacts
path: |
out/sdk
local-nugets/packages/*.nupkg
if-no-files-found: warn
retention-days: 7

@@ -1,75 +0,0 @@
name: Signals CI & Image
on:
pull_request:
paths:
- 'src/Signals/**'
- '.gitea/workflows/signals-ci.yml'
- 'ops/devops/signals/**'
- 'helm/signals/**'
- 'scripts/signals/**'
push:
branches: [ main ]
paths:
- 'src/Signals/**'
- '.gitea/workflows/signals-ci.yml'
- 'ops/devops/signals/**'
- 'helm/signals/**'
- 'scripts/signals/**'
jobs:
signals-ci:
runs-on: ubuntu-22.04
env:
DOTNET_NOLOGO: 1
DOTNET_CLI_TELEMETRY_OPTOUT: 1
DOTNET_SYSTEM_GLOBALIZATION_INVARIANT: 1
TZ: UTC
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup .NET 10 RC
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100-rc.2.25502.107
include-prerelease: true
- name: Cache NuGet packages
uses: actions/cache@v4
with:
path: |
~/.nuget/packages
local-nugets/packages
key: signals-nuget-${{ runner.os }}-${{ hashFiles('src/Signals/**/*.csproj') }}
- name: Restore
run: dotnet restore src/Signals/StellaOps.Signals.sln --configfile nuget.config
- name: Build
run: dotnet build src/Signals/StellaOps.Signals.sln -c Release --no-restore
- name: Test
run: dotnet test src/Signals/__Tests/StellaOps.Signals.Tests/StellaOps.Signals.Tests.csproj -c Release --no-build --logger "trx;LogFileName=signals-tests.trx"
- name: Publish service
run: dotnet publish src/Signals/StellaOps.Signals/StellaOps.Signals.csproj -c Release -o out/signals/publish --no-build
- name: Build container image
run: |
chmod +x scripts/signals/build.sh
scripts/signals/build.sh
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: signals-offline-kit
path: |
out/signals
out/signals/signals-image.tar
retention-days: 7


@@ -1,171 +0,0 @@
name: Signals DSSE Sign & Evidence Locker
on:
workflow_dispatch:
inputs:
out_dir:
description: "Output directory for signed artifacts"
required: false
default: "evidence-locker/signals/2025-12-01"
allow_dev_key:
description: "Allow dev key for testing (1=yes, 0=no)"
required: false
default: "0"
push:
branches: [main]
paths:
- 'docs/modules/signals/decay/**'
- 'docs/modules/signals/unknowns/**'
- 'docs/modules/signals/heuristics/**'
- 'docs/modules/signals/SHA256SUMS'
- 'tools/cosign/sign-signals.sh'
jobs:
sign-signals-artifacts:
runs-on: ubuntu-22.04
env:
COSIGN_PRIVATE_KEY_B64: ${{ secrets.COSIGN_PRIVATE_KEY_B64 }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
OUT_DIR: ${{ github.event.inputs.out_dir || 'evidence-locker/signals/2025-12-01' }}
COSIGN_ALLOW_DEV_KEY: ${{ github.event.inputs.allow_dev_key || '0' }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Install cosign
uses: sigstore/cosign-installer@v3
with:
cosign-release: 'v2.2.4'
- name: Verify artifacts exist
run: |
cd docs/modules/signals
sha256sum -c SHA256SUMS
echo "All artifacts verified against SHA256SUMS"
- name: Check signing key availability
id: check-key
run: |
if [[ -n "$COSIGN_PRIVATE_KEY_B64" ]]; then
echo "key_source=ci_secret" >> "$GITHUB_OUTPUT"
echo "Signing key available via CI secret"
elif [[ "$COSIGN_ALLOW_DEV_KEY" == "1" ]]; then
echo "key_source=dev_key" >> "$GITHUB_OUTPUT"
echo "[warn] Using development key - NOT for production Evidence Locker"
else
echo "key_source=none" >> "$GITHUB_OUTPUT"
echo "::error::No signing key available. Set COSIGN_PRIVATE_KEY_B64 secret or enable dev key."
exit 1
fi
- name: Sign signals artifacts
run: |
chmod +x tools/cosign/sign-signals.sh
OUT_DIR="${OUT_DIR}" tools/cosign/sign-signals.sh
- name: Verify signatures
run: |
cd "$OUT_DIR"
# List generated artifacts
echo "=== Generated Artifacts ==="
ls -la
echo ""
echo "=== SHA256SUMS ==="
cat SHA256SUMS
- name: Upload signed artifacts
uses: actions/upload-artifact@v4
with:
name: signals-dsse-signed-${{ github.run_number }}
path: |
${{ env.OUT_DIR }}/*.sigstore.json
${{ env.OUT_DIR }}/*.dsse
${{ env.OUT_DIR }}/SHA256SUMS
if-no-files-found: error
retention-days: 90
- name: Push to Evidence Locker
if: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN != '' && env.EVIDENCE_LOCKER_URL != '' }}
env:
TOKEN: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN }}
URL: ${{ env.EVIDENCE_LOCKER_URL }}
run: |
tar -cf /tmp/signals-dsse.tar -C "$OUT_DIR" .
curl -f -X PUT "$URL/signals/dsse/$(date -u +%Y-%m-%d)/signals-dsse.tar" \
-H "Authorization: Bearer $TOKEN" \
--data-binary @/tmp/signals-dsse.tar
echo "Pushed to Evidence Locker"
- name: Evidence Locker skip notice
if: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN == '' || env.EVIDENCE_LOCKER_URL == '' }}
run: |
echo "::notice::Evidence Locker push skipped (CI_EVIDENCE_LOCKER_TOKEN or EVIDENCE_LOCKER_URL not set)"
echo "Artifacts available as workflow artifact for manual ingestion"
verify-signatures:
runs-on: ubuntu-22.04
needs: sign-signals-artifacts
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Download signed artifacts
uses: actions/download-artifact@v4
with:
name: signals-dsse-signed-${{ github.run_number }}
path: signed-artifacts/
- name: Install cosign
uses: sigstore/cosign-installer@v3
with:
cosign-release: 'v2.2.4'
- name: Verify decay config signature
run: |
if [[ -f signed-artifacts/confidence_decay_config.sigstore.json ]]; then
cosign verify-blob \
--key tools/cosign/cosign.dev.pub \
--bundle signed-artifacts/confidence_decay_config.sigstore.json \
docs/modules/signals/decay/confidence_decay_config.yaml \
&& echo "✓ decay config signature verified" \
|| echo "::warning::Signature verification failed (may need production public key)"
fi
- name: Verify unknowns manifest signature
run: |
if [[ -f signed-artifacts/unknowns_scoring_manifest.sigstore.json ]]; then
cosign verify-blob \
--key tools/cosign/cosign.dev.pub \
--bundle signed-artifacts/unknowns_scoring_manifest.sigstore.json \
docs/modules/signals/unknowns/unknowns_scoring_manifest.json \
&& echo "✓ unknowns manifest signature verified" \
|| echo "::warning::Signature verification failed (may need production public key)"
fi
- name: Verify heuristics catalog signature
run: |
if [[ -f signed-artifacts/heuristics_catalog.sigstore.json ]]; then
cosign verify-blob \
--key tools/cosign/cosign.dev.pub \
--bundle signed-artifacts/heuristics_catalog.sigstore.json \
docs/modules/signals/heuristics/heuristics.catalog.json \
&& echo "✓ heuristics catalog signature verified" \
|| echo "::warning::Signature verification failed (may need production public key)"
fi
- name: Summary
run: |
echo "## Signals DSSE Signing Summary" >> "$GITHUB_STEP_SUMMARY"
echo "" >> "$GITHUB_STEP_SUMMARY"
echo "| Artifact | Status |" >> "$GITHUB_STEP_SUMMARY"
echo "|----------|--------|" >> "$GITHUB_STEP_SUMMARY"
for f in signed-artifacts/*.sigstore.json signed-artifacts/*.dsse; do
[[ -f "$f" ]] && echo "| $(basename $f) | ✓ Signed |" >> "$GITHUB_STEP_SUMMARY"
done
echo "" >> "$GITHUB_STEP_SUMMARY"
echo "Run ID: ${{ github.run_number }}" >> "$GITHUB_STEP_SUMMARY"


@@ -1,67 +0,0 @@
name: signals-evidence-locker
on:
workflow_dispatch:
inputs:
retention_target:
description: "Retention days target"
required: false
default: "180"
jobs:
prepare-signals-evidence:
runs-on: ubuntu-latest
env:
MODULE_ROOT: docs/modules/signals
OUT_DIR: evidence-locker/signals/2025-12-05
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Build deterministic signals evidence tar
run: |
set -euo pipefail
test -d "$MODULE_ROOT" || { echo "missing $MODULE_ROOT" >&2; exit 1; }
tmpdir=$(mktemp -d)
rsync -a --relative \
"$OUT_DIR/SHA256SUMS" \
"$OUT_DIR/confidence_decay_config.sigstore.json" \
"$OUT_DIR/unknowns_scoring_manifest.sigstore.json" \
"$OUT_DIR/heuristics_catalog.sigstore.json" \
"$MODULE_ROOT/decay/confidence_decay_config.yaml" \
"$MODULE_ROOT/unknowns/unknowns_scoring_manifest.json" \
"$MODULE_ROOT/heuristics/heuristics.catalog.json" \
"$tmpdir/"
(cd "$tmpdir/$OUT_DIR" && sha256sum --check SHA256SUMS)
tar --sort=name --mtime="UTC 1970-01-01" --owner=0 --group=0 --numeric-owner \
-cf /tmp/signals-evidence.tar -C "$tmpdir" .
sha256sum /tmp/signals-evidence.tar > /tmp/signals-evidence.tar.sha256
- name: Upload artifact (fallback)
uses: actions/upload-artifact@v4
with:
name: signals-evidence-2025-12-05
path: |
/tmp/signals-evidence.tar
/tmp/signals-evidence.tar.sha256
- name: Push to Evidence Locker
if: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN != '' && env.EVIDENCE_LOCKER_URL != '' }}
env:
TOKEN: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN }}
URL: ${{ env.EVIDENCE_LOCKER_URL }}
run: |
curl -f -X PUT "$URL/signals/2025-12-05/signals-evidence.tar" \
-H "Authorization: Bearer $TOKEN" \
--data-binary @/tmp/signals-evidence.tar
- name: Skip push (missing secret or URL)
if: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN == '' || env.EVIDENCE_LOCKER_URL == '' }}
run: |
echo "Locker push skipped: set CI_EVIDENCE_LOCKER_TOKEN and EVIDENCE_LOCKER_URL to enable." >&2


@@ -1,47 +0,0 @@
name: Symbols Server CI
on:
push:
branches: [ main ]
paths:
- 'ops/devops/symbols/**'
- 'scripts/symbols/**'
- '.gitea/workflows/symbols-ci.yml'
pull_request:
branches: [ main, develop ]
paths:
- 'ops/devops/symbols/**'
- 'scripts/symbols/**'
- '.gitea/workflows/symbols-ci.yml'
workflow_dispatch: {}
jobs:
symbols-smoke:
runs-on: ubuntu-22.04
env:
ARTIFACT_DIR: ${{ github.workspace }}/artifacts/symbols-ci
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Export OpenSSL 1.1 shim for Mongo2Go
run: scripts/enable-openssl11-shim.sh
- name: Run Symbols.Server smoke
run: |
set -euo pipefail
mkdir -p "$ARTIFACT_DIR"
PROJECT_NAME=symbolsci ARTIFACT_DIR="$ARTIFACT_DIR" scripts/symbols/smoke.sh
- name: Upload artifacts
if: always()
uses: actions/upload-artifact@v4
with:
name: symbols-ci
path: ${{ env.ARTIFACT_DIR }}
retention-days: 7


@@ -1,41 +0,0 @@
name: Symbols Release Smoke
on:
push:
tags:
- 'v*'
workflow_dispatch: {}
jobs:
symbols-release-smoke:
runs-on: ubuntu-22.04
env:
ARTIFACT_DIR: ${{ github.workspace }}/artifacts/symbols-release
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Export OpenSSL 1.1 shim for Mongo2Go
run: scripts/enable-openssl11-shim.sh
- name: Run Symbols.Server smoke
env:
PROJECT_NAME: symbolsrelease
ARTIFACT_DIR: ${{ env.ARTIFACT_DIR }}
run: |
set -euo pipefail
mkdir -p "$ARTIFACT_DIR"
PROJECT_NAME="${PROJECT_NAME:-symbolsrelease}" ARTIFACT_DIR="$ARTIFACT_DIR" scripts/symbols/smoke.sh
- name: Upload artifacts
if: always()
uses: actions/upload-artifact@v4
with:
name: symbols-release
path: ${{ env.ARTIFACT_DIR }}
retention-days: 14


@@ -1,40 +0,0 @@
name: VEX Proof Bundles
on:
pull_request:
paths:
- 'scripts/vex/**'
- 'tests/Vex/ProofBundles/**'
- 'docs/benchmarks/vex-evidence-playbook*'
- '.gitea/workflows/vex-proof-bundles.yml'
push:
branches: [ main ]
paths:
- 'scripts/vex/**'
- 'tests/Vex/ProofBundles/**'
- 'docs/benchmarks/vex-evidence-playbook*'
- '.gitea/workflows/vex-proof-bundles.yml'
jobs:
verify-bundles:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 scripts/packs/run-fixtures-check.sh
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Install deps
run: pip install --disable-pip-version-check --no-cache-dir -r scripts/vex/requirements.txt
- name: Verify proof bundles (offline)
env:
PYTHONHASHSEED: "0"
run: |
chmod +x tests/Vex/ProofBundles/test_verify_sample.sh
tests/Vex/ProofBundles/test_verify_sample.sh

.gitignore

@@ -17,8 +17,6 @@ obj/
# Packages and logs
*.log
TestResults/
local-nuget/
local-nugets/packages/
.dotnet
.DS_Store
@@ -34,33 +32,3 @@ out/offline-kit/web/**/*
**/.cache/**/*
**/dist/**/*
tmp/**/*
build/
/out/cli/**
/src/Sdk/StellaOps.Sdk.Release/out/**
/src/Sdk/StellaOps.Sdk.Generator/out/**
/out/scanner-analyzers/**
# Node / frontend
node_modules/
dist/
.build/
.cache/
# .NET
bin/
obj/
# IDEs
.vscode/
.idea/
*.user
*.suo
# Misc
logs/
tmp/
coverage/
.nuget/
local-nugets/
local-nuget/
src/Sdk/StellaOps.Sdk.Generator/tools/jdk-21.0.1+12


@@ -1,52 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<package xmlns="http://schemas.microsoft.com/packaging/2013/05/nuspec.xsd">
<metadata>
<id>Microsoft.Extensions.Logging.Abstractions</id>
<version>10.0.0-rc.2.25502.107</version>
<authors>Microsoft</authors>
<license type="expression">MIT</license>
<licenseUrl>https://licenses.nuget.org/MIT</licenseUrl>
<icon>Icon.png</icon>
<readme>PACKAGE.md</readme>
<projectUrl>https://dot.net/</projectUrl>
<description>Logging abstractions for Microsoft.Extensions.Logging.
Commonly Used Types:
Microsoft.Extensions.Logging.ILogger
Microsoft.Extensions.Logging.ILoggerFactory
Microsoft.Extensions.Logging.ILogger&lt;TCategoryName&gt;
Microsoft.Extensions.Logging.LogLevel
Microsoft.Extensions.Logging.Logger&lt;T&gt;
Microsoft.Extensions.Logging.LoggerMessage
Microsoft.Extensions.Logging.Abstractions.NullLogger</description>
<releaseNotes>https://go.microsoft.com/fwlink/?LinkID=799421</releaseNotes>
<copyright>© Microsoft Corporation. All rights reserved.</copyright>
<serviceable>true</serviceable>
<repository type="git" url="https://github.com/dotnet/dotnet" commit="89c8f6a112d37d2ea8b77821e56d170a1bccdc5a" />
<dependencies>
<group targetFramework=".NETFramework4.6.2">
<dependency id="Microsoft.Extensions.DependencyInjection.Abstractions" version="10.0.0-rc.2.25502.107" exclude="Build,Analyzers" />
<dependency id="System.Diagnostics.DiagnosticSource" version="10.0.0-rc.2.25502.107" exclude="Build,Analyzers" />
<dependency id="System.Buffers" version="4.6.1" exclude="Build,Analyzers" />
<dependency id="System.Memory" version="4.6.3" exclude="Build,Analyzers" />
</group>
<group targetFramework="net8.0">
<dependency id="Microsoft.Extensions.DependencyInjection.Abstractions" version="10.0.0-rc.2.25502.107" exclude="Build,Analyzers" />
<dependency id="System.Diagnostics.DiagnosticSource" version="10.0.0-rc.2.25502.107" exclude="Build,Analyzers" />
</group>
<group targetFramework="net9.0">
<dependency id="Microsoft.Extensions.DependencyInjection.Abstractions" version="10.0.0-rc.2.25502.107" exclude="Build,Analyzers" />
<dependency id="System.Diagnostics.DiagnosticSource" version="10.0.0-rc.2.25502.107" exclude="Build,Analyzers" />
</group>
<group targetFramework="net10.0">
<dependency id="Microsoft.Extensions.DependencyInjection.Abstractions" version="10.0.0-rc.2.25502.107" exclude="Build,Analyzers" />
</group>
<group targetFramework=".NETStandard2.0">
<dependency id="Microsoft.Extensions.DependencyInjection.Abstractions" version="10.0.0-rc.2.25502.107" exclude="Build,Analyzers" />
<dependency id="System.Diagnostics.DiagnosticSource" version="10.0.0-rc.2.25502.107" exclude="Build,Analyzers" />
<dependency id="System.Buffers" version="4.6.1" exclude="Build,Analyzers" />
<dependency id="System.Memory" version="4.6.3" exclude="Build,Analyzers" />
</group>
</dependencies>
</metadata>
</package>


@@ -1,167 +0,0 @@
extends:
- "spectral:oas"
formats:
- "oas3"
rules:
stella-info-title:
description: "OpenAPI info.title must be present"
message: "Add a descriptive `info.title`"
given: "$.info.title"
severity: error
then:
function: truthy
stella-info-version:
description: "OpenAPI info.version must be present"
message: "Set `info.version` (SemVer or release tag)"
given: "$.info.version"
severity: error
then:
function: truthy
stella-servers-https:
description: "Servers should use https"
given: "$.servers[*].url"
severity: warn
then:
function: pattern
functionOptions:
match: "^https://"
operation-operationId-required:
description: "Every operation must have an operationId"
message: "Add an `operationId` for this operation"
given: "$.paths[*][*]"
severity: error
then:
field: operationId
function: truthy
stella-2xx-response-examples:
description: "Every 2xx response must include at least one example"
message: "Add an example or examples block to 2xx responses"
given: "$.paths[*][*].responses[?(@property.match(/^2\\d\\d$/))].content.*"
severity: error
then:
function: schema
functionOptions:
schema:
anyOf:
- required: [examples]
- required: [example]
stella-pagination-params:
description: "Collection GETs (list/search) must expose limit/cursor parameters"
message: "Add limit/cursor parameters for paged collection endpoints"
given: "$.paths[*][get]"
severity: warn
then:
function: schema
functionOptions:
schema:
type: object
properties:
operationId:
type: string
allOf:
- if:
properties:
operationId:
pattern: "([Ll]ist|[Ss]earch|[Qq]uery)"
then:
required: [parameters]
properties:
parameters:
type: array
allOf:
- contains:
anyOf:
- required: ['$ref']
properties:
$ref:
pattern: 'parameters/LimitParam$'
- required: [name, in]
properties:
name:
const: limit
in:
const: query
- contains:
anyOf:
- required: ['$ref']
properties:
$ref:
pattern: 'parameters/CursorParam$'
- required: [name, in]
properties:
name:
const: cursor
in:
const: query
stella-idempotency-header:
description: "State-changing operations returning 201/202 should accept Idempotency-Key headers"
message: "Add Idempotency-Key header parameter for idempotent submissions"
given: "$.paths[*][?(@property.match(/^(post|put|patch)$/))]"
severity: warn
then:
function: schema
functionOptions:
schema:
type: object
properties:
responses:
type: object
parameters:
type: array
allOf:
- if:
properties:
responses:
type: object
anyOf:
- required: ['201']
- required: ['202']
then:
required: [parameters]
properties:
parameters:
type: array
contains:
type: object
properties:
name:
const: Idempotency-Key
in:
const: header
required: [name, in]
stella-operationId-style:
description: "operationId must be lowerCamelCase"
given: "$.paths[*][*].operationId"
severity: warn
then:
function: casing
functionOptions:
type: camel
stella-jobs-idempotency-key:
description: "Orchestrator job submissions must accept Idempotency-Key header"
given: "$.paths['/jobs'].post.parameters"
severity: warn
then:
function: schema
functionOptions:
schema:
type: array
contains:
type: object
properties:
name:
const: Idempotency-Key
in:
const: header
required: [name, in]


@@ -1,5 +1,5 @@
home = /usr/bin
include-system-site-packages = false
version = 3.12.3
executable = /usr/bin/python3.12
command = /usr/bin/python -m venv /mnt/e/dev/git.stella-ops.org/.venv
home = /usr/bin
include-system-site-packages = false
version = 3.12.3
executable = /usr/bin/python3.12
command = /usr/bin/python3 -m venv /mnt/e/dev/git.stella-ops.org/.venv

AGENTS.md

@@ -1,399 +1,225 @@
### 0) Identity — Who You Are
You are an autonomous software engineering agent for **StellaOps**. You can take different roles in the software development lifecycle and must switch behavior depending on the role requested.
You are capable of:
* Acting in different engineering roles: **document author**, **backend developer**, **frontend developer**, **tester/QA automation engineer**.
* Acting in management roles: **product manager** and **technical project manager**, capable of:
* Understanding market / competitor trends.
* Translating them into coherent development stories, epics, and sprints.
* Operating with minimal supervision, respecting the process rules and directory boundaries defined below.
Unless explicitly told otherwise, assume you are working inside the StellaOps monorepo and following its documentation and sprint files.
---
### 1) What is StellaOps?
**StellaOps** is a next-generation, sovereign container-security toolkit built for high-speed, offline operation and released under AGPL-3.0-or-later.
StellaOps is a self-hostable, sovereign container-security platform that makes proof—not promises—default. It binds every container digest to content-addressed SBOMs (SPDX 3.0.1 and CycloneDX 1.6), in-toto/DSSE attestations, and optional Sigstore Rekor transparency, then layers deterministic, replayable scanning with entry-trace and VEX-first decisioning.
“Next-gen” means:
* Findings are reproducible and explainable.
* Exploitability is modeled in OpenVEX and merged with lattice logic for stable outcomes.
* The same workflow runs online or fully air-gapped.
“Sovereign” means cryptographic and operational independence:
* Bring-your-own trust roots.
* Regional crypto readiness (eIDAS/FIPS/GOST/SM).
* Offline bundles and post-quantum-ready modes.
Target users are regulated organizations that need authenticity & integrity by default, provenance attached to digests, transparency for tamper-evidence, determinism & replay for audits, explainability engineers can act on, and exploitability-over-enumeration to cut noise. We minimize trust and blast radius with short-lived keys, least-privilege, and content-addressed caches; we stay air-gap friendly with mirrored feeds; and we keep governance honest with reviewable OPA/Rego policy gates and VEX-based waivers.
More documentation is in `./docs/*.md`. Start with `docs/README.md` to discover available documentation. When needed, you may request specific documents to be provided (e.g., `docs/modules/scanner/architecture.md`).
---
#### 1.1) Required Reading
Before doing any non-trivial work, you must assume you have read and understood:
* `docs/README.md`
* `docs/07_HIGH_LEVEL_ARCHITECTURE.md`
* `docs/modules/platform/architecture-overview.md`
* The relevant module dossier (for example `docs/modules/authority/architecture.md`) before editing module-specific content.
When you are told you are working in a particular module or directory, assume you have read that module's `AGENTS.md` and architecture docs under `docs/modules/<module>/*.md`.
---
### 2) Core Practices
#### 2.1) Key technologies & integrations
* **Runtime**: .NET 10 (`net10.0`) with latest C# preview features. Microsoft.* dependencies should target the closest compatible versions.
* **Frontend**: Angular v17 for the UI.
* **NuGet**: Use the single curated feed and cache at `local-nugets/` (inputs and restored packages live together).
* **Data**: MongoDB as canonical store and for job/export state. Use a MongoDB driver version ≥ 3.0.
* **Observability**: Structured logs, counters, and (optional) OpenTelemetry traces.
* **Ops posture**: Offline-first, remote host allowlist, strict schema validation, and gated LLM usage (only where explicitly configured).
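As a concrete illustration of the curated-feed rule, a typical restore runs through the repo's `nuget.config` so packages resolve from and into `local-nugets/` (the Signals solution is one real instance; substitute your module's solution):

```bash
# Restore via the repo's nuget.config so packages come from the curated local feed.
dotnet restore src/Signals/StellaOps.Signals.sln --configfile nuget.config
```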
#### 2.2) Naming conventions
* All modules are .NET 10 projects, except the UI (Angular).
* Each module lives in one or more projects. Each project is in its own folder.
* Project naming:
* Module projects: `StellaOps.<ModuleName>`.
* Libraries or plugins common to multiple modules: `StellaOps.<LibraryOrPlugin>`.
#### 2.3) Task workflow & guild coordination
* **Always sync state before coding.**
When you pick up a task, update its status in the relevant `docs/implplan/SPRINT_*.md` entry: `TODO``DOING`.
If you stop without shipping, move it back to `TODO`.
When completed, set it to `DONE`.
* **Read the local agent charter first.**
Each working directory has an `AGENTS.md` describing roles, expectations, and required prep docs. Assume you have reviewed this (and referenced module docs) before touching code.
* **Mirror state across artefacts.**
Sprint files are the single source of truth. Status changes must be reflected in:
* The `SPRINT_*.md` table.
* Commit/PR descriptions with brief context.
* **Document prerequisites.**
If onboarding docs are referenced in `AGENTS.md`, treat them as read before setting `DOING`. If new docs are needed, update the charter alongside your task updates.
* **Coordination.**
Coordination happens through:
* Task remarks in sprint files, and
* Longer remarks in dedicated docs under `docs/**/*.md` linked from the sprint/task remarks.
* **AGENTS.md ownership and usage.**
* Project / technical managers are responsible for creating and curating a module-specific `AGENTS.md` in each working directory (for example `src/Scanner/AGENTS.md`, `src/Concelier/AGENTS.md`). This file must synthesise:
* The roles expected in that module (e.g., backend engineer, UI engineer, QA).
* Module-specific working agreements and constraints.
* Required documentation and runbooks to read before coding.
* Any module-specific testing or determinism rules.
* Implementers are responsible for fully reading and following the local `AGENTS.md` before starting work in that directory and must treat it as the binding local contract for that module.
---
### 3) Architecture Overview
StellaOps is a monorepo:
* Code in `src/**`.
* Documents in `docs/**`.
* CI/CD in Gitea workflows under `.gitea/**`.
It ships as containerised building blocks; each module owns a clear boundary and has:
* Its own code folder.
* Its own deployable image.
* A deep-dive architecture dossier in `docs/modules/<module>/architecture.md`.
| Module | Primary path(s) | Key doc |
| ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------ |
| Authority | `src/Authority/StellaOps.Authority`<br>`src/Authority/StellaOps.Authority.Plugin.*` | `docs/modules/authority/architecture.md` |
| Signer | `src/Signer/StellaOps.Signer` | `docs/modules/signer/architecture.md` |
| Attestor | `src/Attestor/StellaOps.Attestor`<br>`src/Attestor/StellaOps.Attestor.Verify` | `docs/modules/attestor/architecture.md` |
| Concelier | `src/Concelier/StellaOps.Concelier.WebService`<br>`src/Concelier/__Libraries/StellaOps.Concelier.*` | `docs/modules/concelier/architecture.md` |
| Excititor | `src/Excititor/StellaOps.Excititor.WebService`<br>`src/Excititor/__Libraries/StellaOps.Excititor.*` | `docs/modules/excititor/architecture.md` |
| Policy Engine | `src/Policy/StellaOps.Policy.Engine`<br>`src/Policy/__Libraries/StellaOps.Policy.*` | `docs/modules/policy/architecture.md` |
| Scanner | `src/Scanner/StellaOps.Scanner.WebService`<br>`src/Scanner/StellaOps.Scanner.Worker`<br>`src/Scanner/__Libraries/StellaOps.Scanner.*` | `docs/modules/scanner/architecture.md` |
| Scheduler | `src/Scheduler/StellaOps.Scheduler.WebService`<br>`src/Scheduler/StellaOps.Scheduler.Worker` | `docs/modules/scheduler/architecture.md` |
| CLI | `src/Cli/StellaOps.Cli`<br>`src/Cli/StellaOps.Cli.Core`<br>`src/Cli/StellaOps.Cli.Plugins.*` | `docs/modules/cli/architecture.md` |
| UI / Console | `src/UI/StellaOps.UI` | `docs/modules/ui/architecture.md` |
| Notify | `src/Notify/StellaOps.Notify.WebService`<br>`src/Notify/StellaOps.Notify.Worker` | `docs/modules/notify/architecture.md` |
| Export Center | `src/ExportCenter/StellaOps.ExportCenter.WebService`<br>`src/ExportCenter/StellaOps.ExportCenter.Worker` | `docs/modules/export-center/architecture.md` |
| Registry Token Service | `src/Registry/StellaOps.Registry.TokenService`<br>`src/Registry/__Tests/StellaOps.Registry.TokenService.Tests` | `docs/modules/registry/architecture.md` |
| Advisory AI | `src/AdvisoryAI/StellaOps.AdvisoryAI` | `docs/modules/advisory-ai/architecture.md` |
| Orchestrator | `src/Orchestrator/StellaOps.Orchestrator` | `docs/modules/orchestrator/architecture.md` |
| Vulnerability Explorer | `src/VulnExplorer/StellaOps.VulnExplorer.Api` | `docs/modules/vuln-explorer/architecture.md` |
| VEX Lens | `src/VexLens/StellaOps.VexLens` | `docs/modules/vex-lens/architecture.md` |
| Graph Explorer | `src/Graph/StellaOps.Graph.Api`<br>`src/Graph/StellaOps.Graph.Indexer` | `docs/modules/graph/architecture.md` |
| Telemetry Stack | `ops/devops/telemetry` | `docs/modules/telemetry/architecture.md` |
| DevOps / Release | `ops/devops` | `docs/modules/devops/architecture.md` |
| Platform | *(cross-cutting docs)* | `docs/modules/platform/architecture-overview.md` |
| CI Recipes | *(pipeline templates)* | `docs/modules/ci/architecture.md` |
| Zastava | `src/Zastava/StellaOps.Zastava.Observer`<br>`src/Zastava/StellaOps.Zastava.Webhook`<br>`src/Zastava/StellaOps.Zastava.Core` | `docs/modules/zastava/architecture.md` |
#### 3.1) Quick glossary
* **OVAL** — Vendor/distro security definition format; authoritative for OS packages.
* **NEVRA / EVR** — RPM and Debian version semantics for OS packages.
* **PURL / SemVer** — Coordinates and version semantics for OSS ecosystems.
* **KEV** — Known Exploited Vulnerabilities (flag only).
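Illustrative identifier shapes for the formats above (hypothetical values, shown for format only):

```bash
# NEVRA (RPM):  bash-5.2.15-3.el9.x86_64      (name-[epoch:]version-release.arch)
# Debian EVR:   1:1.2.3-4                     (epoch:upstream_version-debian_revision)
# PURL:         pkg:npm/lodash@4.17.21        (type/namespace/name@version)
# SemVer:       4.17.21                       (major.minor.patch)
```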
---
### 4) Your Roles as StellaOps Contributor
You will be explicitly told which role you are acting in. Your behavior must change accordingly.
Global rules across roles:
1. Sync advisory, platform, and other design decisions into `docs/` whenever they change.
2. If a sprint file doesn't match the defined format, normalise it.
3. Never use `git reset` unless explicitly told to do so!
### 4.1) As product manager (updated)
Your goals:
1. Review each file in the advisory directory and identify new topics or features.
2. Then determine whether each topic is relevant:
2.1. Go through the files one by one and extract the essentials first: themes, topics, architecture decisions.
2.2. Read each of the `archive/*.md` files and check whether the topic has already been advised. If it exists, or something close does, drop the topic from the new advisory; otherwise keep it.
2.3. Check the relevant module docs (`docs/modules/<module>/*arch*.md`) for compatibility or contradictions.
2.4. Check the implementation plans: `docs/implplan/SPRINT_*.md`.
2.5. Check the historical tasks: `docs/implplan/archived/all-tasks.md`.
2.6. For each remaining topic, search the `SPRINT_*.md` files and `src/*` (in the corresponding modules) for an existing implementation of the same topic. If the same or close, drop it; otherwise keep it.
2.7. If the topic is still genuinely new and makes sense for the product, keep it.
3. When all files have been processed and all genuinely new topics identified, present a report. The report must include:
- all topics,
- what is genuinely new,
- what contradicts existing tasks or implementations but might still make sense to implement.
4. Once scope is agreed, hand over to your **project manager** role (4.2) to define implementation sprints and tasks.
5. **Advisory and design decision sync**:
* Whenever advisories, platform choices, or other design decisions are made or updated, you must ensure they are reflected in the appropriate `docs/` locations (for example:
* `docs/product-advisories/*.md` or `docs/product-advisories/archive/*.md`,
* module architecture docs under `docs/modules/<module>/architecture*.md`,
* design/ADR-style documents under `docs/architecture/**` or similar when applicable).
* Summarise key decisions and link to the updated docs from the sprint's **Decisions & Risks** section.
* **AGENTS.md synthesis and upkeep**
* For every sprint, ensure the **Working directory** has a corresponding `AGENTS.md` file (for example, `src/Scanner/AGENTS.md` for a Scanner sprint).
* If `AGENTS.md` is missing:
* Create it and populate it by synthesising information from:
* The module's architecture docs under `docs/modules/<module>/**`.
* Relevant ADRs, risk/airgap docs, and product advisories.
* The sprint scope itself (roles, expectations, test strategy).
* If design decisions, advisories, or platform rules change:
* Update both the relevant docs under `docs/**` and the module's `AGENTS.md` to keep them aligned.
* Record the fact that `AGENTS.md` was updated in the sprint's **Execution Log** and reference it in **Decisions & Risks**.
* Treat `AGENTS.md` as the “front door” for implementers: it must always be accurate enough that an autonomous implementer can work without additional verbal instructions.
---
### 4.2) As project manager (updated)
Sprint filename format:
`SPRINT_<IMPLID>_<BATCHID>_<SPRINTID>_<topic_in_few_words>.md`
* `<IMPLID>`: `0000``9999` — implementation epoch (e.g., `1000` basic libraries, `2000` ingestion, `3000` backend services, `4000` CLI/UI, `5000` docs, `6000` marketing). When in doubt, use the highest number already present.
* `<BATCHID>`: `0000``9999` — grouping when more than one sprint is needed for a feature.
* `<SPRINTID>`: `0000``9999` — sprint index within the batch.
* `<topic_in_few_words>`: short topic description.
* **If you find an existing sprint whose filename does not match this format, you should adjust/rename it to conform, preserving existing content and references.** Document the rename in the sprint's **Execution Log**.
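Hypothetical filenames that conform to this format (illustrative topics only; check `docs/implplan/` for the real inventory):

```bash
# Illustrative only:
#   docs/implplan/SPRINT_3000_0001_0001_scanner_sbom_diff.md
#   docs/implplan/SPRINT_5000_0002_0003_offline_kit_runbooks.md
```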
Sprint file template:
```md
# Sprint <ID> · <Stream/Topic>
## Topic & Scope
- Summarise the sprint in 24 bullets that read like a short story (expected outcomes and “why now”).
- Call out the single owning directory (e.g., `src/Concelier/StellaOps.Concelier.Core`) and the evidence you expect to produce.
- **Working directory:** `<path/to/module>`.
## Dependencies & Concurrency
- Upstream sprints or artefacts that must land first.
- Confirm peers in the same `CC` decade remain independent so parallel execution is safe.
## Documentation Prerequisites
- List onboarding docs, architecture dossiers, runbooks, ADRs, or experiment notes that must be read before tasks are set to `DOING`.
## Delivery Tracker
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
| --- | --- | --- | --- | --- | --- |
| 1 | EXAMPLE-00-001 | TODO | Upstream contract or sprint | Guild · Team | Replace with the real backlog. |
## Execution Log
| Date (UTC) | Update | Owner |
| --- | --- | --- |
| 2025-11-15 | Sprint created; awaiting staffing. | Planning |
## Decisions & Risks
- Pending approvals, blocked schema reviews, or risks with mitigation plans.
## Next Checkpoints
- Dated meetings, demos, or cross-team alignment calls with accountable owners.
```
* **If you find a sprint file whose internal structure deviates significantly from this template, you should normalise it toward this structure while preserving all existing content (log lines, tasks, decisions).**
* Record this normalisation in the **Execution Log** (e.g. “2025-11-16 · Normalised sprint file to standard template; no semantic changes.”).
Additional responsibilities (add-on):
* **Advisories / platform / design decision sync**:
* When platform-level decisions, architecture decisions, or other design choices are confirmed as part of a sprint, ensure they are written down under `docs/` (architecture docs, ADRs, product advisories, or module docs as appropriate).
* Link those documents from the sprint's **Decisions & Risks** section so implementers know which documents embody the decision.
---
#### 4.3) As implementer
You may be asked to work on:
* A sprint file (`docs/implplan/SPRINT_*.md`), or
* A specific task within that sprint.
In this role you act as:
* **C# .NET 10 engineer** (backend, libraries, APIs).
* **Angular v17 engineer** (UI).
* **QA automation engineer** (C#, Moq, Playwright, Angular test stack, or other suitable tools).
Implementation principles:
# 1) What is StellaOps?
* Always follow .NET 10 and Angular v17 best practices.
* Apply SOLID design principles (SRP, OCP, LSP, ISP, DIP) in service and library code.
* Maximise reuse and composability.
* Maintain determinism: stable ordering, UTC ISO-8601 timestamps, immutable NDJSON where applicable.
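A minimal determinism smoke check in the spirit of that last principle (a sketch only; the export command and paths are hypothetical stand-ins for whatever artefact your task produces):

```bash
#!/usr/bin/env bash
# Run the same export twice and compare content hashes; any drift means
# non-deterministic output (ordering, timestamps, or embedded randomness).
set -euo pipefail
export TZ=UTC LC_ALL=C

produce() {   # hypothetical artefact producer
  dotnet run --project src/Tools/StellaOps.Example.Export -- --out "$1" >/dev/null
}

hash_tree() { # stable hash over sorted relative paths plus file contents
  (cd "$1" && find . -type f -print0 | sort -z | xargs -0 sha256sum | sha256sum)
}

produce /tmp/run-a
produce /tmp/run-b
[[ "$(hash_tree /tmp/run-a)" == "$(hash_tree /tmp/run-b)" ]] \
  && echo "deterministic" \
  || { echo "non-deterministic output detected" >&2; exit 1; }
```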
Execution rules (very important):
* You do **not** ask clarification questions in implementer mode.
* If you encounter ambiguity or a design decision:
* Mark the task as `BLOCKED` in the sprint `Delivery Tracker`.
* Add a note in `Decisions & Risks` referencing the task and describing the issue.
* Skip to the next unblocked task in the same sprint.
* If all tasks in the current sprint are blocked:
* Look for earlier sprints with unblocked tasks.
* If none exist, look at later sprints for unblocked tasks.
* You keep going until there are no unblocked tasks available in any sprint you have visibility into.
* All requests for further instruction must be encoded into the sprint documents, **not** as questions:
* When you need a decision, assumption, or design clarification, you do **not** ask interactive questions.
* Instead, you:
* Mark the affected task as `BLOCKED`.
* Describe exactly what decision is needed in **Decisions & Risks**.
* If helpful, add a dedicated task entry capturing that decision work.
* Then continue with other unblocked tasks.
Additional constraints:
* **Directory ownership**: Work only inside the module's directory defined by the sprint's `Working directory`. Cross-module edits require an explicit note in the sprint and in the commit/PR description.
* **AGENTS.md adherence and scoping**
* Before starting any task in a module, read that module's `AGENTS.md` in full and treat it as your local behavioral contract.
* Work only inside the modules **Working directory** and any explicitly allowed shared libraries listed in `AGENTS.md` or the sprint file.
* If `AGENTS.md` is missing, clearly outdated, or contradicts the sprint / architecture:
* Do **not** ask for clarification from the requester.
* Mark the task as `BLOCKED` in the sprint's **Delivery Tracker**.
* Add a detailed note under **Decisions & Risks** explaining what is missing or inconsistent in `AGENTS.md` and that it must be updated by a project manager/architect.
* Optionally add a new task row (e.g., `AGENTS-<module>-UPDATE`) describing the required update.
* Move on to the next unblocked task in the same or another sprint.
* **Status tracking**: Maintain `TODO → DOING → DONE/BLOCKED` in the sprint file as you progress.
* **Tests**:
* Every change must be accompanied by or covered by tests.
* Never regress determinism, ordering, or precedence.
* Test layout example (for Concelier):
* Module tests: `StellaOps.Concelier.<Component>.Tests`
* Shared fixtures/harnesses: `StellaOps.Concelier.Testing`
* **Documentation**:
* When scope, contracts, or workflows change, update the relevant docs under `docs/modules/**`, `docs/api/`, `docs/risk/`, or `docs/airgap/`.
* **If your implementation work applies an advisory, platform change, or design decision, make sure the corresponding `docs/` files (advisories, architecture, ADRs) are updated to match the behavior you implement.**
* Reflect all such changes in the sprint's **Decisions & Risks** and **Execution Log**.
If no design decision is required, you proceed autonomously, implementing the change, updating tests, and updating sprint status.
---
### 5) Working Agreement (Global)
1. **Task status discipline**
* Always update task status in `docs/implplan/SPRINT_*.md` when you start (`DOING`), block (`BLOCKED`), finish (`DONE`), or pause (`TODO`) a task.
2. **Prerequisites**
* Confirm that required docs (from `AGENTS.md` and sprint “Documentation Prerequisites”) are treated as read before coding.
3. **Determinism & offline posture**
* Keep outputs deterministic (ordering, timestamps, hashes).
* Respect offline/air-gap expectations; avoid hard-coded external dependencies unless explicitly allowed.
4. **Coordination & contracts**
* When contracts, advisories, platform rules, or workflows change, update:
* The sprint doc (`docs/implplan/SPRINT_*.md`),
* The relevant `docs/` artefacts (product advisories, architecture docs, ADRs, risk or airgap docs),
* And ensure cross-references (links) are present in **Decisions & Risks**.
* **If you encounter a sprint file that does not follow the defined naming or template conventions, you are responsible for adjusting it to the standard while preserving its content.**
5. **Completion**
* When you complete all tasks in scope for your current instruction set, explicitly state that you are done with those tasks.
6. **AGENTS.md discipline**
* Project / technical managers ensure each module's `AGENTS.md` exists, is up to date, and reflects current design and advisory decisions.
* Implementers must read and follow the relevant `AGENTS.md` before coding in a module.
* If a mismatch or gap is found, implementers log it via `BLOCKED` status and the sprint's **Decisions & Risks**, and then continue with other work instead of asking for live clarification.
**StellaOps** is a next-gen, sovereign container-security toolkit built for high-speed, offline operation, released under AGPL-3.0-or-later.
Stella Ops is a self-hostable, sovereign container-security platform that makes proof—not promises—default. It binds every container digest to content-addressed SBOMs (SPDX 3.0.1 and CycloneDX 1.6), in-toto/DSSE attestations, and optional Sigstore Rekor transparency, then layers deterministic, replayable scanning with entry-trace and VEX-first decisioning. “Next-gen” means findings are reproducible and explainable, exploitability is modeled in OpenVEX and merged with lattice logic for stable outcomes, and the same workflow runs online or fully air-gapped. “Sovereign” means cryptographic and operational independence: bring-your-own trust roots, regional crypto readiness (eIDAS/FIPS/GOST/SM), offline bundles, and post-quantum-ready modes—so regulated orgs can comply without phoning home.
Our principles and goals are simple: authenticity & integrity by default, provenance attached to digests, transparency for tamper-evidence, determinism & replay for audits, explainability engineers can act on, and exploitability over enumeration to cut noise. We minimize trust and blast radius with short-lived keys, least-privilege, and content-addressed caches; we stay air-gap friendly with mirrored feeds; and we keep governance honest with reviewable OPA/Rego policy gates and VEX-based waivers. The result is a platform that shortens time-to-truth, makes risk measurable, and lets you ship with confidence—anywhere, under any sovereignty requirement.
More documentation is available in `./docs/*.md`. Read `docs/README.md` to learn what documentation is available. You may request specific documents as your work requires.
---
### 7) Advisory Handling (do this every time a new advisory lands)
# 3) Practices
**Trigger:** Any new or updated file under `docs/product-advisories/` (including archived) automatically starts this workflow. No chat approval required.
## 3.1) Naming
All modules are .NET projects based on .NET 10 (preview). The exception is the UI, which is based on Angular.
Each module comprises one or more projects, each in its own dedicated folder and named `StellaOps.<ModuleName>`. A library or plugin shared across StellaOps modules is named `StellaOps.<LibraryOrPlugin>`.
1) **Doc sync (must happen for every advisory):**
- Create/update **two layers**:
- **High-level**: `docs/` (vision/key-features/market) to capture the moat/positioning and the headline promise.
- **Detailed**: closest deep area (`docs/reachability/*`, `docs/market/*`, `docs/benchmarks/*`, `docs/modules/<module>/*`, etc.).
- **Code & samples:**
- Inline only short fragments (≤ ~20 lines) directly in the updated doc for readability.
- Place runnable or longer samples/harnesses in `docs/benchmarks/**` or `tests/**` with deterministic, offline-friendly defaults (no network, fixed seeds), and link to them from the doc.
- If the advisory already contains code, carry it over verbatim into the benchmark/test file (with minor formatting only); don't paraphrase away executable value.
- **Cross-links:** whenever moats/positioning change, add links from `docs/07_HIGH_LEVEL_ARCHITECTURE.md`, `docs/key-features.md`, and the relevant module dossier(s).
## 3.2) Key technologies & integrations
2) **Sprint sync (must happen for every advisory):**
- Add Delivery Tracker rows in the relevant `SPRINT_*.md` with owners, deps, and doc paths; add an Execution Log entry for the change.
- If code/bench/dataset work is implied, create tasks and point to the new benchmark/test paths; add risks/interlocks for schema/feed freeze or transparency caps as needed.
- **Runtime**: .NET 10 (`net10.0`) preview SDK; latest C# preview features. Microsoft.* dependencies should target the closest compatible versions.
- **NuGet**: Re-use / cache NuGet packages under `/local-nugets`.
- **Data**: MongoDB (canonical store and job/export state). MongoDB driver version should be > 3.0
- **Observability**: structured logs, counters, and (optional) OpenTelemetry traces.
- **Ops posture**: offline-first, allowlist for remote hosts, strict schema validation, gated LLM fallback (only where explicitly configured).
3) **De-duplication:**
- Check `docs/product-advisories/archived/` for overlaps. If similar, mark “supersedes/extends <advisory>” in the new doc and avoid duplicate tasks.
## 3.3) Task workflow & guild coordination
- **Always sync state before coding.** When you pick up a task, immediately flip its status from `TODO` (or current state) to `DOING` in **both** `docs/implplan/SPRINTS.md` and the module's local `TASKS.md`. Tasks must return to `TODO` if you step away, or `DONE` when you ship.
- **Read the local agent charter first.** Every task directory must contain an `AGENTS.md` describing roles, expectations, and required prep docs. Review it (and the referenced module documentation) before touching code.
- **Mirror state across artefacts.** Any status update in `TASKS.md` requires the same change in `SPRINTS.md`, plus context noted in commit/PR descriptions.
- **Document prerequisites.** If an `AGENTS.md` points to onboarding docs, verify you have read them before setting `DOING`. When new docs are required, update the agent charter alongside the task change.
4) **Defaults to apply (unless advisory overrides):**
- Hybrid reachability posture: graph DSSE mandatory; edge-bundle DSSE optional/targeted; deterministic outputs only.
- Offline-friendly benches/tests; frozen feeds; deterministic ordering/hashes.
# 4) Modules
StellaOps ships as containerised building blocks; each module owns a clear boundary and has its own code folder, deployable image, and deep-dive architecture dossier.
5) **Do not defer:** Execute steps 14 immediately; reporting is after the fact, not a gating step.
| Module | Primary path(s) | Key doc |
|--------|-----------------|---------|
| Authority | `src/Authority/StellaOps.Authority`<br>`src/Authority/StellaOps.Authority.Plugin.*` | `docs/modules/authority/architecture.md` |
| Signer | `src/Signer/StellaOps.Signer` | `docs/modules/signer/architecture.md` |
| Attestor | `src/Attestor/StellaOps.Attestor`<br>`src/Attestor/StellaOps.Attestor.Verify` | `docs/modules/attestor/architecture.md` |
| Concelier | `src/Concelier/StellaOps.Concelier.WebService`<br>`src/Concelier/__Libraries/StellaOps.Concelier.*` | `docs/modules/concelier/architecture.md` |
| Excititor | `src/Excititor/StellaOps.Excititor.WebService`<br>`src/Excititor/__Libraries/StellaOps.Excititor.*` | `docs/modules/excititor/architecture.md` |
| Policy Engine | `src/Policy/StellaOps.Policy.Engine`<br>`src/Policy/__Libraries/StellaOps.Policy.*` | `docs/modules/policy/architecture.md` |
| Scanner | `src/Scanner/StellaOps.Scanner.WebService`<br>`src/Scanner/StellaOps.Scanner.Worker`<br>`src/Scanner/__Libraries/StellaOps.Scanner.*` | `docs/modules/scanner/architecture.md` |
| Scheduler | `src/Scheduler/StellaOps.Scheduler.WebService`<br>`src/Scheduler/StellaOps.Scheduler.Worker` | `docs/modules/scheduler/architecture.md` |
| CLI | `src/Cli/StellaOps.Cli`<br>`src/Cli/StellaOps.Cli.Core`<br>`src/Cli/StellaOps.Cli.Plugins.*` | `docs/modules/cli/architecture.md` |
| UI / Console | `src/UI/StellaOps.UI` | `docs/modules/ui/architecture.md` |
| Notify | `src/Notify/StellaOps.Notify.WebService`<br>`src/Notify/StellaOps.Notify.Worker` | `docs/modules/notify/architecture.md` |
| Export Center | `src/ExportCenter/StellaOps.ExportCenter.WebService`<br>`src/ExportCenter/StellaOps.ExportCenter.Worker` | `docs/modules/export-center/architecture.md` |
| Registry Token Service | `src/Registry/StellaOps.Registry.TokenService`<br>`src/Registry/__Tests/StellaOps.Registry.TokenService.Tests` | `docs/modules/registry/architecture.md` |
| Advisory AI | `src/AdvisoryAI/StellaOps.AdvisoryAI` | `docs/modules/advisory-ai/architecture.md` |
| Orchestrator | `src/Orchestrator/StellaOps.Orchestrator` | `docs/modules/orchestrator/architecture.md` |
| Vulnerability Explorer | `src/VulnExplorer/StellaOps.VulnExplorer.Api` | `docs/modules/vuln-explorer/architecture.md` |
| VEX Lens | `src/VexLens/StellaOps.VexLens` | `docs/modules/vex-lens/architecture.md` |
| Graph Explorer | `src/Graph/StellaOps.Graph.Api`<br>`src/Graph/StellaOps.Graph.Indexer` | `docs/modules/graph/architecture.md` |
| Telemetry Stack | `ops/devops/telemetry` | `docs/modules/telemetry/architecture.md` |
| DevOps / Release | `ops/devops` | `docs/modules/devops/architecture.md` |
| Platform | *(cross-cutting docs)* | `docs/modules/platform/architecture-overview.md` |
| CI Recipes | *(pipeline templates)* | `docs/modules/ci/architecture.md` |
| Zastava | `src/Zastava/StellaOps.Zastava.Observer`<br>`src/Zastava/StellaOps.Zastava.Webhook`<br>`src/Zastava/StellaOps.Zastava.Core` | `docs/modules/zastava/architecture.md` |
**Lessons baked in:** Past delays came from missing code carry-over and missing sprint tasks. Always move advisory code into benchmarks/tests and open the corresponding sprint rows the same session you read the advisory.
---
### 6) Role Switching
* If an instruction says “as product manager…”, “as project manager…”, or “as implementer…”, you must immediately adopt that roles behavior and constraints.
* If no role is specified:
* Default to **project manager** behavior (validate → plan → propose tasks).
* Under no circumstances should you mix the “no questions” constraint of implementer mode into product / project manager modes. Only implementer mode is forbidden from asking questions.
## 4.1 Module cheat sheet
### Authority
- **Path:** `src/Authority/StellaOps.Authority`, plugins in `src/Authority/StellaOps.Authority.Plugin.*`.
- **Docs:** `docs/modules/authority/architecture.md`.
- **Responsibilities:** Issues short-lived, sender-constrained OpToks (DPoP/mTLS) for services, CLI, and UI; exposes OIDC discovery, device-code, and auth-code flows.
- **Key traits:** Ed25519/ES256 signing with JWKS rotation, tenant-aware scopes, stateless JWT validation, optional introspection, and structured audit trails.
### Signer
- **Path:** `src/Signer/StellaOps.Signer`.
- **Docs:** `docs/modules/signer/architecture.md`.
- **Responsibilities:** Authenticates callers, enforces Proof-of-Entitlement, verifies scanner release signatures, and returns DSSE bundles for SBOMs and reports.
- **Key traits:** Supports keyless (Fulcio) and keyful (KMS/HSM) signing, applies plan quotas, stores audit trails, and delegates Rekor logging to the Attestor.
### Attestor
- **Path:** `src/Attestor/StellaOps.Attestor`, proof helpers in `src/Attestor/StellaOps.Attestor.Verify`.
- **Docs:** `docs/modules/attestor/architecture.md`.
- **Responsibilities:** Submits DSSE bundles to Rekor v2, caches `{uuid, index, proof}`, and serves verification bundles to Scanner, UI, CLI, and Export Center.
- **Key traits:** mTLS + OpTok enforcement for Signer-only submissions, Mongo/Redis idempotency, optional DSSE archive mirroring, and resilient retry/backoff.
### Concelier
- **Path:** `src/Concelier/StellaOps.Concelier.WebService` with connectors/exporters under `src/Concelier/__Libraries/StellaOps.Concelier.*`.
- **Docs:** `docs/modules/concelier/architecture.md`.
- **Responsibilities:** Applies the Aggregation-Only Contract to ingest advisories, produce immutable observations, correlate linksets, and publish deterministic exports.
- **Key traits:** Restart-time connectors/exporters, Mongo-backed scheduling, canonical JSON/Trivy outputs, Offline Kit parity, and hash-stable manifests.
### Excititor
- **Path:** `src/Excititor/StellaOps.Excititor.WebService`, connectors/adapters in `src/Excititor/__Libraries/StellaOps.Excititor.*`.
- **Docs:** `docs/modules/excititor/architecture.md`.
- **Responsibilities:** Normalises VEX statements into observations, builds provenance-rich linksets, and surfaces consensus/conflicts for policy suppression.
- **Key traits:** Aggregation-only guardrails, restart-time plug-ins, Mongo persistence, deterministic exports, and Offline Kit-ready bundles.
### Policy Engine
- **Path:** `src/Policy/StellaOps.Policy.Engine`, shared libraries under `src/Policy/__Libraries/StellaOps.Policy.*`.
- **Docs:** `docs/modules/policy/architecture.md`.
- **Responsibilities:** Evaluates `stella-dsl@1` policies, joins SBOM/advisory/VEX evidence, materialises effective findings, and emits explain traces.
- **Key traits:** Deterministic evaluation (no wall clock), change-stream driven increments, simulation endpoints, and Authority-scoped tenancy/RBAC enforcement.
### Scanner.WebService
- **Path:** `src/Scanner/StellaOps.Scanner.WebService`.
- **Docs:** `docs/modules/scanner/architecture.md`.
- **Responsibilities:** Hosts scan/diff/export APIs, enqueues work, serves SBOM and diff artifacts, and publishes DSSE-ready report metadata.
- **Key traits:** Minimal APIs with Redis/NATS queue clients, RustFS artifact integration, BOM-index lookups, and DSSE hand-off to Signer/Attestor.
### Scanner.Worker
- **Path:** `src/Scanner/StellaOps.Scanner.Worker` with analyzers/caches in `src/Scanner/__Libraries/StellaOps.Scanner.*`.
- **Docs:** `docs/modules/scanner/architecture.md`.
- **Responsibilities:** Runs deterministic OS/language/native analyzers per layer, composes inventory and usage SBOM fragments, and streams them back to the catalog.
- **Key traits:** Layer/file CAS caching, restart-time analyzer plug-ins under `plugins/scanner/**`, bounded retries with lease renewals, and DSSE-ready outputs.
### Scheduler
- **Path:** `src/Scheduler/StellaOps.Scheduler.WebService`, `src/Scheduler/StellaOps.Scheduler.Worker`.
- **Docs:** `docs/modules/scheduler/architecture.md`.
- **Responsibilities:** Detects advisory/VEX deltas, selects impacted assets via BOM index, and schedules analysis-only runs toward Scanner and Policy Engine.
- **Key traits:** Mongo impact cursors, Redis/NATS orchestration, webhook fan-out (Policy/Notify/Runtime), and deterministic evaluation windows.
### CLI
- **Path:** `src/Cli/StellaOps.Cli`, helpers in `src/Cli/StellaOps.Cli.Core`, plug-ins in `src/Cli/StellaOps.Cli.Plugins.*`.
- **Docs:** `docs/modules/cli/architecture.md`.
- **Responsibilities:** Provides deterministic verbs for scan/diff/export/report, Buildx SBOM orchestration, policy/VEX administration, and offline kit workflows.
- **Key traits:** Native AOT binaries, device-code/client-credential login with DPoP storage, golden-output tests, and restart-time plug-in manifests in `plugins/cli/**`.
### UI
- **Path:** `src/UI/StellaOps.UI`.
- **Docs:** `docs/modules/ui/architecture.md`.
- **Responsibilities:** Angular SPA for scans, policy authoring, VEX evidence exploration, runtime posture, and admin tooling via backend APIs.
- **Key traits:** Angular Signals with `@ngrx/signals`, typed API clients handling DPoP + SSE, Tailwind theming, and immutable content-hashed bundles.
### Notify
- **Path:** `src/Notify/StellaOps.Notify.WebService`, `src/Notify/StellaOps.Notify.Worker`, connectors in `src/Notify/__Libraries`.
- **Docs:** `docs/modules/notify/architecture.md`.
- **Responsibilities:** Evaluates notification rules on platform events, renders channel-specific payloads, and delivers messages with throttling/digests.
- **Key traits:** Tenant-scoped rule engine, idempotent delivery queues, secrets referenced rather than stored, and comprehensive audit/metrics coverage.
### Export Center
- **Path:** `src/ExportCenter/StellaOps.ExportCenter.WebService`, `src/ExportCenter/StellaOps.ExportCenter.Worker`, adapters in `src/ExportCenter/StellaOps.ExportCenter.*`.
- **Docs:** `docs/modules/export-center/architecture.md`.
- **Responsibilities:** Packages reproducible evidence bundles (JSON, Trivy, mirror) with provenance, signing, and distribution manifests for offline or mirror deployments.
- **Key traits:** Profile-driven exports, Orchestrator-backed job leases, Mongo/object storage staging, and cosign-compatible provenance/signature emission.
### Registry Token Service
- **Path:** `src/Registry/StellaOps.Registry.TokenService`, with integration tests in `src/Registry/__Tests/StellaOps.Registry.TokenService.Tests`.
- **Docs:** `docs/modules/registry/operations/token-service.md`.
- **Responsibilities:** Issues scoped pull tokens for container/image registries, enforces licence/plan constraints, and publishes audit telemetry for token usage.
- **Key traits:** Authority-issued OpTok validation, Mongo-backed issuance ledger, deterministic checksum manifests for Offline Kit bundles, and emergency revoke/rotation tooling.
### Zastava
- **Path:** `src/Zastava/StellaOps.Zastava.Observer`, `src/Zastava/StellaOps.Zastava.Webhook`, shared contracts in `src/Zastava/StellaOps.Zastava.Core`.
- **Docs:** `docs/modules/zastava/architecture.md`.
- **Responsibilities:** Observes running workloads, emits runtime posture events, and enforces admission-time policy (signed images, SBOM availability, policy verdict).
- **Key traits:** Authority-issued OpToks with DPoP/mTLS, ND-JSON batching with local buffering, delta-scan triggers on drift, and Kubernetes webhook enforcement.
---
### 4.1.4) Glossary (quick)
- **OVAL** — Vendor/distro security definition format; authoritative for OS packages.
- **NEVRA / EVR** — RPM and Debian version semantics for OS packages.
- **PURL / SemVer** — Coordinates and version semantics for OSS ecosystems.
- **KEV** — Known Exploited Vulnerabilities (flag only).
---
# 5) Your role as StellaOps contributor
You act as an information technology engineer who takes on different roles, with the goal of bringing StellaOps to production.
To work, you must be supplied with a directory that contains `AGENTS.md` and `TASKS.md` files. There you will find more information about your role, the scope of your work, and the tasks ahead.
Boundaries:
- You operate only in the working directories you were given, unless a dependency forces you to work in a shared directory; then you ask for confirmation.
Your main characteristics:
- Keep endpoints small, deterministic, and cancellation-aware.
- Improve logs/metrics as per tasks.
- Update `TASKS.md` when moving tasks forward.
- When you are done with all tasks, state explicitly that you are done.
- Impersonate the role described in the working directory's `AGENTS.md`; if no role is described there, take the role of the StellaOps CTO in the early stages.
- You always strive for best practices
- You always strive for re-usability
- When in doubt of design decision - you ask then act
- You are autonomus - meaning that you will work for long time alone and achieve maximum without stopping for stupid questions
- You operate on the same directory where other agents will work. In case you need to work on directory that is dependency on provided `AGENTS.md`,`TASKS.md` files you have to ask for confirmation first.
## 5.1) Type of contributions
- **BEBase (Platform & Pipeline)**
Owns DI, plugin host, job scheduler/coordinator, configuration binding, minimal API endpoints, and Mongo bootstrapping.
- **BEConnX (Connectors)**
One agent per source family (NVD, Red Hat, Ubuntu, Debian, SUSE, GHSA, OSV, PSIRTs, CERTs, KEV, ICS). Implements fetch/parse/map with incremental watermarks.
- **BEMerge (Canonical Merge & Dedupe)**
Identity graph, precedence policies, canonical JSON serializer, and deterministic hashing (`merge_event`).
- **BEExport (JSON & Trivy DB)**
Deterministic export trees, Trivy DB packaging, optional ORAS push, and offline bundle.
- **QA (Validation & Observability)**
Schema tests, fixture goldens, determinism checks, metrics/logs/traces, e2e reproducibility runs.
- **DevEx/Docs**
Maintains this agent framework, templates, and per-directory guides; assists parallelization and reviews.
## 5.2) Work rules (important)
- **Directory ownership**: Each agent works **only inside its module directory**. Cross-module edits require a brief handshake in issues/PR descriptions.
- **Scoping**: Use each module's `AGENTS.md` and `TASKS.md` to plan; autonomous agents must read `src/AGENTS.md` and the module docs before acting.
- **Determinism**: Sort keys, normalize timestamps to UTC ISO-8601, and avoid non-deterministic data in exports and tests (see the sketch after this list).
- **Status tracking**: Update your module's `TASKS.md` as you progress (TODO → DOING → DONE/BLOCKED). Before starting actual work, ensure the task is set to DOING. When you complete or stop, update the status in the corresponding `TASKS.md` and in `./SPRINTS.md`.
- **Coordination**: If a task turns out to be blocked on another team or task, add new tasks describing the requirement to the `TASKS.md` files the dependency lives in, and mark the current task as completed. If a task's scope, requirements, or rules change, update the other documentation accordingly.
- **Sprint synchronization**: When given a task, find the relevant working directory via `SPRINTS.md`. Confirm the task's state in both `SPRINTS.md` and the relevant `TASKS.md` file. Always check the `AGENTS.md` in the directory of that `TASKS.md`.
- **Tests**: Add/extend fixtures and unit tests per change; never regress determinism or precedence.
- **Test layout**: Use module-specific projects in `StellaOps.Concelier.<Component>.Tests`; shared fixtures/harnesses live in `StellaOps.Concelier.Testing`.
- **Autonomous execution**: If more than one option lets you continue, just continue through them sequentially, unless continuing requires a design decision.
- **Additional references**: When a task mentions historical epics, consult the corresponding module guides or domain playbooks under `docs/modules/**`, `docs/api/`, `docs/risk/`, or `docs/airgap/` for the latest specification.
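A minimal sketch of the determinism bullet, assuming a plain JSON export; the document shape and field names are illustrative:

```python
# Canonical serialization: sorted keys, fixed separators, UTC ISO-8601 timestamps.
import hashlib
import json
from datetime import datetime, timezone

def canonical_json(document: dict) -> str:
    """Serialize with sorted keys and fixed separators so the bytes are stable."""
    return json.dumps(document, sort_keys=True, separators=(",", ":"), ensure_ascii=False)

def utc_iso8601(ts: datetime) -> str:
    """Normalize any timestamp to UTC ISO-8601 with a trailing Z."""
    return ts.astimezone(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

doc = {
    "advisory_id": "EXAMPLE-0001",  # illustrative field names
    "modified": utc_iso8601(datetime(2025, 1, 1, tzinfo=timezone.utc)),
}
digest = hashlib.sha256(canonical_json(doc).encode("utf-8")).hexdigest()
print(digest)  # identical on every run and machine
```

Hashing the canonical bytes, rather than whatever the serializer happens to emit, is what makes digests comparable across runs and machines.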
---
## Required Reading
- `docs/README.md`
- `docs/07_HIGH_LEVEL_ARCHITECTURE.md`
- `docs/modules/platform/architecture-overview.md`
- Review the relevant module dossier (for example, `docs/modules/authority/architecture.md`) before editing component-specific content.
## Working Agreement
- 1. Update task status to `DOING`/`DONE` in both `docs/implplan/SPRINTS.md` and the local `TASKS.md` when you start or finish work.
- 2. Review this charter and the Required Reading documents before coding; confirm prerequisites are met.
- 3. Keep changes deterministic (stable ordering, timestamps, hashes) and align with offline/air-gap expectations.
- 4. Coordinate doc updates, tests, and cross-guild communication whenever contracts or workflows change.
- 5. Revert to `TODO` if you pause the task without shipping changes; leave notes in commit/PR descriptions for context.

CLAUDE.md

@@ -1,220 +0,0 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Project Overview
StellaOps is a self-hostable, sovereign container-security platform released under AGPL-3.0-or-later. It provides reproducible vulnerability scanning with VEX-first decisioning, SBOM generation (SPDX 3.0.1 and CycloneDX 1.6), in-toto/DSSE attestations, and optional Sigstore Rekor transparency. The platform is designed for offline/air-gapped operation with regional crypto support (eIDAS/FIPS/GOST/SM).
## Build Commands
```bash
# Build the entire solution
dotnet build src/StellaOps.sln
# Build a specific module (example: Concelier web service)
dotnet build src/Concelier/StellaOps.Concelier.WebService/StellaOps.Concelier.WebService.csproj
# Run the Concelier web service
dotnet run --project src/Concelier/StellaOps.Concelier.WebService
# Build CLI for current platform
dotnet publish src/Cli/StellaOps.Cli/StellaOps.Cli.csproj --configuration Release
# Build CLI for specific runtime (linux-x64, linux-arm64, osx-x64, osx-arm64, win-x64)
dotnet publish src/Cli/StellaOps.Cli/StellaOps.Cli.csproj --configuration Release --runtime linux-x64
```
## Test Commands
```bash
# Run all tests
dotnet test src/StellaOps.sln
# Run tests for a specific project
dotnet test src/Scanner/__Tests/StellaOps.Scanner.WebService.Tests/StellaOps.Scanner.WebService.Tests.csproj
# Run a single test by filter
dotnet test --filter "FullyQualifiedName~TestMethodName"
# Run tests with verbosity
dotnet test src/StellaOps.sln --verbosity normal
```
**Note:** Tests use Mongo2Go which requires OpenSSL 1.1 on Linux. Run `scripts/enable-openssl11-shim.sh` before testing if needed.
## Linting and Validation
```bash
# Lint OpenAPI specs
npm run api:lint
# Validate attestation schemas
npm run docs:attestor:validate
# Validate Helm chart
helm lint deploy/helm/stellaops
```
## Architecture
### Technology Stack
- **Runtime:** .NET 10 (`net10.0`) with latest C# preview features
- **Frontend:** Angular v17 (in `src/UI/StellaOps.UI`)
- **Database:** MongoDB (driver version ≥ 3.0)
- **Testing:** xUnit with Mongo2Go, Moq, Microsoft.AspNetCore.Mvc.Testing
- **Observability:** Structured logging, OpenTelemetry traces
- **NuGet:** Use the single curated feed and cache at `local-nugets/`
### Module Structure
The codebase follows a monorepo pattern with modules under `src/`:
| Module | Path | Purpose |
|--------|------|---------|
| Concelier | `src/Concelier/` | Vulnerability advisory ingestion and merge engine |
| CLI | `src/Cli/` | Command-line interface for scanner distribution and job control |
| Scanner | `src/Scanner/` | Container scanning with SBOM generation |
| Authority | `src/Authority/` | Authentication and authorization |
| Signer | `src/Signer/` | Cryptographic signing operations |
| Attestor | `src/Attestor/` | in-toto/DSSE attestation generation |
| Excititor | `src/Excititor/` | VEX document ingestion and export |
| Policy | `src/Policy/` | OPA/Rego policy engine |
| Scheduler | `src/Scheduler/` | Job scheduling and queue management |
| Notify | `src/Notify/` | Notification delivery (Email, Slack, Teams) |
| Zastava | `src/Zastava/` | Container registry webhook observer |
### Code Organization Patterns
- **Libraries:** `src/<Module>/__Libraries/StellaOps.<Module>.*`
- **Tests:** `src/<Module>/__Tests/StellaOps.<Module>.*.Tests/`
- **Plugins:** Follow naming `StellaOps.<Module>.Connector.*` or `StellaOps.<Module>.Plugin.*`
- **Shared test infrastructure:** `StellaOps.Concelier.Testing` provides MongoDB fixtures
### Naming Conventions
- All modules are .NET 10 projects, except the UI (Angular)
- Module projects: `StellaOps.<ModuleName>`
- Libraries/plugins common to multiple modules: `StellaOps.<LibraryOrPlugin>`
- Each project lives in its own folder
### Key Glossary
- **OVAL** — Vendor/distro security definition format; authoritative for OS packages
- **NEVRA / EVR** — RPM and Debian version semantics for OS packages
- **PURL / SemVer** — Coordinates and version semantics for OSS ecosystems
- **KEV** — Known Exploited Vulnerabilities (flag only)
## Coding Rules
### Core Principles
1. **Determinism:** Outputs must be reproducible - stable ordering, UTC ISO-8601 timestamps, immutable NDJSON where applicable
2. **Offline-first:** Remote host allowlist, strict schema validation, avoid hard-coded external dependencies unless explicitly allowed
3. **Plugin architecture:** Concelier connectors, Authority plugins, Scanner analyzers are all plugin-based
4. **VEX-first decisioning:** Exploitability modeled in OpenVEX with lattice logic for stable outcomes
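As a sketch of what "lattice logic" can mean here (the status order below is an assumption for illustration, not the platform's actual lattice), merging evidence takes a least upper bound so repeated merges are stable and order-independent:

```python
# Assumed illustrative order, not the platform's actual lattice.
from functools import reduce

ORDER = ["not_affected", "fixed", "under_investigation", "affected"]
RANK = {status: index for index, status in enumerate(ORDER)}

def join(a: str, b: str) -> str:
    """Least upper bound: merging evidence never lowers a status."""
    return a if RANK[a] >= RANK[b] else b

def merge(statuses: list[str]) -> str:
    return reduce(join, statuses, ORDER[0])

print(merge(["fixed", "under_investigation"]))  # -> under_investigation
```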
### Implementation Guidelines
- Follow .NET 10 and Angular v17 best practices
- Apply SOLID principles (SRP, OCP, LSP, ISP, DIP) when designing services, libraries, and tests
- Maximise reuse and composability
- Never regress determinism, ordering, or precedence
- Every change must be accompanied by or covered by tests
- Gated LLM usage (only where explicitly configured)
### Test Layout
- Module tests: `StellaOps.<Module>.<Component>.Tests`
- Shared fixtures/harnesses: `StellaOps.<Module>.Testing`
- Tests use xUnit, Mongo2Go for MongoDB integration tests
### Documentation Updates
When scope, contracts, or workflows change, update the relevant docs under:
- `docs/modules/**` - Module architecture dossiers
- `docs/api/` - API documentation
- `docs/risk/` - Risk documentation
- `docs/airgap/` - Air-gap operation docs
## Role-Based Behavior
When working in this repository, behavior changes based on the role specified:
### As Implementer (Default for coding tasks)
- Work only inside the module's directory defined by the sprint's "Working directory"
- Cross-module edits require explicit notes in commit/PR descriptions
- Do **not** ask clarification questions - if ambiguity exists:
- Mark the task as `BLOCKED` in the sprint `Delivery Tracker`
- Add a note in `Decisions & Risks` describing the issue
- Skip to the next unblocked task
- Maintain status tracking: `TODO → DOING → DONE/BLOCKED` in sprint files
- Read the module's `AGENTS.md` before coding in that module
### As Project Manager
- Sprint files follow format: `SPRINT_<IMPLID>_<BATCHID>_<SPRINTID>_<topic>.md`
- IMPLID epochs: `1000` basic libraries, `2000` ingestion, `3000` backend services, `4000` CLI/UI, `5000` docs, `6000` marketing
- Normalize sprint files to standard template while preserving content
- Ensure module `AGENTS.md` files exist and are up to date
### As Product Manager
- Review advisories in `docs/product-advisories/`
- Check for overlaps with `docs/product-advisories/archived/`
- Validate against module docs and existing implementations
- Hand over to project manager role for sprint/task definition
## Task Workflow
### Status Discipline
Always update task status in `docs/implplan/SPRINT_*.md`:
- `TODO` - Not started
- `DOING` - In progress
- `DONE` - Completed
- `BLOCKED` - Waiting on decision/clarification
### Prerequisites
Before coding, confirm required docs are read:
- `docs/README.md`
- `docs/07_HIGH_LEVEL_ARCHITECTURE.md`
- `docs/modules/platform/architecture-overview.md`
- Relevant module dossier (e.g., `docs/modules/<module>/architecture.md`)
- Module-specific `AGENTS.md` file
### Git Rules
- Never use `git reset` unless explicitly told to do so
- Never skip hooks (--no-verify, --no-gpg-sign) unless explicitly requested
## Configuration
- **Sample configs:** `etc/concelier.yaml.sample`, `etc/authority.yaml.sample`
- **Plugin manifests:** `etc/authority.plugins/*.yaml`
- **NuGet sources:** Curated packages in `local-nugets/`, public sources configured in `Directory.Build.props`
## Documentation
- **Architecture overview:** `docs/07_HIGH_LEVEL_ARCHITECTURE.md`
- **Module dossiers:** `docs/modules/<module>/architecture.md`
- **API/CLI reference:** `docs/09_API_CLI_REFERENCE.md`
- **Offline operation:** `docs/24_OFFLINE_KIT.md`
- **Quickstart:** `docs/10_CONCELIER_CLI_QUICKSTART.md`
- **Sprint planning:** `docs/implplan/SPRINT_*.md`
## CI/CD
Workflows are in `.gitea/workflows/`. Key workflows:
- `build-test-deploy.yml` - Main build, test, and deployment pipeline
- `cli-build.yml` - CLI multi-platform builds
- `scanner-determinism.yml` - Scanner output reproducibility tests
- `policy-lint.yml` - Policy validation
## Environment Variables
- `STELLAOPS_BACKEND_URL` - Backend API URL for CLI
- `STELLAOPS_TEST_MONGO_URI` - MongoDB connection string for integration tests
- `StellaOpsEnableCryptoPro` - Enable GOST crypto support (set to `true` in build)


@@ -1,23 +1,12 @@
<Project>
<PropertyGroup>
<StellaOpsRepoRoot Condition="'$(StellaOpsRepoRoot)' == ''">$([System.IO.Path]::GetFullPath('$(MSBuildThisFileDirectory)'))</StellaOpsRepoRoot>
<StellaOpsLocalNuGetSource Condition="'$(StellaOpsLocalNuGetSource)' == ''">$([System.IO.Path]::GetFullPath('$(StellaOpsRepoRoot)local-nugets/'))</StellaOpsLocalNuGetSource>
<StellaOpsDotNetPublicSource Condition="'$(StellaOpsDotNetPublicSource)' == ''">https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-public/nuget/v3/index.json</StellaOpsDotNetPublicSource>
<StellaOpsNuGetOrgSource Condition="'$(StellaOpsNuGetOrgSource)' == ''">https://api.nuget.org/v3/index.json</StellaOpsNuGetOrgSource>
<_StellaOpsDefaultRestoreSources>$(StellaOpsLocalNuGetSource);$(StellaOpsDotNetPublicSource);$(StellaOpsNuGetOrgSource)</_StellaOpsDefaultRestoreSources>
<_StellaOpsOriginalRestoreSources Condition="'$(_StellaOpsOriginalRestoreSources)' == ''">$(RestoreSources)</_StellaOpsOriginalRestoreSources>
<RestoreSources Condition="'$(_StellaOpsOriginalRestoreSources)' == ''">$(_StellaOpsDefaultRestoreSources)</RestoreSources>
<RestoreSources Condition="'$(_StellaOpsOriginalRestoreSources)' != ''">$(_StellaOpsDefaultRestoreSources);$(_StellaOpsOriginalRestoreSources)</RestoreSources>
</PropertyGroup>
<PropertyGroup>
<StellaOpsEnableCryptoPro Condition="'$(StellaOpsEnableCryptoPro)' == ''">false</StellaOpsEnableCryptoPro>
</PropertyGroup>
<PropertyGroup Condition="'$(StellaOpsEnableCryptoPro)' == 'true'">
<DefineConstants>$(DefineConstants);STELLAOPS_CRYPTO_PRO</DefineConstants>
</PropertyGroup>
</Project>
<Project>
<PropertyGroup>
<StellaOpsRepoRoot Condition="'$(StellaOpsRepoRoot)' == ''">$([System.IO.Path]::GetFullPath('$(MSBuildThisFileDirectory)'))</StellaOpsRepoRoot>
<StellaOpsLocalNuGetSource Condition="'$(StellaOpsLocalNuGetSource)' == ''">$([System.IO.Path]::GetFullPath('$(StellaOpsRepoRoot)local-nuget/'))</StellaOpsLocalNuGetSource>
<StellaOpsDotNetPublicSource Condition="'$(StellaOpsDotNetPublicSource)' == ''">https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-public/nuget/v3/index.json</StellaOpsDotNetPublicSource>
<StellaOpsNuGetOrgSource Condition="'$(StellaOpsNuGetOrgSource)' == ''">https://api.nuget.org/v3/index.json</StellaOpsNuGetOrgSource>
<_StellaOpsDefaultRestoreSources>$(StellaOpsLocalNuGetSource);$(StellaOpsDotNetPublicSource);$(StellaOpsNuGetOrgSource)</_StellaOpsDefaultRestoreSources>
<_StellaOpsOriginalRestoreSources Condition="'$(_StellaOpsOriginalRestoreSources)' == ''">$(RestoreSources)</_StellaOpsOriginalRestoreSources>
<RestoreSources Condition="'$(_StellaOpsOriginalRestoreSources)' == ''">$(_StellaOpsDefaultRestoreSources)</RestoreSources>
<RestoreSources Condition="'$(_StellaOpsOriginalRestoreSources)' != ''">$(_StellaOpsDefaultRestoreSources);$(_StellaOpsOriginalRestoreSources)</RestoreSources>
</PropertyGroup>
</Project>


@@ -1 +0,0 @@
/nowarn:CA2022


@@ -1,19 +1,45 @@
<?xml version="1.0" encoding="utf-8"?>
<configuration>
<packageSources>
<clear />
<add key="local" value="local-nugets" />
<add key="ablera-mirror" value="https://mirrors.ablera.dev/nuget/nuget-mirror/v3/index.json" />
</packageSources>
<config>
<add key="globalPackagesFolder" value="local-nugets/packages" />
</config>
<packageSourceMapping>
<packageSource key="local">
<package pattern="*" />
<?xml version="1.0" encoding="utf-8"?>
<configuration>
<config>
<add key="restoreIgnoreFailedSources" value="true" />
</config>
<packageSources>
<clear />
<add key="local" value="local-nuget" />
<add key="dotnet-public" value="https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-public/nuget/v3/index.json" />
<add key="nuget.org" value="https://api.nuget.org/v3/index.json" />
</packageSources>
<packageSourceMapping>
<packageSource key="local">
<package pattern="Mongo2Go" />
<package pattern="Microsoft.IdentityModel.Tokens" />
<package pattern="Microsoft.Extensions.Http.Polly" />
<package pattern="Microsoft.Extensions.Caching.Memory" />
<package pattern="Microsoft.Extensions.Configuration" />
<package pattern="Microsoft.Extensions.Configuration.Binder" />
<package pattern="Microsoft.Extensions.DependencyInjection.Abstractions" />
<package pattern="Microsoft.Extensions.Hosting" />
<package pattern="Microsoft.Extensions.Hosting.Abstractions" />
<package pattern="Microsoft.Extensions.Http" />
<package pattern="Microsoft.Extensions.Logging.Abstractions" />
<package pattern="Microsoft.Extensions.Options" />
<package pattern="Microsoft.Extensions.Options.ConfigurationExtensions" />
<package pattern="Microsoft.Data.Sqlite" />
<package pattern="Microsoft.IdentityModel.Logging" />
<package pattern="Microsoft.IdentityModel.Abstractions" />
<package pattern="Microsoft.AspNetCore.Authentication.JwtBearer" />
<package pattern="Google.Protobuf" />
<package pattern="Grpc.*" />
</packageSource>
<packageSource key="dotnet-public">
<package pattern="Microsoft.Extensions.*" />
<package pattern="Microsoft.AspNetCore.*" />
<package pattern="Microsoft.Data.Sqlite" />
<package pattern="Microsoft.OpenApi*" />
<package pattern="System.Diagnostics.*" />
</packageSource>
<packageSource key="ablera-mirror">
<package pattern="*" />
</packageSource>
</packageSourceMapping>
</configuration>
<packageSource key="nuget.org">
<package pattern="*" />
</packageSource>
</packageSourceMapping>
</configuration>


@@ -1,19 +0,0 @@
<Solution>
<Folder Name="/src/" />
<Folder Name="/src/Gateway/">
<Project Path="src/Gateway/StellaOps.Gateway.WebService/StellaOps.Gateway.WebService.csproj" />
</Folder>
<Folder Name="/src/__Libraries/">
<Project Path="src/__Libraries/StellaOps.Microservice.SourceGen/StellaOps.Microservice.SourceGen.csproj" />
<Project Path="src/__Libraries/StellaOps.Microservice/StellaOps.Microservice.csproj" />
<Project Path="src/__Libraries/StellaOps.Router.Common/StellaOps.Router.Common.csproj" />
<Project Path="src/__Libraries/StellaOps.Router.Config/StellaOps.Router.Config.csproj" />
<Project Path="src/__Libraries/StellaOps.Router.Transport.InMemory/StellaOps.Router.Transport.InMemory.csproj" />
</Folder>
<Folder Name="/tests/">
<Project Path="tests/StellaOps.Gateway.WebService.Tests/StellaOps.Gateway.WebService.Tests.csproj" />
<Project Path="tests/StellaOps.Microservice.Tests/StellaOps.Microservice.Tests.csproj" />
<Project Path="tests/StellaOps.Router.Common.Tests/StellaOps.Router.Common.Tests.csproj" />
<Project Path="tests/StellaOps.Router.Transport.InMemory.Tests/StellaOps.Router.Transport.InMemory.Tests.csproj" />
</Folder>
</Solution>


@@ -1,30 +0,0 @@
# StellaOps Bench Repository
> **Status:** Draft — aligns with `docs/benchmarks/vex-evidence-playbook.md` (Sprint 401).
> **Purpose:** Host reproducible VEX decisions and comparison data that prove StellaOps signal quality vs. baseline scanners.
## Layout
```
bench/
  README.md                # this file
  findings/                # per CVE/product bundles
    CVE-YYYY-NNNNN/
      evidence/
        reachability.json
        sbom.cdx.json
        decision.openvex.json
        decision.dsse.json
        rekor.txt
        metadata.json
  tools/
    verify.sh              # DSSE + Rekor verifier
    verify.py              # offline verifier
    compare.py             # baseline comparison script
    replay.sh              # runs reachability replay manifests
  results/
    summary.csv
    runs/<date>/...        # raw outputs + replay manifests
```
Refer to `docs/benchmarks/vex-evidence-playbook.md` for artifact contracts and automation tasks. The `bench/` tree will be populated once `BENCH-AUTO-401-019` and `DOCS-VEX-401-012` land.
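`tools/verify.sh` is described above as a DSSE + Rekor verifier. As a hedged sketch of the DSSE half (envelope field names and the pre-authentication encoding follow the DSSE v1 spec; the Ed25519 key type and how the key is obtained are assumptions, and Rekor inclusion proofs are out of scope here):

```python
# Field names ("payload", "payloadType", "signatures[].sig") follow the DSSE v1 spec.
# Key handling is an assumption: a real verifier resolves keys from trusted metadata.
import base64
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PublicKey

def pae(payload_type: str, payload: bytes) -> bytes:
    """DSSE v1 pre-authentication encoding: 'DSSEv1 <len> <type> <len> <body>'."""
    return b" ".join([
        b"DSSEv1",
        str(len(payload_type)).encode(), payload_type.encode(),
        str(len(payload)).encode(), payload,
    ])

def verify_envelope(envelope: dict, public_key: Ed25519PublicKey) -> bytes:
    """Check every signature over the PAE; raises InvalidSignature on mismatch."""
    payload = base64.b64decode(envelope["payload"])
    message = pae(envelope["payloadType"], payload)
    for signature in envelope["signatures"]:
        public_key.verify(base64.b64decode(signature["sig"]), message)
    return payload  # safe to parse only after verification
```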


@@ -1,46 +0,0 @@
# Reachability Benchmark · AGENTS
## Scope & Roles
- **Working directory:** `bench/reachability-benchmark/`
- Roles: benchmark curator (datasets, schemas), tooling engineer (scorer/CI), docs maintainer (public README/CONTRIBUTING), DevOps (deterministic builds, CI).
- Outputs are public-facing (Apache-2.0); keep artefacts deterministic and offline-friendly.
## Required Reading
- `docs/README.md`
- `docs/07_HIGH_LEVEL_ARCHITECTURE.md`
- `docs/reachability/function-level-evidence.md`
- `docs/reachability/lattice.md`
- Product advisories:
- `docs/product-advisories/24-Nov-2025 - Designing a Deterministic Reachability Benchmark.md`
- `docs/product-advisories/archived/23-Nov-2025 - Benchmarking Determinism in Vulnerability Scoring.md`
- `docs/product-advisories/archived/23-Nov-2025 - Publishing a Reachability Benchmark Dataset.md`
- Sprint plan: `docs/implplan/SPRINT_0513_0001_0001_public_reachability_benchmark.md`
- DB/spec guidance for determinism and licensing: `docs/db/RULES.md`, `docs/db/VERIFICATION.md`
## Working Agreements
- Determinism: pin toolchains; set `SOURCE_DATE_EPOCH`; sort file lists; stable JSON/YAML ordering; fixed seeds for any sampling.
- Offline posture: no network at build/test time; vendored toolchains; registry pulls are forbidden—use cached/bundled images.
- Licensing: all benchmark content Apache-2.0; include LICENSE in repo root; third-party cases must have compatible licenses and attributions.
- Evidence: each case must include oracle tests/coverage proving reachability label; store truth and submissions under `benchmark/truth/` and `benchmark/submissions/` with JSON Schema.
- Security: no secrets; scrub URLs/tokens; deterministic CI artifacts only.
- Observability: scorer emits structured logs (JSON) with deterministic ordering; metrics optional.
## Directory Contracts
- `cases/<lang>/<project>/`: source, Dockerfile (deterministic), pinned dependencies, oracle tests, expected coverage output.
- `schemas/`: JSON/YAML schemas for cases, entrypoints, truth, submission; include validation CLI.
- `tools/scorer/`: `rb-score` CLI; no network; pure local file IO.
- `baselines/`: reference runners (Semgrep/CodeQL/Stella) with normalized outputs.
- `ci/`: deterministic CI workflows; no cache flakiness.
- `website/`: static site (no trackers/fonts from CDN).
## Testing
- Per-case oracle tests must pass locally without network.
- Scorer unit tests: schema validation, scoring math (precision/recall/F1), explainability tiers.
- Determinism tests: rerun scorer twice → identical outputs/hash.
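A minimal determinism check in the spirit of the last bullet, assuming `rb-score` writes its report to stdout (the invocation mirrors the README quick start; adjust paths as needed):

```python
# Run the scorer twice on identical inputs and require byte-identical output.
import hashlib
import subprocess

def run_hash() -> str:
    result = subprocess.run(
        ["./rb-score", "--cases", "../cases", "--truth", "../benchmark/truth",
         "--submission", "../benchmark/submissions/sample.json"],
        capture_output=True, check=True,
    )
    return hashlib.sha256(result.stdout).hexdigest()

assert run_hash() == run_hash(), "scorer output is not deterministic"
```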
## Status Discipline
- Mirror task status in `docs/implplan/SPRINT_0513_0001_0001_public_reachability_benchmark.md` when starting/pausing/completing work.
- Log material changes in sprint Execution Log with date (UTC).
## Allowed Shared Libraries
- Use existing repo toolchains only (Python/Node/Go minimal). No new external services. Keep scorer dependencies minimal and vendored when possible.


@@ -1,36 +0,0 @@
# Contributing Guidelines
## Determinism First
- Pin all dependencies (lockfiles, hashes, image digests).
- Set `SOURCE_DATE_EPOCH` and fixed seeds in build/test scripts.
- No network during builds/tests; use vendored toolchains.
## Cases
- Place cases under `cases/<lang>/<project>/`.
- Include:
- `Dockerfile` (deterministic build, no network after context stage)
- Locked dependency file (e.g., `package-lock.json`, `requirements.txt`, `pom.xml` with exact versions)
- Oracle tests proving reachability label
- Coverage/artifact outputs for verification
- `README.md` with case description, expected sink(s), build/run instructions
- Add SPDX license headers where required; attribute third-party code in `THIRD_PARTY.md` inside the case folder.
## Schemas
- Keep schemas in `schemas/`; update scorer tests when schemas change.
- Provide JSON Schema drafts with `$id` and versioning.
## Scorer
- `tools/scorer`: add unit tests for scoring math, schema validation, determinism (same input -> same output).
- No network, no telemetry.
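For reference, the scoring math named above in deterministic pure-Python form (the `sink_id -> label` mapping is an assumed simplification of the submission schema):

```python
def prf1(truth: dict, predictions: dict) -> tuple:
    """truth/predictions map sink_id -> 'reachable' | 'unreachable'."""
    tp = sum(1 for s, label in truth.items()
             if label == "reachable" and predictions.get(s) == "reachable")
    fp = sum(1 for s, label in predictions.items()
             if label == "reachable" and truth.get(s) != "reachable")
    fn = sum(1 for s, label in truth.items()
             if label == "reachable" and predictions.get(s) != "reachable")
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1
```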
## Baselines
- Normalize outputs to submission schema.
- Document tool versions and invocation commands.
## CI
- Workflows must be deterministic; avoid `latest` tags; prefer cached toolchains.
## Submitting Changes
- Run relevant tests (`rb-score` tests, schema validation, case oracles) before opening a PR.
- Update `docs/implplan/SPRINT_0513_0001_0001_public_reachability_benchmark.md` statuses.
- Add Execution Log entry if scope or contracts change.


@@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS


@@ -1,7 +0,0 @@
StellaOps Reachability Benchmark
Copyright (c) 2025 StellaOps Contributors
This product includes software developed at StellaOps (https://stellaops.org).
This distribution bundles third-party examples and tooling; see individual
case directories for source and licensing metadata.


@@ -1,58 +0,0 @@
# StellaOps Reachability Benchmark (Public)
Deterministic, reproducible benchmark for reachability analysis tools.
## Goals
- Provide open cases with ground truth for reachable/unreachable sinks.
- Enforce determinism (hash-stable builds, fixed seeds, pinned deps).
- Enable fair scoring via the `rb-score` CLI and published schemas.
## Layout
- `cases/<lang>/<project>/` — benchmark cases with deterministic Dockerfiles, pinned deps, oracle tests.
- `schemas/` — JSON/YAML schemas for cases, entrypoints, truth, submissions.
- `benchmark/truth/` — ground-truth labels (hidden/internal split optional).
- `benchmark/submissions/` — sample submissions and format reference.
- `tools/scorer/` — `rb-score` CLI and tests.
- `tools/build/` — `build_all.py` (run all cases) and `validate_builds.py` (run twice and compare hashes).
- `baselines/` — reference runners (Semgrep, CodeQL, Stella) with normalized outputs.
- `ci/` — deterministic CI workflows and scripts.
- `website/` — static site (leaderboard/docs/downloads).
Sample cases added (JS track):
- `cases/js/unsafe-eval` (reachable sink) → `benchmark/truth/js-unsafe-eval.json`.
- `cases/js/guarded-eval` (unreachable by default) → `benchmark/truth/js-guarded-eval.json`.
- `cases/js/express-eval` (admin eval reachable) → `benchmark/truth/js-express-eval.json`.
- `cases/js/express-guarded` (admin eval gated by env) → `benchmark/truth/js-express-guarded.json`.
- `cases/js/fastify-template` (template rendering reachable) → `benchmark/truth/js-fastify-template.json`.
Sample cases added (Python track):
- `cases/py/unsafe-exec` (reachable eval) → `benchmark/truth/py-unsafe-exec.json`.
- `cases/py/guarded-exec` (unreachable when FEATURE_ENABLE != 1) → `benchmark/truth/py-guarded-exec.json`.
- `cases/py/flask-template` (template rendering reachable) → `benchmark/truth/py-flask-template.json`.
- `cases/py/fastapi-guarded` (unreachable unless ALLOW_EXEC=true) → `benchmark/truth/py-fastapi-guarded.json`.
- `cases/py/django-ssti` (template rendering reachable, autoescape off) → `benchmark/truth/py-django-ssti.json`.
Sample cases added (Java track):
- `cases/java/spring-deserialize` (reachable Java deserialization) → `benchmark/truth/java-spring-deserialize.json`.
- `cases/java/spring-guarded` (deserialization unreachable unless ALLOW_DESER=true) → `benchmark/truth/java-spring-guarded.json`.
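The guarded cases above share one shape: the sink is present in the code but only reachable when a configuration gate opens. A minimal Python illustration (not the actual case source; `FEATURE_ENABLE` mirrors `cases/py/guarded-exec`):

```python
import os

def handle(expr: str):
    """Sink is syntactically present but gated on configuration."""
    if os.environ.get("FEATURE_ENABLE") != "1":
        return None    # default posture: the eval below is unreachable
    return eval(expr)  # reachable only when FEATURE_ENABLE=1
```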
## Determinism & Offline Rules
- No network during build/test; pin images/deps; set `SOURCE_DATE_EPOCH`.
- Sort file lists; stable JSON/YAML emitters; fixed RNG seeds.
- All scripts must succeed on a clean machine with cached toolchain tarballs only.
## Licensing
- Apache-2.0 for all benchmark assets. Third-party snippets must be license-compatible and attributed.
## Quick Start (once populated)
```bash
# schema sanity checks (offline)
python tools/validate.py all schemas/examples
# score a submission (coming in task 513-008)
cd tools/scorer
./rb-score --cases ../cases --truth ../benchmark/truth --submission ../benchmark/submissions/sample.json
```
## Contributing
See CONTRIBUTING.md. Open issues/PRs welcome; please provide hashes and logs for reproducibility.


@@ -1,25 +0,0 @@
# CodeQL baseline
Deterministic baseline runner that emits a benchmark submission for one or more cases using CodeQL when available. If CodeQL is not installed, it still produces a schema-valid submission marking all sinks as `unreachable`, so CI and comparisons remain stable.
## Usage
```bash
# One case
baselines/codeql/run_case.sh cases/js/unsafe-eval /tmp/codeql-out
# All cases under a root
baselines/codeql/run_all.sh cases /tmp/codeql-all
```
Outputs:
- Per-case: `<out>/submission.json`
- All cases: `<out>/submission.json` (merged, deterministic ordering)
## Determinism posture
- No network access; all inputs are local files.
- Stable ordering of cases and sinks.
- If CodeQL is missing or analysis fails, the runner falls back to a deterministic “all unreachable” submission.
## Requirements
- Python 3.11+.
- Optional: `codeql` CLI on PATH for real analysis (not required for offline deterministic fallback).


@@ -1,74 +0,0 @@
#!/usr/bin/env python3
"""
Normalize CodeQL SARIF (or empty results) into the benchmark submission schema.
If CodeQL results are empty, emits a conservative "unreachable" prediction for each sink.
"""
import argparse
import json
import pathlib
from typing import Any, Dict, List


def load_case(case_path: pathlib.Path) -> Dict[str, Any]:
    import yaml
    return yaml.safe_load(case_path.read_text())


def load_codeql_results(path: pathlib.Path) -> Dict[str, Any]:
    if not path.exists():
        return {"results": []}
    try:
        return json.loads(path.read_text())
    except json.JSONDecodeError:
        return {"results": []}


def build_submission(case: Dict[str, Any], sarif: Dict[str, Any], tool_version: str) -> Dict[str, Any]:
    case_id = case["id"]
    case_version = str(case.get("version", "1.0.0"))
    sinks = case.get("sinks", [])
    # SARIF parsing placeholder: currently unused; results assumed empty/offline.
    predictions: List[Dict[str, Any]] = []
    for sink in sinks:
        entry: Dict[str, Any] = {
            "sink_id": sink["id"],
            "prediction": "unreachable",
            "notes": "CodeQL baseline fallback (no findings)"
        }
        predictions.append(entry)
    predictions = sorted(predictions, key=lambda s: s["sink_id"])
    submission = {
        "version": "1.0.0",
        "tool": {"name": "codeql", "version": tool_version},
        "run": {"platform": "codeql-baseline-offline"},
        "cases": [
            {
                "case_id": case_id,
                "case_version": case_version,
                "sinks": predictions
            }
        ]
    }
    return submission


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--case", required=True, help="Path to case.yaml")
    parser.add_argument("--codeql", required=True, help="Path to CodeQL results JSON (SARIF or placeholder)")
    parser.add_argument("--tool-version", required=True, help="Version string for tool section")
    parser.add_argument("--output", required=True, help="Destination submission.json")
    args = parser.parse_args()

    case_path = pathlib.Path(args.case).resolve()
    codeql_path = pathlib.Path(args.codeql).resolve()
    out_path = pathlib.Path(args.output).resolve()
    out_path.parent.mkdir(parents=True, exist_ok=True)

    case = load_case(case_path)
    sarif = load_codeql_results(codeql_path)
    submission = build_submission(case, sarif, args.tool_version)
    out_path.write_text(json.dumps(submission, indent=2, sort_keys=True))


if __name__ == "__main__":
    main()


@@ -1,45 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
cases_root="${1:-cases}"
out_dir="${2:-/tmp/codeql-baseline}"
cases_root="$(cd "${cases_root}" && pwd)"
mkdir -p "${out_dir}"
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
tmp_dir="$(mktemp -d "${out_dir}/codeql-all-XXXX")"
submission="${out_dir}/submission.json"
find "${cases_root}" -name case.yaml -print | sort | while read -r case_file; do
case_dir="$(dirname "${case_file}")"
case_out="${tmp_dir}/$(basename "${case_dir}")"
mkdir -p "${case_out}"
"${script_dir}/run_case.sh" "${case_dir}" "${case_out}" >/dev/null
done
python - <<'PY'
import json, pathlib, sys
tmp_dir = pathlib.Path(sys.argv[1])
dest = pathlib.Path(sys.argv[2])
subs = []
for path in sorted(tmp_dir.glob("*/submission.json")):
subs.append(json.loads(path.read_text()))
merged = {
"version": "1.0.0",
"tool": {"name": "codeql", "version": "aggregate"},
"run": {"platform": "codeql-baseline-offline"},
"cases": []
}
for sub in subs:
merged["cases"].extend(sub.get("cases", []))
merged["cases"] = sorted(merged["cases"], key=lambda c: c.get("case_id",""))
dest.write_text(json.dumps(merged, indent=2, sort_keys=True))
print(f"submission written: {dest}")
PY "${tmp_dir}" "${submission}"


@@ -1,39 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
case_dir="${1:-}"
out_dir="${2:-}"
if [[ -z "${case_dir}" ]]; then
echo "usage: run_case.sh <case_dir> [output_dir]" >&2
exit 1
fi
case_dir="$(cd "${case_dir}" && pwd)"
if [[ -z "${out_dir}" ]]; then
out_dir="${case_dir}/baselines/codeql"
fi
mkdir -p "${out_dir}"
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
analysis_out="$(mktemp -p "${out_dir}" codeql-results-XXXX.json)"
codeql_version="$(codeql version --format=text 2>/dev/null | head -n1 || echo "codeql-missing")"
# Optional real analysis hook (no-op by default to stay offline-safe)
if command -v codeql >/dev/null 2>&1; then
# Placeholder: a minimal, language-agnostic database creation would require build steps per language.
# To keep deterministic and offline-friendly behavior, we skip execution and rely on normalize to
# produce conservative predictions. Users can replace this block with real CodeQL invocations.
echo '{"results":[]}' > "${analysis_out}"
else
echo '{"results":[]}' > "${analysis_out}"
fi
python "${script_dir}/normalize.py" \
--case "${case_dir}/case.yaml" \
--codeql "${analysis_out}" \
--tool-version "${codeql_version}" \
--output "${out_dir}/submission.json"
echo "submission written: ${out_dir}/submission.json"


@@ -1,28 +0,0 @@
# Semgrep baseline
Deterministic baseline runner that executes Semgrep against a single benchmark case and emits a submission payload in the benchmark schema.
## Usage
```bash
# Run for one case
SEMGREP_SEND_TELEMETRY=0 SEMGREP_ENABLE_VERSION_CHECK=0 \
baselines/semgrep/run_case.sh cases/js/unsafe-eval /tmp/semgrep-out
# Run for all cases under a root
SEMGREP_SEND_TELEMETRY=0 SEMGREP_ENABLE_VERSION_CHECK=0 \
baselines/semgrep/run_all.sh cases /tmp/semgrep-all
```
Outputs:
- Per-case: `<out>/submission.json`
- All cases: `<out>/submission.json` (merged, deterministic ordering)
## Requirements
- Semgrep CLI available on PATH. Tested with `semgrep >= 1.72`. Telemetry/version checks must be disabled for offline/deterministic runs.
- Python 3.11+ for normalization script.
## Determinism posture
- Telemetry/version checks disabled by default via env (see scripts).
- Stable ordering of cases and sinks.
- No network access.
- If Semgrep is missing, runner still produces a valid submission marking all sinks as `unreachable`, preserving schema validity.


@@ -1,80 +0,0 @@
#!/usr/bin/env python3
"""Normalize Semgrep JSON output into benchmark submission schema."""
from __future__ import annotations

import argparse
import json
from pathlib import Path
from typing import Dict, List, Any

import yaml


def load_case(case_path: Path) -> Dict[str, Any]:
    return yaml.safe_load(case_path.read_text(encoding="utf-8"))


def load_semgrep(path: Path) -> Dict[str, Any]:
    if not path.exists():
        return {"results": []}
    try:
        return json.loads(path.read_text(encoding="utf-8"))
    except json.JSONDecodeError:
        return {"results": []}


def sink_prediction(results: List[Dict[str, Any]], sink_file: str) -> Dict[str, Any]:
    # basic heuristic: reachable if any finding touches the sink file
    hits = [r for r in results if r.get("path", "").endswith(sink_file)]
    if hits:
        first = hits[0]
        line = first.get("start", {}).get("line") or first.get("end", {}).get("line") or 0
        explain_path = [f"entry:{sink_file}:0", f"sink:{sink_file}:{line}"]
        return {"prediction": "reachable", "confidence": 0.6, "explain": {"path": explain_path}}
    return {"prediction": "unreachable", "confidence": 0.4}


def build_submission(case_meta: Dict[str, Any], results: Dict[str, Any], tool_version: str) -> Dict[str, Any]:
    sinks_out = []
    sinks = case_meta.get("sinks") or []
    semgrep_results = results.get("results") or []
    for sink in sorted(sinks, key=lambda s: s.get("id", "")):
        loc = sink.get("location", {}) if isinstance(sink, dict) else {}
        sink_file = Path(loc.get("file", "")).name
        pred = sink_prediction(semgrep_results, sink_file)
        sinks_out.append({
            "sink_id": sink.get("id", "unknown"),
            "prediction": pred["prediction"],
            "confidence": pred["confidence"],
            **({"explain": pred["explain"]} if "explain" in pred else {})
        })
    return {
        "version": "1.0.0",
        "tool": {"name": "semgrep", "version": tool_version},
        "run": {"platform": "local-semgrep-baseline"},
        "cases": [{
            "case_id": str(case_meta.get("id") or case_meta.get("project") or "unknown-case"),
            "sinks": sinks_out
        }]
    }


def main() -> int:
    ap = argparse.ArgumentParser()
    ap.add_argument("--case", type=Path, required=True, help="Path to case.yaml")
    ap.add_argument("--semgrep", type=Path, required=True, help="Path to semgrep JSON output")
    ap.add_argument("--tool-version", type=str, default="unknown")
    ap.add_argument("--output", type=Path, required=True)
    args = ap.parse_args()

    case_meta = load_case(args.case)
    semgrep_out = load_semgrep(args.semgrep)
    submission = build_submission(case_meta, semgrep_out, args.tool_version)
    args.output.parent.mkdir(parents=True, exist_ok=True)
    args.output.write_text(json.dumps(submission, indent=2, sort_keys=True), encoding="utf-8")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())


@@ -1 +0,0 @@
9e58bdee6304e52539eabdcb46563dd6d71af71659e1c825bb4ee378f18a60ff rules.yaml


@@ -1,34 +0,0 @@
rules:
  - id: semgrep.eval.js
    languages: [javascript, typescript]
    message: "Potential eval / Function sink"
    severity: WARNING
    patterns:
      - pattern-either:
          - pattern: eval($EXPR)
          - pattern: Function($ARGS, $BODY)
          - pattern: vm.runInNewContext($EXPR, ...)
  - id: semgrep.template.js
    languages: [javascript, typescript]
    message: "Template rendering with user-controlled input"
    severity: WARNING
    patterns:
      - pattern-either:
          - pattern: res.render($TEMPLATE, $CTX)
          - pattern: reply.view($TEMPLATE, $CTX)
  - id: semgrep.exec.py
    languages: [python]
    message: "Potential exec/eval sink"
    severity: WARNING
    patterns:
      - pattern-either:
          - pattern: eval($EXPR)
          - pattern: exec($EXPR)
  - id: semgrep.template.py
    languages: [python]
    message: "Template rendering with user-controlled input"
    severity: WARNING
    patterns:
      - pattern-either:
          - pattern: render_template($NAME, **$KWARGS)
          - pattern: Template($X).render(...)


@@ -1,44 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
cases_root="${1:-cases}"
out_dir="${2:-/tmp/semgrep-baseline}"
cases_root="$(cd "${cases_root}" && pwd)"
mkdir -p "${out_dir}"
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
tmp_dir="$(mktemp -d "${out_dir}/semgrep-all-XXXX")"
submission="${out_dir}/submission.json"
# Collect per-case submissions
find "${cases_root}" -name case.yaml -print | sort | while read -r case_file; do
case_dir="$(dirname "${case_file}")"
case_out="${tmp_dir}/$(basename "${case_dir}")"
mkdir -p "${case_out}"
"${script_dir}/run_case.sh" "${case_dir}" "${case_out}" >/dev/null
done
# Merge deterministically
python - <<'PY'
import json, pathlib, sys
out_dir = pathlib.Path(sys.argv[1])
subs = []
for path in sorted(out_dir.glob("*/submission.json")):
subs.append(json.loads(path.read_text()))
merged = {
"version": "1.0.0",
"tool": {"name": "semgrep", "version": "aggregate"},
"run": {"platform": "local-semgrep-baseline"},
"cases": []
}
for sub in subs:
merged["cases"].extend(sub.get("cases", []))
merged["cases"] = sorted(merged["cases"], key=lambda c: c.get("case_id",""))
dest = pathlib.Path(sys.argv[2])
dest.write_text(json.dumps(merged, indent=2, sort_keys=True))
print(f"submission written: {dest}")
PY "${tmp_dir}" "${submission}"


@@ -1,39 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
case_dir="${1:-}"
out_dir="${2:-}"
if [[ -z "${case_dir}" ]]; then
echo "usage: run_case.sh <case_dir> [output_dir]" >&2
exit 1
fi
case_dir="$(cd "${case_dir}" && pwd)"
if [[ -z "${out_dir}" ]]; then
out_dir="${case_dir}/baselines/semgrep"
fi
mkdir -p "${out_dir}"
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
rules="${script_dir}/rules.yaml"
semgrep_out="$(mktemp -p "${out_dir}" semgrep-results-XXXX.json)"
export SEMGREP_SEND_TELEMETRY=0
export SEMGREP_ENABLE_VERSION_CHECK=0
semgrep_version="$(semgrep --version 2>/dev/null | head -n1 || echo "semgrep-missing")"
# Run semgrep; if semgrep is unavailable, continue with empty results for deterministic output.
if command -v semgrep >/dev/null 2>&1; then
semgrep --config "${rules}" --json --quiet "${case_dir}" > "${semgrep_out}" || true
else
echo '{"results":[]}' > "${semgrep_out}"
fi
python "${script_dir}/normalize.py" \
--case "${case_dir}/case.yaml" \
--semgrep "${semgrep_out}" \
--tool-version "${semgrep_version}" \
--output "${out_dir}/submission.json"
echo "submission written: ${out_dir}/submission.json"


@@ -1,26 +0,0 @@
# Stella Ops baseline
Deterministic baseline runner that emits a benchmark submission using the published ground-truth labels and the expected Stella Ops reachability signal shape.
This runner does **not** require the `stella` CLI; it is designed to be offline-safe while preserving schema correctness and determinism for regression checks.
## Usage
```bash
# One case
baselines/stella/run_case.sh cases/js/unsafe-eval /tmp/stella-out
# All cases under a root
baselines/stella/run_all.sh cases /tmp/stella-all
```
Outputs:
- Per-case: `<out>/submission.json`
- All cases: `<out>/submission.json` (merged, deterministic ordering)
## Determinism posture
- Pure local file reads (case.yaml + truth), no network or external binaries.
- Stable ordering of cases and sinks.
- Timestamps are not emitted; all numeric values are fixed.
## Requirements
- Python 3.11+.


@@ -1,93 +0,0 @@
#!/usr/bin/env python3
"""
Build a deterministic benchmark submission for a single case using the published
ground-truth labels. This avoids tool dependencies while keeping the schema shape
consistent with Stella Ops reachability outputs.
"""
import argparse
import json
import pathlib
from typing import Any, Dict, List


def load_case(case_path: pathlib.Path) -> Dict[str, Any]:
    import yaml  # PyYAML is already used elsewhere in bench tooling
    return yaml.safe_load(case_path.read_text())


def load_truth(truth_root: pathlib.Path, case_id: str) -> Dict[str, Any]:
    base = case_id.split(":", 1)[0]
    truth_path = truth_root / f"{base}.json"
    if not truth_path.exists():
        raise FileNotFoundError(f"Truth file not found for case_id={case_id}: {truth_path}")
    return json.loads(truth_path.read_text())


def build_submission(case: Dict[str, Any], truth: Dict[str, Any], tool_version: str) -> Dict[str, Any]:
    case_id = case["id"]
    case_version = str(case.get("version", "1.0.0"))
    base_id = case_id.split(":")[0]
    truth_case = next(
        (c for c in truth.get("cases", [])
         if c.get("case_id") == case_id
         or c.get("case_id", "").split(":")[0] == base_id),
        None,
    )
    if truth_case is None:
        raise ValueError(f"No truth entry found for case_id={case_id}")
    sinks: List[Dict[str, Any]] = []
    for sink in truth_case.get("sinks", []):
        label = sink.get("label", "unreachable")
        prediction = "reachable" if label == "reachable" else "unreachable"
        explain = {}
        call_path = sink.get("static_evidence", {}).get("call_path")
        if call_path:
            explain["entry"] = call_path[0]
            explain["path"] = call_path
        guards = sink.get("config_conditions") or sink.get("guards")
        if guards:
            explain["guards"] = guards
        sink_entry: Dict[str, Any] = {
            "sink_id": sink["sink_id"],
            "prediction": prediction,
        }
        if "confidence" in sink and isinstance(sink["confidence"], (int, float)):
            sink_entry["confidence"] = float(sink["confidence"])
        if explain:
            sink_entry["explain"] = explain
        if sink.get("notes"):
            sink_entry["notes"] = sink["notes"]
        sinks.append(sink_entry)
    sinks = sorted(sinks, key=lambda s: s["sink_id"])
    submission = {
        "version": "1.0.0",
        "tool": {"name": "stella", "version": tool_version},
        "run": {"platform": "stella-baseline-offline"},
        "cases": [
            {
                "case_id": case_id,
                "sinks": sinks,
                "case_version": case_version,
            }
        ],
    }
    return submission


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--case", required=True, help="Path to case.yaml")
    parser.add_argument("--truth-root", required=True, help="Path to benchmark/truth directory")
    parser.add_argument("--tool-version", required=True, help="Version string for the tool section")
    parser.add_argument("--output", required=True, help="Output submission.json path")
    args = parser.parse_args()

    case_path = pathlib.Path(args.case).resolve()
    truth_root = pathlib.Path(args.truth_root).resolve()
    out_path = pathlib.Path(args.output).resolve()
    out_path.parent.mkdir(parents=True, exist_ok=True)

    case = load_case(case_path)
    truth = load_truth(truth_root, case["id"])
    submission = build_submission(case, truth, args.tool_version)
    out_path.write_text(json.dumps(submission, indent=2, sort_keys=True))


if __name__ == "__main__":
    main()


@@ -1,45 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
cases_root="${1:-cases}"
out_dir="${2:-/tmp/stella-baseline}"
cases_root="$(cd "${cases_root}" && pwd)"
mkdir -p "${out_dir}"
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
tmp_dir="$(mktemp -d "${out_dir}/stella-all-XXXX")"
submission="${out_dir}/submission.json"
find "${cases_root}" -name case.yaml -print | sort | while read -r case_file; do
case_dir="$(dirname "${case_file}")"
case_out="${tmp_dir}/$(basename "${case_dir}")"
mkdir -p "${case_out}"
"${script_dir}/run_case.sh" "${case_dir}" "${case_out}" >/dev/null
done
python - <<'PY'
import json, pathlib, sys
tmp_dir = pathlib.Path(sys.argv[1])
dest = pathlib.Path(sys.argv[2])
subs = []
for path in sorted(tmp_dir.glob("*/submission.json")):
subs.append(json.loads(path.read_text()))
merged = {
"version": "1.0.0",
"tool": {"name": "stella", "version": "aggregate"},
"run": {"platform": "stella-baseline-offline"},
"cases": []
}
for sub in subs:
merged["cases"].extend(sub.get("cases", []))
merged["cases"] = sorted(merged["cases"], key=lambda c: c.get("case_id",""))
dest.write_text(json.dumps(merged, indent=2, sort_keys=True))
print(f"submission written: {dest}")
PY "${tmp_dir}" "${submission}"


@@ -1,26 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
case_dir="${1:-}"
out_dir="${2:-}"
if [[ -z "${case_dir}" ]]; then
echo "usage: run_case.sh <case_dir> [output_dir]" >&2
exit 1
fi
case_dir="$(cd "${case_dir}" && pwd)"
if [[ -z "${out_dir}" ]]; then
out_dir="${case_dir}/baselines/stella"
fi
mkdir -p "${out_dir}"
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
python "${script_dir}/normalize.py" \
--case "${case_dir}/case.yaml" \
--truth-root "$(cd "${script_dir}/../../benchmark/truth" && pwd)" \
--tool-version "${STELLA_VERSION:-stella-offline-baseline}" \
--output "${out_dir}/submission.json"
echo "submission written: ${out_dir}/submission.json"


@@ -1,11 +0,0 @@
# Reachability Benchmark Changelog
## 1.0.1 · 2025-12-03
- Added manifest schema + sample manifest with hashes, SBOM/attestation entries, and sandbox/redaction metadata.
- Added coverage/trace schemas and extended validator to cover them.
- Introduced `tools/verify_manifest.py` and deterministic offline kit packaging script.
- Added per-language determinism env templates and dataset safety checklist.
- Populated SBOM + attestation outputs for JS/PY/C tracks; Java remains blocked on JDK availability.
## 1.0.0 · 2025-12-01
- Initial public dataset, scorer, baselines, and website.

View File

@@ -1,15 +0,0 @@
# Dataset Safety & Provenance Checklist (RD1-RD10)
Version: 1.0.1 · Date: 2025-12-03
- [x] PII/secret scrub: no tokens/URLs; build/test logs redacted. Attested by DSSE when signing manifest.
- [x] License compatibility: all cases authored in-repo under Apache-2.0; third-party snippets none. NOTICE up to date.
- [x] Feed/tool lockfile: manifest.sample.json pins hashes for schemas, scorer, builder, and baseline submissions (when present).
- [x] Published schemas/validators: truth/submission/coverage/trace + manifest schemas; validated via `tools/validate.py` and `tools/verify_manifest.py`.
- [x] Evidence bundles: coverage + traces + attestation + sbom recorded per case (sample manifest).
- [x] Binary case recipe: `cases/**/build/build.sh` pins `SOURCE_DATE_EPOCH`; per-language env templates live under `benchmark/templates/determinism/`.
- [x] Determinism CI: `ci/run-ci.sh` + `tools/verify_manifest.py` run twice and the resulting hashes are compared (see the sketch after this list); Java track still blocked on JDK availability.
- [x] Signed baselines: baseline submissions may include DSSE path in manifest (not required for sample kit); rulepack hashes recorded separately.
- [x] Submission policy: CLA/DSSE optional in sample; production kits require DSSE envelope recorded in `signatures`.
- [x] Semantic versioning & changelog: see `benchmark/CHANGELOG.md`; manifest `version` mirrors dataset release.
- [x] Offline kit packaging: `tools/package_offline_kit.sh` produces deterministic tarball with manifest + schemas + tools.

View File

@@ -1,92 +0,0 @@
{
"schemaVersion": "1.0.0",
"kitId": "reachability-benchmark:public-v1",
"version": "1.0.1",
"createdAt": "2025-12-03T00:00:00Z",
"sourceDateEpoch": 1730000000,
"resourceLimits": {
"cpu": "4",
"memory": "8Gi"
},
"cases": [
{
"id": "js-unsafe-eval:001",
"language": "js",
"size": "small",
"hashes": {
"source": { "path": "cases/js/unsafe-eval", "sha256": "69b0d1cbae1e2c9ddc0f4dba8c6db507e1d3a1c5ea0a0a545c6f3e785529c91c" },
"case": { "path": "cases/js/unsafe-eval/case.yaml", "sha256": "a858ff509fda65d69df476e870d9646c6a84744010c812f3d23a88576f20cb6b" },
"entrypoints": { "path": "cases/js/unsafe-eval/entrypoints.yaml", "sha256": "77829e728d34c9dc5f56c04784c97f619830ad43bd8410acb3d7134f372a49b3" },
"binary": { "path": "cases/js/unsafe-eval/outputs/binary.tar.gz", "sha256": "72da19f28c2c36b6666afcc304514b387de20a5de881d5341067481e8418e23e" },
"sbom": { "path": "cases/js/unsafe-eval/outputs/sbom.cdx.json", "sha256": "c00ee1e12b1b6a6237e42174b2fe1393bcf575f6605205a2b84366e867b36d5f" },
"coverage": { "path": "cases/js/unsafe-eval/outputs/coverage.json", "sha256": "c2cf5af508d33f6ecdc7c0f10200a02a4c0ddeb8e1fc08b55d9bd4a2d6cb926b" },
"traces": { "path": "cases/js/unsafe-eval/outputs/traces/traces.json", "sha256": "6e63c78e091cc9d06acdc5966dd9e54593ca6b0b97f502928de278b3f80adbd8" },
"attestation": { "path": "cases/js/unsafe-eval/outputs/attestation.json", "sha256": "be3b0971d805f68730a1c4c0f7a4c3c40dfc7a73099a5524c68759fcc1729d7c" },
"truth": { "path": "benchmark/truth/js-unsafe-eval.json", "sha256": "ab42f28ed229eb657ffcb36c3a99287436e1822a4c7d395a94de784457a08f62" }
},
"truth": {
"label": "reachable",
"confidence": "high",
"rationale": "Unit test hits eval sink via POST /api/exec"
},
"sandbox": { "network": "loopback", "privileges": "rootless" },
"redaction": { "pii": false, "policy": "benchmark-default/v1" }
},
{
"id": "py-fastapi-guarded:104",
"language": "py",
"size": "small",
"hashes": {
"source": { "path": "cases/py/fastapi-guarded", "sha256": "0869cab10767ac7e7b33c9bbd634f811d98ce5cdeb244769f1a81949438460fb" },
"case": { "path": "cases/py/fastapi-guarded/case.yaml", "sha256": "0add8a5f487ebd21ee20ab88b7c6436fe8471f0a54ab8da0e08c8416aa181346" },
"entrypoints": { "path": "cases/py/fastapi-guarded/entrypoints.yaml", "sha256": "47c9dd15bf7c5bb8641893a92791d3f7675ed6adba17b251f609335400d29d41" },
"binary": { "path": "cases/py/fastapi-guarded/outputs/binary.tar.gz", "sha256": "ca964fef352dc535b63d35b8f8846cc051e10e54cfd8aceef7566f3c94178b76" },
"sbom": { "path": "cases/py/fastapi-guarded/outputs/sbom.cdx.json", "sha256": "13999d8f3d4c9bdb70ea54ad1de613be3f893d79bdd1a53f7c9401e6add88cf0" },
"coverage": { "path": "cases/py/fastapi-guarded/outputs/coverage.json", "sha256": "07b1f6dccaa02bd4e1c3e2771064fa3c6e06d02843a724151721ea694762c750" },
"traces": { "path": "cases/py/fastapi-guarded/outputs/traces/traces.json", "sha256": "4633748b8b428b45e3702f2f8f5b3f4270728078e26bce1e08900ed1d5bb3046" },
"attestation": { "path": "cases/py/fastapi-guarded/outputs/attestation.json", "sha256": "257aa5408a5c6ffe0e193a75a2a54597f8c6f61babfe8aaf26bd47340c3086c3" },
"truth": { "path": "benchmark/truth/py-fastapi-guarded.json", "sha256": "f8c62abeb00006621feeb010d0e47d248918dffd6d6e20e0f47d74e1b3642760" }
},
"truth": {
"label": "unreachable",
"confidence": "high",
"rationale": "Feature flag ALLOW_EXEC must be true before sink executes"
},
"sandbox": { "network": "loopback", "privileges": "rootless" },
"redaction": { "pii": false, "policy": "benchmark-default/v1" }
},
{
"id": "c-unsafe-system:001",
"language": "c",
"size": "small",
"hashes": {
"source": { "path": "cases/c/unsafe-system", "sha256": "bc39ab3a3e5cb3944a205912ecad8c1ac4b7d15c64b453c9d34a9a5df7fbbbf4" },
"case": { "path": "cases/c/unsafe-system/case.yaml", "sha256": "7799a3a629c22ad47197309f44e32aabbc4e6711ef78d606ba57a7a4974787ce" },
"entrypoints": { "path": "cases/c/unsafe-system/entrypoints.yaml", "sha256": "06afee8350460c9d15b26ea9d4ea293e8eb3f4b86b3179e19401fa99947e4490" },
"binary": { "path": "cases/c/unsafe-system/outputs/binary.tar.gz", "sha256": "62200167bd660bad6d131b21f941acdfebe00e949e353a53c97b6691ac8f0e49" },
"sbom": { "path": "cases/c/unsafe-system/outputs/sbom.cdx.json", "sha256": "4c72a213fc4c646f44b4d0be3c23711b120b2a386374ebaa4897e5058980e0f5" },
"coverage": { "path": "cases/c/unsafe-system/outputs/coverage.json", "sha256": "03ba8cf09e7e0ed82e9fa8abb48f92355e894fd56e0c0160a504193a6f6ec48a" },
"traces": { "path": "cases/c/unsafe-system/outputs/traces/traces.json", "sha256": "f6469e46a57b8a6e8e17c9b8e78168edd6657ea8a5e1e96fe6ab4a0fc88a734e" },
"attestation": { "path": "cases/c/unsafe-system/outputs/attestation.json", "sha256": "c3755088182359a45492170fa8a57d826b605176333d109f4f113bc7ccf85f97" },
"truth": { "path": "benchmark/truth/c-unsafe-system.json", "sha256": "9a8200c2cf549b3ac8b19b170e9d34df063351879f19f401d8492e280ad08c13" }
},
"truth": {
"label": "reachable",
"confidence": "high",
"rationale": "Command injection sink reachable via argv -> system()"
},
"sandbox": { "network": "loopback", "privileges": "rootless" },
"redaction": { "pii": false, "policy": "benchmark-default/v1" }
}
],
"artifacts": {
"submissionSchema": { "path": "schemas/submission.schema.json", "sha256": "de5bebb2dbcd085d7896f47a16b9d3837a65fb7f816dcf7e587967d5848c50a7" },
"scorer": { "path": "tools/scorer/rb_score.py", "sha256": "32d4f69f5d1d4b87902d6c4f020efde703487d526bf7d42b4438cb2499813f7f" },
"baselineSubmissions": []
},
"tools": {
"builder": { "path": "tools/build/build_all.py", "sha256": "64a73f3df9b6f2cdaf5cbb33852b8e9bf443f67cf9dff1573fb635a0252bda9a" },
"validator": { "path": "tools/validate.py", "sha256": "776009ef0f3691e60cc87df3f0468181ee7a827be1bd0f73c77fdb68d3ed31c0" }
},
"signatures": []
}
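
Every `hashedPath` entry in the manifest above pins an artifact to a sha256 digest. The sketch below shows how a consumer could re-verify the file entries; it is an illustration only, not the actual `tools/verify_manifest.py` (the manifest file name is an assumption, and directory entries such as case `source` need a tree hash, so they are skipped).

```python
#!/usr/bin/env python3
"""Recompute sha256 digests for hashedPath entries in a kit manifest (sketch)."""
import hashlib
import json
import pathlib
import sys


def iter_hashed(node):
    """Yield every dict carrying both 'path' and 'sha256' keys, at any depth."""
    if isinstance(node, dict):
        if "path" in node and "sha256" in node:
            yield node
        for value in node.values():
            yield from iter_hashed(value)
    elif isinstance(node, list):
        for item in node:
            yield from iter_hashed(item)


root = pathlib.Path(sys.argv[1] if len(sys.argv) > 1 else ".")
manifest = json.loads((root / "manifest.sample.json").read_text())  # assumed name
mismatches = []
for entry in iter_hashed(manifest):
    target = root / entry["path"]
    if not target.is_file():
        continue  # directories need a tree hash; out of scope for this sketch
    if hashlib.sha256(target.read_bytes()).hexdigest() != entry["sha256"].lower():
        mismatches.append(entry["path"])
sys.exit(f"hash mismatch: {mismatches}" if mismatches else 0)
```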

View File

@@ -1,145 +0,0 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://stellaops.local/reachability/benchmark-manifest.schema.json",
"title": "Reachability Benchmark Kit Manifest",
"type": "object",
"additionalProperties": false,
"required": [
"schemaVersion",
"kitId",
"version",
"createdAt",
"sourceDateEpoch",
"cases",
"artifacts",
"tools"
],
"properties": {
"schemaVersion": { "type": "string", "pattern": "^1\\.0\\.\\d+$" },
"kitId": { "type": "string", "pattern": "^reachability-benchmark:[A-Za-z0-9._:-]+$" },
"version": { "type": "string" },
"createdAt": { "type": "string", "format": "date-time" },
"sourceDateEpoch": { "type": "integer", "minimum": 0 },
"resourceLimits": {
"type": "object",
"additionalProperties": false,
"properties": {
"cpu": { "type": "string" },
"memory": { "type": "string" }
}
},
"cases": {
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"additionalProperties": false,
"required": ["id", "language", "hashes", "truth", "sandbox", "redaction"],
"properties": {
"id": { "type": "string" },
"language": { "type": "string" },
"size": { "type": "string", "enum": ["small", "medium", "large"] },
"hashes": {
"type": "object",
"additionalProperties": false,
"properties": {
"source": { "$ref": "#/definitions/hashedPath" },
"binary": { "$ref": "#/definitions/hashedPath" },
"sbom": { "$ref": "#/definitions/hashedPath" },
"entrypoints": { "$ref": "#/definitions/hashedPath" },
"case": { "$ref": "#/definitions/hashedPath" },
"truth": { "$ref": "#/definitions/hashedPath" },
"coverage": { "$ref": "#/definitions/hashedPath" },
"traces": { "$ref": "#/definitions/hashedPath" },
"attestation": { "$ref": "#/definitions/hashedPath" }
},
"required": ["case", "entrypoints", "binary", "sbom", "truth"]
},
"truth": {
"type": "object",
"required": ["label", "confidence"],
"properties": {
"label": { "type": "string", "enum": ["reachable", "unreachable", "unknown"] },
"confidence": { "type": "string", "enum": ["high", "medium", "low"] },
"rationale": { "type": "string" }
}
},
"sandbox": {
"type": "object",
"additionalProperties": false,
"properties": {
"network": { "type": "string", "enum": ["none", "loopback", "local"] },
"privileges": { "type": "string", "enum": ["rootless", "root"] }
}
},
"redaction": {
"type": "object",
"additionalProperties": false,
"properties": {
"pii": { "type": "boolean" },
"policy": { "type": "string" }
}
}
}
}
},
"artifacts": {
"type": "object",
"additionalProperties": false,
"required": ["submissionSchema", "scorer"],
"properties": {
"submissionSchema": { "$ref": "#/definitions/hashedPath" },
"scorer": { "$ref": "#/definitions/hashedPath" },
"baselineSubmissions": {
"type": "array",
"items": {
"type": "object",
"required": ["tool", "version", "submission"],
"additionalProperties": false,
"properties": {
"tool": { "type": "string" },
"version": { "type": "string" },
"submission": { "$ref": "#/definitions/hashedPath" },
"dsse": { "$ref": "#/definitions/hashedPath" }
}
}
}
}
},
"tools": {
"type": "object",
"additionalProperties": false,
"required": ["builder", "validator"],
"properties": {
"builder": { "$ref": "#/definitions/hashedPath" },
"validator": { "$ref": "#/definitions/hashedPath" }
}
},
"signatures": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"required": ["type", "keyId", "signature"],
"properties": {
"type": { "type": "string", "enum": ["dsse", "jws-detached"] },
"keyId": { "type": "string" },
"signature": { "type": "string" },
"envelopeDigest": { "type": "string", "pattern": "^[A-Fa-f0-9]{64}$" }
}
}
}
},
"definitions": {
"hashedPath": {
"type": "object",
"additionalProperties": false,
"required": ["path", "sha256"],
"properties": {
"path": { "type": "string" },
"sha256": { "type": "string", "pattern": "^[A-Fa-f0-9]{64}$" },
"dsse": { "type": "string" }
}
}
}
}
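
Validating a manifest against the schema above takes only a few lines. A sketch, assuming the `jsonschema` package (4.x) is vendored for offline use; the shipped validator is `tools/validate.py`, which may differ.

```python
#!/usr/bin/env python3
"""Validate a kit manifest against benchmark-manifest.schema.json (sketch)."""
import json
import pathlib
import sys

from jsonschema import Draft202012Validator  # assumed vendored for offline use

schema = json.loads(pathlib.Path("benchmark-manifest.schema.json").read_text())
manifest = json.loads(pathlib.Path(sys.argv[1]).read_text())

errors = sorted(Draft202012Validator(schema).iter_errors(manifest), key=lambda e: e.json_path)
for err in errors:
    print(f"{err.json_path}: {err.message}")
sys.exit(1 if errors else 0)
```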

View File

@@ -1,5 +0,0 @@
# Deterministic defaults for C cases
SOURCE_DATE_EPOCH=1730000000
TZ=UTC
LANG=C
LC_ALL=C

View File

@@ -1,3 +0,0 @@
# Deterministic defaults for Java cases
SOURCE_DATE_EPOCH=1730000000
JAVA_TOOL_OPTIONS="-Duser.country=US -Duser.language=en -Duser.timezone=UTC -Dfile.encoding=UTF-8"

View File

@@ -1,5 +0,0 @@
# Deterministic defaults for JavaScript/Node cases
SOURCE_DATE_EPOCH=1730000000
NODE_OPTIONS=--no-deprecation
NPM_CONFIG_FUND=false
NPM_CONFIG_AUDIT=false

View File

@@ -1,5 +0,0 @@
# Deterministic defaults for Python cases
SOURCE_DATE_EPOCH=1730000000
PYTHONHASHSEED=1
PIP_DISABLE_PIP_VERSION_CHECK=1
PYTHONUTF8=1
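
All four templates above are flat `KEY=VALUE` files. A hypothetical wrapper (not part of the kit) that applies one to a case build could look like this:

```python
#!/usr/bin/env python3
"""Apply a determinism env template to a subprocess environment (sketch)."""
import os
import pathlib
import subprocess
import sys


def load_env_template(path: str) -> dict:
    """Parse simple KEY=VALUE lines, skipping blanks and # comments."""
    env = {}
    for line in pathlib.Path(path).read_text().splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        key, _, value = line.partition("=")
        env[key.strip()] = value.strip().strip('"')
    return env


if __name__ == "__main__":
    template, *command = sys.argv[1:]  # e.g. python.env ./build/build.sh
    merged = {**os.environ, **load_env_template(template)}
    subprocess.run(command, env=merged, check=True)
```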

View File

@@ -1,36 +0,0 @@
{
"version": "1.0.0",
"cases": [
{
"case_id": "c-guarded-system:001",
"case_version": "1.0.0",
"notes": "system() is gated by ALLOW_CMD env; default unreachable.",
"sinks": [
{
"sink_id": "GuardedSystem::main",
"label": "unreachable",
"confidence": "medium",
"static_evidence": {
"call_path": [
"main(argv)",
"run_guarded",
"system() (guarded by ALLOW_CMD)"
]
},
"dynamic_evidence": {
"covered_by_tests": [
"tests/run-tests.sh"
],
"coverage_files": [
"outputs/coverage.json"
]
},
"config_conditions": [
"ALLOW_CMD=1"
],
"notes": "Sink activates only when ALLOW_CMD=1; default benchmark assumes flag disabled."
}
]
}
]
}

View File

@@ -1,33 +0,0 @@
{
"version": "1.0.0",
"cases": [
{
"case_id": "c-memcpy-overflow:001",
"case_version": "1.0.0",
"notes": "Attacker-controlled length passed to memcpy without bounds.",
"sinks": [
{
"sink_id": "Overflow::process",
"label": "reachable",
"confidence": "medium",
"static_evidence": {
"call_path": [
"process_buffer(len)",
"memcpy(dst, src, len)"
]
},
"dynamic_evidence": {
"covered_by_tests": [
"tests/run-tests.sh"
],
"coverage_files": [
"outputs/coverage.json"
]
},
"config_conditions": [],
"notes": "len parameter flows directly to memcpy; overflow possible when len > sizeof(dst)."
}
]
}
]
}

View File

@@ -1,34 +0,0 @@
{
"version": "1.0.0",
"cases": [
{
"case_id": "c-unsafe-system:001",
"case_version": "1.0.0",
"notes": "User input forwarded to system() without validation.",
"sinks": [
{
"sink_id": "UnsafeSystem::main",
"label": "reachable",
"confidence": "high",
"static_evidence": {
"call_path": [
"main(argv)",
"run_command",
"system()"
]
},
"dynamic_evidence": {
"covered_by_tests": [
"tests/run-tests.sh"
],
"coverage_files": [
"outputs/coverage.json"
]
},
"config_conditions": [],
"notes": "Command injection sink reachable with any argument."
}
]
}
]
}

View File

@@ -1,32 +0,0 @@
{
"version": "1.0.0",
"cases": [
{
"case_id": "java-spring-deserialize:201",
"case_version": "1.0.0",
"notes": "Java deserialization sink reachable",
"sinks": [
{
"sink_id": "JavaDeserialize::handleRequest",
"label": "reachable",
"confidence": "high",
"dynamic_evidence": {
"covered_by_tests": [
"src/AppTest.java"
],
"coverage_files": []
},
"static_evidence": {
"call_path": [
"POST /api/upload",
"App.handleRequest",
"ObjectInputStream.readObject"
]
},
"config_conditions": [],
"notes": "No guard; base64 payload deserialized"
}
]
}
]
}

View File

@@ -1,29 +0,0 @@
{
"version": "1.0.0",
"cases": [
{
"case_id": "java-spring-guarded:202",
"case_version": "1.0.0",
"notes": "Deserialization unreachable by default",
"sinks": [
{
"sink_id": "JavaDeserializeGuarded::handleRequest",
"label": "unreachable",
"confidence": "high",
"dynamic_evidence": {
"covered_by_tests": ["src/AppTest.java"],
"coverage_files": []
},
"static_evidence": {
"call_path": [
"POST /api/upload",
"App.handleRequest",
"guard: ALLOW_DESER!=true"
]
},
"config_conditions": ["ALLOW_DESER == 'true'"]
}
]
}
]
}

View File

@@ -1,34 +0,0 @@
{
"version": "1.0.0",
"cases": [
{
"case_id": "js-express-eval:003",
"case_version": "1.0.0",
"notes": "Admin eval reachable",
"sinks": [
{
"sink_id": "ExpressEval::exec",
"label": "reachable",
"confidence": "high",
"dynamic_evidence": {
"covered_by_tests": [
"tests/test_reach.js"
],
"coverage_files": [
"outputs/coverage.json"
]
},
"static_evidence": {
"call_path": [
"POST /api/admin/exec",
"createServer.exec",
"eval(code)"
]
},
"config_conditions": [],
"notes": "No guard on admin path"
}
]
}
]
}

View File

@@ -1,36 +0,0 @@
{
"version": "1.0.0",
"cases": [
{
"case_id": "js-express-guarded:004",
"case_version": "1.0.0",
"notes": "Admin exec unreachable when ALLOW_EXEC!=true",
"sinks": [
{
"sink_id": "ExpressGuarded::exec",
"label": "unreachable",
"confidence": "high",
"dynamic_evidence": {
"covered_by_tests": [
"tests/test_unreachable.js"
],
"coverage_files": [
"outputs/coverage.json"
]
},
"static_evidence": {
"call_path": [
"POST /api/admin/exec",
"createServer.exec",
"guard: ALLOW_EXEC!=true"
]
},
"config_conditions": [
"ALLOW_EXEC == 'true'"
],
"notes": "Only reachable when ALLOW_EXEC=true"
}
]
}
]
}

View File

@@ -1,34 +0,0 @@
{
"version": "1.0.0",
"cases": [
{
"case_id": "js-fastify-template:005",
"case_version": "1.0.0",
"notes": "Template rendering reachable",
"sinks": [
{
"sink_id": "FastifyTemplate::render",
"label": "reachable",
"confidence": "high",
"dynamic_evidence": {
"covered_by_tests": [
"tests/test_reach.js"
],
"coverage_files": [
"outputs/coverage.json"
]
},
"static_evidence": {
"call_path": [
"POST /api/render",
"createServer.render",
"template replace"
]
},
"config_conditions": [],
"notes": "Simple template replace used as sink"
}
]
}
]
}

View File

@@ -1,36 +0,0 @@
{
"version": "1.0.0",
"cases": [
{
"case_id": "js-guarded-eval:002",
"case_version": "1.0.0",
"notes": "Eval sink guarded by FEATURE_ENABLE; unreachable when flag off",
"sinks": [
{
"sink_id": "GuardedEval::handleRequest",
"label": "unreachable",
"confidence": "high",
"dynamic_evidence": {
"covered_by_tests": [
"tests/test_unreachable.js"
],
"coverage_files": [
"outputs/coverage.json"
]
},
"static_evidence": {
"call_path": [
"POST /api/exec",
"app.js::handleRequest",
"guard: FEATURE_ENABLE != 1"
]
},
"config_conditions": [
"FEATURE_ENABLE == '1'"
],
"notes": "Sink only executes when FEATURE_ENABLE=1"
}
]
}
]
}

View File

@@ -1,34 +0,0 @@
{
"version": "1.0.0",
"cases": [
{
"case_id": "js-unsafe-eval:001",
"case_version": "1.0.0",
"notes": "Unsafe eval sink reachable via POST /api/exec",
"sinks": [
{
"sink_id": "UnsafeEval::handleRequest",
"label": "reachable",
"confidence": "high",
"dynamic_evidence": {
"covered_by_tests": [
"tests/test_reach.js"
],
"coverage_files": [
"outputs/coverage.json"
]
},
"static_evidence": {
"call_path": [
"POST /api/exec",
"app.js::handleRequest",
"eval(code)"
]
},
"config_conditions": [],
"notes": "No guards; direct eval on user input"
}
]
}
]
}

View File

@@ -1,34 +0,0 @@
{
"version": "1.0.0",
"cases": [
{
"case_id": "py-django-ssti:105",
"case_version": "1.0.0",
"notes": "Template rendering reachable (autoescape off)",
"sinks": [
{
"sink_id": "DjangoSSTI::render",
"label": "reachable",
"confidence": "high",
"dynamic_evidence": {
"covered_by_tests": [
"tests/test_reach.py"
],
"coverage_files": [
"outputs/coverage.json"
]
},
"static_evidence": {
"call_path": [
"POST /render",
"app.handle_request",
"render"
]
},
"config_conditions": [],
"notes": "Autoescape disabled"
}
]
}
]
}

View File

@@ -1,36 +0,0 @@
{
"version": "1.0.0",
"cases": [
{
"case_id": "py-fastapi-guarded:104",
"case_version": "1.0.0",
"notes": "Eval unreachable unless ALLOW_EXEC=true",
"sinks": [
{
"sink_id": "FastApiGuarded::handle_request",
"label": "unreachable",
"confidence": "high",
"dynamic_evidence": {
"covered_by_tests": [
"tests/test_unreachable.py"
],
"coverage_files": [
"outputs/coverage.json"
]
},
"static_evidence": {
"call_path": [
"POST /exec",
"app.handle_request",
"guard: ALLOW_EXEC!=true"
]
},
"config_conditions": [
"ALLOW_EXEC == 'true'"
],
"notes": "Feature flag blocks sink by default"
}
]
}
]
}

View File

@@ -1,34 +0,0 @@
{
"version": "1.0.0",
"cases": [
{
"case_id": "py-flask-template:103",
"case_version": "1.0.0",
"notes": "Template rendering reachable",
"sinks": [
{
"sink_id": "FlaskTemplate::render",
"label": "reachable",
"confidence": "high",
"dynamic_evidence": {
"covered_by_tests": [
"tests/test_reach.py"
],
"coverage_files": [
"outputs/coverage.json"
]
},
"static_evidence": {
"call_path": [
"POST /render",
"app.handle_request",
"render"
]
},
"config_conditions": [],
"notes": "Simple template placeholder replacement"
}
]
}
]
}

View File

@@ -1,36 +0,0 @@
{
"version": "1.0.0",
"cases": [
{
"case_id": "py-guarded-exec:102",
"case_version": "1.0.0",
"notes": "Eval unreachable unless FEATURE_ENABLE=1",
"sinks": [
{
"sink_id": "PyGuardedExec::handle_request",
"label": "unreachable",
"confidence": "high",
"dynamic_evidence": {
"covered_by_tests": [
"tests/test_unreachable.py"
],
"coverage_files": [
"outputs/coverage.json"
]
},
"static_evidence": {
"call_path": [
"POST /api/exec",
"app.handle_request",
"guard: FEATURE_ENABLE != 1"
]
},
"config_conditions": [
"FEATURE_ENABLE == '1'"
],
"notes": "Feature flag required"
}
]
}
]
}

View File

@@ -1,34 +0,0 @@
{
"version": "1.0.0",
"cases": [
{
"case_id": "py-unsafe-exec:101",
"case_version": "1.0.0",
"notes": "Eval reachable",
"sinks": [
{
"sink_id": "PyUnsafeExec::handle_request",
"label": "reachable",
"confidence": "high",
"dynamic_evidence": {
"covered_by_tests": [
"tests/test_reach.py"
],
"coverage_files": [
"outputs/coverage.json"
]
},
"static_evidence": {
"call_path": [
"POST /api/exec",
"app.handle_request",
"eval(code)"
]
},
"config_conditions": [],
"notes": "No guards"
}
]
}
]
}
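
All truth files above share one shape: per-case `sinks`, each carrying a `label`. The core join a scorer performs against a submission can be sketched as below; this is illustrative only, and the shipped scorer is `tools/scorer/rb_score.py`.

```python
#!/usr/bin/env python3
"""Join submission sink labels against truth labels and count agreement (sketch)."""
import json
import pathlib
import sys


def sink_labels(doc: dict) -> dict:
    """Map (case_id, sink_id) -> label for a truth or submission document."""
    return {
        (case["case_id"], sink["sink_id"]): sink["label"]
        for case in doc.get("cases", [])
        for sink in case.get("sinks", [])
    }


truth = sink_labels(json.loads(pathlib.Path(sys.argv[1]).read_text()))
submitted = sink_labels(json.loads(pathlib.Path(sys.argv[2]).read_text()))
matches = sum(1 for key, label in truth.items() if submitted.get(key) == label)
print(f"{matches}/{len(truth)} sink labels agree with ground truth")
```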

View File

@@ -1,48 +0,0 @@
id: "c-guarded-system:001"
language: c
project: guarded-system
version: "1.0.0"
description: "Command execution guarded by ALLOW_CMD flag (default unreachable)."
entrypoints:
- "main(argv)"
sinks:
- id: "GuardedSystem::main"
path: "src/main.c::main"
kind: "command"
location:
file: src/main.c
line: 26
notes: "system() only runs when ALLOW_CMD=1."
environment:
os_image: "gcc:13-bookworm"
runtime:
gcc: "13"
source_date_epoch: 1730000000
resource_limits:
cpu: "2"
memory: "4Gi"
build:
command: "./build/build.sh"
source_date_epoch: 1730000000
outputs:
artifact_path: outputs/binary.tar.gz
sbom_path: outputs/sbom.cdx.json
coverage_path: outputs/coverage.json
traces_dir: outputs/traces
attestation_path: outputs/attestation.json
test:
command: "./tests/run-tests.sh"
expected_coverage:
- outputs/coverage.json
expected_traces:
- outputs/traces/traces.json
ground_truth:
summary: "Without ALLOW_CMD, the system() sink remains unreachable; with ALLOW_CMD=1, it executes."
evidence_files:
- "../../../benchmark/truth/c-guarded-system.json"
sandbox:
network: loopback
privileges: rootless
redaction:
pii: false
policy: "benchmark-default/v1"

Some files were not shown because too many files have changed in this diff.