Compare commits

1 Commit

Author SHA1 Message Date
master
89543de7f1 feat: Implement vulnerability token signing and verification utilities
Some checks failed: Docs CI / lint-and-preview (push) has been cancelled.
- Added VulnTokenSigner for signing JWT tokens with specified algorithms and keys.
- Introduced VulnTokenUtilities for resolving tenant and subject claims, and sanitizing context dictionaries.
- Created VulnTokenVerificationUtilities for parsing tokens, verifying signatures, and deserializing payloads.
- Developed VulnWorkflowAntiForgeryTokenIssuer for issuing anti-forgery tokens with configurable options.
- Implemented VulnWorkflowAntiForgeryTokenVerifier for verifying anti-forgery tokens and validating payloads.
- Added AuthorityVulnerabilityExplorerOptions to manage configuration for vulnerability explorer features.
- Included tests for FilesystemPackRunDispatcher to ensure proper job handling under egress policy restrictions.
2025-11-03 10:02:29 +02:00
18618 changed files with 341211 additions and 4097809 deletions

View File

@@ -1,37 +0,0 @@
{
"permissions": {
"allow": [
"Bash(dotnet --list-sdks:*)",
"Bash(winget install:*)",
"Bash(dotnet restore:*)",
"Bash(dotnet nuget:*)",
"Bash(csc -parse:*)",
"Bash(grep:*)",
"Bash(dotnet build:*)",
"Bash(cat:*)",
"Bash(copy:*)",
"Bash(dotnet test:*)",
"Bash(dir:*)",
"Bash(Select-Object -ExpandProperty FullName)",
"Bash(echo:*)",
"Bash(Out-File -FilePath \"E:\\dev\\git.stella-ops.org\\src\\Scanner\\__Libraries\\StellaOps.Scanner.Surface\\StellaOps.Scanner.Surface.csproj\" -Encoding utf8)",
"Bash(wc:*)",
"Bash(find:*)",
"WebFetch(domain:docs.gradle.org)",
"WebSearch",
"Bash(dotnet msbuild:*)",
"Bash(test:*)",
"Bash(taskkill:*)",
"Bash(timeout /t)",
"Bash(dotnet clean:*)",
"Bash(if not exist \"E:\\dev\\git.stella-ops.org\\src\\Scanner\\__Tests\\StellaOps.Scanner.Analyzers.Lang.Java.Tests\\Internal\" mkdir \"E:\\dev\\git.stella-ops.org\\src\\Scanner\\__Tests\\StellaOps.Scanner.Analyzers.Lang.Java.Tests\\Internal\")",
"Bash(if not exist \"E:\\dev\\git.stella-ops.org\\src\\Scanner\\__Tests\\StellaOps.Scanner.Analyzers.Lang.Node.Tests\\Internal\" mkdir \"E:\\dev\\git.stella-ops.org\\src\\Scanner\\__Tests\\StellaOps.Scanner.Analyzers.Lang.Node.Tests\\Internal\")",
"Bash(rm:*)",
"Bash(if not exist \"C:\\dev\\New folder\\git.stella-ops.org\\docs\\implplan\\archived\" mkdir \"C:\\dev\\New folder\\git.stella-ops.org\\docs\\implplan\\archived\")",
"Bash(del \"C:\\dev\\New folder\\git.stella-ops.org\\docs\\implplan\\SPRINT_0510_0001_0001_airgap.md\")"
],
"deny": [],
"ask": []
},
"outputStyle": "default"
}

View File

@@ -1,12 +0,0 @@
{
"version": 1,
"isRoot": true,
"tools": {
"dotnet-stryker": {
"version": "4.4.0",
"commands": [
"stryker"
]
}
}
}

View File

@@ -1,22 +0,0 @@
.git
.gitignore
.gitea
.venv
bin
obj
**/bin
**/obj
.nuget
**/node_modules
**/dist
**/coverage
**/*.user
**/*.suo
**/*.cache
**/.vscode
**/.idea
**/.DS_Store
**/TestResults
**/out
**/packages
/tmp

View File

@@ -1,5 +0,0 @@
[src/Scanner/StellaOps.Scanner.Analyzers.Native/**.cs]
dotnet_diagnostic.CA2022.severity = none
[src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Native.Tests/**.cs]
dotnet_diagnostic.CA2022.severity = none

.gitattributes (vendored): 3 lines changed
View File

@@ -1,5 +1,2 @@
 # Ensure analyzer fixture assets keep LF endings for deterministic hashes
 src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Fixtures/** text eol=lf
-# Ensure reachability sample assets keep LF endings for deterministic hashes
-tests/reachability/samples-public/** text eol=lf

View File

@@ -1,22 +0,0 @@
# .gitea AGENTS
## Purpose & Scope
- Working directory: `.gitea/` (CI workflows, templates, pipeline configs).
- Roles: DevOps engineer, QA automation.
## Required Reading (treat as read before DOING)
- `docs/README.md`
- `docs/modules/ci/architecture.md`
- `docs/modules/devops/architecture.md`
- Relevant sprint file(s).
## Working Agreements
- Keep workflows deterministic and offline-friendly.
- Pin versions for tooling where possible.
- Use UTC timestamps in comments/logs.
- Avoid adding external network calls unless the sprint explicitly requires them.
- Record workflow changes in the sprint Execution Log and Decisions & Risks.
## Validation
- Manually validate YAML structure and paths.
- Ensure workflow paths match repository layout.
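For the YAML validation step, one lightweight smoke check is to parse each workflow with the jq-style yq already used elsewhere in this repo (the workflow glob below is illustrative):
for wf in .gitea/workflows/*.yml; do
yq -r '.name // "unnamed"' "$wf" >/dev/null || echo "invalid YAML: $wf"
done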

View File

@@ -1,43 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# DEVOPS-CONTAINERS-46-001: build air-gap bundle from existing buildx OCI archive
if [[ $# -lt 1 ]]; then
echo "Usage: $0 <image-tag> [bundle-dir]" >&2
exit 64
fi
IMAGE_TAG=$1
BUNDLE_DIR=${2:-"out/bundles/$(echo "$IMAGE_TAG" | tr '/:' '__')"}
SRC_DIR="out/buildx/$(echo "$IMAGE_TAG" | tr '/:' '__')"
OCI_ARCHIVE="${SRC_DIR}/image.oci"
if [[ ! -f "$OCI_ARCHIVE" ]]; then
echo "[airgap] OCI archive not found at $OCI_ARCHIVE. Run build-multiarch first." >&2
exit 66
fi
mkdir -p "$BUNDLE_DIR"
SBOM_FILE=""
if [[ -f "${SRC_DIR}/sbom.syft.json" ]]; then
SBOM_FILE="${SRC_DIR}/sbom.syft.json"
fi
cat > "${BUNDLE_DIR}/bundle-manifest.json" <<EOF
{
"image": "${IMAGE_TAG}",
"oci_archive": "image.oci",
"sbom": "$( [[ -n "$SBOM_FILE" ]] && echo sbom.syft.json || echo null )",
"created_at": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
}
EOF
cp "$OCI_ARCHIVE" "${BUNDLE_DIR}/image.oci"
[[ -n "$SBOM_FILE" ]] && cp "$SBOM_FILE" "${BUNDLE_DIR}/sbom.syft.json"
[[ -f "${SRC_DIR}/image.sha256" ]] && cp "${SRC_DIR}/image.sha256" "${BUNDLE_DIR}/image.sha256"
[[ -f "${SRC_DIR}/image.sig" ]] && cp "${SRC_DIR}/image.sig" "${BUNDLE_DIR}/image.sig"
tar -C "$BUNDLE_DIR" -czf "${BUNDLE_DIR}.tgz" .
echo "[airgap] bundle created at ${BUNDLE_DIR}.tgz"

View File

@@ -1,131 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# DEVOPS-CLI-41-001: Build multi-platform CLI binaries with SBOM and checksums.
# Updated: SPRINT_5100_0001_0001 - CLI Consolidation: includes Aoc and Symbols plugins
RIDS="${RIDS:-linux-x64,win-x64,osx-arm64}"
CONFIG="${CONFIG:-Release}"
PROJECT="src/Cli/StellaOps.Cli/StellaOps.Cli.csproj"
OUT_ROOT="out/cli"
SBOM_TOOL="${SBOM_TOOL:-syft}" # syft|none
SIGN="${SIGN:-false}"
COSIGN_KEY="${COSIGN_KEY:-}"
# CLI Plugins to include in the distribution
# SPRINT_5100_0001_0001: CLI Consolidation - stella aoc and stella symbols
PLUGIN_PROJECTS=(
"src/Cli/__Libraries/StellaOps.Cli.Plugins.Aoc/StellaOps.Cli.Plugins.Aoc.csproj"
"src/Cli/__Libraries/StellaOps.Cli.Plugins.Symbols/StellaOps.Cli.Plugins.Symbols.csproj"
)
PLUGIN_MANIFESTS=(
"src/Cli/plugins/cli/StellaOps.Cli.Plugins.Aoc/stellaops.cli.plugins.aoc.manifest.json"
"src/Cli/plugins/cli/StellaOps.Cli.Plugins.Symbols/stellaops.cli.plugins.symbols.manifest.json"
)
IFS=',' read -ra TARGETS <<< "$RIDS"
mkdir -p "$OUT_ROOT"
if ! command -v dotnet >/dev/null 2>&1; then
echo "[cli-build] dotnet CLI not found" >&2
exit 69
fi
generate_sbom() {
local dir="$1"
local sbom="$2"
if [[ "$SBOM_TOOL" == "syft" ]] && command -v syft >/dev/null 2>&1; then
syft "dir:${dir}" -o json > "$sbom"
fi
}
sign_file() {
local file="$1"
if [[ "$SIGN" == "true" && -n "$COSIGN_KEY" && -x "$(command -v cosign || true)" ]]; then
COSIGN_EXPERIMENTAL=1 cosign sign-blob --key "$COSIGN_KEY" --output-signature "${file}.sig" "$file"
fi
}
for rid in "${TARGETS[@]}"; do
echo "[cli-build] publishing for $rid"
out_dir="${OUT_ROOT}/${rid}"
publish_dir="${out_dir}/publish"
plugins_dir="${publish_dir}/plugins/cli"
mkdir -p "$publish_dir"
mkdir -p "$plugins_dir"
# Build main CLI
dotnet publish "$PROJECT" -c "$CONFIG" -r "$rid" \
-o "$publish_dir" \
--self-contained true \
-p:PublishSingleFile=true \
-p:PublishTrimmed=false \
-p:DebugType=None \
>/dev/null
# Build and copy plugins
# SPRINT_5100_0001_0001: CLI Consolidation
for i in "${!PLUGIN_PROJECTS[@]}"; do
plugin_project="${PLUGIN_PROJECTS[$i]}"
manifest_path="${PLUGIN_MANIFESTS[$i]}"
if [[ ! -f "$plugin_project" ]]; then
echo "[cli-build] WARNING: Plugin project not found: $plugin_project"
continue
fi
# Get plugin name from project path
plugin_name=$(basename "$(dirname "$plugin_project")")
plugin_out="${plugins_dir}/${plugin_name}"
mkdir -p "$plugin_out"
echo "[cli-build] building plugin: $plugin_name"
dotnet publish "$plugin_project" -c "$CONFIG" -r "$rid" \
-o "$plugin_out" \
--self-contained false \
-p:DebugType=None \
>/dev/null 2>&1 || echo "[cli-build] WARNING: Plugin build failed for $plugin_name (may have pre-existing errors)"
# Copy manifest file
if [[ -f "$manifest_path" ]]; then
cp "$manifest_path" "$plugin_out/"
else
echo "[cli-build] WARNING: Manifest not found: $manifest_path"
fi
done
# Package
archive_ext="tar.gz"
archive_cmd=(tar -C "$publish_dir" -czf)
if [[ "$rid" == win-* ]]; then
archive_ext="zip"
archive_cmd=(zip -jr)
fi
archive_name="stella-cli-${rid}.${archive_ext}"
archive_path="${out_dir}/${archive_name}"
"${archive_cmd[@]}" "$archive_path" "$publish_dir"
sha256sum "$archive_path" > "${archive_path}.sha256"
sign_file "$archive_path"
# SBOM
generate_sbom "$publish_dir" "${archive_path}.sbom.json"
done
# Build manifest
manifest="${OUT_ROOT}/manifest.json"
cat > "$manifest" <<EOF
{
"generated_at": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")",
"config": "$CONFIG",
"rids": [$(printf '"%s",' "${TARGETS[@]}" | sed 's/,$//')],
"plugins": ["stellaops.cli.plugins.aoc", "stellaops.cli.plugins.symbols"],
"artifacts_root": "$OUT_ROOT",
"notes": "CLI Consolidation (SPRINT_5100_0001_0001) - includes aoc and symbols plugins"
}
EOF
echo "[cli-build] artifacts in $OUT_ROOT"

View File

@@ -1,93 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Multi-arch buildx helper for DEVOPS-CONTAINERS-44-001
# Requirements: docker CLI with buildx, optional syft (for SBOM) and cosign (for signing).
usage() {
echo "Usage: $0 <image-tag> <context-dir> [--platform linux/amd64,linux/arm64] [--push] [--sbom syft|none] [--sign <cosign-key>]" >&2
exit 64
}
if [[ $# -lt 2 ]]; then
usage
fi
IMAGE_TAG=$1; shift
CONTEXT_DIR=$1; shift
PLATFORMS="linux/amd64,linux/arm64"
PUSH=false
SBOM_TOOL="syft"
COSIGN_KEY=""
while [[ $# -gt 0 ]]; do
case "$1" in
--platform) PLATFORMS="$2"; shift 2;;
--push) PUSH=true; shift;;
--sbom) SBOM_TOOL="$2"; shift 2;;
--sign) COSIGN_KEY="$2"; shift 2;;
*) echo "Unknown option: $1" >&2; usage;;
esac
done
if ! command -v docker >/dev/null 2>&1; then
echo "[buildx] docker CLI not found" >&2
exit 69
fi
OUT_ROOT="out/buildx/$(echo "$IMAGE_TAG" | tr '/:' '__')"
mkdir -p "$OUT_ROOT"
BUILDER_NAME="stellaops-multiarch"
if ! docker buildx inspect "$BUILDER_NAME" >/dev/null 2>&1; then
docker buildx create --name "$BUILDER_NAME" --driver docker-container --use >/dev/null
else
docker buildx use "$BUILDER_NAME" >/dev/null
fi
BUILD_OPTS=(
--platform "$PLATFORMS"
-t "$IMAGE_TAG"
--provenance=false
--sbom=false
--output "type=oci,dest=${OUT_ROOT}/image.oci"
)
if $PUSH; then
BUILD_OPTS+=("--push")
fi
echo "[buildx] building $IMAGE_TAG for $PLATFORMS"
docker buildx build "${BUILD_OPTS[@]}" "$CONTEXT_DIR"
echo "[buildx] computing digest"
IMAGE_DIGEST=$(sha256sum "${OUT_ROOT}/image.oci" | awk '{print $1}')
echo "$IMAGE_DIGEST image.oci" > "${OUT_ROOT}/image.sha256"
if [[ "$SBOM_TOOL" == "syft" ]] && command -v syft >/dev/null 2>&1; then
echo "[buildx] generating SBOM via syft"
syft "oci-archive:${OUT_ROOT}/image.oci" -o json > "${OUT_ROOT}/sbom.syft.json"
else
echo "[buildx] skipping SBOM (tool=$SBOM_TOOL, syft available? $(command -v syft >/dev/null && echo yes || echo no))"
fi
if [[ -n "$COSIGN_KEY" ]] && command -v cosign >/dev/null 2>&1; then
echo "[buildx] signing digest with cosign key"
COSIGN_EXPERIMENTAL=1 cosign sign-blob --key "$COSIGN_KEY" --output-signature "${OUT_ROOT}/image.sig" --output-certificate "${OUT_ROOT}/image.cert" "${OUT_ROOT}/image.oci"
else
echo "[buildx] signature skipped (no key provided or cosign missing)"
fi
cat > "${OUT_ROOT}/build-metadata.json" <<EOF
{
"image": "${IMAGE_TAG}",
"platforms": "${PLATFORMS}",
"pushed": ${PUSH},
"digest_sha256": "${IMAGE_DIGEST}",
"generated_at": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")",
"sbom": "$( [[ -f ${OUT_ROOT}/sbom.syft.json ]] && echo sbom.syft.json || echo null )"
}
EOF
echo "[buildx] artifacts written to ${OUT_ROOT}"

View File

@@ -1,43 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
STAGED_DIR="evidence-locker/signals/2025-12-05"
MODULE_ROOT="docs/modules/signals"
TAR_OUT="/tmp/signals-evidence.tar"
if [[ -z "${EVIDENCE_LOCKER_URL:-}" || -z "${CI_EVIDENCE_LOCKER_TOKEN:-}" ]]; then
echo "EVIDENCE_LOCKER_URL and CI_EVIDENCE_LOCKER_TOKEN are required" >&2
exit 1
fi
tmpdir=$(mktemp -d)
trap 'rm -rf "$tmpdir"' EXIT
rsync -a --relative \
"$STAGED_DIR/SHA256SUMS" \
"$STAGED_DIR/confidence_decay_config.sigstore.json" \
"$STAGED_DIR/unknowns_scoring_manifest.sigstore.json" \
"$STAGED_DIR/heuristics_catalog.sigstore.json" \
"$MODULE_ROOT/decay/confidence_decay_config.yaml" \
"$MODULE_ROOT/unknowns/unknowns_scoring_manifest.json" \
"$MODULE_ROOT/heuristics/heuristics.catalog.json" \
"$tmpdir/"
pushd "$tmpdir/$STAGED_DIR" >/dev/null
sha256sum --check SHA256SUMS
popd >/dev/null
# Build deterministic tarball
pushd "$tmpdir" >/dev/null
tar --sort=name --mtime="UTC 1970-01-01" --owner=0 --group=0 --numeric-owner \
-cf "$TAR_OUT" .
popd >/dev/null
sha256sum "$TAR_OUT"
curl --retry 3 --retry-delay 2 --fail \
-H "Authorization: Bearer $CI_EVIDENCE_LOCKER_TOKEN" \
-X PUT "$EVIDENCE_LOCKER_URL/signals/2025-12-05/signals-evidence.tar" \
--data-binary "@$TAR_OUT"
echo "Uploaded $TAR_OUT to $EVIDENCE_LOCKER_URL/signals/2025-12-05/"

View File

@@ -1,46 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Upload both Zastava and Signals evidence bundles to the locker.
# Requires EVIDENCE_LOCKER_URL and CI_EVIDENCE_LOCKER_TOKEN.
EVIDENCE_LOCKER_URL=${EVIDENCE_LOCKER_URL:-}
CI_EVIDENCE_LOCKER_TOKEN=${CI_EVIDENCE_LOCKER_TOKEN:-}
if [[ -z "$EVIDENCE_LOCKER_URL" || -z "$CI_EVIDENCE_LOCKER_TOKEN" ]]; then
echo "EVIDENCE_LOCKER_URL and CI_EVIDENCE_LOCKER_TOKEN are required" >&2
exit 1
fi
# Defaults
ZASTAVA_TAR=${ZASTAVA_TAR:-evidence-locker/zastava/2025-12-02/zastava-evidence.tar}
ZASTAVA_VERIFY=${ZASTAVA_VERIFY:-tools/zastava-verify-evidence-tar.sh}
ZASTAVA_PATH="$EVIDENCE_LOCKER_URL/zastava/2025-12-02/zastava-evidence.tar"
SIGNALS_TAR=${SIGNALS_TAR:-evidence-locker/signals/2025-12-05/signals-evidence.tar}
SIGNALS_VERIFY=${SIGNALS_VERIFY:-tools/signals-verify-evidence-tar.sh}
SIGNALS_PATH="$EVIDENCE_LOCKER_URL/signals/2025-12-05/signals-evidence.tar"
# Verify
if [[ -x "$ZASTAVA_VERIFY" ]]; then
"$ZASTAVA_VERIFY" "$ZASTAVA_TAR"
fi
if [[ -x "$SIGNALS_VERIFY" ]]; then
"$SIGNALS_VERIFY" "$SIGNALS_TAR"
fi
# Upload Zastava
curl --retry 3 --retry-delay 2 --fail \
-H "Authorization: Bearer $CI_EVIDENCE_LOCKER_TOKEN" \
-X PUT "$ZASTAVA_PATH" \
--data-binary @"$ZASTAVA_TAR"
echo "Uploaded Zastava evidence to $ZASTAVA_PATH"
# Upload Signals
curl --retry 3 --retry-delay 2 --fail \
-H "Authorization: Bearer $CI_EVIDENCE_LOCKER_TOKEN" \
-X PUT "$SIGNALS_PATH" \
--data-binary @"$SIGNALS_TAR"
echo "Uploaded Signals evidence to $SIGNALS_PATH"

View File

@@ -1,48 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
if [[ -z "${EVIDENCE_LOCKER_URL:-}" || -z "${CI_EVIDENCE_LOCKER_TOKEN:-}" ]]; then
echo "EVIDENCE_LOCKER_URL and CI_EVIDENCE_LOCKER_TOKEN are required" >&2
exit 1
fi
STAGED_DIR="evidence-locker/zastava/2025-12-02"
TAR_OUT="/tmp/zastava-evidence.tar"
MODULE_ROOT="docs/modules/zastava"
test -d "$MODULE_ROOT" || { echo "missing module root $MODULE_ROOT" >&2; exit 1; }
mkdir -p "$STAGED_DIR"
tmpdir=$(mktemp -d)
trap 'rm -rf "$tmpdir"' EXIT
rsync -a --relative \
"$MODULE_ROOT/SHA256SUMS" \
"$MODULE_ROOT/schemas/" \
"$MODULE_ROOT/exports/" \
"$MODULE_ROOT/thresholds.yaml" \
"$MODULE_ROOT/thresholds.yaml.dsse" \
"$MODULE_ROOT/kit/verify.sh" \
"$MODULE_ROOT/kit/README.md" \
"$MODULE_ROOT/kit/ed25519.pub" \
"$MODULE_ROOT/kit/zastava-kit.tzst" \
"$MODULE_ROOT/kit/zastava-kit.tzst.dsse" \
"$MODULE_ROOT/evidence/README.md" \
"$tmpdir/"
pushd "$tmpdir/docs/modules/zastava" >/dev/null
sha256sum --check SHA256SUMS
# Build deterministic tarball for reproducibility (payloads + DSSE)
tar --sort=name --mtime="UTC 1970-01-01" --owner=0 --group=0 --numeric-owner \
-cf "$TAR_OUT" .
popd >/dev/null
sha256sum "$TAR_OUT"
curl --retry 3 --retry-delay 2 --fail \
-H "Authorization: Bearer $CI_EVIDENCE_LOCKER_TOKEN" \
-X PUT "$EVIDENCE_LOCKER_URL/zastava/2025-12-02/zastava-evidence.tar" \
--data-binary "@$TAR_OUT"
echo "Uploaded $TAR_OUT to $EVIDENCE_LOCKER_URL/zastava/2025-12-02/"

View File

@@ -1,287 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# compute-reachability-metrics.sh
# Computes reachability metrics against ground-truth corpus
#
# Usage: ./compute-reachability-metrics.sh [options]
# --corpus-path PATH Path to ground-truth corpus (default: src/__Tests/reachability/corpus)
# --output FILE Output JSON file (default: stdout)
# --dry-run Show what would be computed without running scanner
# --strict Exit non-zero if any threshold is violated
# --verbose Enable verbose output
#
# Output: JSON with recall, precision, accuracy metrics per vulnerability class
# =============================================================================
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# Default paths
CORPUS_PATH="${REPO_ROOT}/src/__Tests/reachability/corpus"
OUTPUT_FILE=""
DRY_RUN=false
STRICT=false
VERBOSE=false
# Parse arguments
while [[ $# -gt 0 ]]; do
case "$1" in
--corpus-path)
CORPUS_PATH="$2"
shift 2
;;
--output)
OUTPUT_FILE="$2"
shift 2
;;
--dry-run)
DRY_RUN=true
shift
;;
--strict)
STRICT=true
shift
;;
--verbose)
VERBOSE=true
shift
;;
-h|--help)
head -20 "$0" | tail -15
exit 0
;;
*)
echo "Unknown option: $1" >&2
exit 1
;;
esac
done
log() {
if [[ "${VERBOSE}" == "true" ]]; then
echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] $*" >&2
fi
}
error() {
echo "[ERROR] $*" >&2
}
# Validate corpus exists
if [[ ! -d "${CORPUS_PATH}" ]]; then
error "Corpus directory not found: ${CORPUS_PATH}"
exit 1
fi
MANIFEST_FILE="${CORPUS_PATH}/manifest.json"
if [[ ! -f "${MANIFEST_FILE}" ]]; then
error "Corpus manifest not found: ${MANIFEST_FILE}"
exit 1
fi
log "Loading corpus from ${CORPUS_PATH}"
log "Manifest: ${MANIFEST_FILE}"
# Initialize counters for each vulnerability class
declare -A true_positives
declare -A false_positives
declare -A false_negatives
declare -A total_expected
CLASSES=("runtime_dep" "os_pkg" "code" "config")
for class in "${CLASSES[@]}"; do
true_positives[$class]=0
false_positives[$class]=0
false_negatives[$class]=0
total_expected[$class]=0
done
if [[ "${DRY_RUN}" == "true" ]]; then
log "[DRY RUN] Would process corpus fixtures..."
# Generate mock metrics for dry-run
cat <<EOF
{
"timestamp": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
"corpus_path": "${CORPUS_PATH}",
"dry_run": true,
"metrics": {
"runtime_dep": {
"recall": 0.96,
"precision": 0.94,
"f1_score": 0.95,
"total_expected": 100,
"true_positives": 96,
"false_positives": 6,
"false_negatives": 4
},
"os_pkg": {
"recall": 0.98,
"precision": 0.97,
"f1_score": 0.975,
"total_expected": 50,
"true_positives": 49,
"false_positives": 2,
"false_negatives": 1
},
"code": {
"recall": 0.92,
"precision": 0.90,
"f1_score": 0.91,
"total_expected": 25,
"true_positives": 23,
"false_positives": 3,
"false_negatives": 2
},
"config": {
"recall": 0.88,
"precision": 0.85,
"f1_score": 0.865,
"total_expected": 20,
"true_positives": 18,
"false_positives": 3,
"false_negatives": 2
}
},
"aggregate": {
"overall_recall": 0.9538,
"overall_precision": 0.9302,
"reachability_accuracy": 0.9268
}
}
EOF
exit 0
fi
# Process each fixture in the corpus
log "Processing corpus fixtures..."
# Read manifest and iterate fixtures
FIXTURE_COUNT=$(jq -r '.fixtures | length' "${MANIFEST_FILE}")
log "Found ${FIXTURE_COUNT} fixtures"
for i in $(seq 0 $((FIXTURE_COUNT - 1))); do
FIXTURE_ID=$(jq -r ".fixtures[$i].id" "${MANIFEST_FILE}")
FIXTURE_PATH="${CORPUS_PATH}/$(jq -r ".fixtures[$i].path" "${MANIFEST_FILE}")"
FIXTURE_CLASS=$(jq -r ".fixtures[$i].class" "${MANIFEST_FILE}")
EXPECTED_REACHABLE=$(jq -r ".fixtures[$i].expected_reachable // 0" "${MANIFEST_FILE}")
EXPECTED_UNREACHABLE=$(jq -r ".fixtures[$i].expected_unreachable // 0" "${MANIFEST_FILE}")
log "Processing fixture: ${FIXTURE_ID} (class: ${FIXTURE_CLASS})"
if [[ ! -d "${FIXTURE_PATH}" ]] && [[ ! -f "${FIXTURE_PATH}" ]]; then
error "Fixture not found: ${FIXTURE_PATH}"
continue
fi
# Update expected counts
total_expected[$FIXTURE_CLASS]=$((${total_expected[$FIXTURE_CLASS]} + EXPECTED_REACHABLE))
# Run scanner on fixture (deterministic mode, offline)
SCAN_RESULT_FILE=$(mktemp)
if dotnet run --project "${REPO_ROOT}/src/Scanner/StellaOps.Scanner.Cli" -- \
scan --input "${FIXTURE_PATH}" \
--output "${SCAN_RESULT_FILE}" \
--deterministic \
--offline \
--format json \
2>/dev/null; then
# Parse scanner results
DETECTED_REACHABLE=$(jq -r '[.findings[] | select(.reachable == true)] | length' "${SCAN_RESULT_FILE}" 2>/dev/null || echo "0")
DETECTED_UNREACHABLE=$(jq -r '[.findings[] | select(.reachable == false)] | length' "${SCAN_RESULT_FILE}" 2>/dev/null || echo "0")
# Calculate TP, FP, FN for this fixture
TP=$((DETECTED_REACHABLE < EXPECTED_REACHABLE ? DETECTED_REACHABLE : EXPECTED_REACHABLE))
FP=$((DETECTED_REACHABLE > EXPECTED_REACHABLE ? DETECTED_REACHABLE - EXPECTED_REACHABLE : 0))
FN=$((EXPECTED_REACHABLE - TP))
true_positives[$FIXTURE_CLASS]=$((${true_positives[$FIXTURE_CLASS]} + TP))
false_positives[$FIXTURE_CLASS]=$((${false_positives[$FIXTURE_CLASS]} + FP))
false_negatives[$FIXTURE_CLASS]=$((${false_negatives[$FIXTURE_CLASS]} + FN))
else
error "Scanner failed for fixture: ${FIXTURE_ID}"
false_negatives[$FIXTURE_CLASS]=$((${false_negatives[$FIXTURE_CLASS]} + EXPECTED_REACHABLE))
fi
rm -f "${SCAN_RESULT_FILE}"
done
# Calculate metrics per class
calculate_metrics() {
local class=$1
local tp=${true_positives[$class]}
local fp=${false_positives[$class]}
local fn=${false_negatives[$class]}
local total=${total_expected[$class]}
local recall=0
local precision=0
local f1=0
if [[ $((tp + fn)) -gt 0 ]]; then
recall=$(echo "scale=4; $tp / ($tp + $fn)" | bc)
fi
if [[ $((tp + fp)) -gt 0 ]]; then
precision=$(echo "scale=4; $tp / ($tp + $fp)" | bc)
fi
if (( $(echo "$recall + $precision > 0" | bc -l) )); then
f1=$(echo "scale=4; 2 * $recall * $precision / ($recall + $precision)" | bc)
fi
echo "{\"recall\": $recall, \"precision\": $precision, \"f1_score\": $f1, \"total_expected\": $total, \"true_positives\": $tp, \"false_positives\": $fp, \"false_negatives\": $fn}"
}
# Generate output JSON
OUTPUT=$(cat <<EOF
{
"timestamp": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
"corpus_path": "${CORPUS_PATH}",
"dry_run": false,
"metrics": {
"runtime_dep": $(calculate_metrics "runtime_dep"),
"os_pkg": $(calculate_metrics "os_pkg"),
"code": $(calculate_metrics "code"),
"config": $(calculate_metrics "config")
},
"aggregate": {
"overall_recall": $(echo "scale=4; (${true_positives[runtime_dep]} + ${true_positives[os_pkg]} + ${true_positives[code]} + ${true_positives[config]}) / (${total_expected[runtime_dep]} + ${total_expected[os_pkg]} + ${total_expected[code]} + ${total_expected[config]} + 0.0001)" | bc),
"overall_precision": $(echo "scale=4; (${true_positives[runtime_dep]} + ${true_positives[os_pkg]} + ${true_positives[code]} + ${true_positives[config]}) / (${true_positives[runtime_dep]} + ${true_positives[os_pkg]} + ${true_positives[code]} + ${true_positives[config]} + ${false_positives[runtime_dep]} + ${false_positives[os_pkg]} + ${false_positives[code]} + ${false_positives[config]} + 0.0001)" | bc)
}
}
EOF
)
# Output results
if [[ -n "${OUTPUT_FILE}" ]]; then
echo "${OUTPUT}" > "${OUTPUT_FILE}"
log "Results written to ${OUTPUT_FILE}"
else
echo "${OUTPUT}"
fi
# Check thresholds in strict mode
if [[ "${STRICT}" == "true" ]]; then
THRESHOLDS_FILE="${SCRIPT_DIR}/reachability-thresholds.yaml"
if [[ -f "${THRESHOLDS_FILE}" ]]; then
log "Checking thresholds from ${THRESHOLDS_FILE}"
# Extract thresholds and check
MIN_RECALL=$(yq -r '.thresholds.runtime_dependency_recall.min // 0.95' "${THRESHOLDS_FILE}")
ACTUAL_RECALL=$(echo "${OUTPUT}" | jq -r '.metrics.runtime_dep.recall')
if (( $(echo "$ACTUAL_RECALL < $MIN_RECALL" | bc -l) )); then
error "Runtime dependency recall ${ACTUAL_RECALL} below threshold ${MIN_RECALL}"
exit 1
fi
log "All thresholds passed"
fi
fi
exit 0
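The jq lookups above imply a corpus manifest shaped roughly as follows (field names taken from the script; the fixture entry itself is illustrative):
cat > manifest.json <<'EOF'
{
"fixtures": [
{
"id": "runtime-dep-001",
"path": "fixtures/runtime-dep-001",
"class": "runtime_dep",
"expected_reachable": 3,
"expected_unreachable": 1
}
]
}
EOF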

View File

@@ -1,313 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# compute-ttfs-metrics.sh
# Computes Time-to-First-Signal (TTFS) metrics from test runs
#
# Usage: ./compute-ttfs-metrics.sh [options]
# --results-path PATH Path to test results directory
# --output FILE Output JSON file (default: stdout)
# --baseline FILE Baseline TTFS file for comparison
# --dry-run Show what would be computed
# --strict Exit non-zero if thresholds are violated
# --verbose Enable verbose output
#
# Output: JSON with TTFS p50, p95, p99 metrics and regression status
# =============================================================================
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# Default paths
RESULTS_PATH="${REPO_ROOT}/src/__Tests/__Benchmarks/results"
OUTPUT_FILE=""
BASELINE_FILE="${REPO_ROOT}/src/__Tests/__Benchmarks/baselines/ttfs-baseline.json"
DRY_RUN=false
STRICT=false
VERBOSE=false
# Parse arguments
while [[ $# -gt 0 ]]; do
case "$1" in
--results-path)
RESULTS_PATH="$2"
shift 2
;;
--output)
OUTPUT_FILE="$2"
shift 2
;;
--baseline)
BASELINE_FILE="$2"
shift 2
;;
--dry-run)
DRY_RUN=true
shift
;;
--strict)
STRICT=true
shift
;;
--verbose)
VERBOSE=true
shift
;;
-h|--help)
head -20 "$0" | tail -15
exit 0
;;
*)
echo "Unknown option: $1" >&2
exit 1
;;
esac
done
log() {
if [[ "${VERBOSE}" == "true" ]]; then
echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] $*" >&2
fi
}
error() {
echo "[ERROR] $*" >&2
}
warn() {
echo "[WARN] $*" >&2
}
# Calculate percentiles from sorted array
percentile() {
local -n arr=$1
local p=$2
local n=${#arr[@]}
if [[ $n -eq 0 ]]; then
echo "0"
return
fi
local idx=$(echo "scale=0; ($n - 1) * $p / 100" | bc)
echo "${arr[$idx]}"
}
if [[ "${DRY_RUN}" == "true" ]]; then
log "[DRY RUN] Would process TTFS metrics..."
cat <<EOF
{
"timestamp": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
"dry_run": true,
"results_path": "${RESULTS_PATH}",
"metrics": {
"ttfs_ms": {
"p50": 1250,
"p95": 3500,
"p99": 5200,
"min": 450,
"max": 8500,
"mean": 1850,
"sample_count": 100
},
"by_scan_type": {
"image_scan": {
"p50": 2100,
"p95": 4500,
"p99": 6800
},
"filesystem_scan": {
"p50": 850,
"p95": 1800,
"p99": 2500
},
"sbom_scan": {
"p50": 320,
"p95": 650,
"p99": 950
}
}
},
"baseline_comparison": {
"baseline_path": "${BASELINE_FILE}",
"p50_regression_pct": -2.5,
"p95_regression_pct": 1.2,
"regression_detected": false
}
}
EOF
exit 0
fi
# Validate results directory
if [[ ! -d "${RESULTS_PATH}" ]]; then
error "Results directory not found: ${RESULTS_PATH}"
exit 1
fi
log "Processing TTFS results from ${RESULTS_PATH}"
# Collect all TTFS values from result files
declare -a ttfs_values=()
declare -a image_ttfs=()
declare -a fs_ttfs=()
declare -a sbom_ttfs=()
# Find and process all result files
for result_file in "${RESULTS_PATH}"/*.json "${RESULTS_PATH}"/**/*.json; do
[[ -f "${result_file}" ]] || continue
log "Processing: ${result_file}"
# Extract TTFS value if present
TTFS=$(jq -r '.ttfs_ms // .time_to_first_signal_ms // empty' "${result_file}" 2>/dev/null || true)
SCAN_TYPE=$(jq -r '.scan_type // "unknown"' "${result_file}" 2>/dev/null || echo "unknown")
if [[ -n "${TTFS}" ]] && [[ "${TTFS}" != "null" ]]; then
ttfs_values+=("${TTFS}")
case "${SCAN_TYPE}" in
image|image_scan|container)
image_ttfs+=("${TTFS}")
;;
filesystem|fs|fs_scan)
fs_ttfs+=("${TTFS}")
;;
sbom|sbom_scan)
sbom_ttfs+=("${TTFS}")
;;
esac
fi
done
# Sort arrays for percentile calculation
IFS=$'\n' ttfs_sorted=($(sort -n <<<"${ttfs_values[*]}")); unset IFS
IFS=$'\n' image_sorted=($(sort -n <<<"${image_ttfs[*]}")); unset IFS
IFS=$'\n' fs_sorted=($(sort -n <<<"${fs_ttfs[*]}")); unset IFS
IFS=$'\n' sbom_sorted=($(sort -n <<<"${sbom_ttfs[*]}")); unset IFS
# Calculate overall metrics
SAMPLE_COUNT=${#ttfs_values[@]}
if [[ $SAMPLE_COUNT -eq 0 ]]; then
warn "No TTFS samples found"
P50=0
P95=0
P99=0
MIN=0
MAX=0
MEAN=0
else
P50=$(percentile ttfs_sorted 50)
P95=$(percentile ttfs_sorted 95)
P99=$(percentile ttfs_sorted 99)
MIN=${ttfs_sorted[0]}
MAX=${ttfs_sorted[-1]}
# Calculate mean
SUM=0
for v in "${ttfs_values[@]}"; do
SUM=$((SUM + v))
done
MEAN=$((SUM / SAMPLE_COUNT))
fi
# Calculate per-type metrics
IMAGE_P50=$(percentile image_sorted 50)
IMAGE_P95=$(percentile image_sorted 95)
IMAGE_P99=$(percentile image_sorted 99)
FS_P50=$(percentile fs_sorted 50)
FS_P95=$(percentile fs_sorted 95)
FS_P99=$(percentile fs_sorted 99)
SBOM_P50=$(percentile sbom_sorted 50)
SBOM_P95=$(percentile sbom_sorted 95)
SBOM_P99=$(percentile sbom_sorted 99)
# Compare against baseline if available
REGRESSION_DETECTED=false
P50_REGRESSION_PCT=0
P95_REGRESSION_PCT=0
if [[ -f "${BASELINE_FILE}" ]]; then
log "Comparing against baseline: ${BASELINE_FILE}"
BASELINE_P50=$(jq -r '.metrics.ttfs_ms.p50 // 0' "${BASELINE_FILE}")
BASELINE_P95=$(jq -r '.metrics.ttfs_ms.p95 // 0' "${BASELINE_FILE}")
if [[ $BASELINE_P50 -gt 0 ]]; then
P50_REGRESSION_PCT=$(echo "scale=2; (${P50} - ${BASELINE_P50}) * 100 / ${BASELINE_P50}" | bc)
fi
if [[ $BASELINE_P95 -gt 0 ]]; then
P95_REGRESSION_PCT=$(echo "scale=2; (${P95} - ${BASELINE_P95}) * 100 / ${BASELINE_P95}" | bc)
fi
# Check for regression (>10% increase)
if (( $(echo "${P50_REGRESSION_PCT} > 10" | bc -l) )) || (( $(echo "${P95_REGRESSION_PCT} > 10" | bc -l) )); then
REGRESSION_DETECTED=true
warn "TTFS regression detected: p50=${P50_REGRESSION_PCT}%, p95=${P95_REGRESSION_PCT}%"
fi
fi
# Generate output
OUTPUT=$(cat <<EOF
{
"timestamp": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
"dry_run": false,
"results_path": "${RESULTS_PATH}",
"metrics": {
"ttfs_ms": {
"p50": ${P50},
"p95": ${P95},
"p99": ${P99},
"min": ${MIN},
"max": ${MAX},
"mean": ${MEAN},
"sample_count": ${SAMPLE_COUNT}
},
"by_scan_type": {
"image_scan": {
"p50": ${IMAGE_P50:-0},
"p95": ${IMAGE_P95:-0},
"p99": ${IMAGE_P99:-0}
},
"filesystem_scan": {
"p50": ${FS_P50:-0},
"p95": ${FS_P95:-0},
"p99": ${FS_P99:-0}
},
"sbom_scan": {
"p50": ${SBOM_P50:-0},
"p95": ${SBOM_P95:-0},
"p99": ${SBOM_P99:-0}
}
}
},
"baseline_comparison": {
"baseline_path": "${BASELINE_FILE}",
"p50_regression_pct": ${P50_REGRESSION_PCT},
"p95_regression_pct": ${P95_REGRESSION_PCT},
"regression_detected": ${REGRESSION_DETECTED}
}
}
EOF
)
# Output results
if [[ -n "${OUTPUT_FILE}" ]]; then
echo "${OUTPUT}" > "${OUTPUT_FILE}"
log "Results written to ${OUTPUT_FILE}"
else
echo "${OUTPUT}"
fi
# Strict mode: fail on regression
if [[ "${STRICT}" == "true" ]] && [[ "${REGRESSION_DETECTED}" == "true" ]]; then
error "TTFS regression exceeds threshold"
exit 1
fi
exit 0
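The extraction above accepts either ttfs_ms or time_to_first_signal_ms plus an optional scan_type; a minimal result file might look like this (values illustrative):
cat > results/scan-0001.json <<'EOF'
{
"scan_type": "image_scan",
"ttfs_ms": 2100
}
EOF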

View File

@@ -1,326 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# enforce-performance-slos.sh
# Enforces scan time and compute budget SLOs in CI
#
# Usage: ./enforce-performance-slos.sh [options]
# --results-path PATH Path to benchmark results directory
# --slos-file FILE Path to SLO definitions (default: scripts/ci/performance-slos.yaml)
# --output FILE Output JSON file (default: stdout)
# --dry-run Show what would be enforced
# --strict Exit non-zero if any SLO is violated
# --verbose Enable verbose output
#
# Output: JSON with SLO evaluation results and violations
# =============================================================================
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# Default paths
RESULTS_PATH="${REPO_ROOT}/src/__Tests/__Benchmarks/results"
SLOS_FILE="${SCRIPT_DIR}/performance-slos.yaml"
OUTPUT_FILE=""
DRY_RUN=false
STRICT=false
VERBOSE=false
# Parse arguments
while [[ $# -gt 0 ]]; do
case "$1" in
--results-path)
RESULTS_PATH="$2"
shift 2
;;
--slos-file)
SLOS_FILE="$2"
shift 2
;;
--output)
OUTPUT_FILE="$2"
shift 2
;;
--dry-run)
DRY_RUN=true
shift
;;
--strict)
STRICT=true
shift
;;
--verbose)
VERBOSE=true
shift
;;
-h|--help)
head -20 "$0" | tail -15
exit 0
;;
*)
echo "Unknown option: $1" >&2
exit 1
;;
esac
done
log() {
if [[ "${VERBOSE}" == "true" ]]; then
echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] $*" >&2
fi
}
error() {
echo "[ERROR] $*" >&2
}
warn() {
echo "[WARN] $*" >&2
}
if [[ "${DRY_RUN}" == "true" ]]; then
log "[DRY RUN] Would enforce performance SLOs..."
cat <<EOF
{
"timestamp": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
"dry_run": true,
"results_path": "${RESULTS_PATH}",
"slos_file": "${SLOS_FILE}",
"slo_evaluations": {
"scan_time_p95": {
"slo_name": "Scan Time P95",
"threshold_ms": 30000,
"actual_ms": 25000,
"passed": true,
"margin_pct": 16.7
},
"memory_peak_mb": {
"slo_name": "Peak Memory Usage",
"threshold_mb": 2048,
"actual_mb": 1650,
"passed": true,
"margin_pct": 19.4
},
"cpu_time_seconds": {
"slo_name": "CPU Time",
"threshold_seconds": 60,
"actual_seconds": 45,
"passed": true,
"margin_pct": 25.0
}
},
"summary": {
"total_slos": 3,
"passed": 3,
"failed": 0,
"all_passed": true
}
}
EOF
exit 0
fi
# Validate paths
if [[ ! -d "${RESULTS_PATH}" ]]; then
error "Results directory not found: ${RESULTS_PATH}"
exit 1
fi
if [[ ! -f "${SLOS_FILE}" ]]; then
warn "SLOs file not found: ${SLOS_FILE}, using defaults"
fi
log "Enforcing SLOs from ${SLOS_FILE}"
log "Results path: ${RESULTS_PATH}"
# Initialize evaluation results
declare -A slo_results
VIOLATIONS=()
TOTAL_SLOS=0
PASSED_SLOS=0
# Define default SLOs
declare -A SLOS
SLOS["scan_time_p95_ms"]=30000
SLOS["scan_time_p99_ms"]=60000
SLOS["memory_peak_mb"]=2048
SLOS["cpu_time_seconds"]=120
SLOS["sbom_gen_time_ms"]=10000
SLOS["policy_eval_time_ms"]=5000
# Load SLOs from file if exists
if [[ -f "${SLOS_FILE}" ]]; then
while IFS=: read -r key value; do
key=$(echo "$key" | tr -d ' ')
value=$(echo "$value" | tr -d ' ')
if [[ -n "$key" ]] && [[ -n "$value" ]] && [[ "$key" != "#"* ]]; then
SLOS["$key"]=$value
log "Loaded SLO: ${key}=${value}"
fi
done < <(yq -r 'to_entries | .[] | "\(.key):\(.value.threshold // .value)"' "${SLOS_FILE}" 2>/dev/null || true)
fi
# Collect metrics from results
SCAN_TIMES=()
MEMORY_VALUES=()
CPU_TIMES=()
SBOM_TIMES=()
POLICY_TIMES=()
for result_file in "${RESULTS_PATH}"/*.json "${RESULTS_PATH}"/**/*.json; do
[[ -f "${result_file}" ]] || continue
log "Processing: ${result_file}"
# Extract metrics
SCAN_TIME=$(jq -r '.duration_ms // .scan_time_ms // empty' "${result_file}" 2>/dev/null || true)
MEMORY=$(jq -r '.peak_memory_mb // .memory_mb // empty' "${result_file}" 2>/dev/null || true)
CPU_TIME=$(jq -r '.cpu_time_seconds // .cpu_seconds // empty' "${result_file}" 2>/dev/null || true)
SBOM_TIME=$(jq -r '.sbom_generation_ms // empty' "${result_file}" 2>/dev/null || true)
POLICY_TIME=$(jq -r '.policy_evaluation_ms // empty' "${result_file}" 2>/dev/null || true)
[[ -n "${SCAN_TIME}" ]] && SCAN_TIMES+=("${SCAN_TIME}")
[[ -n "${MEMORY}" ]] && MEMORY_VALUES+=("${MEMORY}")
[[ -n "${CPU_TIME}" ]] && CPU_TIMES+=("${CPU_TIME}")
[[ -n "${SBOM_TIME}" ]] && SBOM_TIMES+=("${SBOM_TIME}")
[[ -n "${POLICY_TIME}" ]] && POLICY_TIMES+=("${POLICY_TIME}")
done
# Helper: calculate percentile from array
calc_percentile() {
local -n values=$1
local pct=$2
if [[ ${#values[@]} -eq 0 ]]; then
echo "0"
return
fi
IFS=$'\n' sorted=($(sort -n <<<"${values[*]}")); unset IFS
local n=${#sorted[@]}
local idx=$(echo "scale=0; ($n - 1) * $pct / 100" | bc)
echo "${sorted[$idx]}"
}
# Helper: calculate max from array
calc_max() {
local -n values=$1
if [[ ${#values[@]} -eq 0 ]]; then
echo "0"
return
fi
local max=0
for v in "${values[@]}"; do
if (( $(echo "$v > $max" | bc -l) )); then
max=$v
fi
done
echo "$max"
}
# Evaluate each SLO. Runs in the parent shell (result returned via nameref) so that
# the counters and VIOLATIONS survive; a $(...) subshell would discard them, and
# ((TOTAL_SLOS++)) returning 0 would trip set -e on the first call.
evaluate_slo() {
local name=$1
local threshold=$2
local actual=$3
local unit=$4
local -n result_json=$5
TOTAL_SLOS=$((TOTAL_SLOS + 1))
local passed=true
local margin_pct=0
if (( $(echo "$actual > $threshold" | bc -l) )); then
passed=false
margin_pct=$(echo "scale=2; ($actual - $threshold) * 100 / $threshold" | bc)
VIOLATIONS+=("${name}: ${actual}${unit} exceeds threshold ${threshold}${unit} (+${margin_pct}%)")
warn "SLO VIOLATION: ${name} = ${actual}${unit} (threshold: ${threshold}${unit})"
else
PASSED_SLOS=$((PASSED_SLOS + 1))
margin_pct=$(echo "scale=2; ($threshold - $actual) * 100 / $threshold" | bc)
log "SLO PASSED: ${name} = ${actual}${unit} (threshold: ${threshold}${unit}, margin: ${margin_pct}%)"
fi
result_json="{\"slo_name\": \"${name}\", \"threshold\": ${threshold}, \"actual\": ${actual}, \"unit\": \"${unit}\", \"passed\": ${passed}, \"margin_pct\": ${margin_pct}}"
}
# Calculate actuals
SCAN_P95=$(calc_percentile SCAN_TIMES 95)
SCAN_P99=$(calc_percentile SCAN_TIMES 99)
MEMORY_MAX=$(calc_max MEMORY_VALUES)
CPU_MAX=$(calc_max CPU_TIMES)
SBOM_P95=$(calc_percentile SBOM_TIMES 95)
POLICY_P95=$(calc_percentile POLICY_TIMES 95)
# Run evaluations
evaluate_slo "Scan Time P95" "${SLOS[scan_time_p95_ms]}" "${SCAN_P95}" "ms" SLO_SCAN_P95
evaluate_slo "Scan Time P99" "${SLOS[scan_time_p99_ms]}" "${SCAN_P99}" "ms" SLO_SCAN_P99
evaluate_slo "Peak Memory" "${SLOS[memory_peak_mb]}" "${MEMORY_MAX}" "MB" SLO_MEMORY
evaluate_slo "CPU Time" "${SLOS[cpu_time_seconds]}" "${CPU_MAX}" "s" SLO_CPU
evaluate_slo "SBOM Generation P95" "${SLOS[sbom_gen_time_ms]}" "${SBOM_P95}" "ms" SLO_SBOM
evaluate_slo "Policy Evaluation P95" "${SLOS[policy_eval_time_ms]}" "${POLICY_P95}" "ms" SLO_POLICY
# Generate output
ALL_PASSED=true
if [[ ${#VIOLATIONS[@]} -gt 0 ]]; then
ALL_PASSED=false
fi
# Build violations JSON array
VIOLATIONS_JSON="[]"
if [[ ${#VIOLATIONS[@]} -gt 0 ]]; then
VIOLATIONS_JSON="["
for i in "${!VIOLATIONS[@]}"; do
[[ $i -gt 0 ]] && VIOLATIONS_JSON+=","
VIOLATIONS_JSON+="\"${VIOLATIONS[$i]}\""
done
VIOLATIONS_JSON+="]"
fi
OUTPUT=$(cat <<EOF
{
"timestamp": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
"dry_run": false,
"results_path": "${RESULTS_PATH}",
"slos_file": "${SLOS_FILE}",
"slo_evaluations": {
"scan_time_p95": ${SLO_SCAN_P95},
"scan_time_p99": ${SLO_SCAN_P99},
"memory_peak_mb": ${SLO_MEMORY},
"cpu_time_seconds": ${SLO_CPU},
"sbom_gen_time_ms": ${SLO_SBOM},
"policy_eval_time_ms": ${SLO_POLICY}
},
"summary": {
"total_slos": ${TOTAL_SLOS},
"passed": ${PASSED_SLOS},
"failed": $((TOTAL_SLOS - PASSED_SLOS)),
"all_passed": ${ALL_PASSED},
"violations": ${VIOLATIONS_JSON}
}
}
EOF
)
# Output results
if [[ -n "${OUTPUT_FILE}" ]]; then
echo "${OUTPUT}" > "${OUTPUT_FILE}"
log "Results written to ${OUTPUT_FILE}"
else
echo "${OUTPUT}"
fi
# Strict mode: fail on violations
if [[ "${STRICT}" == "true" ]] && [[ "${ALL_PASSED}" == "false" ]]; then
error "Performance SLO violations detected"
for v in "${VIOLATIONS[@]}"; do
error " - ${v}"
done
exit 1
fi
exit 0
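A matching result file and a strict CI invocation (paths illustrative; the metric keys are the alternatives the jq extraction above looks for):
cat > bench-results/scan-0001.json <<'EOF'
{
"duration_ms": 25000,
"peak_memory_mb": 1650,
"cpu_time_seconds": 45,
"sbom_generation_ms": 8200,
"policy_evaluation_ms": 3100
}
EOF
./enforce-performance-slos.sh --results-path bench-results --strict --verbose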

View File

@@ -1,94 +0,0 @@
# =============================================================================
# Performance SLOs (Service Level Objectives)
# Reference: Testing and Quality Guardrails Technical Reference
#
# These SLOs define the performance budgets for CI quality gates.
# Violations will be flagged and may block releases.
# =============================================================================
# Scan Time SLOs (milliseconds)
scan_time:
p50:
threshold: 15000
description: "50th percentile scan time"
severity: "info"
p95:
threshold: 30000
description: "95th percentile scan time - primary SLO"
severity: "warning"
p99:
threshold: 60000
description: "99th percentile scan time - tail latency"
severity: "critical"
# Memory Usage SLOs (megabytes)
memory:
peak_mb:
threshold: 2048
description: "Peak memory usage during scan"
severity: "warning"
average_mb:
threshold: 1024
description: "Average memory usage"
severity: "info"
# CPU Time SLOs (seconds)
cpu:
max_seconds:
threshold: 120
description: "Maximum CPU time per scan"
severity: "warning"
average_seconds:
threshold: 60
description: "Average CPU time per scan"
severity: "info"
# Component-Specific SLOs (milliseconds)
components:
sbom_generation:
p95:
threshold: 10000
description: "SBOM generation time P95"
severity: "warning"
policy_evaluation:
p95:
threshold: 5000
description: "Policy evaluation time P95"
severity: "warning"
reachability_analysis:
p95:
threshold: 20000
description: "Reachability analysis time P95"
severity: "warning"
vulnerability_matching:
p95:
threshold: 8000
description: "Vulnerability matching time P95"
severity: "warning"
# Resource Budget SLOs
resource_budgets:
disk_io_mb:
threshold: 500
description: "Maximum disk I/O per scan"
network_calls:
threshold: 0
description: "Network calls (should be zero for offline scans)"
temp_storage_mb:
threshold: 1024
description: "Maximum temporary storage usage"
# Regression Thresholds
regression:
max_degradation_pct: 10
warning_threshold_pct: 5
baseline_window_days: 30
# Override Configuration
overrides:
allowed_labels:
- "performance-override"
- "large-scan"
required_approvers:
- "platform"
- "performance"

View File

@@ -1,102 +0,0 @@
# =============================================================================
# Reachability Quality Gate Thresholds
# Reference: Testing and Quality Guardrails Technical Reference
#
# These thresholds are enforced by CI quality gates. Violations will block PRs
# unless an override is explicitly approved.
# =============================================================================
thresholds:
# Runtime dependency recall: percentage of runtime dependency vulns detected
runtime_dependency_recall:
min: 0.95
description: "Percentage of runtime dependency vulnerabilities detected"
severity: "critical"
# OS package recall: percentage of OS package vulns detected
os_package_recall:
min: 0.97
description: "Percentage of OS package vulnerabilities detected"
severity: "critical"
# Code vulnerability recall: percentage of code-level vulns detected
code_vulnerability_recall:
min: 0.90
description: "Percentage of code vulnerabilities detected"
severity: "high"
# Configuration vulnerability recall
config_vulnerability_recall:
min: 0.85
description: "Percentage of configuration vulnerabilities detected"
severity: "medium"
# False positive rate for unreachable findings
unreachable_false_positives:
max: 0.05
description: "Rate of false positives for unreachable findings"
severity: "high"
# Reachability underreport rate: missed reachable findings
reachability_underreport:
max: 0.10
description: "Rate of reachable findings incorrectly marked unreachable"
severity: "critical"
# Overall precision across all classes
overall_precision:
min: 0.90
description: "Overall precision across all vulnerability classes"
severity: "high"
# F1 score threshold
f1_score_min:
min: 0.90
description: "Minimum F1 score across vulnerability classes"
severity: "high"
# Class-specific thresholds
class_thresholds:
runtime_dep:
recall_min: 0.95
precision_min: 0.92
f1_min: 0.93
os_pkg:
recall_min: 0.97
precision_min: 0.95
f1_min: 0.96
code:
recall_min: 0.90
precision_min: 0.88
f1_min: 0.89
config:
recall_min: 0.85
precision_min: 0.80
f1_min: 0.82
# Regression detection settings
regression:
# Maximum allowed regression from baseline (percentage points)
max_recall_regression: 0.02
max_precision_regression: 0.03
# Path to baseline metrics file
baseline_path: "bench/baselines/reachability-baseline.json"
# How many consecutive failures before blocking
failure_threshold: 2
# Override configuration
overrides:
# Allow temporary bypass for specific PR labels
bypass_labels:
- "quality-gate-override"
- "wip"
# Require explicit approval from these teams
required_approvers:
- "platform"
- "reachability"

View File

@@ -1,106 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Deterministic DSSE signing helper for Authority gap artefacts (AU1-AU10, RR1-RR10).
# Prefers system cosign v3 (bundle) and falls back to repo-pinned v2.6.0.
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
COSIGN_BIN="${COSIGN_BIN:-}"
# Detect cosign binary
if [[ -z "$COSIGN_BIN" ]]; then
if command -v /usr/local/bin/cosign >/dev/null 2>&1; then
COSIGN_BIN="/usr/local/bin/cosign"
elif command -v cosign >/dev/null 2>&1; then
COSIGN_BIN="$(command -v cosign)"
elif [[ -x "$ROOT/tools/cosign/cosign" ]]; then
COSIGN_BIN="$ROOT/tools/cosign/cosign"
else
echo "cosign not found; install or set COSIGN_BIN" >&2
exit 1
fi
fi
# Resolve key
TMP_KEY=""
if [[ -n "${COSIGN_KEY_FILE:-}" ]]; then
KEY_FILE="$COSIGN_KEY_FILE"
elif [[ -n "${COSIGN_PRIVATE_KEY_B64:-}" ]]; then
TMP_KEY="$(mktemp)"
echo "$COSIGN_PRIVATE_KEY_B64" | base64 -d > "$TMP_KEY"
chmod 600 "$TMP_KEY"
KEY_FILE="$TMP_KEY"
elif [[ -f "$ROOT/tools/cosign/cosign.key" ]]; then
KEY_FILE="$ROOT/tools/cosign/cosign.key"
elif [[ "${COSIGN_ALLOW_DEV_KEY:-0}" == "1" && -f "$ROOT/tools/cosign/cosign.dev.key" ]]; then
echo "[warn] Using development key (tools/cosign/cosign.dev.key); NOT for production/Evidence Locker" >&2
KEY_FILE="$ROOT/tools/cosign/cosign.dev.key"
else
echo "No signing key: set COSIGN_PRIVATE_KEY_B64 or COSIGN_KEY_FILE, or place key at tools/cosign/cosign.key" >&2
exit 2
fi
OUT_BASE="${OUT_DIR:-$ROOT/docs/modules/authority/gaps/dsse/2025-12-04}"
if [[ "$OUT_BASE" != /* ]]; then
OUT_BASE="$ROOT/$OUT_BASE"
fi
mkdir -p "$OUT_BASE"
ARTEFACTS=(
"docs/modules/authority/gaps/artifacts/authority-scope-role-catalog.v1.json|authority-scope-role-catalog"
"docs/modules/authority/gaps/artifacts/authority-jwks-metadata.schema.json|authority-jwks-metadata.schema"
"docs/modules/authority/gaps/artifacts/crypto-profile-registry.v1.json|crypto-profile-registry"
"docs/modules/authority/gaps/artifacts/authority-offline-verifier-bundle.v1.json|authority-offline-verifier-bundle"
"docs/modules/authority/gaps/artifacts/authority-abac.schema.json|authority-abac.schema"
"docs/modules/authority/gaps/artifacts/rekor-receipt-policy.v1.json|rekor-receipt-policy"
"docs/modules/authority/gaps/artifacts/rekor-receipt.schema.json|rekor-receipt.schema"
"docs/modules/authority/gaps/artifacts/rekor-receipt-bundle.v1.json|rekor-receipt-bundle"
)
USE_BUNDLE=0
if $COSIGN_BIN version --json 2>/dev/null | grep -q '"GitVersion":"v3'; then
USE_BUNDLE=1
elif $COSIGN_BIN version 2>/dev/null | grep -q 'GitVersion:.*v3\.'; then
USE_BUNDLE=1
fi
SHA_FILE="$OUT_BASE/SHA256SUMS"
: > "$SHA_FILE"
for entry in "${ARTEFACTS[@]}"; do
IFS="|" read -r path stem <<<"$entry"
if [[ ! -f "$ROOT/$path" ]]; then
echo "Missing artefact: $path" >&2
exit 3
fi
if (( USE_BUNDLE )); then
bundle="$OUT_BASE/${stem}.sigstore.json"
COSIGN_PASSWORD="${COSIGN_PASSWORD:-}" \
"$COSIGN_BIN" sign-blob \
--key "$KEY_FILE" \
--yes \
--tlog-upload=false \
--bundle "$bundle" \
"$ROOT/$path"
printf "%s %s\n" "$(sha256sum "$bundle" | cut -d' ' -f1)" "$(realpath --relative-to="$OUT_BASE" "$bundle")" >> "$SHA_FILE"
else
sig="$OUT_BASE/${stem}.dsse"
COSIGN_PASSWORD="${COSIGN_PASSWORD:-}" \
"$COSIGN_BIN" sign-blob \
--key "$KEY_FILE" \
--yes \
--tlog-upload=false \
--output-signature "$sig" \
"$ROOT/$path"
printf "%s %s\n" "$(sha256sum "$sig" | cut -d' ' -f1)" "$(realpath --relative-to="$OUT_BASE" "$sig")" >> "$SHA_FILE"
fi
printf "%s %s\n" "$(sha256sum "$ROOT/$path" | cut -d' ' -f1)" "$(realpath --relative-to="$OUT_BASE" "$ROOT/$path")" >> "$SHA_FILE"
echo "Signed $path"
done
echo "Signed artefacts written to $OUT_BASE"
if [[ -n "$TMP_KEY" ]]; then
rm -f "$TMP_KEY"
fi
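Verification is the mirror image; a sketch assuming cosign v2+ and a public key at tools/cosign/cosign.pub (path assumed). Since signing ran with --tlog-upload=false, the transparency-log check must be skipped:
cosign verify-blob --key tools/cosign/cosign.pub \
--bundle "$OUT_BASE/crypto-profile-registry.sigstore.json" \
--insecure-ignore-tlog=true \
docs/modules/authority/gaps/artifacts/crypto-profile-registry.v1.json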

View File

@@ -1,50 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Signs a policy file with cosign and verifies it. Intended for CI and offline use.
# Requires COSIGN_KEY_B64 (private key PEM base64) or KMS envs; optional COSIGN_PASSWORD.
usage() {
cat <<'USAGE'
Usage: sign-policy.sh --file <path> [--out-dir out/policy-sign]
Env:
COSIGN_KEY_B64 base64-encoded PEM private key (if not using KMS)
COSIGN_PASSWORD passphrase for the key (can be empty for test keys)
COSIGN_PUBLIC_KEY_PATH optional path to write public key for verify step
USAGE
}
FILE=""
OUT_DIR="out/policy-sign"
while [[ $# -gt 0 ]]; do
case "$1" in
--file) FILE="$2"; shift 2;;
--out-dir) OUT_DIR="$2"; shift 2;;
-h|--help) usage; exit 0;;
*) echo "Unknown arg: $1" >&2; usage; exit 1;;
esac
done
if [[ -z "$FILE" ]]; then echo "--file is required" >&2; exit 1; fi
if [[ ! -f "$FILE" ]]; then echo "file not found: $FILE" >&2; exit 1; fi
mkdir -p "$OUT_DIR"
BASENAME=$(basename "$FILE")
SIG="$OUT_DIR/${BASENAME}.sig"
PUB_OUT="${COSIGN_PUBLIC_KEY_PATH:-$OUT_DIR/cosign.pub}"
if [[ -n "${COSIGN_KEY_B64:-}" ]]; then
KEYFILE="$OUT_DIR/cosign.key"
printf "%s" "$COSIGN_KEY_B64" | base64 -d > "$KEYFILE"
chmod 600 "$KEYFILE"
export COSIGN_KEY="$KEYFILE"
fi
export COSIGN_PASSWORD=${COSIGN_PASSWORD:-}
cosign version >/dev/null
cosign sign-blob --key "$COSIGN_KEY" "$FILE" --output-signature "$SIG"
cosign public-key --key "$COSIGN_KEY" > "$PUB_OUT"
cosign verify-blob --key "$PUB_OUT" --signature "$SIG" "$FILE"
printf "Signed %s -> %s\nPublic key -> %s\n" "$FILE" "$SIG" "$PUB_OUT"

View File

@@ -1,106 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Deterministic DSSE signing helper for Signals artifacts.
# Prefers system cosign v3 (bundle) and falls back to repo-pinned v2.6.0.
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
COSIGN_BIN="${COSIGN_BIN:-}"
# Detect cosign binary (v3 preferred).
if [[ -z "$COSIGN_BIN" ]]; then
if command -v /usr/local/bin/cosign >/dev/null 2>&1; then
COSIGN_BIN="/usr/local/bin/cosign"
elif command -v cosign >/dev/null 2>&1; then
COSIGN_BIN="$(command -v cosign)"
elif [[ -x "$ROOT/tools/cosign/cosign" ]]; then
COSIGN_BIN="$ROOT/tools/cosign/cosign"
else
echo "cosign not found; install or set COSIGN_BIN" >&2
exit 1
fi
fi
# Resolve key
TMP_KEY=""
if [[ -n "${COSIGN_KEY_FILE:-}" ]]; then
KEY_FILE="$COSIGN_KEY_FILE"
elif [[ -n "${COSIGN_PRIVATE_KEY_B64:-}" ]]; then
TMP_KEY="$(mktemp)"
echo "$COSIGN_PRIVATE_KEY_B64" | base64 -d > "$TMP_KEY"
chmod 600 "$TMP_KEY"
KEY_FILE="$TMP_KEY"
elif [[ -f "$ROOT/tools/cosign/cosign.key" ]]; then
KEY_FILE="$ROOT/tools/cosign/cosign.key"
elif [[ "${COSIGN_ALLOW_DEV_KEY:-0}" == "1" && -f "$ROOT/tools/cosign/cosign.dev.key" ]]; then
echo "[warn] Using development key (tools/cosign/cosign.dev.key); NOT for production/Evidence Locker" >&2
KEY_FILE="$ROOT/tools/cosign/cosign.dev.key"
else
echo "No signing key: set COSIGN_PRIVATE_KEY_B64 or COSIGN_KEY_FILE, or place key at tools/cosign/cosign.key" >&2
exit 2
fi
OUT_BASE="${OUT_DIR:-$ROOT/evidence-locker/signals/2025-12-01}"
# Normalize OUT_BASE to absolute to avoid pushd-relative path issues.
if [[ "$OUT_BASE" != /* ]]; then
OUT_BASE="$ROOT/$OUT_BASE"
fi
mkdir -p "$OUT_BASE"
ARTIFACTS=(
"decay/confidence_decay_config.yaml|stella.ops/confidenceDecayConfig@v1|confidence_decay_config"
"unknowns/unknowns_scoring_manifest.json|stella.ops/unknownsScoringManifest@v1|unknowns_scoring_manifest"
"heuristics/heuristics.catalog.json|stella.ops/heuristicCatalog@v1|heuristics_catalog"
)
USE_BUNDLE=0
if $COSIGN_BIN version --json 2>/dev/null | grep -q '"GitVersion":"v3'; then
USE_BUNDLE=1
elif $COSIGN_BIN version 2>/dev/null | grep -q 'GitVersion:.*v3\.'; then
USE_BUNDLE=1
fi
pushd "$ROOT/docs/modules/signals" >/dev/null
SHA_FILE="$OUT_BASE/SHA256SUMS"
: > "$SHA_FILE"
for entry in "${ARTIFACTS[@]}"; do
IFS="|" read -r path predicate stem <<<"$entry"
if [[ ! -f "$path" ]]; then
echo "Missing artifact: $path" >&2
exit 3
fi
if (( USE_BUNDLE )); then
bundle="$OUT_BASE/${stem}.sigstore.json"
COSIGN_PASSWORD="${COSIGN_PASSWORD:-}" \
"$COSIGN_BIN" sign-blob \
--key "$KEY_FILE" \
--yes \
--tlog-upload=false \
--bundle "$bundle" \
"$path"
printf "%s %s\n" "$(sha256sum "$bundle" | cut -d' ' -f1)" "$(realpath --relative-to="$OUT_BASE" "$bundle")" >> "$SHA_FILE"
else
sig="$OUT_BASE/${stem}.dsse"
COSIGN_PASSWORD="${COSIGN_PASSWORD:-}" \
"$COSIGN_BIN" sign-blob \
--key "$KEY_FILE" \
--yes \
--tlog-upload=false \
--output-signature "$sig" \
"$path"
printf "%s %s\n" "$(sha256sum "$sig" | cut -d' ' -f1)" "$(realpath --relative-to="$OUT_BASE" "$sig")" >> "$SHA_FILE"
fi
printf "%s %s\n" "$(sha256sum "$path" | cut -d' ' -f1)" "$(realpath --relative-to="$OUT_BASE" "$path")" >> "$SHA_FILE"
done
popd >/dev/null
echo "Signed artifacts written to $OUT_BASE"
if [[ -n "$TMP_KEY" ]]; then
rm -f "$TMP_KEY"
fi

View File

@@ -1,22 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# DEVOPS-SCAN-90-004: run determinism harness/tests and collect report
ROOT="$(git rev-parse --show-toplevel)"
OUT="${ROOT}/out/scanner-determinism"
mkdir -p "$OUT"
PROJECT="src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Tests/StellaOps.Scanner.Analyzers.Lang.Tests.csproj"
echo "[determinism] running dotnet test (filter=Determinism)"
dotnet test "$PROJECT" --no-build --logger "trx;LogFileName=determinism.trx" --filter Determinism
find "$(dirname "$PROJECT")" -name "*.trx" -print -exec cp {} "$OUT/" \;
echo "[determinism] summarizing"
printf "project=%s\n" "$PROJECT" > "$OUT/summary.txt"
printf "timestamp=%s\n" "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" >> "$OUT/summary.txt"
tar -C "$OUT" -czf "$OUT/determinism-artifacts.tgz" .
echo "[determinism] artifacts at $OUT"

View File

@@ -1,7 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
root_dir=$(cd "$(dirname "$0")/.." && pwd)
verifier="$root_dir/packs/verify_offline_bundle.py"
python3 "$verifier" --bundle "$root_dir/packs/__fixtures__/good" --manifest bundle.json --require-dsse
python3 "$verifier" --bundle "$root_dir/packs/__fixtures__/bad" --manifest bundle-missing-quota.json --require-dsse && exit 1 || true
echo "fixture checks completed"

View File

@@ -1,16 +0,0 @@
#!/usr/bin/env bash
# Safe-ish workspace cleanup when the runner hits “No space left on device”.
# Deletes build/test outputs that are regenerated; preserves offline caches and sources.
set -euo pipefail
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
echo "Cleaning workspace outputs under: ${ROOT}"
rm -rf "${ROOT}/TestResults" || true
rm -rf "${ROOT}/out" || true
rm -rf "${ROOT}/artifacts" || true
# Trim common temp locations if they exist in repo workspace
[ -d "${ROOT}/tmp" ] && find "${ROOT}/tmp" -mindepth 1 -maxdepth 1 -exec rm -rf {} +
echo "Done. Consider also clearing any runner-level /tmp outside the workspace if safe."

View File

@@ -1,27 +0,0 @@
#!/usr/bin/env bash
# Thin wrapper to strip the harness-injected "workdir:" switch that breaks dotnet/msbuild parsing.
set -euo pipefail
real_dotnet="$(command -v dotnet)"
if [[ -z "${real_dotnet}" ]]; then
echo "dotnet executable not found in PATH" >&2
exit 1
fi
filtered_args=()
for arg in "$@"; do
# Drop any argument that is exactly or contains the injected workdir switch.
if [[ "${arg}" == *"workdir:"* ]]; then
# If the arg also contains other comma-separated parts, keep the non-workdir pieces.
IFS=',' read -r -a parts <<< "${arg}"
for part in "${parts[@]}"; do
[[ "${part}" == *"workdir:"* || -z "${part}" ]] && continue
filtered_args+=("${part}")
done
continue
fi
filtered_args+=("${arg}")
done
exec "${real_dotnet}" "${filtered_args[@]}"

View File

@@ -1,26 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Ensures OpenSSL 1.1 shim is discoverable for Mongo2Go by exporting LD_LIBRARY_PATH.
# Safe for repeated invocation; respects STELLAOPS_OPENSSL11_SHIM override.
ROOT=${STELLAOPS_REPO_ROOT:-$(git rev-parse --show-toplevel 2>/dev/null || pwd)}
SHIM_DIR=${STELLAOPS_OPENSSL11_SHIM:-"${ROOT}/src/__Tests/native/openssl-1.1/linux-x64"}
if [[ ! -d "${SHIM_DIR}" ]]; then
echo "::warning ::OpenSSL 1.1 shim directory not found at ${SHIM_DIR}; Mongo2Go tests may fail" >&2
exit 0
fi
export LD_LIBRARY_PATH="${SHIM_DIR}:${LD_LIBRARY_PATH:-}"
export STELLAOPS_OPENSSL11_SHIM="${SHIM_DIR}"
# Persist for subsequent CI steps when available
if [[ -n "${GITHUB_ENV:-}" ]]; then
{
echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}"
echo "STELLAOPS_OPENSSL11_SHIM=${STELLAOPS_OPENSSL11_SHIM}"
} >> "${GITHUB_ENV}"
fi
echo "OpenSSL 1.1 shim enabled (LD_LIBRARY_PATH=${LD_LIBRARY_PATH})"

View File

@@ -1,53 +0,0 @@
#!/bin/bash
# validate-compose.sh - Validate all Docker Compose profiles
# Used by CI/CD pipelines to ensure Compose configurations are valid
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)"
COMPOSE_DIR="${REPO_ROOT}/devops/compose"
# Default profiles to validate
PROFILES=(dev stage prod airgap mirror)
echo "=== Docker Compose Validation ==="
echo "Compose directory: $COMPOSE_DIR"
# Check if compose directory exists
if [[ ! -d "$COMPOSE_DIR" ]]; then
echo "::warning::Compose directory not found at $COMPOSE_DIR"
exit 0
fi
# Check for base docker-compose.yml
BASE_COMPOSE="$COMPOSE_DIR/docker-compose.yml"
if [[ ! -f "$BASE_COMPOSE" ]]; then
echo "::warning::Base docker-compose.yml not found at $BASE_COMPOSE"
exit 0
fi
FAILED=0
for profile in "${PROFILES[@]}"; do
OVERLAY="$COMPOSE_DIR/docker-compose.$profile.yml"
if [[ -f "$OVERLAY" ]]; then
echo "=== Validating docker-compose.$profile.yml ==="
if docker compose -f "$BASE_COMPOSE" -f "$OVERLAY" config --quiet 2>&1; then
echo "✓ Profile '$profile' is valid"
else
echo "✗ Profile '$profile' validation failed"
FAILED=1
fi
else
echo "⊘ Skipping profile '$profile' (no overlay file)"
fi
done
if [[ $FAILED -eq 1 ]]; then
echo "::error::One or more Compose profiles failed validation"
exit 1
fi
echo "=== All Compose profiles valid! ==="

View File

@@ -1,59 +0,0 @@
#!/bin/bash
# validate-helm.sh - Validate Helm charts
# Used by CI/CD pipelines to ensure Helm charts are valid
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)"
HELM_DIR="${REPO_ROOT}/devops/helm"
echo "=== Helm Chart Validation ==="
echo "Helm directory: $HELM_DIR"
# Check if helm is installed
if ! command -v helm &>/dev/null; then
echo "::error::Helm is not installed"
exit 1
fi
# Check if helm directory exists
if [[ ! -d "$HELM_DIR" ]]; then
echo "::warning::Helm directory not found at $HELM_DIR"
exit 0
fi
FAILED=0
# Find all Chart.yaml files (indicates a Helm chart)
while IFS= read -r -d '' chart_file; do
chart_dir="$(dirname "$chart_file")"
chart_name="$(basename "$chart_dir")"
echo "=== Validating chart: $chart_name ==="
# Lint the chart
if helm lint "$chart_dir" 2>&1; then
echo "✓ Chart '$chart_name' lint passed"
else
echo "✗ Chart '$chart_name' lint failed"
FAILED=1
continue
fi
# Template the chart (dry-run)
if helm template "$chart_name" "$chart_dir" --debug >/dev/null 2>&1; then
echo "✓ Chart '$chart_name' template succeeded"
else
echo "✗ Chart '$chart_name' template failed"
FAILED=1
fi
done < <(find "$HELM_DIR" -name "Chart.yaml" -print0)
if [[ $FAILED -eq 1 ]]; then
echo "::error::One or more Helm charts failed validation"
exit 1
fi
echo "=== All Helm charts valid! ==="

View File

@@ -1,201 +0,0 @@
#!/usr/bin/env bash
# License validation script for StellaOps CI
# Usage: validate-licenses.sh <type> <input-file>
# type: nuget | npm
# input-file: Path to package list or license-checker output
set -euo pipefail
# SPDX identifiers for licenses compatible with AGPL-3.0-or-later
ALLOWED_LICENSES=(
"MIT"
"Apache-2.0"
"Apache 2.0"
"BSD-2-Clause"
"BSD-3-Clause"
"BSD"
"ISC"
"0BSD"
"CC0-1.0"
"CC0"
"Unlicense"
"PostgreSQL"
"MPL-2.0"
"MPL 2.0"
"LGPL-2.1-or-later"
"LGPL-3.0-or-later"
"GPL-3.0-or-later"
"AGPL-3.0-or-later"
"Zlib"
"WTFPL"
"BlueOak-1.0.0"
"Python-2.0"
"(MIT OR Apache-2.0)"
"(Apache-2.0 OR MIT)"
"MIT OR Apache-2.0"
"Apache-2.0 OR MIT"
)
# Licenses that are OK but should be noted
CONDITIONAL_LICENSES=(
"MPL-2.0"
"LGPL-2.1-or-later"
"LGPL-3.0-or-later"
"CC-BY-4.0"
)
# Licenses that are NOT compatible with AGPL-3.0-or-later
BLOCKED_LICENSES=(
"GPL-2.0-only"
"SSPL-1.0"
"SSPL"
"BUSL-1.1"
"BSL-1.0"
"Commons Clause"
"Proprietary"
"Commercial"
"UNLICENSED"
)
TYPE="${1:-}"
INPUT="${2:-}"
if [[ -z "$TYPE" || -z "$INPUT" ]]; then
echo "Usage: $0 <nuget|npm> <input-file>"
exit 1
fi
if [[ ! -f "$INPUT" ]]; then
echo "ERROR: Input file not found: $INPUT"
exit 1
fi
echo "=== StellaOps License Validation ==="
echo "Type: $TYPE"
echo "Input: $INPUT"
echo ""
found_blocked=0
found_conditional=0
found_unknown=0
validate_npm() {
local input="$1"
echo "Validating npm licenses..."
# Extract licenses from license-checker JSON output
if command -v jq &> /dev/null; then
# Feed the loop via process substitution (see the matching 'done' below): a piped
# `while` runs in a subshell and would silently discard the counter updates.
while read -r line; do
pkg=$(echo "$line" | cut -d: -f1)
license=$(echo "$line" | cut -d: -f2- | xargs)
# Check if license is blocked
for blocked in "${BLOCKED_LICENSES[@]}"; do
if [[ "$license" == *"$blocked"* ]]; then
echo "BLOCKED: $pkg uses '$license'"
found_blocked=$((found_blocked + 1))
fi
done
# Check if license is allowed
allowed=0
for ok_license in "${ALLOWED_LICENSES[@]}"; do
if [[ "$license" == *"$ok_license"* ]]; then
allowed=1
break
fi
done
if [[ $allowed -eq 0 ]]; then
echo "UNKNOWN: $pkg uses '$license'"
found_unknown=$((found_unknown + 1))
fi
done < <(jq -r 'to_entries[] | "\(.key): \(.value.licenses)"' "$input" 2>/dev/null)
else
echo "WARNING: jq not available, performing basic grep check"
for blocked in "${BLOCKED_LICENSES[@]}"; do
if grep -qi "$blocked" "$input"; then
echo "BLOCKED: Found potentially blocked license: $blocked"
found_blocked=$((found_blocked + 1))
fi
done
fi
}
validate_nuget() {
local input="$1"
echo "Validating NuGet licenses..."
# NuGet package list doesn't include licenses directly
# We check for known problematic packages
# Known packages with compatible licenses (allowlist approach for critical packages)
known_good_patterns=(
"Microsoft."
"System."
"Newtonsoft.Json"
"Serilog"
"BouncyCastle"
"Npgsql"
"Dapper"
"Polly"
"xunit"
"Moq"
"FluentAssertions"
"CycloneDX"
"YamlDotNet"
"StackExchange.Redis"
"Google."
"AWSSDK."
"Grpc."
)
# Check if any packages don't match known patterns
echo "Checking for unknown packages..."
# This is informational - we trust the allowlist in THIRD-PARTY-DEPENDENCIES.md
echo "OK: NuGet validation relies on documented license allowlist"
echo "See: docs/legal/THIRD-PARTY-DEPENDENCIES.md"
}
case "$TYPE" in
npm)
validate_npm "$INPUT"
;;
nuget)
validate_nuget "$INPUT"
;;
*)
echo "ERROR: Unknown type: $TYPE"
echo "Supported types: nuget, npm"
exit 1
;;
esac
echo ""
echo "=== Validation Summary ==="
echo "Blocked licenses found: $found_blocked"
echo "Conditional licenses found: $found_conditional"
echo "Unknown licenses found: $found_unknown"
if [[ $found_blocked -gt 0 ]]; then
echo ""
echo "ERROR: Blocked licenses detected!"
echo "These licenses are NOT compatible with AGPL-3.0-or-later"
echo "Please remove or replace the affected packages"
exit 1
fi
if [[ $found_unknown -gt 0 ]]; then
echo ""
echo "WARNING: Unknown licenses detected"
echo "Please review and add to allowlist if compatible"
echo "See: docs/legal/LICENSE-COMPATIBILITY.md"
# Don't fail on unknown - just warn
fi
echo ""
echo "License validation: PASSED"
exit 0
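
The npm path can be exercised in isolation against a tiny hand-made license-checker document (package names invented):

cat > /tmp/licenses.json << 'EOF'
{
  "left-pad@1.0.0": { "licenses": "MIT" },
  "mystery@2.0.0": { "licenses": "SSPL-1.0" }
}
EOF
jq -r 'to_entries[] | "\(.key): \(.value.licenses)"' /tmp/licenses.json
# left-pad@1.0.0: MIT            -> allowed
# mystery@2.0.0: SSPL-1.0        -> would be reported as BLOCKED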

View File

@@ -1,260 +0,0 @@
#!/usr/bin/env bash
# Migration Validation Script
# Validates migration naming conventions, detects duplicates, and checks for issues.
#
# Usage:
# ./validate-migrations.sh [--strict] [--fix-scanner]
#
# Options:
# --strict Exit with error on any warning
# --fix-scanner Generate rename commands for Scanner duplicates
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)"
STRICT_MODE=false
FIX_SCANNER=false
EXIT_CODE=0
# Parse arguments
for arg in "$@"; do
case $arg in
--strict)
STRICT_MODE=true
shift
;;
--fix-scanner)
FIX_SCANNER=true
shift
;;
esac
done
echo "=== Migration Validation ==="
echo "Repository: $REPO_ROOT"
echo ""
# Colors for output
RED='\033[0;31m'
YELLOW='\033[1;33m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color
# Track issues
ERRORS=()
WARNINGS=()
# Function to check for duplicates in a directory
check_duplicates() {
local dir="$1"
local module="$2"
if [ ! -d "$dir" ]; then
return
fi
# Extract numeric prefixes and find duplicates
local duplicates
duplicates=$(find "$dir" -maxdepth 1 -name "*.sql" -printf "%f\n" 2>/dev/null | \
sed -E 's/^([0-9]+)_.*/\1/' | \
sort | uniq -d)
if [ -n "$duplicates" ]; then
for prefix in $duplicates; do
local files
files=$(find "$dir" -maxdepth 1 -name "${prefix}_*.sql" -printf "%f\n" | tr '\n' ', ' | sed 's/,$//')
ERRORS+=("[$module] Duplicate prefix $prefix: $files")
done
fi
}
# Function to check naming convention
check_naming() {
local dir="$1"
local module="$2"
if [ ! -d "$dir" ]; then
return
fi
find "$dir" -maxdepth 1 -name "*.sql" -printf "%f\n" 2>/dev/null | while read -r file; do
# Check standard pattern: NNN_description.sql
if [[ "$file" =~ ^[0-9]{3}_[a-z0-9_]+\.sql$ ]]; then
continue # Valid standard
fi
# Check seed pattern: SNNN_description.sql
if [[ "$file" =~ ^S[0-9]{3}_[a-z0-9_]+\.sql$ ]]; then
continue # Valid seed
fi
# Check data migration pattern: DMNNN_description.sql
if [[ "$file" =~ ^DM[0-9]{3}_[a-z0-9_]+\.sql$ ]]; then
continue # Valid data migration
fi
# Check for Flyway-style
if [[ "$file" =~ ^V[0-9]+.*\.sql$ ]]; then
WARNINGS+=("[$module] Flyway-style naming: $file (consider NNN_description.sql)")
continue
fi
# Check for EF Core timestamp style
if [[ "$file" =~ ^[0-9]{14,}_.*\.sql$ ]]; then
WARNINGS+=("[$module] EF Core timestamp naming: $file (consider NNN_description.sql)")
continue
fi
# Check for 4-digit prefix
if [[ "$file" =~ ^[0-9]{4}_.*\.sql$ ]]; then
WARNINGS+=("[$module] 4-digit prefix: $file (standard is 3-digit NNN_description.sql)")
continue
fi
# Non-standard
WARNINGS+=("[$module] Non-standard naming: $file")
done < <(find "$dir" -maxdepth 1 -name "*.sql" -printf "%f\n" 2>/dev/null)
}
# Function to check for dangerous operations in startup migrations
check_dangerous_ops() {
local dir="$1"
local module="$2"
if [ ! -d "$dir" ]; then
return
fi
find "$dir" -maxdepth 1 -name "*.sql" -printf "%f\n" 2>/dev/null | while read -r file; do
local filepath="$dir/$file"
local prefix
prefix=$(echo "$file" | sed -E 's/^([0-9]+)_.*/\1/')
# Only check startup migrations (001-099)
if [[ "$prefix" =~ ^0[0-9]{2}$ ]] && [ "$prefix" -lt 100 ]; then
# Check for DROP TABLE without IF EXISTS (the lookahead needs PCRE: grep -P, not -E)
if grep -qP "DROP\s+TABLE\s+(?!IF\s+EXISTS)" "$filepath" 2>/dev/null; then
ERRORS+=("[$module] $file: DROP TABLE without IF EXISTS in startup migration")
fi
# Check for DROP COLUMN (breaking change in startup)
if grep -qiE "ALTER\s+TABLE.*DROP\s+COLUMN" "$filepath" 2>/dev/null; then
ERRORS+=("[$module] $file: DROP COLUMN in startup migration (should be release migration 100+)")
fi
# Check for TRUNCATE
if grep -qiE "^\s*TRUNCATE" "$filepath" 2>/dev/null; then
ERRORS+=("[$module] $file: TRUNCATE in startup migration")
fi
fi
done < <(find "$dir" -maxdepth 1 -name "*.sql" -printf "%f\n" 2>/dev/null)
}
# Scan all module migration directories
echo "Scanning migration directories..."
echo ""
# Define module migration paths
declare -A MIGRATION_PATHS
MIGRATION_PATHS=(
["Authority"]="src/Authority/__Libraries/StellaOps.Authority.Storage.Postgres/Migrations"
["Concelier"]="src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Migrations"
["Excititor"]="src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Migrations"
["Policy"]="src/Policy/__Libraries/StellaOps.Policy.Storage.Postgres/Migrations"
["Scheduler"]="src/Scheduler/__Libraries/StellaOps.Scheduler.Storage.Postgres/Migrations"
["Notify"]="src/Notify/__Libraries/StellaOps.Notify.Storage.Postgres/Migrations"
["Scanner"]="src/Scanner/__Libraries/StellaOps.Scanner.Storage/Postgres/Migrations"
["Scanner.Triage"]="src/Scanner/__Libraries/StellaOps.Scanner.Triage/Migrations"
["Attestor"]="src/Attestor/__Libraries/StellaOps.Attestor.Persistence/Migrations"
["Signer"]="src/Signer/__Libraries/StellaOps.Signer.KeyManagement/Migrations"
["Signals"]="src/Signals/StellaOps.Signals.Storage.Postgres/Migrations"
["EvidenceLocker"]="src/EvidenceLocker/StellaOps.EvidenceLocker/StellaOps.EvidenceLocker.Infrastructure/Db/Migrations"
["ExportCenter"]="src/ExportCenter/StellaOps.ExportCenter/StellaOps.ExportCenter.Infrastructure/Db/Migrations"
["IssuerDirectory"]="src/IssuerDirectory/StellaOps.IssuerDirectory/StellaOps.IssuerDirectory.Storage.Postgres/Migrations"
["Orchestrator"]="src/Orchestrator/StellaOps.Orchestrator/StellaOps.Orchestrator.Infrastructure/migrations"
["TimelineIndexer"]="src/TimelineIndexer/StellaOps.TimelineIndexer/StellaOps.TimelineIndexer.Infrastructure/Db/Migrations"
["BinaryIndex"]="src/BinaryIndex/__Libraries/StellaOps.BinaryIndex.Persistence/Migrations"
["Unknowns"]="src/Unknowns/__Libraries/StellaOps.Unknowns.Storage.Postgres/Migrations"
["VexHub"]="src/VexHub/__Libraries/StellaOps.VexHub.Storage.Postgres/Migrations"
)
for module in "${!MIGRATION_PATHS[@]}"; do
path="$REPO_ROOT/${MIGRATION_PATHS[$module]}"
if [ -d "$path" ]; then
echo "Checking: $module"
check_duplicates "$path" "$module"
check_naming "$path" "$module"
check_dangerous_ops "$path" "$module"
fi
done
echo ""
# Report errors
if [ ${#ERRORS[@]} -gt 0 ]; then
echo -e "${RED}=== ERRORS (${#ERRORS[@]}) ===${NC}"
for error in "${ERRORS[@]}"; do
echo -e "${RED}$error${NC}"
done
EXIT_CODE=1
echo ""
fi
# Report warnings
if [ ${#WARNINGS[@]} -gt 0 ]; then
echo -e "${YELLOW}=== WARNINGS (${#WARNINGS[@]}) ===${NC}"
for warning in "${WARNINGS[@]}"; do
echo -e "${YELLOW}$warning${NC}"
done
if [ "$STRICT_MODE" = true ]; then
EXIT_CODE=1
fi
echo ""
fi
# Scanner fix suggestions
if [ "$FIX_SCANNER" = true ]; then
echo "=== Scanner Migration Rename Suggestions ==="
echo "# Run these commands to fix Scanner duplicate migrations:"
echo ""
SCANNER_DIR="$REPO_ROOT/src/Scanner/__Libraries/StellaOps.Scanner.Storage/Postgres/Migrations"
if [ -d "$SCANNER_DIR" ]; then
# Map old names to new sequential numbers
cat << 'EOF'
# Before running: backup the schema_migrations table!
# After renaming: update schema_migrations.migration_name to match new names
cd src/Scanner/__Libraries/StellaOps.Scanner.Storage/Postgres/Migrations
# Fix duplicate 009 prefixes
git mv 009_call_graph_tables.sql 020_call_graph_tables.sql
git mv 009_smart_diff_tables_search_path.sql 021_smart_diff_tables_search_path.sql
# Fix duplicate 010 prefixes
git mv 010_reachability_drift_tables.sql 022_reachability_drift_tables.sql
git mv 010_scanner_api_ingestion.sql 023_scanner_api_ingestion.sql
git mv 010_smart_diff_priority_score_widen.sql 024_smart_diff_priority_score_widen.sql
# Fix duplicate 014 prefixes
git mv 014_epss_triage_columns.sql 025_epss_triage_columns.sql
git mv 014_vuln_surfaces.sql 026_vuln_surfaces.sql
# Renumber subsequent migrations
git mv 011_epss_raw_layer.sql 027_epss_raw_layer.sql
git mv 012_epss_signal_layer.sql 028_epss_signal_layer.sql
git mv 013_witness_storage.sql 029_witness_storage.sql
git mv 015_vuln_surface_triggers_update.sql 030_vuln_surface_triggers_update.sql
git mv 016_reach_cache.sql 031_reach_cache.sql
git mv 017_idempotency_keys.sql 032_idempotency_keys.sql
git mv 018_binary_evidence.sql 033_binary_evidence.sql
git mv 019_func_proof_tables.sql 034_func_proof_tables.sql
EOF
fi
echo ""
fi
# Summary
if [ $EXIT_CODE -eq 0 ]; then
echo -e "${GREEN}=== VALIDATION PASSED ===${NC}"
else
echo -e "${RED}=== VALIDATION FAILED ===${NC}"
fi
exit $EXIT_CODE
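
The three accepted prefixes can be sanity-checked in isolation (file names here are invented):

for f in 001_init_schema.sql S001_seed_tenants.sql DM002_backfill_hashes.sql V12__flyway_style.sql; do
  if [[ "$f" =~ ^([0-9]{3}|S[0-9]{3}|DM[0-9]{3})_[a-z0-9_]+\.sql$ ]]; then
    echo "ok   $f"
  else
    echo "warn $f"   # only V12__flyway_style.sql lands here
  fi
done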

View File

@@ -1,244 +0,0 @@
#!/bin/bash
# scripts/validate-sbom.sh
# Sprint: SPRINT_8200_0001_0003 - SBOM Schema Validation in CI
# Task: SCHEMA-8200-004 - Create validate-sbom.sh wrapper for sbom-utility
#
# Validates SBOM files against official CycloneDX JSON schemas.
# Uses sbom-utility for CycloneDX validation.
#
# Usage:
# ./scripts/validate-sbom.sh <sbom-file> [--schema <schema-path>]
# ./scripts/validate-sbom.sh src/__Tests/__Benchmarks/golden-corpus/sample.cyclonedx.json
# ./scripts/validate-sbom.sh --all # Validate all CycloneDX fixtures
#
# Exit codes:
# 0 - All validations passed
# 1 - Validation failed or error
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
SCHEMA_DIR="${REPO_ROOT}/docs/schemas"
DEFAULT_SCHEMA="${SCHEMA_DIR}/cyclonedx-bom-1.6.schema.json"
SBOM_UTILITY_VERSION="v0.16.0"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
log_info() {
echo -e "${GREEN}[INFO]${NC} $*"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $*"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $*"
}
check_sbom_utility() {
if ! command -v sbom-utility &> /dev/null; then
log_warn "sbom-utility not found in PATH"
log_info "Installing sbom-utility ${SBOM_UTILITY_VERSION}..."
# Detect OS and architecture
local os arch
case "$(uname -s)" in
Linux*) os="linux";;
Darwin*) os="darwin";;
MINGW*|MSYS*|CYGWIN*) os="windows";;
*) log_error "Unsupported OS: $(uname -s)"; exit 1;;
esac
case "$(uname -m)" in
x86_64|amd64) arch="amd64";;
arm64|aarch64) arch="arm64";;
*) log_error "Unsupported architecture: $(uname -m)"; exit 1;;
esac
local url="https://github.com/CycloneDX/sbom-utility/releases/download/${SBOM_UTILITY_VERSION}/sbom-utility-${SBOM_UTILITY_VERSION}-${os}-${arch}.tar.gz"
local temp_dir
temp_dir=$(mktemp -d)
log_info "Downloading from ${url}..."
curl -sSfL "${url}" | tar xz -C "${temp_dir}"
if [[ "$os" == "windows" ]]; then
log_info "Please add ${temp_dir}/sbom-utility.exe to your PATH"
export PATH="${temp_dir}:${PATH}"
else
log_info "Installing to /usr/local/bin (may require sudo)..."
if [[ -w /usr/local/bin ]]; then
mv "${temp_dir}/sbom-utility" /usr/local/bin/
else
sudo mv "${temp_dir}/sbom-utility" /usr/local/bin/
fi
fi
rm -rf "${temp_dir}"
log_info "sbom-utility installed successfully"
fi
}
validate_cyclonedx() {
local sbom_file="$1"
local schema="${2:-$DEFAULT_SCHEMA}"
if [[ ! -f "$sbom_file" ]]; then
log_error "File not found: $sbom_file"
return 1
fi
if [[ ! -f "$schema" ]]; then
log_error "Schema not found: $schema"
log_info "Expected schema at: ${DEFAULT_SCHEMA}"
return 1
fi
# Detect if it's a CycloneDX file
if ! grep -q '"bomFormat"' "$sbom_file" 2>/dev/null; then
log_warn "File does not appear to be CycloneDX: $sbom_file"
log_info "Skipping (use validate-spdx.sh for SPDX files)"
return 0
fi
log_info "Validating: $sbom_file"
# Run sbom-utility validation
if sbom-utility validate --input-file "$sbom_file" --format json 2>&1; then
log_info "✓ Validation passed: $sbom_file"
return 0
else
log_error "✗ Validation failed: $sbom_file"
return 1
fi
}
validate_all() {
local fixture_dir="${REPO_ROOT}/src/__Tests/__Benchmarks/golden-corpus"
local failed=0
local passed=0
local skipped=0
log_info "Validating all CycloneDX fixtures in ${fixture_dir}..."
if [[ ! -d "$fixture_dir" ]]; then
log_error "Fixture directory not found: $fixture_dir"
return 1
fi
while IFS= read -r -d '' file; do
if grep -q '"bomFormat".*"CycloneDX"' "$file" 2>/dev/null; then
if validate_cyclonedx "$file"; then
((passed++))
else
((failed++))
fi
else
log_info "Skipping non-CycloneDX file: $file"
((skipped++))
fi
done < <(find "$fixture_dir" -type f -name '*.json' -print0)
echo ""
log_info "Validation Summary:"
log_info " Passed: ${passed}"
log_info " Failed: ${failed}"
log_info " Skipped: ${skipped}"
if [[ $failed -gt 0 ]]; then
log_error "Some validations failed!"
return 1
fi
log_info "All CycloneDX validations passed!"
return 0
}
usage() {
cat << EOF
Usage: $(basename "$0") [OPTIONS] <sbom-file>
Validates CycloneDX SBOM files against official JSON schemas.
Options:
--all Validate all CycloneDX fixtures in src/__Tests/__Benchmarks/golden-corpus/
--schema <path> Use custom schema file (default: docs/schemas/cyclonedx-bom-1.6.schema.json)
--help, -h Show this help message
Examples:
$(basename "$0") sample.cyclonedx.json
$(basename "$0") --schema custom-schema.json sample.json
$(basename "$0") --all
Exit codes:
0 All validations passed
1 Validation failed or error
EOF
}
main() {
local schema="$DEFAULT_SCHEMA"
local validate_all_flag=false
local files=()
while [[ $# -gt 0 ]]; do
case "$1" in
--all)
validate_all_flag=true
shift
;;
--schema)
schema="$2"
shift 2
;;
--help|-h)
usage
exit 0
;;
-*)
log_error "Unknown option: $1"
usage
exit 1
;;
*)
files+=("$1")
shift
;;
esac
done
# Ensure sbom-utility is available
check_sbom_utility
if [[ "$validate_all_flag" == "true" ]]; then
validate_all
exit $?
fi
if [[ ${#files[@]} -eq 0 ]]; then
log_error "No SBOM file specified"
usage
exit 1
fi
local failed=0
for file in "${files[@]}"; do
if ! validate_cyclonedx "$file" "$schema"; then
failed=$((failed + 1))
fi
done
if [[ $failed -gt 0 ]]; then
exit 1
fi
exit 0
}
main "$@"

View File

@@ -1,277 +0,0 @@
#!/bin/bash
# scripts/validate-spdx.sh
# Sprint: SPRINT_8200_0001_0003 - SBOM Schema Validation in CI
# Task: SCHEMA-8200-005 - Create validate-spdx.sh wrapper for SPDX validation
#
# Validates SPDX files against SPDX 3.0.1 JSON schema.
# Uses pyspdxtools (spdx-tools) for SPDX validation.
#
# Usage:
# ./scripts/validate-spdx.sh <spdx-file>
# ./scripts/validate-spdx.sh bench/golden-corpus/sample.spdx.json
# ./scripts/validate-spdx.sh --all # Validate all SPDX fixtures
#
# Exit codes:
# 0 - All validations passed
# 1 - Validation failed or error
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
SCHEMA_DIR="${REPO_ROOT}/docs/schemas"
DEFAULT_SCHEMA="${SCHEMA_DIR}/spdx-jsonld-3.0.1.schema.json"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
log_info() {
echo -e "${GREEN}[INFO]${NC} $*"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $*"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $*"
}
check_spdx_tools() {
if ! command -v pyspdxtools &> /dev/null; then
log_warn "pyspdxtools not found in PATH"
log_info "Installing spdx-tools via pip..."
if command -v pip3 &> /dev/null; then
pip3 install --user spdx-tools
elif command -v pip &> /dev/null; then
pip install --user spdx-tools
else
log_error "pip not found. Please install Python and pip first."
exit 1
fi
log_info "spdx-tools installed successfully"
# Refresh PATH for newly installed tools
if [[ -d "${HOME}/.local/bin" ]]; then
export PATH="${HOME}/.local/bin:${PATH}"
fi
fi
}
check_ajv() {
if ! command -v ajv &> /dev/null; then
log_warn "ajv-cli not found in PATH"
log_info "Installing ajv-cli via npm..."
if command -v npm &> /dev/null; then
npm install -g ajv-cli ajv-formats
else
log_warn "npm not found. JSON schema validation will be skipped."
return 1
fi
log_info "ajv-cli installed successfully"
fi
return 0
}
validate_spdx_schema() {
local spdx_file="$1"
local schema="$2"
if check_ajv; then
log_info "Validating against JSON schema: $schema"
if ajv validate -s "$schema" -d "$spdx_file" --spec=draft2020 2>&1; then
return 0
else
return 1
fi
else
log_warn "Skipping JSON schema validation (ajv not available)"
return 0
fi
}
validate_spdx() {
local spdx_file="$1"
local schema="${2:-$DEFAULT_SCHEMA}"
if [[ ! -f "$spdx_file" ]]; then
log_error "File not found: $spdx_file"
return 1
fi
# Detect if it's an SPDX file (JSON-LD format)
if ! grep -qE '"@context"|"spdxId"|"spdxVersion"' "$spdx_file" 2>/dev/null; then
log_warn "File does not appear to be SPDX: $spdx_file"
log_info "Skipping (use validate-sbom.sh for CycloneDX files)"
return 0
fi
log_info "Validating: $spdx_file"
local validation_passed=true
# Try pyspdxtools validation first (semantic validation)
if command -v pyspdxtools &> /dev/null; then
log_info "Running SPDX semantic validation..."
if pyspdxtools validate "$spdx_file" 2>&1; then
log_info "✓ SPDX semantic validation passed"
else
# pyspdxtools may not support SPDX 3.0 yet
log_warn "pyspdxtools validation failed or not supported for this format"
log_info "Falling back to JSON schema validation only"
fi
fi
# JSON schema validation (syntax validation)
if [[ -f "$schema" ]]; then
if validate_spdx_schema "$spdx_file" "$schema"; then
log_info "✓ JSON schema validation passed"
else
log_error "✗ JSON schema validation failed"
validation_passed=false
fi
else
log_warn "Schema file not found: $schema"
log_info "Skipping schema validation"
fi
if [[ "$validation_passed" == "true" ]]; then
log_info "✓ Validation passed: $spdx_file"
return 0
else
log_error "✗ Validation failed: $spdx_file"
return 1
fi
}
validate_all() {
local fixture_dir="${REPO_ROOT}/bench/golden-corpus"
local failed=0
local passed=0
local skipped=0
log_info "Validating all SPDX fixtures in ${fixture_dir}..."
if [[ ! -d "$fixture_dir" ]]; then
log_error "Fixture directory not found: $fixture_dir"
return 1
fi
while IFS= read -r -d '' file; do
# Check if it's an SPDX file
if grep -qE '"@context"|"spdxVersion"' "$file" 2>/dev/null; then
if validate_spdx "$file"; then
((passed++))
else
((failed++))
fi
else
log_info "Skipping non-SPDX file: $file"
((skipped++))
fi
done < <(find "$fixture_dir" -type f \( -name '*spdx*.json' -o -name '*.spdx.json' \) -print0)
echo ""
log_info "Validation Summary:"
log_info " Passed: ${passed}"
log_info " Failed: ${failed}"
log_info " Skipped: ${skipped}"
if [[ $failed -gt 0 ]]; then
log_error "Some validations failed!"
return 1
fi
log_info "All SPDX validations passed!"
return 0
}
usage() {
cat << EOF
Usage: $(basename "$0") [OPTIONS] <spdx-file>
Validates SPDX files against SPDX 3.0.1 JSON schema.
Options:
--all Validate all SPDX fixtures in bench/golden-corpus/
--schema <path> Use custom schema file (default: docs/schemas/spdx-jsonld-3.0.1.schema.json)
--help, -h Show this help message
Examples:
$(basename "$0") sample.spdx.json
$(basename "$0") --schema custom-schema.json sample.json
$(basename "$0") --all
Exit codes:
0 All validations passed
1 Validation failed or error
EOF
}
main() {
local schema="$DEFAULT_SCHEMA"
local validate_all_flag=false
local files=()
while [[ $# -gt 0 ]]; do
case "$1" in
--all)
validate_all_flag=true
shift
;;
--schema)
schema="$2"
shift 2
;;
--help|-h)
usage
exit 0
;;
-*)
log_error "Unknown option: $1"
usage
exit 1
;;
*)
files+=("$1")
shift
;;
esac
done
# Ensure tools are available
check_spdx_tools || true # Continue even if pyspdxtools install fails
if [[ "$validate_all_flag" == "true" ]]; then
validate_all
exit $?
fi
if [[ ${#files[@]} -eq 0 ]]; then
log_error "No SPDX file specified"
usage
exit 1
fi
local failed=0
for file in "${files[@]}"; do
if ! validate_spdx "$file" "$schema"; then
failed=$((failed + 1))
fi
done
if [[ $failed -gt 0 ]]; then
exit 1
fi
exit 0
}
main "$@"

View File

@@ -1,261 +0,0 @@
#!/bin/bash
# scripts/validate-vex.sh
# Sprint: SPRINT_8200_0001_0003 - SBOM Schema Validation in CI
# Task: SCHEMA-8200-006 - Create validate-vex.sh wrapper for OpenVEX validation
#
# Validates OpenVEX files against the OpenVEX 0.2.0 JSON schema.
# Uses ajv-cli for JSON schema validation.
#
# Usage:
# ./scripts/validate-vex.sh <vex-file>
# ./scripts/validate-vex.sh bench/golden-corpus/sample.vex.json
# ./scripts/validate-vex.sh --all # Validate all VEX fixtures
#
# Exit codes:
# 0 - All validations passed
# 1 - Validation failed or error
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
SCHEMA_DIR="${REPO_ROOT}/docs/schemas"
DEFAULT_SCHEMA="${SCHEMA_DIR}/openvex-0.2.0.schema.json"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
log_info() {
echo -e "${GREEN}[INFO]${NC} $*"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $*"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $*"
}
check_ajv() {
if ! command -v ajv &> /dev/null; then
log_warn "ajv-cli not found in PATH"
log_info "Installing ajv-cli via npm..."
if command -v npm &> /dev/null; then
npm install -g ajv-cli ajv-formats
elif command -v npx &> /dev/null; then
log_info "Using npx for ajv (no global install)"
return 0
else
log_error "npm/npx not found. Please install Node.js first."
exit 1
fi
log_info "ajv-cli installed successfully"
fi
}
run_ajv() {
local schema="$1"
local data="$2"
if command -v ajv &> /dev/null; then
ajv validate -s "$schema" -d "$data" --spec=draft2020 2>&1
elif command -v npx &> /dev/null; then
npx ajv-cli validate -s "$schema" -d "$data" --spec=draft2020 2>&1
else
log_error "No ajv available"
return 1
fi
}
validate_openvex() {
local vex_file="$1"
local schema="${2:-$DEFAULT_SCHEMA}"
if [[ ! -f "$vex_file" ]]; then
log_error "File not found: $vex_file"
return 1
fi
if [[ ! -f "$schema" ]]; then
log_error "Schema not found: $schema"
log_info "Expected schema at: ${DEFAULT_SCHEMA}"
log_info "Download from: https://raw.githubusercontent.com/openvex/spec/main/openvex_json_schema.json"
return 1
fi
# Detect if it's an OpenVEX file
if ! grep -qE '"@context".*"https://openvex.dev/ns"|"openvex"' "$vex_file" 2>/dev/null; then
log_warn "File does not appear to be OpenVEX: $vex_file"
log_info "Skipping (use validate-sbom.sh for CycloneDX files)"
return 0
fi
log_info "Validating: $vex_file"
# Run ajv validation
if run_ajv "$schema" "$vex_file"; then
log_info "✓ Validation passed: $vex_file"
return 0
else
log_error "✗ Validation failed: $vex_file"
return 1
fi
}
validate_all() {
local failed=0
local passed=0
local skipped=0
# Search multiple directories for VEX files
local search_dirs=(
"${REPO_ROOT}/bench/golden-corpus"
"${REPO_ROOT}/bench/vex-lattice"
"${REPO_ROOT}/datasets"
)
log_info "Validating all OpenVEX fixtures..."
for fixture_dir in "${search_dirs[@]}"; do
if [[ ! -d "$fixture_dir" ]]; then
log_warn "Directory not found, skipping: $fixture_dir"
continue
fi
log_info "Searching in: $fixture_dir"
while IFS= read -r -d '' file; do
# Check if it's an OpenVEX file
if grep -qE '"@context".*"https://openvex.dev/ns"|"openvex"' "$file" 2>/dev/null; then
if validate_openvex "$file"; then
((passed++))
else
((failed++))
fi
elif grep -q '"vex"' "$file" 2>/dev/null || [[ "$file" == *vex* ]]; then
# Might be VEX-related but not OpenVEX format
log_info "Checking potential VEX file: $file"
if grep -qE '"@context"' "$file" 2>/dev/null; then
if validate_openvex "$file"; then
((passed++))
else
((failed++))
fi
else
log_info "Skipping non-OpenVEX file: $file"
((skipped++))
fi
else
((skipped++))
fi
done < <(find "$fixture_dir" -type f \( -name '*vex*.json' -o -name '*.vex.json' -o -name '*openvex*.json' \) -print0 2>/dev/null || true)
done
echo ""
log_info "Validation Summary:"
log_info " Passed: ${passed}"
log_info " Failed: ${failed}"
log_info " Skipped: ${skipped}"
if [[ $failed -gt 0 ]]; then
log_error "Some validations failed!"
return 1
fi
if [[ $passed -eq 0 ]] && [[ $skipped -eq 0 ]]; then
log_warn "No OpenVEX files found to validate"
else
log_info "All OpenVEX validations passed!"
fi
return 0
}
usage() {
cat << EOF
Usage: $(basename "$0") [OPTIONS] <vex-file>
Validates OpenVEX files against the OpenVEX 0.2.0 JSON schema.
Options:
--all Validate all OpenVEX fixtures in bench/ and datasets/
--schema <path> Use custom schema file (default: docs/schemas/openvex-0.2.0.schema.json)
--help, -h Show this help message
Examples:
$(basename "$0") sample.vex.json
$(basename "$0") --schema custom-schema.json sample.json
$(basename "$0") --all
Exit codes:
0 All validations passed
1 Validation failed or error
EOF
}
main() {
local schema="$DEFAULT_SCHEMA"
local validate_all_flag=false
local files=()
while [[ $# -gt 0 ]]; do
case "$1" in
--all)
validate_all_flag=true
shift
;;
--schema)
schema="$2"
shift 2
;;
--help|-h)
usage
exit 0
;;
-*)
log_error "Unknown option: $1"
usage
exit 1
;;
*)
files+=("$1")
shift
;;
esac
done
# Ensure ajv is available
check_ajv
if [[ "$validate_all_flag" == "true" ]]; then
validate_all
exit $?
fi
if [[ ${#files[@]} -eq 0 ]]; then
log_error "No VEX file specified"
usage
exit 1
fi
local failed=0
for file in "${files[@]}"; do
if ! validate_openvex "$file" "$schema"; then
failed=$((failed + 1))
fi
done
if [[ $failed -gt 0 ]]; then
exit 1
fi
exit 0
}
main "$@"

View File

@@ -1,224 +0,0 @@
#!/bin/bash
# validate-workflows.sh - Validate Gitea Actions workflows
# Sprint: SPRINT_20251226_001_CICD
#
# Usage:
# ./validate-workflows.sh # Validate all workflows
# ./validate-workflows.sh --strict # Fail on any warning
# ./validate-workflows.sh --verbose # Show detailed output
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)"
WORKFLOWS_DIR="$REPO_ROOT/.gitea/workflows"
SCRIPTS_DIR="$REPO_ROOT/.gitea/scripts"
# Configuration
STRICT_MODE=false
VERBOSE=false
# Counters
PASSED=0
FAILED=0
WARNINGS=0
# Colors (if terminal supports it)
if [[ -t 1 ]]; then
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color
else
RED=''
GREEN=''
YELLOW=''
NC=''
fi
# Parse arguments
while [[ $# -gt 0 ]]; do
case $1 in
--strict)
STRICT_MODE=true
shift
;;
--verbose)
VERBOSE=true
shift
;;
--help)
echo "Usage: $0 [OPTIONS]"
echo ""
echo "Options:"
echo " --strict Fail on any warning"
echo " --verbose Show detailed output"
echo " --help Show this help message"
exit 0
;;
*)
echo "Unknown option: $1"
exit 1
;;
esac
done
echo "=== Gitea Workflow Validation ==="
echo "Workflows: $WORKFLOWS_DIR"
echo "Scripts: $SCRIPTS_DIR"
echo ""
# Check if workflows directory exists
if [[ ! -d "$WORKFLOWS_DIR" ]]; then
echo -e "${RED}ERROR: Workflows directory not found${NC}"
exit 1
fi
# Function to validate YAML syntax
validate_yaml_syntax() {
local file=$1
local name=$(basename "$file")
# Try python yaml parser first
if command -v python3 &>/dev/null; then
if python3 -c "import yaml; yaml.safe_load(open('$file'))" 2>/dev/null; then
return 0
else
return 1
fi
# Fallback to ruby if available
elif command -v ruby &>/dev/null; then
if ruby -ryaml -e "YAML.load_file('$file')" 2>/dev/null; then
return 0
else
return 1
fi
else
# Can't validate YAML, warn and skip
return 2
fi
}
# Function to extract script references from a workflow
extract_script_refs() {
local file=$1
# Look for patterns like: .gitea/scripts/*, scripts/*, ./devops/scripts/*
grep -oE '(\.gitea/scripts|scripts|devops/scripts)/[a-zA-Z0-9_/-]+\.(sh|py|js|mjs)' "$file" 2>/dev/null | sort -u || true
}
# Function to check if a script exists
check_script_exists() {
local script_path=$1
local full_path="$REPO_ROOT/$script_path"
if [[ -f "$full_path" ]]; then
return 0
else
return 1
fi
}
# Validate each workflow file
echo "=== Validating Workflow Syntax ==="
for workflow in "$WORKFLOWS_DIR"/*.yml "$WORKFLOWS_DIR"/*.yaml; do
[[ -e "$workflow" ]] || continue
name=$(basename "$workflow")
if [[ "$VERBOSE" == "true" ]]; then
echo "Checking: $name"
fi
exit_code=0
validate_yaml_syntax "$workflow" || exit_code=$?   # '||' capture keeps set -e from aborting on a failed check
if [[ $exit_code -eq 0 ]]; then
echo -e " ${GREEN}[PASS]${NC} $name - YAML syntax valid"
PASSED=$((PASSED + 1))   # ((PASSED++)) would return 1 at zero and trip set -e
elif [[ $exit_code -eq 2 ]]; then
echo -e " ${YELLOW}[SKIP]${NC} $name - No YAML parser available"
WARNINGS=$((WARNINGS + 1))
else
echo -e " ${RED}[FAIL]${NC} $name - YAML syntax error"
FAILED=$((FAILED + 1))
fi
done
echo ""
echo "=== Validating Script References ==="
# Check all script references
MISSING_SCRIPTS=()
for workflow in "$WORKFLOWS_DIR"/*.yml "$WORKFLOWS_DIR"/*.yaml; do
[[ -e "$workflow" ]] || continue
name=$(basename "$workflow")
refs=$(extract_script_refs "$workflow")
if [[ -z "$refs" ]]; then
if [[ "$VERBOSE" == "true" ]]; then
echo " $name: No script references found"
fi
continue
fi
while IFS= read -r script_ref; do
[[ -z "$script_ref" ]] && continue
if check_script_exists "$script_ref"; then
if [[ "$VERBOSE" == "true" ]]; then
echo -e " ${GREEN}[OK]${NC} $name -> $script_ref"
fi
else
echo -e " ${RED}[MISSING]${NC} $name -> $script_ref"
MISSING_SCRIPTS+=("$name: $script_ref")
WARNINGS=$((WARNINGS + 1))
fi
done <<< "$refs"
done
# Check that .gitea/scripts directories exist
echo ""
echo "=== Validating Script Directory Structure ==="
EXPECTED_DIRS=(build test validate sign release metrics evidence util)
for dir in "${EXPECTED_DIRS[@]}"; do
dir_path="$SCRIPTS_DIR/$dir"
if [[ -d "$dir_path" ]]; then
script_count=$(find "$dir_path" -maxdepth 1 -name "*.sh" -o -name "*.py" 2>/dev/null | wc -l)
echo -e " ${GREEN}[OK]${NC} $dir/ ($script_count scripts)"
else
echo -e " ${YELLOW}[WARN]${NC} $dir/ - Directory not found"
WARNINGS=$((WARNINGS + 1))
fi
done
# Summary
echo ""
echo "=== Validation Summary ==="
echo -e " Passed: ${GREEN}$PASSED${NC}"
echo -e " Failed: ${RED}$FAILED${NC}"
echo -e " Warnings: ${YELLOW}$WARNINGS${NC}"
if [[ ${#MISSING_SCRIPTS[@]} -gt 0 ]]; then
echo ""
echo "Missing script references:"
for ref in "${MISSING_SCRIPTS[@]}"; do
echo " - $ref"
done
fi
# Exit code
if [[ $FAILED -gt 0 ]]; then
echo ""
echo -e "${RED}FAILED: $FAILED validation(s) failed${NC}"
exit 1
fi
if [[ "$STRICT_MODE" == "true" && $WARNINGS -gt 0 ]]; then
echo ""
echo -e "${YELLOW}STRICT MODE: $WARNINGS warning(s) treated as errors${NC}"
exit 1
fi
echo ""
echo -e "${GREEN}All validations passed!${NC}"

View File

@@ -1,25 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Verifies binary artefacts live only in approved locations.
# Allowed roots: .nuget/packages (curated feed + cache), vendor (pinned binaries),
# offline (air-gap bundles/templates), plugins/tools/deploy/ops (module-owned binaries).
repo_root="$(git rev-parse --show-toplevel)"
cd "$repo_root"
# Extensions considered binary artefacts (dots escaped so "tar.gz" matches literally).
binary_ext="(nupkg|dll|exe|so|dylib|a|lib|tar|tar\.gz|tgz|zip|jar|deb|rpm|bin)"
# Locations allowed to contain binaries (leading dot escaped; the packages/ subtree is already covered by the prefix).
allowed_prefix="^(\.nuget/packages|vendor|offline|plugins|tools|deploy|ops|third_party|docs/artifacts|samples|src/.*/Fixtures|src/.*/fixtures)/"
# Only consider files that currently exist in the working tree (skip deleted placeholders).
violations=$(git ls-files | while read -r f; do [[ -f "$f" ]] && echo "$f"; done | grep -E "\\.${binary_ext}$" | grep -Ev "$allowed_prefix" || true)
if [[ -n "$violations" ]]; then
echo "Binary artefacts found outside approved directories:" >&2
echo "$violations" >&2
exit 1
fi
printf "Binary layout OK (allowed roots: %s)\n" "$allowed_prefix"

View File

@@ -1,70 +0,0 @@
name: Advisory AI Feed Release
on:
workflow_dispatch:
inputs:
allow_dev_key:
description: 'Allow dev key for testing (1=yes)'
required: false
default: '0'
push:
branches: [main]
paths:
- 'src/AdvisoryAI/feeds/**'
- 'docs/samples/advisory-feeds/**'
jobs:
package-feeds:
runs-on: ubuntu-22.04
env:
COSIGN_PRIVATE_KEY_B64: ${{ secrets.COSIGN_PRIVATE_KEY_B64 }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup cosign
uses: sigstore/cosign-installer@v3
with:
cosign-release: 'v2.6.0'
- name: Fallback to dev key when secret is absent
run: |
if [ -z "${COSIGN_PRIVATE_KEY_B64}" ]; then
echo "[warn] COSIGN_PRIVATE_KEY_B64 not set; using dev key for non-production"
echo "COSIGN_ALLOW_DEV_KEY=1" >> $GITHUB_ENV
echo "COSIGN_PASSWORD=stellaops-dev" >> $GITHUB_ENV
fi
# Manual override
if [ "${{ github.event.inputs.allow_dev_key }}" = "1" ]; then
echo "COSIGN_ALLOW_DEV_KEY=1" >> $GITHUB_ENV
echo "COSIGN_PASSWORD=stellaops-dev" >> $GITHUB_ENV
fi
- name: Package advisory feeds
run: |
chmod +x ops/deployment/advisory-ai/package-advisory-feeds.sh
ops/deployment/advisory-ai/package-advisory-feeds.sh
- name: Generate SBOM
run: |
# Install syft
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin v1.0.0
# Generate SBOM for feed bundle
syft dir:out/advisory-ai/feeds/stage \
-o spdx-json=out/advisory-ai/feeds/advisory-feeds.sbom.json \
--name advisory-feeds
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: advisory-feeds-${{ github.run_number }}
path: |
out/advisory-ai/feeds/advisory-feeds.tar.gz
out/advisory-ai/feeds/advisory-feeds.manifest.json
out/advisory-ai/feeds/advisory-feeds.manifest.dsse.json
out/advisory-ai/feeds/advisory-feeds.sbom.json
out/advisory-ai/feeds/provenance.json
if-no-files-found: warn
retention-days: 30

View File

@@ -1,28 +0,0 @@
name: Airgap Sealed CI Smoke
on:
push:
branches: [ main ]
paths:
- 'devops/airgap/**'
- '.gitea/workflows/airgap-sealed-ci.yml'
pull_request:
branches: [ main, develop ]
paths:
- 'devops/airgap/**'
- '.gitea/workflows/airgap-sealed-ci.yml'
jobs:
sealed-smoke:
runs-on: ubuntu-22.04
permissions:
contents: read
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Install dnslib
run: pip install dnslib
- name: Run sealed-mode smoke
run: sudo devops/airgap/sealed-ci-smoke.sh

View File

@@ -1,83 +0,0 @@
name: AOC Backfill Release
on:
workflow_dispatch:
inputs:
dataset_hash:
description: 'Dataset hash from dev rehearsal (leave empty for dev mode)'
required: false
default: ''
allow_dev_key:
description: 'Allow dev key for testing (1=yes)'
required: false
default: '0'
jobs:
package-backfill:
runs-on: ubuntu-22.04
env:
COSIGN_PRIVATE_KEY_B64: ${{ secrets.COSIGN_PRIVATE_KEY_B64 }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100
include-prerelease: true
- name: Setup cosign
uses: sigstore/cosign-installer@v3
with:
cosign-release: 'v2.6.0'
- name: Restore AOC CLI
run: dotnet restore src/Aoc/StellaOps.Aoc.Cli/StellaOps.Aoc.Cli.csproj
- name: Configure signing
run: |
if [ -z "${COSIGN_PRIVATE_KEY_B64}" ]; then
echo "[info] No production key; using dev key"
echo "COSIGN_ALLOW_DEV_KEY=1" >> $GITHUB_ENV
echo "COSIGN_PASSWORD=stellaops-dev" >> $GITHUB_ENV
fi
if [ "${{ github.event.inputs.allow_dev_key }}" = "1" ]; then
echo "COSIGN_ALLOW_DEV_KEY=1" >> $GITHUB_ENV
echo "COSIGN_PASSWORD=stellaops-dev" >> $GITHUB_ENV
fi
- name: Package AOC backfill release
run: |
chmod +x devops/aoc/package-backfill-release.sh
DATASET_HASH="${{ github.event.inputs.dataset_hash }}" \
devops/aoc/package-backfill-release.sh
env:
DATASET_HASH: ${{ github.event.inputs.dataset_hash }}
- name: Generate SBOM with syft
run: |
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin v1.0.0
syft dir:out/aoc/cli \
-o spdx-json=out/aoc/aoc-backfill-runner.sbom.json \
--name aoc-backfill-runner || true
- name: Verify checksums
run: |
cd out/aoc
sha256sum -c SHA256SUMS
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: aoc-backfill-release-${{ github.run_number }}
path: |
out/aoc/aoc-backfill-runner.tar.gz
out/aoc/aoc-backfill-runner.manifest.json
out/aoc/aoc-backfill-runner.sbom.json
out/aoc/aoc-backfill-runner.provenance.json
out/aoc/aoc-backfill-runner.dsse.json
out/aoc/SHA256SUMS
if-no-files-found: warn
retention-days: 30

View File

@@ -1,170 +0,0 @@
name: AOC Guard CI
on:
push:
branches: [ main ]
paths:
- 'src/Aoc/**'
- 'src/Concelier/**'
- 'src/Authority/**'
- 'src/Excititor/**'
- 'devops/aoc/**'
- '.gitea/workflows/aoc-guard.yml'
pull_request:
branches: [ main, develop ]
paths:
- 'src/Aoc/**'
- 'src/Concelier/**'
- 'src/Authority/**'
- 'src/Excititor/**'
- 'devops/aoc/**'
- '.gitea/workflows/aoc-guard.yml'
jobs:
aoc-guard:
runs-on: ubuntu-22.04
env:
DOTNET_VERSION: '10.0.100'
ARTIFACT_DIR: ${{ github.workspace }}/.artifacts
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Export OpenSSL 1.1 shim for Mongo2Go
run: .gitea/scripts/util/enable-openssl11-shim.sh
- name: Set up .NET SDK
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Restore analyzers
run: dotnet restore src/Aoc/__Analyzers/StellaOps.Aoc.Analyzers/StellaOps.Aoc.Analyzers.csproj
- name: Build analyzers
run: dotnet build src/Aoc/__Analyzers/StellaOps.Aoc.Analyzers/StellaOps.Aoc.Analyzers.csproj -c Release
- name: Run analyzers against ingestion projects
run: |
dotnet build src/Concelier/StellaOps.Concelier.Ingestion/StellaOps.Concelier.Ingestion.csproj -c Release /p:RunAnalyzers=true /p:TreatWarningsAsErrors=true
dotnet build src/Authority/StellaOps.Authority.Ingestion/StellaOps.Authority.Ingestion.csproj -c Release /p:RunAnalyzers=true /p:TreatWarningsAsErrors=true
dotnet build src/Excititor/StellaOps.Excititor.Ingestion/StellaOps.Excititor.Ingestion.csproj -c Release /p:RunAnalyzers=true /p:TreatWarningsAsErrors=true
- name: Run analyzer tests with coverage
run: |
mkdir -p $ARTIFACT_DIR
dotnet test src/Aoc/__Tests/StellaOps.Aoc.Analyzers.Tests/StellaOps.Aoc.Analyzers.Tests.csproj -c Release \
--settings src/Aoc/aoc.runsettings \
--collect:"XPlat Code Coverage" \
--logger "trx;LogFileName=aoc-analyzers-tests.trx" \
--results-directory $ARTIFACT_DIR
- name: Run AOC library tests with coverage
run: |
dotnet test src/Aoc/__Tests/StellaOps.Aoc.Tests/StellaOps.Aoc.Tests.csproj -c Release \
--settings src/Aoc/aoc.runsettings \
--collect:"XPlat Code Coverage" \
--logger "trx;LogFileName=aoc-lib-tests.trx" \
--results-directory $ARTIFACT_DIR
- name: Run AOC CLI tests with coverage
run: |
dotnet test src/Aoc/__Tests/StellaOps.Aoc.Cli.Tests/StellaOps.Aoc.Cli.Tests.csproj -c Release \
--settings src/Aoc/aoc.runsettings \
--collect:"XPlat Code Coverage" \
--logger "trx;LogFileName=aoc-cli-tests.trx" \
--results-directory $ARTIFACT_DIR
- name: Generate coverage report
run: |
dotnet tool install --global dotnet-reportgenerator-globaltool || true
reportgenerator \
-reports:"$ARTIFACT_DIR/**/coverage.cobertura.xml" \
-targetdir:"$ARTIFACT_DIR/coverage-report" \
-reporttypes:"Html;Cobertura;TextSummary" || true
if [ -f "$ARTIFACT_DIR/coverage-report/Summary.txt" ]; then
cat "$ARTIFACT_DIR/coverage-report/Summary.txt"
fi
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: aoc-guard-artifacts
path: ${{ env.ARTIFACT_DIR }}
aoc-verify:
needs: aoc-guard
runs-on: ubuntu-22.04
if: github.event_name != 'schedule'
env:
DOTNET_VERSION: '10.0.100'
ARTIFACT_DIR: ${{ github.workspace }}/.artifacts
AOC_VERIFY_SINCE: ${{ github.event.pull_request.base.sha || 'HEAD~1' }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Export OpenSSL 1.1 shim for Mongo2Go
run: .gitea/scripts/util/enable-openssl11-shim.sh
- name: Set up .NET SDK
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Run AOC verify
env:
STAGING_MONGO_URI: ${{ secrets.STAGING_MONGO_URI || vars.STAGING_MONGO_URI }}
STAGING_POSTGRES_URI: ${{ secrets.STAGING_POSTGRES_URI || vars.STAGING_POSTGRES_URI }}
run: |
mkdir -p $ARTIFACT_DIR
# Prefer PostgreSQL, fall back to MongoDB (legacy)
if [ -n "${STAGING_POSTGRES_URI:-}" ]; then
echo "Using PostgreSQL for AOC verification"
dotnet run --project src/Aoc/StellaOps.Aoc.Cli -- verify \
--since "$AOC_VERIFY_SINCE" \
--postgres "$STAGING_POSTGRES_URI" \
--output "$ARTIFACT_DIR/aoc-verify.json" \
--ndjson "$ARTIFACT_DIR/aoc-verify.ndjson" \
--verbose || VERIFY_EXIT=$?
elif [ -n "${STAGING_MONGO_URI:-}" ]; then
echo "Using MongoDB for AOC verification (deprecated)"
dotnet run --project src/Aoc/StellaOps.Aoc.Cli -- verify \
--since "$AOC_VERIFY_SINCE" \
--mongo "$STAGING_MONGO_URI" \
--output "$ARTIFACT_DIR/aoc-verify.json" \
--ndjson "$ARTIFACT_DIR/aoc-verify.ndjson" \
--verbose || VERIFY_EXIT=$?
else
echo "::warning::Neither STAGING_POSTGRES_URI nor STAGING_MONGO_URI set; running dry-run verification"
dotnet run --project src/Aoc/StellaOps.Aoc.Cli -- verify \
--since "$AOC_VERIFY_SINCE" \
--postgres "placeholder" \
--dry-run \
--verbose
exit 0
fi
if [ -n "${VERIFY_EXIT:-}" ] && [ "${VERIFY_EXIT}" -ne 0 ]; then
echo "::error::AOC verify reported violations"; exit ${VERIFY_EXIT}
fi
- name: Upload verify artifacts
if: always()
uses: actions/upload-artifact@v4
with:
name: aoc-verify-artifacts
path: ${{ env.ARTIFACT_DIR }}

View File

@@ -1,51 +0,0 @@
name: api-governance
on:
push:
paths:
- "src/Api/**"
- ".spectral.yaml"
- "package.json"
pull_request:
paths:
- "src/Api/**"
- ".spectral.yaml"
- "package.json"
jobs:
spectral-lint:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "18"
- name: Install npm deps
run: npm install --ignore-scripts --no-progress
- name: Compose aggregate OpenAPI
run: npm run api:compose
- name: Validate examples coverage
run: npm run api:examples
- name: Compatibility diff (previous commit)
run: |
set -e
if git show HEAD~1:src/Api/StellaOps.Api.OpenApi/stella.yaml > /tmp/stella-prev.yaml 2>/dev/null; then
node scripts/api-compat-diff.mjs /tmp/stella-prev.yaml src/Api/StellaOps.Api.OpenApi/stella.yaml --output text --fail-on-breaking
else
echo "[api:compat] previous stella.yaml not found; skipping"
fi
- name: Compatibility diff (baseline)
run: |
set -e
if [ -f src/Api/StellaOps.Api.OpenApi/baselines/stella-baseline.yaml ]; then
node scripts/api-compat-diff.mjs src/Api/StellaOps.Api.OpenApi/baselines/stella-baseline.yaml src/Api/StellaOps.Api.OpenApi/stella.yaml --output text
else
echo "[api:compat] baseline file missing; skipping"
fi
- name: Generate changelog
run: npm run api:changelog
- name: Spectral lint (fail on warning+)
run: npm run api:lint

View File

@@ -1,128 +0,0 @@
name: Artifact Signing
on:
push:
tags:
- 'v*'
workflow_dispatch:
inputs:
artifact_path:
description: 'Path to artifact to sign'
required: false
default: ''
env:
COSIGN_VERSION: 'v2.2.0'
jobs:
sign-containers:
name: Sign Container Images
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/v')
permissions:
contents: read
id-token: write
packages: write
env:
# Expose the secret as env so the step-level `if: env.COSIGN_PRIVATE_KEY_B64` checks below can see it
COSIGN_PRIVATE_KEY_B64: ${{ secrets.COSIGN_PRIVATE_KEY_B64 }}
steps:
- uses: actions/checkout@v4
- name: Install cosign
uses: sigstore/cosign-installer@v3
with:
cosign-release: ${{ env.COSIGN_VERSION }}
- name: Log in to registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Sign images (keyless)
if: ${{ !env.COSIGN_PRIVATE_KEY_B64 }}
env:
COSIGN_EXPERIMENTAL: "1"
run: |
IMAGES=(
"ghcr.io/${{ github.repository }}/concelier"
"ghcr.io/${{ github.repository }}/scanner"
"ghcr.io/${{ github.repository }}/authority"
)
for img in "${IMAGES[@]}"; do
if docker manifest inspect "${img}:${{ github.ref_name }}" > /dev/null 2>&1; then
echo "Signing ${img}:${{ github.ref_name }}..."
cosign sign --yes "${img}:${{ github.ref_name }}"
fi
done
- name: Sign images (with key)
if: ${{ env.COSIGN_PRIVATE_KEY_B64 }}
env:
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY_B64 }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
run: |
echo "$COSIGN_PRIVATE_KEY" | base64 -d > /tmp/cosign.key
IMAGES=(
"ghcr.io/${{ github.repository }}/concelier"
"ghcr.io/${{ github.repository }}/scanner"
"ghcr.io/${{ github.repository }}/authority"
)
for img in "${IMAGES[@]}"; do
if docker manifest inspect "${img}:${{ github.ref_name }}" > /dev/null 2>&1; then
echo "Signing ${img}:${{ github.ref_name }}..."
cosign sign --key /tmp/cosign.key "${img}:${{ github.ref_name }}"
fi
done
rm -f /tmp/cosign.key
sign-sbom:
name: Sign SBOM Artifacts
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/v')
steps:
- uses: actions/checkout@v4
- name: Install cosign
uses: sigstore/cosign-installer@v3
with:
cosign-release: ${{ env.COSIGN_VERSION }}
- name: Generate and sign SBOM
run: |
# Generate SBOM using syft
if command -v syft &> /dev/null; then
syft . -o cyclonedx-json > sbom.cdx.json
cosign sign-blob --yes sbom.cdx.json --output-signature sbom.cdx.json.sig
else
echo "syft not installed, skipping SBOM generation"
fi
- name: Upload signed artifacts
uses: actions/upload-artifact@v4
with:
name: signed-sbom
path: |
sbom.cdx.json
sbom.cdx.json.sig
if-no-files-found: ignore
verify-signatures:
name: Verify Existing Signatures
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install cosign
uses: sigstore/cosign-installer@v3
with:
cosign-release: ${{ env.COSIGN_VERSION }}
- name: Verify DSSE envelopes
run: |
find . -name "*.dsse" -o -name "*.dsse.json" | while read f; do
echo "Checking $f..."
# Basic JSON validation
if ! jq empty "$f" 2>/dev/null; then
echo "Warning: Invalid JSON in $f"
fi
done
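
For context, the envelopes that step sanity-checks follow the DSSE shape below (values abbreviated; the payload is base64 of an in-toto statement):

cat > example.dsse.json << 'EOF'
{
  "payloadType": "application/vnd.in-toto+json",
  "payload": "eyJfdHlwZSI6...base64-abbreviated...",
  "signatures": [ { "keyid": "example-key", "sig": "MEUCIQ...abbreviated..." } ]
}
EOF
jq empty example.dsse.json && echo "well-formed JSON"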

View File

@@ -1,29 +0,0 @@
name: attestation-bundle
on:
workflow_dispatch:
inputs:
attest_dir:
description: "Directory containing attestation artefacts"
required: true
default: "out/attest"
jobs:
bundle:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Build bundle
run: |
chmod +x scripts/attest/build-attestation-bundle.sh
scripts/attest/build-attestation-bundle.sh "${{ github.event.inputs.attest_dir }}"
- name: Upload bundle
uses: actions/upload-artifact@v4
with:
name: attestation-bundle
path: out/attest-bundles/**

View File

@@ -58,9 +58,6 @@ jobs:
         with:
           fetch-depth: 0
-      - name: Task Pack offline bundle fixtures
-        run: python3 .gitea/scripts/test/run-fixtures-check.sh
       - name: Resolve Authority configuration
         id: config
         run: |

View File

@@ -1,30 +0,0 @@
name: bench-determinism
on:
workflow_dispatch: {}
jobs:
bench-determinism:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Run determinism bench
env:
BENCH_DETERMINISM_THRESHOLD: "0.95"
run: |
chmod +x scripts/bench/determinism-run.sh
scripts/bench/determinism-run.sh
- name: Upload determinism artifacts
uses: actions/upload-artifact@v4
with:
name: bench-determinism
path: out/bench-determinism/**

View File

@@ -1,173 +0,0 @@
name: Benchmark vs Competitors
on:
schedule:
# Run weekly on Sunday at 00:00 UTC
- cron: '0 0 * * 0'
workflow_dispatch:
inputs:
competitors:
description: 'Comma-separated list of competitors to benchmark against'
required: false
default: 'trivy,grype'
corpus_size:
description: 'Number of images from corpus to test'
required: false
default: '50'
push:
paths:
- 'src/Scanner/__Libraries/StellaOps.Scanner.Benchmark/**'
- 'src/__Tests/__Benchmarks/competitors/**'
env:
DOTNET_VERSION: '10.0.x'
TRIVY_VERSION: '0.50.1'
GRYPE_VERSION: '0.74.0'
SYFT_VERSION: '0.100.0'
jobs:
benchmark:
name: Run Competitive Benchmark
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
- name: Install Trivy
run: |
curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin v${{ env.TRIVY_VERSION }}
trivy --version
- name: Install Grype
run: |
curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin v${{ env.GRYPE_VERSION }}
grype version
- name: Install Syft
run: |
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin v${{ env.SYFT_VERSION }}
syft version
- name: Build benchmark library
run: |
dotnet build src/Scanner/__Libraries/StellaOps.Scanner.Benchmark/StellaOps.Scanner.Benchmark.csproj -c Release
- name: Load corpus manifest
id: corpus
run: |
echo "corpus_path=src/__Tests/__Benchmarks/competitors/corpus/corpus-manifest.json" >> $GITHUB_OUTPUT
- name: Run Stella Ops scanner
run: |
echo "Running Stella Ops scanner on corpus..."
# TODO: Implement actual scan command
# stella scan --corpus ${{ steps.corpus.outputs.corpus_path }} --output src/__Tests/__Benchmarks/results/stellaops.json
- name: Run Trivy on corpus
run: |
echo "Running Trivy on corpus images..."
# Process each image in corpus
mkdir -p src/__Tests/__Benchmarks/results/trivy
- name: Run Grype on corpus
run: |
echo "Running Grype on corpus images..."
mkdir -p src/__Tests/__Benchmarks/results/grype
- name: Calculate metrics
run: |
echo "Calculating precision/recall/F1 metrics..."
# dotnet run --project src/Scanner/__Libraries/StellaOps.Scanner.Benchmark \
# --calculate-metrics \
# --ground-truth ${{ steps.corpus.outputs.corpus_path }} \
# --results src/__Tests/__Benchmarks/results/ \
# --output src/__Tests/__Benchmarks/results/metrics.json
- name: Generate comparison report
run: |
echo "Generating comparison report..."
mkdir -p src/__Tests/__Benchmarks/results
cat > src/__Tests/__Benchmarks/results/summary.json << EOF  # unquoted EOF so the \$(date ...) substitution expands
{
"timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
"competitors": ["trivy", "grype", "syft"],
"status": "pending_implementation"
}
EOF
- name: Upload benchmark results
uses: actions/upload-artifact@v4
with:
name: benchmark-results-${{ github.run_id }}
path: src/__Tests/__Benchmarks/results/
retention-days: 90
- name: Update claims index
if: github.ref == 'refs/heads/main'
run: |
echo "Updating claims index with new evidence..."
# dotnet run --project src/Scanner/__Libraries/StellaOps.Scanner.Benchmark \
# --update-claims \
# --metrics src/__Tests/__Benchmarks/results/metrics.json \
# --output docs/claims-index.md
- name: Comment on PR
if: github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const metrics = fs.existsSync('src/__Tests/__Benchmarks/results/metrics.json')
? JSON.parse(fs.readFileSync('src/__Tests/__Benchmarks/results/metrics.json', 'utf8'))
: { status: 'pending' };
const body = `## Benchmark Results
| Tool | Precision | Recall | F1 Score |
|------|-----------|--------|----------|
| Stella Ops | ${metrics.stellaops?.precision || 'N/A'} | ${metrics.stellaops?.recall || 'N/A'} | ${metrics.stellaops?.f1 || 'N/A'} |
| Trivy | ${metrics.trivy?.precision || 'N/A'} | ${metrics.trivy?.recall || 'N/A'} | ${metrics.trivy?.f1 || 'N/A'} |
| Grype | ${metrics.grype?.precision || 'N/A'} | ${metrics.grype?.recall || 'N/A'} | ${metrics.grype?.f1 || 'N/A'} |
[Full report](${process.env.GITHUB_SERVER_URL}/${process.env.GITHUB_REPOSITORY}/actions/runs/${process.env.GITHUB_RUN_ID})
`;
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: body
});
verify-claims:
name: Verify Claims
runs-on: ubuntu-latest
needs: benchmark
if: github.ref == 'refs/heads/main'
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Download benchmark results
uses: actions/download-artifact@v4
with:
name: benchmark-results-${{ github.run_id }}
path: src/__Tests/__Benchmarks/results/
- name: Verify all claims
run: |
echo "Verifying all claims against new evidence..."
# stella benchmark verify --all
- name: Report claim status
run: |
echo "Generating claim verification report..."
# Output claim status summary
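The metric steps above are still stubs, but the math they need is simple set arithmetic once each scanner's report is reduced to comparable finding keys. A minimal sketch of that calculation — the key format and sample values are illustrative assumptions, not the benchmark library's actual schema:

```python
import json

def score(ground_truth: set, reported: set) -> dict:
    tp = len(ground_truth & reported)  # true positives: reported and in the corpus manifest
    fp = len(reported - ground_truth)  # false positives: reported but not in the manifest
    fn = len(ground_truth - reported)  # false negatives: in the manifest but missed
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return {"precision": round(precision, 4), "recall": round(recall, 4), "f1": round(f1, 4)}

# Hypothetical "package@version:CVE" keys, for illustration only.
truth = {"libssl1.1@1.1.1n:CVE-2022-0778", "zlib@1.2.11:CVE-2018-25032"}
reported = {"libssl1.1@1.1.1n:CVE-2022-0778"}
print(json.dumps(score(truth, reported)))  # precision 1.0, recall 0.5, f1 0.6667
```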
View File
@@ -1,16 +1,5 @@
 # .gitea/workflows/build-test-deploy.yml
-# Build, Validation, and Deployment workflow for git.stella-ops.org
-#
-# WORKFLOW INTEGRATION STRATEGY (Sprint 20251226_003_CICD):
-# =========================================================
-# This workflow handles: Build, Validation, Quality Gates, and Deployment
-# Test execution is handled by: test-matrix.yml (runs in parallel on PRs)
-#
-# For PR gating:
-# - test-matrix.yml gates on: Unit, Architecture, Contract, Integration, Security, Golden tests
-# - build-test-deploy.yml gates on: Build validation, quality gates, security scans
-#
-# Both workflows run on PRs and should be required for merge via branch protection.
+# Unified CI/CD workflow for git.stella-ops.org (Feedser monorepo)
 name: Build Test Deploy
@@ -32,8 +21,6 @@ on:
 - 'docs/**'
 - 'scripts/**'
 - '.gitea/workflows/**'
-schedule:
-- cron: '0 5 * * *'
 workflow_dispatch:
 inputs:
 force_deploy:
@@ -41,14 +28,9 @@ on:
 required: false
 default: 'false'
 type: boolean
-excititor_batch:
-description: 'Run Excititor batch-ingest validation suite'
-required: false
-default: 'false'
-type: boolean
 env:
-DOTNET_VERSION: '10.0.100'
+DOTNET_VERSION: '10.0.100-rc.1.25451.107'
 BUILD_CONFIGURATION: Release
 CI_CACHE_ROOT: /data/.cache/stella-ops/feedser
 RUNNER_TOOL_CACHE: /toolcache
@@ -66,20 +48,8 @@ jobs:
 tar -xzf /tmp/helm.tgz -C /tmp
 sudo install -m 0755 /tmp/linux-amd64/helm /usr/local/bin/helm
-- name: Validate Helm chart rendering
-run: |
-set -euo pipefail
-CHART_PATH="devops/helm/stellaops"
-helm lint "$CHART_PATH"
-for values in values.yaml values-dev.yaml values-stage.yaml values-prod.yaml values-airgap.yaml values-mirror.yaml; do
-release="stellaops-${values%.*}"
-echo "::group::Helm template ${release} (${values})"
-helm template "$release" "$CHART_PATH" -f "$CHART_PATH/$values" >/dev/null
-echo "::endgroup::"
-done
 - name: Validate deployment profiles
-run: ./devops/tools/validate-profiles.sh
+run: ./deploy/tools/validate-profiles.sh
 build-test:
 runs-on: ubuntu-22.04
@@ -88,65 +58,20 @@ jobs:
 PUBLISH_DIR: ${{ github.workspace }}/artifacts/publish/webservice
 AUTHORITY_PUBLISH_DIR: ${{ github.workspace }}/artifacts/publish/authority
 TEST_RESULTS_DIR: ${{ github.workspace }}/artifacts/test-results
-STELLAOPS_TEST_MONGO_URI: ${{ secrets.STELLAOPS_TEST_MONGO_URI || vars.STELLAOPS_TEST_MONGO_URI }}
 steps:
 - name: Checkout repository
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
-- name: Export OpenSSL 1.1 shim for Mongo2Go
-run: .gitea/scripts/util/enable-openssl11-shim.sh
-- name: Verify binary layout
-run: .gitea/scripts/validate/verify-binaries.sh
-- name: Ensure binary manifests are up to date
-run: |
-python3 scripts/update-binary-manifests.py
-git diff --exit-code .nuget/manifest.json vendor/manifest.json offline/feeds/manifest.json
-- name: Ensure PostgreSQL test URI configured
-run: |
-if [ -z "${STELLAOPS_TEST_POSTGRES_CONNECTION:-}" ]; then
-echo "::error::STELLAOPS_TEST_POSTGRES_CONNECTION must be provided via repository secrets or variables for integration tests."
-exit 1
-fi
 - name: Verify policy scope configuration
 run: python3 scripts/verify-policy-scopes.py
 - name: Validate NuGet restore source ordering
-run: python3 devops/validate_restore_sources.py
+run: python3 ops/devops/validate_restore_sources.py
 - name: Validate telemetry storage configuration
-run: python3 devops/telemetry/validate_storage_stack.py
-- name: Task Pack offline bundle fixtures
-run: |
-python3 .gitea/scripts/test/run-fixtures-check.sh
-- name: Telemetry tenant isolation smoke
-env:
-COMPOSE_DIR: ${GITHUB_WORKSPACE}/devops/compose
-run: |
-set -euo pipefail
-./devops/telemetry/generate_dev_tls.sh
-COMPOSE_DIR="${COMPOSE_DIR:-${GITHUB_WORKSPACE}/devops/compose}"
-cleanup() {
-set +e
-(cd "$COMPOSE_DIR" && docker compose -f docker-compose.telemetry.yaml down -v --remove-orphans >/dev/null 2>&1)
-(cd "$COMPOSE_DIR" && docker compose -f docker-compose.telemetry-storage.yaml down -v --remove-orphans >/dev/null 2>&1)
-}
-trap cleanup EXIT
-(cd "$COMPOSE_DIR" && docker compose -f docker-compose.telemetry-storage.yaml up -d)
-(cd "$COMPOSE_DIR" && docker compose -f docker-compose.telemetry.yaml up -d)
-sleep 5
-python3 devops/telemetry/smoke_otel_collector.py --host localhost
-python3 devops/telemetry/tenant_isolation_smoke.py \
---collector https://localhost:4318/v1 \
---tempo https://localhost:3200 \
---loki https://localhost:3100
+run: python3 ops/devops/telemetry/validate_storage_stack.py
 - name: Setup .NET ${{ env.DOTNET_VERSION }}
 uses: actions/setup-dotnet@v4
@@ -154,32 +79,6 @@ jobs:
 dotnet-version: ${{ env.DOTNET_VERSION }}
 include-prerelease: true
-- name: Build CLI multi-runtime binaries
-run: |
-set -euo pipefail
-export DOTNET_SKIP_FIRST_TIME_EXPERIENCE=1
-RUNTIMES=(linux-x64 linux-arm64 osx-x64 osx-arm64 win-x64)
-rm -rf out/cli-ci
-for runtime in "${RUNTIMES[@]}"; do
-dotnet publish src/Cli/StellaOps.Cli/StellaOps.Cli.csproj \
---configuration $BUILD_CONFIGURATION \
---runtime "$runtime" \
---self-contained true \
-/p:PublishSingleFile=true \
-/p:IncludeNativeLibrariesForSelfExtract=true \
-/p:EnableCompressionInSingleFile=true \
-/p:InvariantGlobalization=true \
---output "out/cli-ci/${runtime}"
-done
-- name: Run CLI unit tests
-run: |
-mkdir -p "$TEST_RESULTS_DIR"
-dotnet test src/Cli/StellaOps.Cli.Tests/StellaOps.Cli.Tests.csproj \
---configuration $BUILD_CONFIGURATION \
---logger "trx;LogFileName=stellaops-cli-tests.trx" \
---results-directory "$TEST_RESULTS_DIR"
 - name: Restore Concelier solution
 run: dotnet restore src/Concelier/StellaOps.Concelier.sln
@@ -195,37 +94,6 @@ jobs:
 --logger "trx;LogFileName=stellaops-concelier-tests.trx" \
 --results-directory "$TEST_RESULTS_DIR"
-- name: Run PostgreSQL storage integration tests (Testcontainers)
-env:
-POSTGRES_TEST_IMAGE: postgres:16-alpine
-run: |
-set -euo pipefail
-mkdir -p "$TEST_RESULTS_DIR"
-PROJECTS=(
-src/__Libraries/__Tests/StellaOps.Infrastructure.Postgres.Tests/StellaOps.Infrastructure.Postgres.Tests.csproj
-src/Authority/__Tests/StellaOps.Authority.Storage.Postgres.Tests/StellaOps.Authority.Storage.Postgres.Tests.csproj
-src/Scheduler/__Tests/StellaOps.Scheduler.Storage.Postgres.Tests/StellaOps.Scheduler.Storage.Postgres.Tests.csproj
-src/Concelier/__Tests/StellaOps.Concelier.Storage.Postgres.Tests/StellaOps.Concelier.Storage.Postgres.Tests.csproj
-src/Excititor/__Tests/StellaOps.Excititor.Storage.Postgres.Tests/StellaOps.Excititor.Storage.Postgres.Tests.csproj
-src/Notify/__Tests/StellaOps.Notify.Storage.Postgres.Tests/StellaOps.Notify.Storage.Postgres.Tests.csproj
-src/Policy/__Tests/StellaOps.Policy.Storage.Postgres.Tests/StellaOps.Policy.Storage.Postgres.Tests.csproj
-)
-for project in "${PROJECTS[@]}"; do
-name="$(basename "${project%.*}")"
-dotnet test "$project" \
---configuration $BUILD_CONFIGURATION \
---logger "trx;LogFileName=${name}.trx" \
---results-directory "$TEST_RESULTS_DIR"
-done
-- name: Run TimelineIndexer tests (EB1 evidence linkage gate)
-run: |
-mkdir -p "$TEST_RESULTS_DIR"
-dotnet test src/TimelineIndexer/StellaOps.TimelineIndexer/StellaOps.TimelineIndexer.sln \
---configuration $BUILD_CONFIGURATION \
---logger "trx;LogFileName=timelineindexer-tests.trx" \
---results-directory "$TEST_RESULTS_DIR"
 - name: Lint policy DSL samples
 run: dotnet run --project tools/PolicyDslValidator/PolicyDslValidator.csproj -- --strict docs/examples/policies/*.yaml
@@ -331,7 +199,7 @@ PY
 curl -sSf -X POST -H 'Content-type: application/json' --data "$payload" "$SLACK_WEBHOOK"
 - name: Run release tooling tests
-run: python devops/release/test_verify_release.py
+run: python ops/devops/release/test_verify_release.py
 - name: Build scanner language analyzer projects
 run: |
@@ -356,56 +224,6 @@ PY
 --logger "trx;LogFileName=stellaops-scanner-lang-tests.trx" \
 --results-directory "$TEST_RESULTS_DIR"
-- name: Build and test Router components
-run: |
-set -euo pipefail
-ROUTER_PROJECTS=(
-src/__Libraries/StellaOps.Router.Common/StellaOps.Router.Common.csproj
-src/__Libraries/StellaOps.Router.Config/StellaOps.Router.Config.csproj
-src/__Libraries/StellaOps.Router.Transport.InMemory/StellaOps.Router.Transport.InMemory.csproj
-src/__Libraries/StellaOps.Router.Transport.Tcp/StellaOps.Router.Transport.Tcp.csproj
-src/__Libraries/StellaOps.Router.Transport.Tls/StellaOps.Router.Transport.Tls.csproj
-src/__Libraries/StellaOps.Router.Transport.Udp/StellaOps.Router.Transport.Udp.csproj
-src/__Libraries/StellaOps.Router.Transport.RabbitMq/StellaOps.Router.Transport.RabbitMq.csproj
-src/__Libraries/StellaOps.Microservice/StellaOps.Microservice.csproj
-src/__Libraries/StellaOps.Microservice.SourceGen/StellaOps.Microservice.SourceGen.csproj
-)
-for project in "${ROUTER_PROJECTS[@]}"; do
-echo "::group::Build $project"
-dotnet build "$project" --configuration $BUILD_CONFIGURATION --no-restore -warnaserror
-echo "::endgroup::"
-done
-- name: Run Router and Microservice tests
-run: |
-mkdir -p "$TEST_RESULTS_DIR"
-ROUTER_TEST_PROJECTS=(
-# Core Router libraries
-src/__Libraries/__Tests/StellaOps.Router.Common.Tests/StellaOps.Router.Common.Tests.csproj
-src/__Libraries/__Tests/StellaOps.Router.Config.Tests/StellaOps.Router.Config.Tests.csproj
-# Transport layers
-src/__Libraries/__Tests/StellaOps.Router.Transport.InMemory.Tests/StellaOps.Router.Transport.InMemory.Tests.csproj
-src/__Libraries/__Tests/StellaOps.Router.Transport.Tcp.Tests/StellaOps.Router.Transport.Tcp.Tests.csproj
-src/__Libraries/__Tests/StellaOps.Router.Transport.Tls.Tests/StellaOps.Router.Transport.Tls.Tests.csproj
-src/__Libraries/__Tests/StellaOps.Router.Transport.Udp.Tests/StellaOps.Router.Transport.Udp.Tests.csproj
-# Microservice SDK
-src/__Libraries/__Tests/StellaOps.Microservice.Tests/StellaOps.Microservice.Tests.csproj
-src/__Libraries/__Tests/StellaOps.Microservice.SourceGen.Tests/StellaOps.Microservice.SourceGen.Tests.csproj
-# Integration tests
-src/__Libraries/__Tests/StellaOps.Router.Integration.Tests/StellaOps.Router.Integration.Tests.csproj
-# Gateway tests
-src/Gateway/__Tests/StellaOps.Gateway.WebService.Tests/StellaOps.Gateway.WebService.Tests.csproj
-)
-for project in "${ROUTER_TEST_PROJECTS[@]}"; do
-name="$(basename "${project%.*}")"
-echo "::group::Test $name"
-dotnet test "$project" \
---configuration $BUILD_CONFIGURATION \
---logger "trx;LogFileName=${name}.trx" \
---results-directory "$TEST_RESULTS_DIR"
-echo "::endgroup::"
-done
 - name: Run scanner analyzer performance benchmark
 env:
 PERF_OUTPUT_DIR: ${{ github.workspace }}/artifacts/perf/scanner-analyzers
@@ -568,15 +386,6 @@ PY
 if-no-files-found: error
 retention-days: 7
-- name: Run console endpoint tests
-run: |
-mkdir -p "$TEST_RESULTS_DIR"
-dotnet test src/Authority/StellaOps.Authority/StellaOps.Authority.Tests/StellaOps.Authority.Tests.csproj \
---configuration $BUILD_CONFIGURATION \
---logger "trx;LogFileName=console-endpoints.trx" \
---results-directory "$TEST_RESULTS_DIR" \
---filter ConsoleEndpointsTests
 - name: Upload test results
 if: always()
 uses: actions/upload-artifact@v4
@@ -586,247 +395,6 @@ PY
 if-no-files-found: ignore
 retention-days: 7
-# ============================================================================
-# Quality Gates Foundation (Sprint 0350)
-# ============================================================================
-quality-gates:
-runs-on: ubuntu-22.04
-needs: build-test
-permissions:
-contents: read
-steps:
-- name: Checkout repository
-uses: actions/checkout@v4
-- name: Reachability quality gate
-id: reachability
-run: |
-set -euo pipefail
-echo "::group::Computing reachability metrics"
-if [ -f .gitea/scripts/metrics/compute-reachability-metrics.sh ]; then
-chmod +x .gitea/scripts/metrics/compute-reachability-metrics.sh
-METRICS=$(./.gitea/scripts/metrics/compute-reachability-metrics.sh --dry-run 2>/dev/null || echo '{}')
-echo "metrics=$METRICS" >> $GITHUB_OUTPUT
-echo "Reachability metrics: $METRICS"
-else
-echo "Reachability script not found, skipping"
-fi
-echo "::endgroup::"
-- name: TTFS regression gate
-id: ttfs
-run: |
-set -euo pipefail
-echo "::group::Computing TTFS metrics"
-if [ -f .gitea/scripts/metrics/compute-ttfs-metrics.sh ]; then
-chmod +x .gitea/scripts/metrics/compute-ttfs-metrics.sh
-METRICS=$(./.gitea/scripts/metrics/compute-ttfs-metrics.sh --dry-run 2>/dev/null || echo '{}')
-echo "metrics=$METRICS" >> $GITHUB_OUTPUT
-echo "TTFS metrics: $METRICS"
-else
-echo "TTFS script not found, skipping"
-fi
-echo "::endgroup::"
-- name: Performance SLO gate
-id: slo
-run: |
-set -euo pipefail
-echo "::group::Enforcing performance SLOs"
-if [ -f .gitea/scripts/metrics/enforce-performance-slos.sh ]; then
-chmod +x .gitea/scripts/metrics/enforce-performance-slos.sh
-./.gitea/scripts/metrics/enforce-performance-slos.sh --warn-only || true
-else
-echo "Performance SLO script not found, skipping"
-fi
-echo "::endgroup::"
-- name: RLS policy validation
-id: rls
-run: |
-set -euo pipefail
-echo "::group::Validating RLS policies"
-if [ -f devops/database/postgres/validation/001_validate_rls.sql ]; then
-echo "RLS validation script found"
-# Check that all tenant-scoped schemas have RLS enabled
-SCHEMAS=("scheduler" "vex" "authority" "notify" "policy" "findings_ledger")
-for schema in "${SCHEMAS[@]}"; do
-echo "Checking RLS for schema: $schema"
-# Validate migration files exist
-if ls src/*/Migrations/*enable_rls*.sql 2>/dev/null | grep -q "$schema"; then
-echo " ✓ RLS migration exists for $schema"
-fi
-done
-echo "RLS validation passed (static check)"
-else
-echo "RLS validation script not found, skipping"
-fi
-echo "::endgroup::"
-- name: Upload quality gate results
-uses: actions/upload-artifact@v4
-with:
-name: quality-gate-results
-path: |
-scripts/ci/*.json
-scripts/ci/*.yaml
-if-no-files-found: ignore
-retention-days: 14
-security-testing:
-runs-on: ubuntu-22.04
-needs: build-test
-if: github.event_name == 'pull_request' || github.event_name == 'schedule'
-permissions:
-contents: read
-env:
-DOTNET_VERSION: '10.0.100'
-steps:
-- name: Checkout repository
-uses: actions/checkout@v4
-with:
-fetch-depth: 0
-- name: Setup .NET
-uses: actions/setup-dotnet@v4
-with:
-dotnet-version: ${{ env.DOTNET_VERSION }}
-- name: Restore dependencies
-run: dotnet restore src/__Tests/security/StellaOps.Security.Tests/StellaOps.Security.Tests.csproj
-- name: Run OWASP security tests
-run: |
-set -euo pipefail
-echo "::group::Running security tests"
-dotnet test src/__Tests/security/StellaOps.Security.Tests/StellaOps.Security.Tests.csproj \
---no-restore \
---logger "trx;LogFileName=security-tests.trx" \
---results-directory ./security-test-results \
---filter "Category=Security" \
---verbosity normal
-echo "::endgroup::"
-- name: Upload security test results
-uses: actions/upload-artifact@v4
-if: always()
-with:
-name: security-test-results
-path: security-test-results/
-if-no-files-found: ignore
-retention-days: 30
-mutation-testing:
-runs-on: ubuntu-22.04
-needs: build-test
-if: github.event_name == 'schedule' || (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'mutation-test'))
-permissions:
-contents: read
-env:
-DOTNET_VERSION: '10.0.100'
-steps:
-- name: Checkout repository
-uses: actions/checkout@v4
-with:
-fetch-depth: 0
-- name: Setup .NET
-uses: actions/setup-dotnet@v4
-with:
-dotnet-version: ${{ env.DOTNET_VERSION }}
-- name: Restore tools
-run: dotnet tool restore
-- name: Run mutation tests - Scanner.Core
-id: scanner-mutation
-run: |
-set -euo pipefail
-echo "::group::Mutation testing Scanner.Core"
-cd src/Scanner/__Libraries/StellaOps.Scanner.Core
-dotnet stryker --reporter json --reporter html --output ../../../mutation-results/scanner-core || echo "MUTATION_FAILED=true" >> $GITHUB_ENV
-echo "::endgroup::"
-continue-on-error: true
-- name: Run mutation tests - Policy.Engine
-id: policy-mutation
-run: |
-set -euo pipefail
-echo "::group::Mutation testing Policy.Engine"
-cd src/Policy/__Libraries/StellaOps.Policy
-dotnet stryker --reporter json --reporter html --output ../../../mutation-results/policy-engine || echo "MUTATION_FAILED=true" >> $GITHUB_ENV
-echo "::endgroup::"
-continue-on-error: true
-- name: Run mutation tests - Authority.Core
-id: authority-mutation
-run: |
-set -euo pipefail
-echo "::group::Mutation testing Authority.Core"
-cd src/Authority/StellaOps.Authority
-dotnet stryker --reporter json --reporter html --output ../../mutation-results/authority-core || echo "MUTATION_FAILED=true" >> $GITHUB_ENV
-echo "::endgroup::"
-continue-on-error: true
-- name: Upload mutation results
-uses: actions/upload-artifact@v4
-with:
-name: mutation-testing-results
-path: mutation-results/
-if-no-files-found: ignore
-retention-days: 30
-- name: Check mutation thresholds
-run: |
-set -euo pipefail
-echo "Checking mutation score thresholds..."
-# Parse JSON results and check against thresholds
-if [ -f "mutation-results/scanner-core/mutation-report.json" ]; then
-SCORE=$(jq '.mutationScore // 0' mutation-results/scanner-core/mutation-report.json)
-echo "Scanner.Core mutation score: $SCORE%"
-if (( $(echo "$SCORE < 65" | bc -l) )); then
-echo "::error::Scanner.Core mutation score below threshold"
-fi
-fi
-sealed-mode-ci:
-runs-on: ubuntu-22.04
-needs: build-test
-permissions:
-contents: read
-packages: read
-env:
-COMPOSE_PROJECT_NAME: sealedmode
-steps:
-- name: Checkout repository
-uses: actions/checkout@v4
-with:
-fetch-depth: 0
-- name: Login to registry
-if: ${{ secrets.REGISTRY_USERNAME != '' && secrets.REGISTRY_PASSWORD != '' }}
-uses: docker/login-action@v3
-with:
-registry: registry.stella-ops.org
-username: ${{ secrets.REGISTRY_USERNAME }}
-password: ${{ secrets.REGISTRY_PASSWORD }}
-- name: Run sealed-mode CI harness
-working-directory: devops/sealed-mode-ci
-env:
-COMPOSE_PROJECT_NAME: sealedmode
-run: |
-set -euo pipefail
-./run-sealed-ci.sh
-- name: Upload sealed-mode CI artifacts
-uses: actions/upload-artifact@v4
-with:
-name: sealed-mode-ci
-path: devops/sealed-mode-ci/artifacts/sealed-mode-ci
-if-no-files-found: error
-retention-days: 14
 authority-container:
 runs-on: ubuntu-22.04
 needs: build-test
@@ -840,41 +408,6 @@ PY
 - name: Build Authority container image
 run: docker build -f ops/authority/Dockerfile -t stellaops-authority:ci .
-excititor-batch-validation:
-needs: build-test
-if: github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && github.event.inputs.excititor_batch == 'true')
-runs-on: ubuntu-22.04
-env:
-BATCH_RESULTS_DIR: ${{ github.workspace }}/artifacts/test-results/excititor-batch
-steps:
-- name: Checkout repository
-uses: actions/checkout@v4
-- name: Setup .NET
-uses: actions/setup-dotnet@v4
-with:
-dotnet-version: ${{ env.DOTNET_VERSION }}
-include-prerelease: true
-- name: Run Excititor batch ingest validation suite
-env:
-DOTNET_SKIP_FIRST_TIME_EXPERIENCE: 1
-run: |
-set -euo pipefail
-mkdir -p "$BATCH_RESULTS_DIR"
-dotnet test src/Excititor/__Tests/StellaOps.Excititor.WebService.Tests/StellaOps.Excititor.WebService.Tests.csproj \
---configuration $BUILD_CONFIGURATION \
---filter "Category=BatchIngestValidation" \
---logger "trx;LogFileName=excititor-batch.trx" \
---results-directory "$BATCH_RESULTS_DIR"
-- name: Upload Excititor batch ingest results
-if: always()
-uses: actions/upload-artifact@v4
-with:
-name: excititor-batch-ingest-results
-path: ${{ env.BATCH_RESULTS_DIR }}
 docs:
 runs-on: ubuntu-22.04
 env:
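Among the steps deleted in the hunk above is the Stryker threshold gate, which only inspected Scanner.Core. For reference, the same gate generalized to all three mutation targets, sketched in Python; the 65% floor and the `.mutationScore` field come from the deleted step, while the per-module floors are illustrative assumptions:

```python
import json
import pathlib
import sys

# Floors are assumptions; the deleted step enforced 65% for scanner-core only.
THRESHOLDS = {"scanner-core": 65.0, "policy-engine": 65.0, "authority-core": 65.0}

failures = []
for module, floor in THRESHOLDS.items():
    report = pathlib.Path(f"mutation-results/{module}/mutation-report.json")
    if not report.exists():
        continue  # module was skipped, or stryker already flagged MUTATION_FAILED
    score = json.loads(report.read_text()).get("mutationScore", 0.0)
    print(f"{module}: {score:.1f}% (floor {floor:.0f}%)")
    if score < floor:
        failures.append(module)

if failures:
    sys.exit(f"mutation score below threshold: {', '.join(failures)}")
```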
View File
@@ -1,48 +0,0 @@
name: cli-build
on:
workflow_dispatch:
inputs:
rids:
description: "Comma-separated RIDs (e.g., linux-x64,win-x64,osx-arm64)"
required: false
default: "linux-x64,win-x64,osx-arm64"
config:
description: "Build configuration"
required: false
default: "Release"
sign:
description: "Enable cosign signing (requires COSIGN_KEY)"
required: false
default: "false"
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Install syft (SBOM)
uses: anchore/sbom-action/download-syft@v0
- name: Build CLI artifacts
run: |
chmod +x .gitea/scripts/build/build-cli.sh
RIDS="${{ github.event.inputs.rids }}" CONFIG="${{ github.event.inputs.config }}" SBOM_TOOL=syft SIGN="${{ github.event.inputs.sign }}" COSIGN_KEY="${{ secrets.COSIGN_KEY }}" .gitea/scripts/build/build-cli.sh
- name: List artifacts
run: find out/cli -maxdepth 3 -type f -print
- name: Upload CLI artifacts
uses: actions/upload-artifact@v4
with:
name: stella-cli
path: out/cli/**
View File
@@ -1,47 +0,0 @@
name: cli-chaos-parity
on:
workflow_dispatch:
inputs:
chaos:
description: "Run chaos smoke (true/false)"
required: false
default: "true"
parity:
description: "Run parity diff (true/false)"
required: false
default: "true"
jobs:
cli-checks:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Chaos smoke
if: ${{ github.event.inputs.chaos == 'true' }}
run: |
chmod +x scripts/cli/chaos-smoke.sh
scripts/cli/chaos-smoke.sh
- name: Parity diff
if: ${{ github.event.inputs.parity == 'true' }}
run: |
chmod +x scripts/cli/parity-diff.sh
scripts/cli/parity-diff.sh
- name: Upload evidence
uses: actions/upload-artifact@v4
with:
name: cli-chaos-parity
path: |
out/cli-chaos/**
out/cli-goldens/**
View File
@@ -1,47 +0,0 @@
name: Concelier Attestation Tests
on:
push:
paths:
- 'src/Concelier/**'
- '.gitea/workflows/concelier-attestation-tests.yml'
pull_request:
paths:
- 'src/Concelier/**'
- '.gitea/workflows/concelier-attestation-tests.yml'
jobs:
attestation-tests:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Setup .NET 10 preview
uses: actions/setup-dotnet@v4
with:
dotnet-version: '10.0.100'
- name: Restore Concelier solution
run: dotnet restore src/Concelier/StellaOps.Concelier.sln
- name: Build WebService Tests (no analyzers)
run: dotnet build src/Concelier/__Tests/StellaOps.Concelier.WebService.Tests/StellaOps.Concelier.WebService.Tests.csproj -c Release -p:DisableAnalyzers=true
- name: Run WebService attestation test
run: dotnet test src/Concelier/__Tests/StellaOps.Concelier.WebService.Tests/StellaOps.Concelier.WebService.Tests.csproj -c Release --filter InternalAttestationVerify --no-build --logger trx --results-directory TestResults
- name: Build Core Tests (no analyzers)
run: dotnet build src/Concelier/__Tests/StellaOps.Concelier.Core.Tests/StellaOps.Concelier.Core.Tests.csproj -c Release -p:DisableAnalyzers=true
- name: Run Core attestation builder tests
run: dotnet test src/Concelier/__Tests/StellaOps.Concelier.Core.Tests/StellaOps.Concelier.Core.Tests.csproj -c Release --filter EvidenceBundleAttestationBuilderTests --no-build --logger trx --results-directory TestResults
- name: Upload TRX results
uses: actions/upload-artifact@v4
with:
name: concelier-attestation-tests-trx
path: '**/TestResults/*.trx'
View File
@@ -1,32 +0,0 @@
name: Concelier STORE-AOC-19-005 Dataset
on:
workflow_dispatch: {}
jobs:
build-dataset:
runs-on: ubuntu-22.04
env:
ARTIFACT_DIR: ${{ github.workspace }}/out/linksets
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install dependencies
run: sudo apt-get update && sudo apt-get install -y zstd
- name: Build dataset tarball
run: |
chmod +x scripts/concelier/build-store-aoc-19-005-dataset.sh scripts/concelier/test-store-aoc-19-005-dataset.sh
scripts/concelier/build-store-aoc-19-005-dataset.sh "${ARTIFACT_DIR}/linksets-stage-backfill.tar.zst"
- name: Validate dataset
run: scripts/concelier/test-store-aoc-19-005-dataset.sh "${ARTIFACT_DIR}/linksets-stage-backfill.tar.zst"
- name: Upload dataset artifacts
uses: actions/upload-artifact@v4
with:
name: concelier-store-aoc-19-005-dataset
path: |
${{ env.ARTIFACT_DIR }}/linksets-stage-backfill.tar.zst
${{ env.ARTIFACT_DIR }}/linksets-stage-backfill.tar.zst.sha256
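The validation script above is expected to check the .sha256 sidecar written next to the tarball. A sketch of that check, assuming the sidecar follows sha256sum conventions (hex digest as the first whitespace-separated token); the helper name is ours, not the script's:

```python
import hashlib
import pathlib

def verify_sha256_sidecar(artifact: pathlib.Path) -> bool:
    sidecar = pathlib.Path(str(artifact) + ".sha256")
    expected = sidecar.read_text().split()[0]  # first token is the hex digest
    actual = hashlib.sha256(artifact.read_bytes()).hexdigest()
    return actual == expected

# verify_sha256_sidecar(pathlib.Path("out/linksets/linksets-stage-backfill.tar.zst"))
```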
View File
@@ -1,247 +0,0 @@
# -----------------------------------------------------------------------------
# connector-fixture-drift.yml
# Sprint: SPRINT_5100_0007_0005_connector_fixtures
# Task: CONN-FIX-016
# Description: Weekly schema drift detection for connector fixtures with auto-PR
# -----------------------------------------------------------------------------
name: Connector Fixture Drift
on:
# Weekly schedule: Sunday at 2:00 UTC
schedule:
- cron: '0 2 * * 0'
# Manual trigger for on-demand drift detection
workflow_dispatch:
inputs:
auto_update:
description: 'Auto-update fixtures if drift detected'
required: false
default: 'true'
type: boolean
create_pr:
description: 'Create PR for updated fixtures'
required: false
default: 'true'
type: boolean
env:
DOTNET_NOLOGO: 1
DOTNET_CLI_TELEMETRY_OPTOUT: 1
TZ: UTC
jobs:
detect-drift:
runs-on: ubuntu-22.04
permissions:
contents: write
pull-requests: write
outputs:
has_drift: ${{ steps.drift.outputs.has_drift }}
drift_count: ${{ steps.drift.outputs.drift_count }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: '10.0.100'
include-prerelease: true
- name: Cache NuGet packages
uses: actions/cache@v4
with:
path: |
.nuget/packages
key: fixture-drift-nuget-${{ runner.os }}-${{ hashFiles('**/*.csproj') }}
- name: Restore solution
run: dotnet restore src/StellaOps.sln --configfile nuget.config
- name: Build test projects
run: |
dotnet build src/Concelier/__Tests/StellaOps.Concelier.Connector.Ghsa.Tests/StellaOps.Concelier.Connector.Ghsa.Tests.csproj -c Release --no-restore
dotnet build src/Excititor/__Tests/StellaOps.Excititor.Connectors.RedHat.CSAF.Tests/StellaOps.Excititor.Connectors.RedHat.CSAF.Tests.csproj -c Release --no-restore
- name: Run Live schema drift tests
id: drift
env:
STELLAOPS_LIVE_TESTS: 'true'
STELLAOPS_UPDATE_FIXTURES: ${{ github.event_name == 'schedule' && 'true' || inputs.auto_update }}
run: |
set +e
# Run Live tests and capture output
dotnet test src/StellaOps.sln \
--filter "Category=Live" \
--no-build \
-c Release \
--logger "console;verbosity=detailed" \
--results-directory out/drift-results \
2>&1 | tee out/drift-output.log
EXIT_CODE=$?
# Check for fixture changes
CHANGED_FILES=$(git diff --name-only -- '**/Fixtures/*.json' '**/Expected/*.json' | wc -l)
if [ "$CHANGED_FILES" -gt 0 ]; then
echo "has_drift=true" >> $GITHUB_OUTPUT
echo "drift_count=$CHANGED_FILES" >> $GITHUB_OUTPUT
echo "::warning::Schema drift detected in $CHANGED_FILES fixture files"
else
echo "has_drift=false" >> $GITHUB_OUTPUT
echo "drift_count=0" >> $GITHUB_OUTPUT
echo "::notice::No schema drift detected"
fi
# Don't fail workflow on test failures (drift is expected)
exit 0
- name: Show changed fixtures
if: steps.drift.outputs.has_drift == 'true'
run: |
echo "## Changed fixture files:"
git diff --name-only -- '**/Fixtures/*.json' '**/Expected/*.json'
echo ""
echo "## Diff summary:"
git diff --stat -- '**/Fixtures/*.json' '**/Expected/*.json'
- name: Upload drift report
uses: actions/upload-artifact@v4
if: always()
with:
name: drift-report-${{ github.run_id }}
path: |
out/drift-output.log
out/drift-results/**
retention-days: 30
create-pr:
needs: detect-drift
if: needs.detect-drift.outputs.has_drift == 'true' && (github.event.inputs.create_pr == 'true' || github.event_name == 'schedule')
runs-on: ubuntu-22.04
permissions:
contents: write
pull-requests: write
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: '10.0.100'
include-prerelease: true
- name: Restore and run Live tests with updates
env:
STELLAOPS_LIVE_TESTS: 'true'
STELLAOPS_UPDATE_FIXTURES: 'true'
run: |
dotnet restore src/StellaOps.sln --configfile nuget.config
dotnet test src/StellaOps.sln \
--filter "Category=Live" \
-c Release \
--logger "console;verbosity=minimal" \
|| true
- name: Configure Git
run: |
git config user.name "StellaOps Bot"
git config user.email "bot@stellaops.local"
- name: Create branch and commit
id: commit
run: |
BRANCH_NAME="fixture-drift/$(date +%Y-%m-%d)"
echo "branch=$BRANCH_NAME" >> $GITHUB_OUTPUT
# Check for changes
if git diff --quiet -- '**/Fixtures/*.json' '**/Expected/*.json'; then
echo "No fixture changes to commit"
echo "has_changes=false" >> $GITHUB_OUTPUT
exit 0
fi
echo "has_changes=true" >> $GITHUB_OUTPUT
# Create branch
git checkout -b "$BRANCH_NAME"
# Stage fixture changes
git add '**/Fixtures/*.json' '**/Expected/*.json'
# Get list of changed connectors
CHANGED_DIRS=$(git diff --cached --name-only | xargs -I{} dirname {} | sort -u | head -10)
# Create commit message
COMMIT_MSG="chore(fixtures): Update connector fixtures for schema drift
Detected schema drift in live upstream sources.
Updated fixture files to match current API responses.
Changed directories:
$CHANGED_DIRS
This commit was auto-generated by the connector-fixture-drift workflow.
🤖 Generated with [StellaOps CI](https://stellaops.local)"
git commit -m "$COMMIT_MSG"
git push origin "$BRANCH_NAME"
- name: Create Pull Request
if: steps.commit.outputs.has_changes == 'true'
uses: actions/github-script@v7
with:
script: |
const branch = '${{ steps.commit.outputs.branch }}';
const driftCount = '${{ needs.detect-drift.outputs.drift_count }}';
const { data: pr } = await github.rest.pulls.create({
owner: context.repo.owner,
repo: context.repo.repo,
title: `chore(fixtures): Update ${driftCount} connector fixtures for schema drift`,
head: branch,
base: 'main',
body: `## Summary
Automated fixture update due to schema drift detected in live upstream sources.
- **Fixtures Updated**: ${driftCount}
- **Detection Date**: ${new Date().toISOString().split('T')[0]}
- **Workflow Run**: [#${{ github.run_id }}](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})
## Review Checklist
- [ ] Review fixture diffs for expected schema changes
- [ ] Verify no sensitive data in fixtures
- [ ] Check that tests still pass with updated fixtures
- [ ] Update Expected/ snapshots if normalization changed
## Test Plan
- [ ] Run \`dotnet test --filter "Category=Snapshot"\` to verify fixture-based tests
---
🤖 Generated by [connector-fixture-drift workflow](${{ github.server_url }}/${{ github.repository }}/actions/workflows/connector-fixture-drift.yml)
`
});
console.log(`Created PR #${pr.number}: ${pr.html_url}`);
// Add labels
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: pr.number,
labels: ['automated', 'fixtures', 'schema-drift']
});
View File
@@ -1,64 +0,0 @@
name: console-ci
on:
workflow_dispatch:
pull_request:
paths:
- 'src/Web/**'
- '.gitea/workflows/console-ci.yml'
- 'devops/console/**'
jobs:
lint-test-build:
runs-on: ubuntu-latest
defaults:
run:
shell: bash
working-directory: src/Web/StellaOps.Web
env:
PLAYWRIGHT_BROWSERS_PATH: ~/.cache/ms-playwright
CI: true
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: '20'
cache: npm
cache-dependency-path: src/Web/StellaOps.Web/package-lock.json
- name: Install deps (offline-friendly)
run: npm ci --prefer-offline --no-audit --progress=false
- name: Lint
run: npm run lint -- --no-progress
- name: Console export specs (targeted)
run: bash ./scripts/ci-console-exports.sh
continue-on-error: true
- name: Unit tests
run: npm run test:ci
env:
CHROME_BIN: chromium
- name: Build
run: npm run build -- --configuration=production --progress=false
- name: Collect artifacts
if: always()
run: |
mkdir -p ../artifacts
cp -r dist ../artifacts/dist || true
cp -r coverage ../artifacts/coverage || true
find . -maxdepth 3 -type f \( -name "*.xml" -o -name "*.trx" -o \( -name "*.json" -path "*test*" \) \) -print0 | xargs -0 -I{} cp --parents {} ../artifacts 2>/dev/null || true
- name: Upload artifacts
if: always()
uses: actions/upload-artifact@v4
with:
name: console-ci-${{ github.run_id }}
path: artifacts
retention-days: 14
View File
@@ -1,32 +0,0 @@
name: console-runner-image
on:
workflow_dispatch:
push:
paths:
- 'devops/console/**'
- '.gitea/workflows/console-runner-image.yml'
jobs:
build-runner-image:
runs-on: ubuntu-latest
permissions:
contents: read
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Build runner image tarball (baked caches)
env:
RUN_ID: ${{ github.run_id }}
run: |
set -euo pipefail
chmod +x devops/console/build-runner-image.sh devops/console/build-runner-image-ci.sh
devops/console/build-runner-image-ci.sh
- name: Upload runner image artifact
uses: actions/upload-artifact@v4
with:
name: console-runner-image-${{ github.run_id }}
path: devops/artifacts/console-runner/
retention-days: 14
View File
@@ -1,89 +0,0 @@
name: containers-multiarch
on:
workflow_dispatch:
inputs:
image:
description: "Image tag (e.g., ghcr.io/stella-ops/example:edge)"
required: true
context:
description: "Build context directory"
required: true
default: "."
platforms:
description: "Platforms (comma-separated)"
required: false
default: "linux/amd64,linux/arm64"
push:
description: "Push to registry"
required: false
default: "false"
jobs:
build-multiarch:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
install: true
- name: Install syft (SBOM)
uses: anchore/sbom-action/download-syft@v0
- name: Login to ghcr (optional)
if: ${{ github.event.inputs.push == 'true' && secrets.GHCR_TOKEN != '' }}
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GHCR_TOKEN }}
- name: Run multi-arch build
env:
COSIGN_EXPERIMENTAL: "1"
run: |
chmod +x .gitea/scripts/build/build-multiarch.sh
extra=""
if [[ "${{ github.event.inputs.push }}" == "true" ]]; then extra="--push"; fi
.gitea/scripts/build/build-multiarch.sh \
"${{ github.event.inputs.image }}" \
"${{ github.event.inputs.context }}" \
--platform "${{ github.event.inputs.platforms }}" \
--sbom syft ${extra}
- name: Build air-gap bundle
run: |
chmod +x .gitea/scripts/build/build-airgap-bundle.sh
.gitea/scripts/build/build-airgap-bundle.sh "${{ github.event.inputs.image }}"
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: buildx-${{ github.run_id }}
path: out/buildx/**
- name: Inspect built image archive
run: |
set -e
ls -lh out/buildx/
find out/buildx -name "image.oci" -print -exec sh -c 'tar -tf "$1" | head' _ {} \;
- name: Upload air-gap bundle
uses: actions/upload-artifact@v4
with:
name: bundle-${{ github.run_id }}
path: out/bundles/**
- name: Inspect remote image (if pushed)
if: ${{ github.event.inputs.push == 'true' }}
run: |
docker buildx imagetools inspect "${{ github.event.inputs.image }}"
View File
@@ -1,206 +0,0 @@
name: cross-platform-determinism
on:
workflow_dispatch: {}
push:
branches: [main]
paths:
- 'src/__Libraries/StellaOps.Canonical.Json/**'
- 'src/__Libraries/StellaOps.Replay.Core/**'
- 'src/__Tests/**Determinism**'
- '.gitea/workflows/cross-platform-determinism.yml'
pull_request:
branches: [main]
paths:
- 'src/__Libraries/StellaOps.Canonical.Json/**'
- 'src/__Libraries/StellaOps.Replay.Core/**'
- 'src/__Tests/**Determinism**'
jobs:
# DET-GAP-11: Windows determinism test runner
determinism-windows:
runs-on: windows-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Restore dependencies
run: dotnet restore src/__Tests/__Libraries/StellaOps.Testing.Determinism.Properties/StellaOps.Testing.Determinism.Properties.csproj
- name: Run determinism property tests
run: |
dotnet test src/__Tests/__Libraries/StellaOps.Testing.Determinism.Properties/StellaOps.Testing.Determinism.Properties.csproj `
--logger "trx;LogFileName=determinism-windows.trx" `
--results-directory ./test-results/windows
- name: Generate hash report
shell: pwsh
run: |
# Generate determinism baseline hashes
$hashReport = @{
platform = "windows"
timestamp = (Get-Date -Format "o")
hashes = @{}
}
# Run hash generation script
dotnet run --project tools/determinism-hash-generator -- `
--output ./test-results/windows/hashes.json
# Upload for comparison
Copy-Item ./test-results/windows/hashes.json ./test-results/windows-hashes.json
- name: Upload Windows results
uses: actions/upload-artifact@v4
with:
name: determinism-windows
path: |
./test-results/windows/
./test-results/windows-hashes.json
# DET-GAP-12: macOS determinism test runner
determinism-macos:
runs-on: macos-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Restore dependencies
run: dotnet restore src/__Tests/__Libraries/StellaOps.Testing.Determinism.Properties/StellaOps.Testing.Determinism.Properties.csproj
- name: Run determinism property tests
run: |
dotnet test src/__Tests/__Libraries/StellaOps.Testing.Determinism.Properties/StellaOps.Testing.Determinism.Properties.csproj \
--logger "trx;LogFileName=determinism-macos.trx" \
--results-directory ./test-results/macos
- name: Generate hash report
run: |
# Generate determinism baseline hashes
dotnet run --project tools/determinism-hash-generator -- \
--output ./test-results/macos/hashes.json
cp ./test-results/macos/hashes.json ./test-results/macos-hashes.json
- name: Upload macOS results
uses: actions/upload-artifact@v4
with:
name: determinism-macos
path: |
./test-results/macos/
./test-results/macos-hashes.json
# Linux runner (baseline)
determinism-linux:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Restore dependencies
run: dotnet restore src/__Tests/__Libraries/StellaOps.Testing.Determinism.Properties/StellaOps.Testing.Determinism.Properties.csproj
- name: Run determinism property tests
run: |
dotnet test src/__Tests/__Libraries/StellaOps.Testing.Determinism.Properties/StellaOps.Testing.Determinism.Properties.csproj \
--logger "trx;LogFileName=determinism-linux.trx" \
--results-directory ./test-results/linux
- name: Generate hash report
run: |
# Generate determinism baseline hashes
dotnet run --project tools/determinism-hash-generator -- \
--output ./test-results/linux/hashes.json
cp ./test-results/linux/hashes.json ./test-results/linux-hashes.json
- name: Upload Linux results
uses: actions/upload-artifact@v4
with:
name: determinism-linux
path: |
./test-results/linux/
./test-results/linux-hashes.json
# DET-GAP-13: Cross-platform hash comparison report
compare-hashes:
runs-on: ubuntu-latest
needs: [determinism-windows, determinism-macos, determinism-linux]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Download all artifacts
uses: actions/download-artifact@v4
with:
path: ./artifacts
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Generate comparison report
run: |
python3 scripts/determinism/compare-platform-hashes.py \
--linux ./artifacts/determinism-linux/linux-hashes.json \
--windows ./artifacts/determinism-windows/windows-hashes.json \
--macos ./artifacts/determinism-macos/macos-hashes.json \
--output ./cross-platform-report.json \
--markdown ./cross-platform-report.md
- name: Check for divergences
run: |
# Fail if any hashes differ across platforms
python3 -c "
import json
import sys
with open('./cross-platform-report.json') as f:
report = json.load(f)
divergences = report.get('divergences', [])
if divergences:
print(f'ERROR: {len(divergences)} hash divergence(s) detected!')
for d in divergences:
print(f' - {d[\"key\"]}: linux={d[\"linux\"]}, windows={d[\"windows\"]}, macos={d[\"macos\"]}')
sys.exit(1)
else:
print('SUCCESS: All hashes match across platforms.')
"
- name: Upload comparison report
uses: actions/upload-artifact@v4
with:
name: cross-platform-comparison
path: |
./cross-platform-report.json
./cross-platform-report.md
- name: Comment on PR (if applicable)
if: github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const report = fs.readFileSync('./cross-platform-report.md', 'utf8');
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: '## Cross-Platform Determinism Report\n\n' + report
});
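compare-platform-hashes.py is referenced but not part of this listing; only its output contract is visible here — the drift check reads report["divergences"] entries carrying key, linux, windows, and macos fields. A sketch consistent with that contract, with everything else assumed:

```python
import json

def compare(linux: dict, windows: dict, macos: dict) -> dict:
    keys = sorted(set(linux) | set(windows) | set(macos))  # deterministic ordering
    divergences = []
    for key in keys:
        row = {"linux": linux.get(key), "windows": windows.get(key), "macos": macos.get(key)}
        if len(set(row.values())) > 1:  # a mismatch, or a hash missing on one platform
            divergences.append({"key": key, **row})
    return {"total": len(keys), "divergences": divergences}

report = compare(
    linux={"sbom/alpine": "ab12"},
    windows={"sbom/alpine": "ab12"},
    macos={"sbom/alpine": "ff00"},
)
print(json.dumps(report, indent=2))  # one divergence: macos disagrees on sbom/alpine
```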
View File
@@ -1,44 +0,0 @@
name: Crypto Compliance Audit
on:
pull_request:
paths:
- 'src/**/*.cs'
- 'etc/crypto-plugins-manifest.json'
- 'scripts/audit-crypto-usage.ps1'
- '.gitea/workflows/crypto-compliance.yml'
push:
branches: [ main ]
paths:
- 'src/**/*.cs'
- 'etc/crypto-plugins-manifest.json'
- 'scripts/audit-crypto-usage.ps1'
- '.gitea/workflows/crypto-compliance.yml'
jobs:
crypto-audit:
runs-on: ubuntu-22.04
env:
DOTNET_NOLOGO: 1
DOTNET_CLI_TELEMETRY_OPTOUT: 1
TZ: UTC
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Run crypto usage audit
shell: pwsh
run: |
Write-Host "Running crypto compliance audit..."
./scripts/audit-crypto-usage.ps1 -RootPath "$PWD" -FailOnViolations $true -Verbose
- name: Upload audit report on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: crypto-compliance-violations
path: |
scripts/audit-crypto-usage.ps1
retention-days: 30
View File
@@ -1,41 +0,0 @@
name: crypto-sim-smoke
on:
workflow_dispatch:
push:
paths:
- "devops/services/crypto/sim-crypto-service/**"
- "devops/services/crypto/sim-crypto-smoke/**"
- "devops/tools/crypto/run-sim-smoke.ps1"
- "docs/security/crypto-simulation-services.md"
- ".gitea/workflows/crypto-sim-smoke.yml"
jobs:
sim-smoke:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.x"
- name: Build sim service and smoke harness
run: |
dotnet build devops/services/crypto/sim-crypto-service/SimCryptoService.csproj -c Release
dotnet build devops/services/crypto/sim-crypto-smoke/SimCryptoSmoke.csproj -c Release
- name: "Run smoke (sim profile: sm)"
env:
ASPNETCORE_URLS: http://localhost:5000
STELLAOPS_CRYPTO_SIM_URL: http://localhost:5000
SIM_PROFILE: sm
run: |
set -euo pipefail
dotnet run --project devops/services/crypto/sim-crypto-service/SimCryptoService.csproj --no-build -c Release &
service_pid=$!
sleep 6
dotnet run --project devops/services/crypto/sim-crypto-smoke/SimCryptoSmoke.csproj --no-build -c Release
kill $service_pid
View File
@@ -1,55 +0,0 @@
name: cryptopro-linux-csp
on:
push:
branches: [main, develop]
paths:
- 'ops/cryptopro/linux-csp-service/**'
- 'opt/cryptopro/downloads/**'
- '.gitea/workflows/cryptopro-linux-csp.yml'
pull_request:
paths:
- 'ops/cryptopro/linux-csp-service/**'
- 'opt/cryptopro/downloads/**'
- '.gitea/workflows/cryptopro-linux-csp.yml'
env:
IMAGE_NAME: cryptopro-linux-csp
DOCKERFILE: ops/cryptopro/linux-csp-service/Dockerfile
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build image (accept EULA explicitly)
run: |
docker build -t $IMAGE_NAME \
--build-arg CRYPTOPRO_ACCEPT_EULA=1 \
-f $DOCKERFILE .
- name: Run container
run: |
docker run -d --rm --name $IMAGE_NAME -p 18080:8080 $IMAGE_NAME
for i in {1..20}; do
if curl -sf http://127.0.0.1:18080/health >/dev/null; then
exit 0
fi
sleep 3
done
echo "Service failed to start" && exit 1
- name: Test endpoints
run: |
curl -sf http://127.0.0.1:18080/health
curl -sf http://127.0.0.1:18080/license || true
curl -sf -X POST http://127.0.0.1:18080/hash \
-H "Content-Type: application/json" \
-d '{"data_b64":"SGVsbG8="}'
- name: Stop container
if: always()
run: docker rm -f $IMAGE_NAME || true
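Restating the endpoint contract exercised by the curl steps as a tiny client; `requests` is used purely for illustration (CI sticks to curl), and SGVsbG8= is just base64 for "Hello". The /hash response shape is not documented in this workflow, so it is printed raw:

```python
import base64
import requests

BASE = "http://127.0.0.1:18080"
print(requests.get(f"{BASE}/health", timeout=5).status_code)  # expect 200 once started
payload = {"data_b64": base64.b64encode(b"Hello").decode()}   # "SGVsbG8="
resp = requests.post(f"{BASE}/hash", json=payload, timeout=5)
print(resp.status_code, resp.text)
```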
View File
@@ -1,40 +0,0 @@
name: cryptopro-optin
on:
workflow_dispatch:
inputs:
configuration:
description: Build configuration
default: Release
run_tests:
description: Run CryptoPro signer tests (requires CSP installed on runner)
default: true
jobs:
cryptopro:
runs-on: windows-latest
env:
STELLAOPS_CRYPTO_PRO_ENABLED: "1"
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Setup .NET 10 (preview)
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100
- name: Build CryptoPro plugin
run: |
dotnet build src/__Libraries/StellaOps.Cryptography.Plugin.CryptoPro/StellaOps.Cryptography.Plugin.CryptoPro.csproj -c ${{ github.event.inputs.configuration || 'Release' }}
- name: Run CryptoPro signer tests (requires CSP pre-installed)
if: ${{ github.event.inputs.run_tests != 'false' }}
run: |
powershell -File scripts/crypto/run-cryptopro-tests.ps1 -Configuration ${{ github.event.inputs.configuration || 'Release' }}
# NOTE: This workflow assumes the windows runner already has CryptoPro CSP installed and licensed.
# Leave it opt-in to avoid breaking default CI lanes.
View File
@@ -1,204 +0,0 @@
# .gitea/workflows/deploy-keyless-verify.yml
# Verification gate for deployments using keyless signatures
#
# This workflow verifies all required attestations before
# allowing deployment to production environments.
#
# Dogfooding the StellaOps keyless verification feature.
name: Deployment Verification Gate
on:
workflow_dispatch:
inputs:
image:
description: 'Image to deploy (with digest)'
required: true
type: string
environment:
description: 'Target environment'
required: true
type: choice
options:
- staging
- production
require_sbom:
description: 'Require SBOM attestation'
required: false
default: true
type: boolean
require_verdict:
description: 'Require policy verdict attestation'
required: false
default: true
type: boolean
env:
STELLAOPS_URL: "https://api.stella-ops.internal"
jobs:
pre-flight:
runs-on: ubuntu-22.04
outputs:
identity-pattern: ${{ steps.config.outputs.identity-pattern }}
steps:
- name: Configure Identity Constraints
id: config
run: |
ENV="${{ github.event.inputs.environment }}"
if [[ "$ENV" == "production" ]]; then
# Production: only allow signed releases from main or tags
PATTERN="stella-ops.org/git.stella-ops.org:ref:refs/(heads/main|tags/v.*)"
else
# Staging: allow any branch
PATTERN="stella-ops.org/git.stella-ops.org:ref:refs/heads/.*"
fi
echo "identity-pattern=${PATTERN}" >> $GITHUB_OUTPUT
echo "Using identity pattern: ${PATTERN}"
verify-attestations:
needs: pre-flight
runs-on: ubuntu-22.04
permissions:
contents: read
outputs:
verified: ${{ steps.verify.outputs.verified }}
attestation-count: ${{ steps.verify.outputs.count }}
steps:
- name: Install StellaOps CLI
run: |
curl -sL https://get.stella-ops.org/cli | sh
echo "$HOME/.stellaops/bin" >> $GITHUB_PATH
- name: Verify All Attestations
id: verify
run: |
set -euo pipefail
IMAGE="${{ github.event.inputs.image }}"
IDENTITY="${{ needs.pre-flight.outputs.identity-pattern }}"
ISSUER="https://git.stella-ops.org"
VERIFY_ARGS=(
--artifact "${IMAGE}"
--certificate-identity "${IDENTITY}"
--certificate-oidc-issuer "${ISSUER}"
--require-rekor
--output json
)
if [[ "${{ github.event.inputs.require_sbom }}" == "true" ]]; then
VERIFY_ARGS+=(--require-sbom)
fi
if [[ "${{ github.event.inputs.require_verdict }}" == "true" ]]; then
VERIFY_ARGS+=(--require-verdict)
fi
echo "Verifying: ${IMAGE}"
echo "Identity: ${IDENTITY}"
echo "Issuer: ${ISSUER}"
RESULT=$(stella attest verify "${VERIFY_ARGS[@]}" 2>&1)
echo "$RESULT" | jq .
VERIFIED=$(echo "$RESULT" | jq -r '.valid')
COUNT=$(echo "$RESULT" | jq -r '.attestationCount')
echo "verified=${VERIFIED}" >> $GITHUB_OUTPUT
echo "count=${COUNT}" >> $GITHUB_OUTPUT
if [[ "$VERIFIED" != "true" ]]; then
echo "::error::Verification failed"
echo "$RESULT" | jq -r '.issues[]? | "::error::\(.code): \(.message)"'
exit 1
fi
echo "Verification passed with ${COUNT} attestations"
verify-provenance:
needs: pre-flight
runs-on: ubuntu-22.04
permissions:
contents: read
outputs:
valid: ${{ steps.verify.outputs.valid }}
steps:
- name: Install StellaOps CLI
run: |
curl -sL https://get.stella-ops.org/cli | sh
echo "$HOME/.stellaops/bin" >> $GITHUB_PATH
- name: Verify Build Provenance
id: verify
run: |
IMAGE="${{ github.event.inputs.image }}"
echo "Verifying provenance for: ${IMAGE}"
RESULT=$(stella provenance verify \
--artifact "${IMAGE}" \
--require-source-repo "stella-ops.org/git.stella-ops.org" \
--output json)
echo "$RESULT" | jq .
VALID=$(echo "$RESULT" | jq -r '.valid')
echo "valid=${VALID}" >> $GITHUB_OUTPUT
if [[ "$VALID" != "true" ]]; then
echo "::error::Provenance verification failed"
exit 1
fi
create-audit-entry:
needs: [verify-attestations, verify-provenance]
runs-on: ubuntu-22.04
steps:
- name: Install StellaOps CLI
run: |
curl -sL https://get.stella-ops.org/cli | sh
echo "$HOME/.stellaops/bin" >> $GITHUB_PATH
- name: Log Deployment Verification
run: |
stella audit log \
--event "deployment-verification" \
--artifact "${{ github.event.inputs.image }}" \
--environment "${{ github.event.inputs.environment }}" \
--verified true \
--attestations "${{ needs.verify-attestations.outputs.attestation-count }}" \
--provenance-valid "${{ needs.verify-provenance.outputs.valid }}" \
--actor "${{ github.actor }}" \
--workflow "${{ github.workflow }}" \
--run-id "${{ github.run_id }}"
approve-deployment:
needs: [verify-attestations, verify-provenance, create-audit-entry]
runs-on: ubuntu-22.04
environment: ${{ github.event.inputs.environment }}
steps:
- name: Deployment Approved
run: |
cat >> $GITHUB_STEP_SUMMARY << EOF
## Deployment Approved
| Field | Value |
|-------|-------|
| **Image** | \`${{ github.event.inputs.image }}\` |
| **Environment** | ${{ github.event.inputs.environment }} |
| **Attestations** | ${{ needs.verify-attestations.outputs.attestation-count }} |
| **Provenance Valid** | ${{ needs.verify-provenance.outputs.valid }} |
| **Approved By** | @${{ github.actor }} |
Deployment can now proceed.
EOF
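The pre-flight job only emits a pattern; the actual matching happens inside `stella attest verify`. Treating the pattern as an anchored regular expression over the certificate identity — an assumption about the verifier's semantics — shows why a feature branch clears staging but not production:

```python
import re

PATTERNS = {
    "production": r"stella-ops.org/git.stella-ops.org:ref:refs/(heads/main|tags/v.*)",
    "staging": r"stella-ops.org/git.stella-ops.org:ref:refs/heads/.*",
}

identity = "stella-ops.org/git.stella-ops.org:ref:refs/heads/feature-x"
for env_name, pattern in PATTERNS.items():
    verdict = "allow" if re.fullmatch(pattern, identity) else "reject"
    print(f"{env_name}: {verdict}")
# production: reject (not main, not a v* tag); staging: allow (any branch)
```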
View File
@@ -1,330 +0,0 @@
# .gitea/workflows/determinism-gate.yml
# Determinism gate for artifact reproducibility validation
# Implements Tasks 10-11 from SPRINT 5100.0007.0003
# Updated: Task 13 from SPRINT 8200.0001.0003 - Add schema validation dependency
name: Determinism Gate
on:
push:
branches: [ main ]
paths:
- 'src/**'
- 'src/__Tests/Integration/StellaOps.Integration.Determinism/**'
- 'src/__Tests/baselines/determinism/**'
- 'src/__Tests/__Benchmarks/golden-corpus/**'
- 'docs/schemas/**'
- '.gitea/workflows/determinism-gate.yml'
pull_request:
branches: [ main ]
types: [ closed ]
workflow_dispatch:
inputs:
update_baselines:
description: 'Update baselines with current hashes'
required: false
default: false
type: boolean
fail_on_missing:
description: 'Fail if baselines are missing'
required: false
default: false
type: boolean
skip_schema_validation:
description: 'Skip schema validation step'
required: false
default: false
type: boolean
env:
DOTNET_VERSION: '10.0.100'
BUILD_CONFIGURATION: Release
DETERMINISM_OUTPUT_DIR: ${{ github.workspace }}/out/determinism
BASELINE_DIR: src/__Tests/baselines/determinism
jobs:
# ===========================================================================
# Schema Validation Gate (runs before determinism checks)
# ===========================================================================
schema-validation:
name: Schema Validation
runs-on: ubuntu-22.04
if: github.event.inputs.skip_schema_validation != 'true'
timeout-minutes: 10
env:
SBOM_UTILITY_VERSION: "0.16.0"
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install sbom-utility
run: |
curl -sSfL "https://github.com/CycloneDX/sbom-utility/releases/download/v${SBOM_UTILITY_VERSION}/sbom-utility-v${SBOM_UTILITY_VERSION}-linux-amd64.tar.gz" | tar xz
sudo mv sbom-utility /usr/local/bin/
sbom-utility --version
- name: Validate CycloneDX fixtures
run: |
set -e
SCHEMA="docs/schemas/cyclonedx-bom-1.6.schema.json"
FIXTURE_DIRS=(
"src/__Tests/__Benchmarks/golden-corpus"
"src/__Tests/fixtures"
"src/__Tests/__Datasets/seed-data"
)
FOUND=0
PASSED=0
FAILED=0
for dir in "${FIXTURE_DIRS[@]}"; do
if [ -d "$dir" ]; then
# Skip invalid fixtures directory (used for negative testing)
while IFS= read -r -d '' file; do
if [[ "$file" == *"/invalid/"* ]]; then
continue
fi
if grep -q '"bomFormat".*"CycloneDX"' "$file" 2>/dev/null; then
FOUND=$((FOUND + 1))
echo "::group::Validating: $file"
if sbom-utility validate --input-file "$file" --schema "$SCHEMA" 2>&1; then
echo "✅ PASS: $file"
PASSED=$((PASSED + 1))
else
echo "❌ FAIL: $file"
FAILED=$((FAILED + 1))
fi
echo "::endgroup::"
fi
done < <(find "$dir" -name '*.json' -type f -print0 2>/dev/null || true)
fi
done
echo "================================================"
echo "CycloneDX Validation Summary"
echo "================================================"
echo "Found: $FOUND fixtures"
echo "Passed: $PASSED"
echo "Failed: $FAILED"
echo "================================================"
if [ "$FAILED" -gt 0 ]; then
echo "::error::$FAILED CycloneDX fixtures failed validation"
exit 1
fi
- name: Schema validation summary
run: |
echo "## Schema Validation" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "✅ All SBOM fixtures passed schema validation" >> $GITHUB_STEP_SUMMARY
# ===========================================================================
# Determinism Validation Gate
# ===========================================================================
determinism-gate:
needs: [schema-validation]
if: always() && (needs.schema-validation.result == 'success' || needs.schema-validation.result == 'skipped')
name: Determinism Validation
runs-on: ubuntu-22.04
timeout-minutes: 30
outputs:
status: ${{ steps.check.outputs.status }}
drifted: ${{ steps.check.outputs.drifted }}
missing: ${{ steps.check.outputs.missing }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup .NET ${{ env.DOTNET_VERSION }}
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Restore solution
run: dotnet restore src/StellaOps.sln
- name: Build solution
run: dotnet build src/StellaOps.sln --configuration $BUILD_CONFIGURATION --no-restore
- name: Create output directories
run: |
mkdir -p "$DETERMINISM_OUTPUT_DIR"
mkdir -p "$DETERMINISM_OUTPUT_DIR/hashes"
mkdir -p "$DETERMINISM_OUTPUT_DIR/manifests"
- name: Run determinism tests
id: tests
run: |
dotnet test src/__Tests/Integration/StellaOps.Integration.Determinism/StellaOps.Integration.Determinism.csproj \
--configuration $BUILD_CONFIGURATION \
--no-build \
--logger "trx;LogFileName=determinism-tests.trx" \
--results-directory "$DETERMINISM_OUTPUT_DIR" \
--verbosity normal
env:
DETERMINISM_OUTPUT_DIR: ${{ env.DETERMINISM_OUTPUT_DIR }}
UPDATE_BASELINES: ${{ github.event.inputs.update_baselines || 'false' }}
FAIL_ON_MISSING: ${{ github.event.inputs.fail_on_missing || 'false' }}
- name: Generate determinism summary
id: check
run: |
# Create determinism.json summary
cat > "$DETERMINISM_OUTPUT_DIR/determinism.json" << 'EOF'
{
"schemaVersion": "1.0",
"generatedAt": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
"sourceRef": "${{ github.sha }}",
"ciRunId": "${{ github.run_id }}",
"status": "pass",
"statistics": {
"total": 0,
"matched": 0,
"drifted": 0,
"missing": 0
}
}
EOF
# Output status for downstream jobs
echo "status=pass" >> $GITHUB_OUTPUT
echo "drifted=0" >> $GITHUB_OUTPUT
echo "missing=0" >> $GITHUB_OUTPUT
- name: Upload determinism artifacts
uses: actions/upload-artifact@v4
if: always()
with:
name: determinism-artifacts
path: |
${{ env.DETERMINISM_OUTPUT_DIR }}/determinism.json
${{ env.DETERMINISM_OUTPUT_DIR }}/hashes/**
${{ env.DETERMINISM_OUTPUT_DIR }}/manifests/**
${{ env.DETERMINISM_OUTPUT_DIR }}/*.trx
if-no-files-found: warn
retention-days: 30
- name: Upload hash files as individual artifacts
uses: actions/upload-artifact@v4
if: always()
with:
name: determinism-hashes
path: ${{ env.DETERMINISM_OUTPUT_DIR }}/hashes/**
if-no-files-found: ignore
retention-days: 30
- name: Generate summary
if: always()
run: |
echo "## Determinism Gate Results" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "| Metric | Value |" >> $GITHUB_STEP_SUMMARY
echo "|--------|-------|" >> $GITHUB_STEP_SUMMARY
echo "| Status | ${{ steps.check.outputs.status || 'unknown' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Source Ref | \`${{ github.sha }}\` |" >> $GITHUB_STEP_SUMMARY
echo "| CI Run | ${{ github.run_id }} |" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Artifact Summary" >> $GITHUB_STEP_SUMMARY
echo "- **Drifted**: ${{ steps.check.outputs.drifted || '0' }}" >> $GITHUB_STEP_SUMMARY
echo "- **Missing Baselines**: ${{ steps.check.outputs.missing || '0' }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "See \`determinism.json\` artifact for full details." >> $GITHUB_STEP_SUMMARY
# ===========================================================================
# Baseline Update (only on workflow_dispatch with update_baselines=true)
# ===========================================================================
update-baselines:
name: Update Baselines
runs-on: ubuntu-22.04
needs: [schema-validation, determinism-gate]
if: github.event_name == 'workflow_dispatch' && github.event.inputs.update_baselines == 'true'
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Download determinism artifacts
uses: actions/download-artifact@v4
with:
name: determinism-hashes
path: new-hashes
- name: Update baseline files
run: |
mkdir -p "$BASELINE_DIR"
if [ -d "new-hashes" ]; then
cp -r new-hashes/* "$BASELINE_DIR/" || true
echo "Updated baseline files from new-hashes"
fi
- name: Commit baseline updates
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git add "$BASELINE_DIR"
if git diff --cached --quiet; then
echo "No baseline changes to commit"
else
git commit -m "chore: update determinism baselines
Updated by Determinism Gate workflow run #${{ github.run_id }}
Source: ${{ github.sha }}
Co-Authored-By: github-actions[bot] <github-actions[bot]@users.noreply.github.com>"
git push
echo "Baseline updates committed and pushed"
fi
# ===========================================================================
# Drift Detection Gate (fails workflow if drift detected)
# ===========================================================================
drift-check:
name: Drift Detection Gate
runs-on: ubuntu-22.04
needs: [schema-validation, determinism-gate]
if: always()
steps:
- name: Check for drift
run: |
SCHEMA_STATUS="${{ needs.schema-validation.result || 'skipped' }}"
DRIFTED="${{ needs.determinism-gate.outputs.drifted || '0' }}"
STATUS="${{ needs.determinism-gate.outputs.status || 'unknown' }}"
echo "Schema Validation: $SCHEMA_STATUS"
echo "Determinism Status: $STATUS"
echo "Drifted Artifacts: $DRIFTED"
# Fail if schema validation failed
if [ "$SCHEMA_STATUS" = "failure" ]; then
echo "::error::Schema validation failed! Fix SBOM schema issues before determinism check."
exit 1
fi
if [ "$STATUS" = "fail" ] || [ "$DRIFTED" != "0" ]; then
echo "::error::Determinism drift detected! $DRIFTED artifact(s) have changed."
echo "Run workflow with 'update_baselines=true' to update baselines if changes are intentional."
exit 1
fi
echo "No determinism drift detected. All artifacts match baselines."
- name: Gate status
run: |
echo "## Drift Detection Gate" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Schema Validation: ${{ needs.schema-validation.result || 'skipped' }}" >> $GITHUB_STEP_SUMMARY
echo "Determinism Status: ${{ needs.determinism-gate.outputs.status || 'pass' }}" >> $GITHUB_STEP_SUMMARY
View File
@@ -1,32 +0,0 @@
name: devportal-offline
on:
schedule:
- cron: "0 5 * * *"
workflow_dispatch: {}
jobs:
build-offline:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Enable pnpm via corepack
run: corepack enable
- name: Setup Node (corepack/pnpm)
uses: actions/setup-node@v4
with:
node-version: "18"
cache: "pnpm"
- name: Build devportal (offline bundle)
run: |
chmod +x scripts/devportal/build-devportal.sh
scripts/devportal/build-devportal.sh
- name: Upload bundle
uses: actions/upload-artifact@v4
with:
name: devportal-offline
path: out/devportal/**.tgz
View File
@@ -1,218 +0,0 @@
name: Regional Docker Builds
on:
push:
branches:
- main
paths:
- 'devops/docker/**'
- 'devops/compose/docker-compose.*.yml'
- 'etc/appsettings.crypto.*.yaml'
- 'etc/crypto-plugins-manifest.json'
- 'src/__Libraries/StellaOps.Cryptography.Plugin.**'
- '.gitea/workflows/docker-regional-builds.yml'
pull_request:
paths:
- 'devops/docker/**'
- 'devops/compose/docker-compose.*.yml'
- 'etc/appsettings.crypto.*.yaml'
- 'etc/crypto-plugins-manifest.json'
- 'src/__Libraries/StellaOps.Cryptography.Plugin.**'
workflow_dispatch:
env:
REGISTRY: registry.stella-ops.org
PLATFORM_IMAGE_NAME: stellaops/platform
DOCKER_BUILDKIT: 1
jobs:
# Build the base platform image containing all crypto plugins
build-platform:
name: Build Platform Image (All Plugins)
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Container Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ gitea.actor }}
password: ${{ secrets.GITEA_TOKEN }}
- name: Extract metadata (tags, labels)
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.PLATFORM_IMAGE_NAME }}
tags: |
type=ref,event=branch
type=ref,event=pr
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=sha,prefix={{branch}}-
type=raw,value=latest,enable={{is_default_branch}}
- name: Build and push platform image
uses: docker/build-push-action@v5
with:
context: .
file: ./devops/docker/Dockerfile.platform
target: runtime-base
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=registry,ref=${{ env.REGISTRY }}/${{ env.PLATFORM_IMAGE_NAME }}:buildcache
cache-to: type=registry,ref=${{ env.REGISTRY }}/${{ env.PLATFORM_IMAGE_NAME }}:buildcache,mode=max
build-args: |
BUILDKIT_INLINE_CACHE=1
- name: Export platform image tag
id: platform
run: |
echo "tag=${{ env.REGISTRY }}/${{ env.PLATFORM_IMAGE_NAME }}:${{ github.sha }}" >> $GITHUB_OUTPUT
outputs:
platform-tag: ${{ steps.platform.outputs.tag }}
# Build regional profile images for each service
build-regional-profiles:
name: Build Regional Profiles
runs-on: ubuntu-latest
needs: build-platform
permissions:
contents: read
packages: write
strategy:
fail-fast: false
matrix:
profile: [international, russia, eu, china]
service:
- authority
- signer
- attestor
- concelier
- scanner
- excititor
- policy
- scheduler
- notify
- zastava
- gateway
- airgap-importer
- airgap-exporter
- cli
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Container Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ gitea.actor }}
password: ${{ secrets.GITEA_TOKEN }}
- name: Extract metadata
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/stellaops/${{ matrix.service }}
tags: |
type=raw,value=${{ matrix.profile }},enable={{is_default_branch}}
type=raw,value=${{ matrix.profile }}-${{ github.sha }}
type=raw,value=${{ matrix.profile }}-pr-${{ github.event.pull_request.number }},enable=${{ github.event_name == 'pull_request' }}
- name: Build and push regional service image
uses: docker/build-push-action@v5
with:
context: .
file: ./devops/docker/Dockerfile.crypto-profile
target: ${{ matrix.service }}
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
CRYPTO_PROFILE=${{ matrix.profile }}
BASE_IMAGE=${{ needs.build-platform.outputs.platform-tag }}
SERVICE_NAME=${{ matrix.service }}
# Validate regional configurations
validate-configs:
name: Validate Regional Configurations
runs-on: ubuntu-latest
needs: build-regional-profiles
strategy:
fail-fast: false
matrix:
profile: [international, russia, eu, china]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Validate crypto configuration YAML
run: |
# Install yq for YAML validation
sudo wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64
sudo chmod +x /usr/local/bin/yq
# Validate YAML syntax
yq eval 'true' etc/appsettings.crypto.${{ matrix.profile }}.yaml
- name: Validate docker-compose file
run: |
docker compose -f devops/compose/docker-compose.${{ matrix.profile }}.yml config --quiet
- name: Check required crypto configuration fields
run: |
# Verify ManifestPath is set
MANIFEST_PATH=$(yq eval '.StellaOps.Crypto.Plugins.ManifestPath' etc/appsettings.crypto.${{ matrix.profile }}.yaml)
if [ -z "$MANIFEST_PATH" ] || [ "$MANIFEST_PATH" == "null" ]; then
echo "Error: ManifestPath not set in ${{ matrix.profile }} configuration"
exit 1
fi
# Verify at least one plugin is enabled
ENABLED_COUNT=$(yq eval '.StellaOps.Crypto.Plugins.Enabled | length' etc/appsettings.crypto.${{ matrix.profile }}.yaml)
if [ "$ENABLED_COUNT" -eq 0 ]; then
echo "Error: No plugins enabled in ${{ matrix.profile }} configuration"
exit 1
fi
echo "Configuration valid: ${{ matrix.profile }}"
# Summary job
summary:
name: Build Summary
runs-on: ubuntu-latest
needs: [build-platform, build-regional-profiles, validate-configs]
if: always()
steps:
- name: Generate summary
run: |
echo "## Regional Docker Builds Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Platform image built successfully: ${{ needs.build-platform.result == 'success' }}" >> $GITHUB_STEP_SUMMARY
echo "Regional profiles built: ${{ needs.build-regional-profiles.result == 'success' }}" >> $GITHUB_STEP_SUMMARY
echo "Configurations validated: ${{ needs.validate-configs.result == 'success' }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Build Details" >> $GITHUB_STEP_SUMMARY
echo "- Commit: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
echo "- Branch: ${{ github.ref_name }}" >> $GITHUB_STEP_SUMMARY
echo "- Event: ${{ github.event_name }}" >> $GITHUB_STEP_SUMMARY
View File
@@ -29,12 +29,6 @@ jobs:
       - name: Checkout repository
         uses: actions/checkout@v4
-      - name: Task Pack offline bundle fixtures
-        run: python3 .gitea/scripts/test/run-fixtures-check.sh
-      - name: Export OpenSSL 1.1 shim for Mongo2Go
-        run: .gitea/scripts/util/enable-openssl11-shim.sh
       - name: Setup Node.js
         uses: actions/setup-node@v4
         with:
@@ -47,7 +41,7 @@ jobs:
       - name: Setup .NET SDK
         uses: actions/setup-dotnet@v4
         with:
-          dotnet-version: '10.0.100'
+          dotnet-version: '10.0.100-rc.2.25502.107'
       - name: Link check
         run: |
View File
@@ -1,473 +0,0 @@
# =============================================================================
# e2e-reproducibility.yml
# Sprint: SPRINT_8200_0001_0004_e2e_reproducibility_test
# Tasks: E2E-8200-015 to E2E-8200-024 - CI Workflow for E2E Reproducibility
# Description: CI workflow for end-to-end reproducibility verification.
# Runs tests across multiple platforms and compares results.
# =============================================================================
name: E2E Reproducibility
on:
pull_request:
paths:
- 'src/**'
- 'src/__Tests/Integration/StellaOps.Integration.E2E/**'
- 'src/__Tests/fixtures/**'
- '.gitea/workflows/e2e-reproducibility.yml'
push:
branches:
- main
- develop
paths:
- 'src/**'
- 'src/__Tests/Integration/StellaOps.Integration.E2E/**'
schedule:
# Nightly at 2am UTC
- cron: '0 2 * * *'
workflow_dispatch:
inputs:
run_cross_platform:
description: 'Run cross-platform tests'
type: boolean
default: false
update_baseline:
description: 'Update golden baseline (requires approval)'
type: boolean
default: false
env:
DOTNET_VERSION: '10.0.x'
DOTNET_NOLOGO: true
DOTNET_CLI_TELEMETRY_OPTOUT: true
jobs:
# =============================================================================
# Job: Run E2E reproducibility tests on primary platform
# =============================================================================
reproducibility-ubuntu:
name: E2E Reproducibility (Ubuntu)
runs-on: ubuntu-latest
outputs:
verdict_hash: ${{ steps.run-tests.outputs.verdict_hash }}
manifest_hash: ${{ steps.run-tests.outputs.manifest_hash }}
envelope_hash: ${{ steps.run-tests.outputs.envelope_hash }}
services:
postgres:
image: postgres:16-alpine
env:
POSTGRES_USER: test_user
POSTGRES_PASSWORD: test_password
POSTGRES_DB: stellaops_e2e_test
ports:
- 5432:5432
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
- name: Restore dependencies
run: dotnet restore src/__Tests/Integration/StellaOps.Integration.E2E/StellaOps.Integration.E2E.csproj
- name: Build E2E tests
run: dotnet build src/__Tests/Integration/StellaOps.Integration.E2E/StellaOps.Integration.E2E.csproj --no-restore -c Release
- name: Run E2E reproducibility tests
id: run-tests
run: |
dotnet test src/__Tests/Integration/StellaOps.Integration.E2E/StellaOps.Integration.E2E.csproj \
--no-build \
-c Release \
--logger "trx;LogFileName=e2e-results.trx" \
--logger "console;verbosity=detailed" \
--results-directory ./TestResults \
-- RunConfiguration.CollectSourceInformation=true
# Extract hashes from test output for cross-platform comparison
echo "verdict_hash=$(cat ./TestResults/verdict_hash.txt 2>/dev/null || echo 'NOT_FOUND')" >> $GITHUB_OUTPUT
echo "manifest_hash=$(cat ./TestResults/manifest_hash.txt 2>/dev/null || echo 'NOT_FOUND')" >> $GITHUB_OUTPUT
echo "envelope_hash=$(cat ./TestResults/envelope_hash.txt 2>/dev/null || echo 'NOT_FOUND')" >> $GITHUB_OUTPUT
env:
ConnectionStrings__ScannerDb: "Host=localhost;Port=5432;Database=stellaops_e2e_test;Username=test_user;Password=test_password"
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: e2e-results-ubuntu
path: ./TestResults/
retention-days: 14
- name: Upload hash artifacts
uses: actions/upload-artifact@v4
with:
name: hashes-ubuntu
path: |
./TestResults/verdict_hash.txt
./TestResults/manifest_hash.txt
./TestResults/envelope_hash.txt
retention-days: 14
# =============================================================================
# Job: Run E2E tests on Windows (conditional)
# =============================================================================
reproducibility-windows:
name: E2E Reproducibility (Windows)
runs-on: windows-latest
if: github.event_name == 'schedule' || github.event.inputs.run_cross_platform == 'true'
outputs:
verdict_hash: ${{ steps.run-tests.outputs.verdict_hash }}
manifest_hash: ${{ steps.run-tests.outputs.manifest_hash }}
envelope_hash: ${{ steps.run-tests.outputs.envelope_hash }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
- name: Restore dependencies
run: dotnet restore src/__Tests/Integration/StellaOps.Integration.E2E/StellaOps.Integration.E2E.csproj
- name: Build E2E tests
run: dotnet build src/__Tests/Integration/StellaOps.Integration.E2E/StellaOps.Integration.E2E.csproj --no-restore -c Release
- name: Run E2E reproducibility tests
id: run-tests
run: |
dotnet test src/__Tests/Integration/StellaOps.Integration.E2E/StellaOps.Integration.E2E.csproj `
--no-build `
-c Release `
--logger "trx;LogFileName=e2e-results.trx" `
--logger "console;verbosity=detailed" `
--results-directory ./TestResults
# Extract hashes for comparison
$verdictHash = Get-Content -Path ./TestResults/verdict_hash.txt -ErrorAction SilentlyContinue
$manifestHash = Get-Content -Path ./TestResults/manifest_hash.txt -ErrorAction SilentlyContinue
$envelopeHash = Get-Content -Path ./TestResults/envelope_hash.txt -ErrorAction SilentlyContinue
"verdict_hash=$($verdictHash ?? 'NOT_FOUND')" >> $env:GITHUB_OUTPUT
"manifest_hash=$($manifestHash ?? 'NOT_FOUND')" >> $env:GITHUB_OUTPUT
"envelope_hash=$($envelopeHash ?? 'NOT_FOUND')" >> $env:GITHUB_OUTPUT
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: e2e-results-windows
path: ./TestResults/
retention-days: 14
- name: Upload hash artifacts
uses: actions/upload-artifact@v4
with:
name: hashes-windows
path: |
./TestResults/verdict_hash.txt
./TestResults/manifest_hash.txt
./TestResults/envelope_hash.txt
retention-days: 14
# =============================================================================
# Job: Run E2E tests on macOS (conditional)
# =============================================================================
reproducibility-macos:
name: E2E Reproducibility (macOS)
runs-on: macos-latest
if: github.event_name == 'schedule' || github.event.inputs.run_cross_platform == 'true'
outputs:
verdict_hash: ${{ steps.run-tests.outputs.verdict_hash }}
manifest_hash: ${{ steps.run-tests.outputs.manifest_hash }}
envelope_hash: ${{ steps.run-tests.outputs.envelope_hash }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
- name: Restore dependencies
run: dotnet restore src/__Tests/Integration/StellaOps.Integration.E2E/StellaOps.Integration.E2E.csproj
- name: Build E2E tests
run: dotnet build src/__Tests/Integration/StellaOps.Integration.E2E/StellaOps.Integration.E2E.csproj --no-restore -c Release
- name: Run E2E reproducibility tests
id: run-tests
run: |
dotnet test src/__Tests/Integration/StellaOps.Integration.E2E/StellaOps.Integration.E2E.csproj \
--no-build \
-c Release \
--logger "trx;LogFileName=e2e-results.trx" \
--logger "console;verbosity=detailed" \
--results-directory ./TestResults
# Extract hashes for comparison
echo "verdict_hash=$(cat ./TestResults/verdict_hash.txt 2>/dev/null || echo 'NOT_FOUND')" >> $GITHUB_OUTPUT
echo "manifest_hash=$(cat ./TestResults/manifest_hash.txt 2>/dev/null || echo 'NOT_FOUND')" >> $GITHUB_OUTPUT
echo "envelope_hash=$(cat ./TestResults/envelope_hash.txt 2>/dev/null || echo 'NOT_FOUND')" >> $GITHUB_OUTPUT
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: e2e-results-macos
path: ./TestResults/
retention-days: 14
- name: Upload hash artifacts
uses: actions/upload-artifact@v4
with:
name: hashes-macos
path: |
./TestResults/verdict_hash.txt
./TestResults/manifest_hash.txt
./TestResults/envelope_hash.txt
retention-days: 14
# =============================================================================
# Job: Cross-platform hash comparison
# =============================================================================
cross-platform-compare:
name: Cross-Platform Hash Comparison
runs-on: ubuntu-latest
needs: [reproducibility-ubuntu, reproducibility-windows, reproducibility-macos]
if: always() && (github.event_name == 'schedule' || github.event.inputs.run_cross_platform == 'true')
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Download Ubuntu hashes
uses: actions/download-artifact@v4
with:
name: hashes-ubuntu
path: ./hashes/ubuntu
- name: Download Windows hashes
uses: actions/download-artifact@v4
with:
name: hashes-windows
path: ./hashes/windows
continue-on-error: true
- name: Download macOS hashes
uses: actions/download-artifact@v4
with:
name: hashes-macos
path: ./hashes/macos
continue-on-error: true
- name: Compare hashes across platforms
run: |
echo "=== Cross-Platform Hash Comparison ==="
echo ""
ubuntu_verdict=$(cat ./hashes/ubuntu/verdict_hash.txt 2>/dev/null || echo "NOT_AVAILABLE")
windows_verdict=$(cat ./hashes/windows/verdict_hash.txt 2>/dev/null || echo "NOT_AVAILABLE")
macos_verdict=$(cat ./hashes/macos/verdict_hash.txt 2>/dev/null || echo "NOT_AVAILABLE")
echo "Verdict Hashes:"
echo " Ubuntu: $ubuntu_verdict"
echo " Windows: $windows_verdict"
echo " macOS: $macos_verdict"
echo ""
ubuntu_manifest=$(cat ./hashes/ubuntu/manifest_hash.txt 2>/dev/null || echo "NOT_AVAILABLE")
windows_manifest=$(cat ./hashes/windows/manifest_hash.txt 2>/dev/null || echo "NOT_AVAILABLE")
macos_manifest=$(cat ./hashes/macos/manifest_hash.txt 2>/dev/null || echo "NOT_AVAILABLE")
echo "Manifest Hashes:"
echo " Ubuntu: $ubuntu_manifest"
echo " Windows: $windows_manifest"
echo " macOS: $macos_manifest"
echo ""
# Check if all available hashes match
all_match=true
if [ "$ubuntu_verdict" != "NOT_AVAILABLE" ] && [ "$windows_verdict" != "NOT_AVAILABLE" ]; then
if [ "$ubuntu_verdict" != "$windows_verdict" ]; then
echo "❌ FAIL: Ubuntu and Windows verdict hashes differ!"
all_match=false
fi
fi
if [ "$ubuntu_verdict" != "NOT_AVAILABLE" ] && [ "$macos_verdict" != "NOT_AVAILABLE" ]; then
if [ "$ubuntu_verdict" != "$macos_verdict" ]; then
echo "❌ FAIL: Ubuntu and macOS verdict hashes differ!"
all_match=false
fi
fi
if [ "$all_match" = true ]; then
echo "✅ All available platform hashes match!"
else
echo ""
echo "Cross-platform reproducibility verification FAILED."
exit 1
fi
- name: Create comparison report
run: |
cat > ./cross-platform-report.md << 'EOF'
# Cross-Platform Reproducibility Report
## Test Run Information
- **Workflow Run:** ${{ github.run_id }}
- **Trigger:** ${{ github.event_name }}
- **Commit:** ${{ github.sha }}
- **Branch:** ${{ github.ref_name }}
## Hash Comparison
| Platform | Verdict Hash | Manifest Hash | Status |
|----------|--------------|---------------|--------|
| Ubuntu | ${{ needs.reproducibility-ubuntu.outputs.verdict_hash }} | ${{ needs.reproducibility-ubuntu.outputs.manifest_hash }} | ✅ |
| Windows | ${{ needs.reproducibility-windows.outputs.verdict_hash }} | ${{ needs.reproducibility-windows.outputs.manifest_hash }} | ${{ needs.reproducibility-windows.result == 'success' && '✅' || '⚠️' }} |
| macOS | ${{ needs.reproducibility-macos.outputs.verdict_hash }} | ${{ needs.reproducibility-macos.outputs.manifest_hash }} | ${{ needs.reproducibility-macos.result == 'success' && '✅' || '⚠️' }} |
## Conclusion
Cross-platform reproducibility: **${{ job.status == 'success' && 'VERIFIED' || 'NEEDS REVIEW' }}**
EOF
cat ./cross-platform-report.md
- name: Upload comparison report
uses: actions/upload-artifact@v4
with:
name: cross-platform-report
path: ./cross-platform-report.md
retention-days: 30
# =============================================================================
# Job: Golden baseline comparison
# =============================================================================
golden-baseline:
name: Golden Baseline Verification
runs-on: ubuntu-latest
needs: [reproducibility-ubuntu]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Download current hashes
uses: actions/download-artifact@v4
with:
name: hashes-ubuntu
path: ./current
- name: Compare with golden baseline
run: |
echo "=== Golden Baseline Comparison ==="
baseline_file="./src/__Tests/__Benchmarks/determinism/golden-baseline/e2e-hashes.json"
if [ ! -f "$baseline_file" ]; then
echo "⚠️ Golden baseline not found. Skipping comparison."
echo "To create baseline, run with update_baseline=true"
exit 0
fi
current_verdict=$(cat ./current/verdict_hash.txt 2>/dev/null || echo "NOT_FOUND")
baseline_verdict=$(jq -r '.verdict_hash' "$baseline_file" 2>/dev/null || echo "NOT_FOUND")
echo "Current verdict hash: $current_verdict"
echo "Baseline verdict hash: $baseline_verdict"
if [ "$current_verdict" != "$baseline_verdict" ]; then
echo ""
echo "❌ FAIL: Current run does not match golden baseline!"
echo ""
echo "This may indicate:"
echo " 1. An intentional change requiring baseline update"
echo " 2. An unintentional regression in reproducibility"
echo ""
echo "To update baseline, run workflow with update_baseline=true"
exit 1
fi
echo ""
echo "✅ Current run matches golden baseline!"
- name: Update golden baseline (if requested)
if: github.event.inputs.update_baseline == 'true'
run: |
mkdir -p ./src/__Tests/__Benchmarks/determinism/golden-baseline
cat > ./src/__Tests/__Benchmarks/determinism/golden-baseline/e2e-hashes.json << EOF
{
"verdict_hash": "$(cat ./current/verdict_hash.txt 2>/dev/null || echo 'NOT_SET')",
"manifest_hash": "$(cat ./current/manifest_hash.txt 2>/dev/null || echo 'NOT_SET')",
"envelope_hash": "$(cat ./current/envelope_hash.txt 2>/dev/null || echo 'NOT_SET')",
"updated_at": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
"updated_by": "${{ github.actor }}",
"commit": "${{ github.sha }}"
}
EOF
echo "Golden baseline updated:"
cat ./src/__Tests/__Benchmarks/determinism/golden-baseline/e2e-hashes.json
- name: Commit baseline update
if: github.event.inputs.update_baseline == 'true'
uses: stefanzweifel/git-auto-commit-action@v5
with:
commit_message: "chore: Update E2E reproducibility golden baseline"
file_pattern: src/__Tests/__Benchmarks/determinism/golden-baseline/e2e-hashes.json
# =============================================================================
# Job: Status check gate
# =============================================================================
reproducibility-gate:
name: Reproducibility Gate
runs-on: ubuntu-latest
needs: [reproducibility-ubuntu, golden-baseline]
if: always()
steps:
- name: Check reproducibility status
run: |
ubuntu_status="${{ needs.reproducibility-ubuntu.result }}"
baseline_status="${{ needs.golden-baseline.result }}"
echo "Ubuntu E2E tests: $ubuntu_status"
echo "Golden baseline: $baseline_status"
if [ "$ubuntu_status" != "success" ]; then
echo "❌ E2E reproducibility tests failed!"
exit 1
fi
if [ "$baseline_status" == "failure" ]; then
echo "⚠️ Golden baseline comparison failed (may require review)"
# Don't fail the gate for baseline mismatch - it may be intentional
fi
echo "✅ Reproducibility gate passed!"
View File
@@ -1,98 +0,0 @@
name: EPSS Ingest Perf
# Sprint: SPRINT_3410_0001_0001_epss_ingestion_storage
# Tasks: EPSS-3410-013B, EPSS-3410-014
#
# Runs the EPSS ingest perf harness against a Dockerized PostgreSQL instance (Testcontainers).
#
# Runner requirements:
# - Linux runner with Docker Engine available to the runner user (Testcontainers).
# - Label: `ubuntu-22.04` (adjust `runs-on` if your labels differ).
# - >= 4 CPU / >= 8GB RAM recommended for stable baselines.
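#
# Local reproduction sketch (mirrors the CI invocation below; Docker must be
# running for Testcontainers to provision PostgreSQL):
#   dotnet run --project src/Scanner/__Benchmarks/StellaOps.Scanner.Storage.Epss.Perf/StellaOps.Scanner.Storage.Epss.Perf.csproj \
#     -c Release -- --rows 310000 --postgres-image 'postgres:16-alpine' --output bench/results/local.json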
on:
workflow_dispatch:
inputs:
rows:
description: 'Row count to generate (default: 310000)'
required: false
default: '310000'
postgres_image:
description: 'PostgreSQL image (default: postgres:16-alpine)'
required: false
default: 'postgres:16-alpine'
schedule:
# Nightly at 03:00 UTC
- cron: '0 3 * * *'
pull_request:
paths:
- 'src/Scanner/__Libraries/StellaOps.Scanner.Storage/**'
- 'src/Scanner/StellaOps.Scanner.Worker/**'
- 'src/Scanner/__Benchmarks/StellaOps.Scanner.Storage.Epss.Perf/**'
- '.gitea/workflows/epss-ingest-perf.yml'
push:
branches: [ main ]
paths:
- 'src/Scanner/__Libraries/StellaOps.Scanner.Storage/**'
- 'src/Scanner/StellaOps.Scanner.Worker/**'
- 'src/Scanner/__Benchmarks/StellaOps.Scanner.Storage.Epss.Perf/**'
- '.gitea/workflows/epss-ingest-perf.yml'
jobs:
perf:
runs-on: ubuntu-22.04
env:
DOTNET_NOLOGO: 1
DOTNET_CLI_TELEMETRY_OPTOUT: 1
DOTNET_SYSTEM_GLOBALIZATION_INVARIANT: 1
TZ: UTC
STELLAOPS_OFFLINE: 'true'
STELLAOPS_DETERMINISTIC: 'true'
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET 10
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100
include-prerelease: true
- name: Cache NuGet packages
uses: actions/cache@v4
with:
path: ~/.nuget/packages
key: ${{ runner.os }}-nuget-${{ hashFiles('**/*.csproj') }}
restore-keys: |
${{ runner.os }}-nuget-
- name: Restore
run: |
dotnet restore src/Scanner/__Benchmarks/StellaOps.Scanner.Storage.Epss.Perf/StellaOps.Scanner.Storage.Epss.Perf.csproj \
--configfile nuget.config
- name: Build
run: |
dotnet build src/Scanner/__Benchmarks/StellaOps.Scanner.Storage.Epss.Perf/StellaOps.Scanner.Storage.Epss.Perf.csproj \
-c Release \
--no-restore
- name: Run perf harness
run: |
mkdir -p bench/results
dotnet run \
--project src/Scanner/__Benchmarks/StellaOps.Scanner.Storage.Epss.Perf/StellaOps.Scanner.Storage.Epss.Perf.csproj \
-c Release \
--no-build \
-- \
--rows ${{ inputs.rows || '310000' }} \
--postgres-image '${{ inputs.postgres_image || 'postgres:16-alpine' }}' \
--output bench/results/epss-ingest-perf-${{ github.sha }}.json
- name: Upload results
uses: actions/upload-artifact@v4
with:
name: epss-ingest-perf-${{ github.sha }}
path: |
bench/results/epss-ingest-perf-${{ github.sha }}.json
retention-days: 90
View File
@@ -1,86 +0,0 @@
name: evidence-locker
on:
workflow_dispatch:
inputs:
retention_target:
description: "Retention days target"
required: false
default: "180"
jobs:
check-evidence-locker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Emit retention summary
env:
RETENTION_TARGET: ${{ github.event.inputs.retention_target }}
run: |
echo "target_retention_days=${RETENTION_TARGET}" > out/evidence-locker/summary.txt
- name: Upload evidence locker summary
uses: actions/upload-artifact@v4
with:
name: evidence-locker
path: out/evidence-locker/**
push-zastava-evidence:
runs-on: ubuntu-latest
needs: check-evidence-locker
env:
STAGED_DIR: evidence-locker/zastava/2025-12-02
MODULE_ROOT: docs/modules/zastava
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Package staged Zastava artefacts
run: |
test -d "$MODULE_ROOT" || { echo "missing $MODULE_ROOT" >&2; exit 1; }
tmpdir=$(mktemp -d)
rsync -a --relative \
"$MODULE_ROOT/SHA256SUMS" \
"$MODULE_ROOT/schemas/" \
"$MODULE_ROOT/exports/" \
"$MODULE_ROOT/thresholds.yaml" \
"$MODULE_ROOT/thresholds.yaml.dsse" \
"$MODULE_ROOT/kit/verify.sh" \
"$MODULE_ROOT/kit/README.md" \
"$MODULE_ROOT/kit/ed25519.pub" \
"$MODULE_ROOT/kit/zastava-kit.tzst" \
"$MODULE_ROOT/kit/zastava-kit.tzst.dsse" \
"$MODULE_ROOT/evidence/README.md" \
"$tmpdir/"
(cd "$tmpdir/docs/modules/zastava" && sha256sum --check SHA256SUMS)
tar --sort=name --mtime="UTC 1970-01-01" --owner=0 --group=0 --numeric-owner \
-cf /tmp/zastava-evidence.tar -C "$tmpdir/docs/modules/zastava" .
sha256sum /tmp/zastava-evidence.tar
- name: Upload staged artefacts (fallback)
uses: actions/upload-artifact@v4
with:
name: zastava-evidence-locker-2025-12-02
path: /tmp/zastava-evidence.tar
- name: Push to Evidence Locker
if: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN != '' && env.EVIDENCE_LOCKER_URL != '' }}
env:
TOKEN: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN }}
URL: ${{ env.EVIDENCE_LOCKER_URL }}
run: |
curl -f -X PUT "$URL/zastava/2025-12-02/zastava-evidence.tar" \
-H "Authorization: Bearer $TOKEN" \
--data-binary @/tmp/zastava-evidence.tar
- name: Skip push (missing secret or URL)
if: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN == '' || env.EVIDENCE_LOCKER_URL == '' }}
run: |
echo "Locker push skipped: set CI_EVIDENCE_LOCKER_TOKEN and EVIDENCE_LOCKER_URL to enable." >&2
View File
@@ -1,85 +0,0 @@
name: Export Center CI
on:
push:
branches: [ main ]
paths:
- 'src/ExportCenter/**'
- 'devops/export/**'
- '.gitea/workflows/export-ci.yml'
- 'docs/modules/devops/export-ci-contract.md'
pull_request:
branches: [ main, develop ]
paths:
- 'src/ExportCenter/**'
- 'devops/export/**'
- '.gitea/workflows/export-ci.yml'
- 'docs/modules/devops/export-ci-contract.md'
jobs:
export-ci:
runs-on: ubuntu-22.04
env:
DOTNET_VERSION: '10.0.100'
MINIO_ACCESS_KEY: exportci
MINIO_SECRET_KEY: exportci123
BUCKET: export-ci
ARTIFACT_DIR: ${{ github.workspace }}/.artifacts
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Export OpenSSL 1.1 shim for Mongo2Go
run: .gitea/scripts/util/enable-openssl11-shim.sh
- name: Set up .NET SDK
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Restore
run: dotnet restore src/ExportCenter/StellaOps.ExportCenter.WebService/StellaOps.ExportCenter.WebService.csproj
- name: Bring up MinIO
run: |
docker compose -f devops/export/minio-compose.yml up -d
sleep 5
MINIO_ENDPOINT=http://localhost:9000 devops/export/seed-minio.sh
- name: Build
run: dotnet build src/ExportCenter/StellaOps.ExportCenter.WebService/StellaOps.ExportCenter.WebService.csproj -c Release /p:ContinuousIntegrationBuild=true
- name: Test
run: |
mkdir -p $ARTIFACT_DIR
dotnet test src/ExportCenter/__Tests/StellaOps.ExportCenter.Tests/StellaOps.ExportCenter.Tests.csproj -c Release --logger "trx;LogFileName=export-tests.trx" --results-directory $ARTIFACT_DIR
- name: Trivy/OCI smoke
run: devops/export/trivy-smoke.sh
- name: Schema lint
run: |
python -m json.tool docs/modules/export-center/schemas/export-profile.schema.json >/dev/null
python -m json.tool docs/modules/export-center/schemas/export-manifest.schema.json >/dev/null
- name: Offline kit verify (fixtures)
run: bash docs/modules/export-center/operations/verify-export-kit.sh src/ExportCenter/__fixtures/export-kit
- name: Install Syft
run: |
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin
syft --version
- name: SBOM
run: syft dir:src/ExportCenter -o spdx-json=$ARTIFACT_DIR/exportcenter.spdx.json
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: export-ci-artifacts
path: ${{ env.ARTIFACT_DIR }}
- name: Teardown MinIO
if: always()
run: docker compose -f devops/export/minio-compose.yml down -v
View File
@@ -1,41 +0,0 @@
name: export-compat
on:
workflow_dispatch:
inputs:
image:
description: "Exporter image ref"
required: true
default: "ghcr.io/stella-ops/exporter:edge"
jobs:
compat:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Setup Trivy
uses: aquasecurity/trivy-action@v0.24.0
with:
version: latest
- name: Setup Cosign
uses: sigstore/cosign-installer@v3.6.0
- name: Run compatibility checks
env:
IMAGE: ${{ github.event.inputs.image }}
run: |
chmod +x scripts/export/trivy-compat.sh
chmod +x scripts/export/oci-verify.sh
scripts/export/trivy-compat.sh
scripts/export/oci-verify.sh
- name: Upload reports
uses: actions/upload-artifact@v4
with:
name: export-compat
path: out/export-compat/**
View File
@@ -1,46 +0,0 @@
name: exporter-ci
on:
workflow_dispatch:
pull_request:
paths:
- 'src/ExportCenter/**'
- '.gitea/workflows/exporter-ci.yml'
env:
DOTNET_CLI_TELEMETRY_OPTOUT: 1
DOTNET_NOLOGO: 1
jobs:
build-test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: '10.0.x'
- name: Restore
run: dotnet restore src/ExportCenter/StellaOps.ExportCenter.WebService/StellaOps.ExportCenter.WebService.csproj
- name: Build
run: dotnet build src/ExportCenter/StellaOps.ExportCenter.WebService/StellaOps.ExportCenter.WebService.csproj --configuration Release --no-restore
- name: Test
run: dotnet test src/ExportCenter/__Tests/StellaOps.ExportCenter.Tests/StellaOps.ExportCenter.Tests.csproj --configuration Release --no-build --verbosity normal
- name: Publish
run: |
dotnet publish src/ExportCenter/StellaOps.ExportCenter.WebService/StellaOps.ExportCenter.WebService.csproj \
--configuration Release \
--output artifacts/exporter
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: exporter-${{ github.run_id }}
path: artifacts/
retention-days: 14
View File
@@ -1,325 +0,0 @@
# .gitea/workflows/findings-ledger-ci.yml
# Findings Ledger CI with RLS migration validation (DEVOPS-LEDGER-TEN-48-001-REL)
name: Findings Ledger CI
on:
push:
branches: [main]
paths:
- 'src/Findings/**'
- '.gitea/workflows/findings-ledger-ci.yml'
- 'devops/releases/2025.09-stable.yaml'
- 'devops/releases/2025.09-airgap.yaml'
- 'devops/downloads/manifest.json'
- 'devops/release/check_release_manifest.py'
pull_request:
branches: [main, develop]
paths:
- 'src/Findings/**'
- '.gitea/workflows/findings-ledger-ci.yml'
env:
DOTNET_VERSION: '10.0.100'
POSTGRES_IMAGE: postgres:16-alpine
BUILD_CONFIGURATION: Release
jobs:
build-test:
runs-on: ubuntu-22.04
env:
TEST_RESULTS_DIR: ${{ github.workspace }}/artifacts/test-results
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup .NET ${{ env.DOTNET_VERSION }}
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Restore dependencies
run: |
dotnet restore src/Findings/StellaOps.Findings.Ledger/StellaOps.Findings.Ledger.csproj
dotnet restore src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/StellaOps.Findings.Ledger.Tests.csproj
- name: Build
run: |
dotnet build src/Findings/StellaOps.Findings.Ledger/StellaOps.Findings.Ledger.csproj \
-c ${{ env.BUILD_CONFIGURATION }} \
/p:ContinuousIntegrationBuild=true
- name: Run unit tests
run: |
mkdir -p $TEST_RESULTS_DIR
dotnet test src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/StellaOps.Findings.Ledger.Tests.csproj \
-c ${{ env.BUILD_CONFIGURATION }} \
--logger "trx;LogFileName=ledger-tests.trx" \
--results-directory $TEST_RESULTS_DIR
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: ledger-test-results
path: ${{ env.TEST_RESULTS_DIR }}
migration-validation:
runs-on: ubuntu-22.04
services:
postgres:
image: postgres:16-alpine
env:
POSTGRES_USER: ledgertest
POSTGRES_PASSWORD: ledgertest
POSTGRES_DB: ledger_test
ports:
- 5432:5432
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
env:
PGHOST: localhost
PGPORT: 5432
PGUSER: ledgertest
PGPASSWORD: ledgertest
PGDATABASE: ledger_test
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup .NET ${{ env.DOTNET_VERSION }}
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Install PostgreSQL client
run: |
sudo apt-get update
sudo apt-get install -y postgresql-client
- name: Wait for PostgreSQL
run: |
until pg_isready -h $PGHOST -p $PGPORT -U $PGUSER; do
echo "Waiting for PostgreSQL..."
sleep 2
done
- name: Apply prerequisite migrations (001-006)
run: |
set -euo pipefail
MIGRATION_DIR="src/Findings/StellaOps.Findings.Ledger/migrations"
for migration in 001_initial.sql 002_add_evidence_bundle_ref.sql 002_projection_offsets.sql \
003_policy_rationale.sql 004_ledger_attestations.sql 004_risk_fields.sql \
005_risk_fields.sql 006_orchestrator_airgap.sql; do
if [ -f "$MIGRATION_DIR/$migration" ]; then
echo "Applying migration: $migration"
psql -h $PGHOST -p $PGPORT -U $PGUSER -d $PGDATABASE -f "$MIGRATION_DIR/$migration"
fi
done
- name: Apply RLS migration (007_enable_rls.sql)
run: |
set -euo pipefail
echo "Applying RLS migration..."
psql -h $PGHOST -p $PGPORT -U $PGUSER -d $PGDATABASE \
-f src/Findings/StellaOps.Findings.Ledger/migrations/007_enable_rls.sql
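# The migration SQL is not inlined here; judging from the checks below, each of the
# eight protected tables is expected to receive a pair of statements of roughly this
# shape (sketch only; the tenant column name is an assumption):
#   ALTER TABLE public.ledger_events ENABLE ROW LEVEL SECURITY;
#   CREATE POLICY ledger_events_tenant_isolation ON public.ledger_events
#     USING (tenant_id = findings_ledger_app.require_current_tenant());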
- name: Validate RLS configuration
run: |
set -euo pipefail
echo "Validating RLS is enabled on all protected tables..."
# Check RLS enabled
TABLES_WITH_RLS=$(psql -h $PGHOST -p $PGPORT -U $PGUSER -d $PGDATABASE -t -A -c "
SELECT COUNT(*)
FROM pg_class c
JOIN pg_namespace n ON c.relnamespace = n.oid
WHERE n.nspname = 'public'
AND c.relrowsecurity = true
AND c.relname IN (
'ledger_events', 'ledger_merkle_roots', 'findings_projection',
'finding_history', 'triage_actions', 'ledger_attestations',
'orchestrator_exports', 'airgap_imports'
);
")
if [ "$TABLES_WITH_RLS" -ne 8 ]; then
echo "::error::Expected 8 tables with RLS enabled, found $TABLES_WITH_RLS"
exit 1
fi
echo "✓ All 8 tables have RLS enabled"
# Check policies exist
POLICIES=$(psql -h $PGHOST -p $PGPORT -U $PGUSER -d $PGDATABASE -t -A -c "
SELECT COUNT(DISTINCT tablename)
FROM pg_policies
WHERE schemaname = 'public'
AND policyname LIKE '%_tenant_isolation';
")
if [ "$POLICIES" -ne 8 ]; then
echo "::error::Expected 8 tenant isolation policies, found $POLICIES"
exit 1
fi
echo "✓ All 8 tenant isolation policies created"
# Check tenant function exists
FUNC_EXISTS=$(psql -h $PGHOST -p $PGPORT -U $PGUSER -d $PGDATABASE -t -A -c "
SELECT COUNT(*)
FROM pg_proc p
JOIN pg_namespace n ON p.pronamespace = n.oid
WHERE p.proname = 'require_current_tenant'
AND n.nspname = 'findings_ledger_app';
")
if [ "$FUNC_EXISTS" -ne 1 ]; then
echo "::error::Tenant function 'require_current_tenant' not found"
exit 1
fi
echo "✓ Tenant function 'findings_ledger_app.require_current_tenant()' exists"
echo ""
echo "=== RLS Migration Validation PASSED ==="
- name: Test rollback migration
run: |
set -euo pipefail
echo "Testing rollback migration..."
psql -h $PGHOST -p $PGPORT -U $PGUSER -d $PGDATABASE \
-f src/Findings/StellaOps.Findings.Ledger/migrations/007_enable_rls_rollback.sql
# Verify RLS is disabled
TABLES_WITH_RLS=$(psql -h $PGHOST -p $PGPORT -U $PGUSER -d $PGDATABASE -t -A -c "
SELECT COUNT(*)
FROM pg_class c
JOIN pg_namespace n ON c.relnamespace = n.oid
WHERE n.nspname = 'public'
AND c.relrowsecurity = true
AND c.relname IN (
'ledger_events', 'ledger_merkle_roots', 'findings_projection',
'finding_history', 'triage_actions', 'ledger_attestations',
'orchestrator_exports', 'airgap_imports'
);
")
if [ "$TABLES_WITH_RLS" -ne 0 ]; then
echo "::error::Rollback failed - $TABLES_WITH_RLS tables still have RLS enabled"
exit 1
fi
echo "✓ Rollback successful - RLS disabled on all tables"
- name: Validate release manifests (production)
run: |
set -euo pipefail
python devops/release/check_release_manifest.py
- name: Re-apply RLS migration (idempotency check)
run: |
set -euo pipefail
echo "Re-applying RLS migration to verify idempotency..."
psql -h $PGHOST -p $PGPORT -U $PGUSER -d $PGDATABASE \
-f src/Findings/StellaOps.Findings.Ledger/migrations/007_enable_rls.sql
echo "✓ Migration is idempotent"
generate-manifest:
runs-on: ubuntu-22.04
needs: [build-test, migration-validation]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Generate migration manifest
run: |
set -euo pipefail
MIGRATION_FILE="src/Findings/StellaOps.Findings.Ledger/migrations/007_enable_rls.sql"
ROLLBACK_FILE="src/Findings/StellaOps.Findings.Ledger/migrations/007_enable_rls_rollback.sql"
MANIFEST_DIR="out/findings-ledger/migrations"
mkdir -p "$MANIFEST_DIR"
# Compute SHA256 hashes
MIGRATION_SHA=$(sha256sum "$MIGRATION_FILE" | awk '{print $1}')
ROLLBACK_SHA=$(sha256sum "$ROLLBACK_FILE" | awk '{print $1}')
CREATED_AT=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
cat > "$MANIFEST_DIR/007_enable_rls.manifest.json" <<EOF
{
"\$schema": "https://stella-ops.org/schemas/migration-manifest.v1.json",
"schemaVersion": "1.0.0",
"migrationId": "007_enable_rls",
"module": "findings-ledger",
"version": "2025.12.0",
"createdAt": "$CREATED_AT",
"description": "Enable Row-Level Security for Findings Ledger tenant isolation",
"taskId": "LEDGER-TEN-48-001-DEV",
"contractRef": "CONTRACT-FINDINGS-LEDGER-RLS-011",
"database": {
"engine": "postgresql",
"minVersion": "16.0"
},
"files": {
"apply": {
"path": "007_enable_rls.sql",
"sha256": "$MIGRATION_SHA"
},
"rollback": {
"path": "007_enable_rls_rollback.sql",
"sha256": "$ROLLBACK_SHA"
}
},
"affects": {
"tables": [
"ledger_events",
"ledger_merkle_roots",
"findings_projection",
"finding_history",
"triage_actions",
"ledger_attestations",
"orchestrator_exports",
"airgap_imports"
],
"schemas": ["public", "findings_ledger_app"],
"roles": ["findings_ledger_admin"]
},
"prerequisites": [
"006_orchestrator_airgap"
],
"validation": {
"type": "rls-check",
"expectedTables": 8,
"expectedPolicies": 8,
"tenantFunction": "findings_ledger_app.require_current_tenant"
},
"offlineKit": {
"includedInBundle": true,
"requiresManualApply": true,
"applyOrder": 7
}
}
EOF
echo "Generated migration manifest at $MANIFEST_DIR/007_enable_rls.manifest.json"
cat "$MANIFEST_DIR/007_enable_rls.manifest.json"
- name: Copy migration files for offline-kit
run: |
set -euo pipefail
OFFLINE_DIR="out/findings-ledger/offline-kit/migrations"
mkdir -p "$OFFLINE_DIR"
cp src/Findings/StellaOps.Findings.Ledger/migrations/007_enable_rls.sql "$OFFLINE_DIR/"
cp src/Findings/StellaOps.Findings.Ledger/migrations/007_enable_rls_rollback.sql "$OFFLINE_DIR/"
cp out/findings-ledger/migrations/007_enable_rls.manifest.json "$OFFLINE_DIR/"
echo "Offline-kit migration files prepared"
ls -la "$OFFLINE_DIR"
- name: Upload migration artefacts
uses: actions/upload-artifact@v4
with:
name: findings-ledger-migrations
path: out/findings-ledger/
if-no-files-found: error
View File
@@ -1,42 +0,0 @@
name: graph-load
on:
workflow_dispatch:
inputs:
target:
description: "Graph API base URL"
required: true
default: "http://localhost:5000"
users:
description: "Virtual users"
required: false
default: "8"
duration:
description: "Duration seconds"
required: false
default: "60"
jobs:
load-test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Install k6
run: |
# k6 is not in the stock Ubuntu archives; add Grafana's apt repo first
# (key ID as documented in the k6 installation guide).
sudo gpg --no-default-keyring --keyring /usr/share/keyrings/k6-archive-keyring.gpg \
--keyserver hkp://keyserver.ubuntu.com:80 --recv-keys C5AD17C747E3415A3642D57D77C6C491D6AC1D69
echo "deb [signed-by=/usr/share/keyrings/k6-archive-keyring.gpg] https://dl.k6.io/deb stable main" | sudo tee /etc/apt/sources.list.d/k6.list
sudo apt-get update -qq
sudo apt-get install -y k6
- name: Run graph load test
run: |
chmod +x scripts/graph/load-test.sh
TARGET="${{ github.event.inputs.target }}" USERS="${{ github.event.inputs.users }}" DURATION="${{ github.event.inputs.duration }}" scripts/graph/load-test.sh
- name: Upload results
uses: actions/upload-artifact@v4
with:
name: graph-load-summary
path: out/graph-load/**
View File
@@ -1,57 +0,0 @@
name: graph-ui-sim
on:
workflow_dispatch:
inputs:
graph_api:
description: "Graph API base URL"
required: true
default: "http://localhost:5000"
graph_ui:
description: "Graph UI base URL"
required: true
default: "http://localhost:4200"
perf_budget_ms:
description: "Perf budget in ms"
required: false
default: "3000"
jobs:
ui-and-sim:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: "18"
- name: Install Playwright deps
run: npx playwright install --with-deps chromium
- name: Run UI perf probe
env:
GRAPH_UI_BASE: ${{ github.event.inputs.graph_ui }}
GRAPH_UI_BUDGET_MS: ${{ github.event.inputs.perf_budget_ms }}
OUT: out/graph-ui-perf
run: |
npx ts-node scripts/graph/ui-perf.ts
- name: Run simulation smoke
env:
TARGET: ${{ github.event.inputs.graph_api }}
run: |
chmod +x scripts/graph/simulation-smoke.sh
scripts/graph/simulation-smoke.sh
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: graph-ui-sim
path: |
out/graph-ui-perf/**
out/graph-sim/**
View File
@@ -1,68 +0,0 @@
name: ICS/KISA Feed Refresh
on:
schedule:
- cron: '0 2 * * MON'
workflow_dispatch:
inputs:
live_fetch:
description: 'Attempt live RSS fetch (fallback to samples on failure)'
required: false
default: true
type: boolean
offline_snapshot:
description: 'Force offline samples only (no network)'
required: false
default: false
type: boolean
jobs:
refresh:
runs-on: ubuntu-22.04
permissions:
contents: read
env:
ICSCISA_FEED_URL: ${{ secrets.ICSCISA_FEED_URL }}
KISA_FEED_URL: ${{ secrets.KISA_FEED_URL }}
FEED_GATEWAY_HOST: concelier-webservice
FEED_GATEWAY_SCHEME: http
LIVE_FETCH: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.live_fetch || 'true' }}
OFFLINE_SNAPSHOT: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.offline_snapshot || 'false' }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set run metadata
id: meta
run: |
RUN_DATE=$(date -u +%Y%m%d)
RUN_ID="icscisa-kisa-$(date -u +%Y%m%dT%H%M%SZ)"
echo "run_date=$RUN_DATE" >> $GITHUB_OUTPUT
echo "run_id=$RUN_ID" >> $GITHUB_OUTPUT
echo "RUN_DATE=$RUN_DATE" >> $GITHUB_ENV
echo "RUN_ID=$RUN_ID" >> $GITHUB_ENV
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Run ICS/KISA refresh
run: |
python scripts/feeds/run_icscisa_kisa_refresh.py \
--out-dir out/feeds/icscisa-kisa \
--run-date "${{ steps.meta.outputs.run_date }}" \
--run-id "${{ steps.meta.outputs.run_id }}"
- name: Show fetch log
run: cat out/feeds/icscisa-kisa/${{ steps.meta.outputs.run_date }}/fetch.log
- name: Upload refresh artifacts
uses: actions/upload-artifact@v4
with:
name: icscisa-kisa-${{ steps.meta.outputs.run_date }}
path: out/feeds/icscisa-kisa/${{ steps.meta.outputs.run_date }}
if-no-files-found: error
retention-days: 21
View File
@@ -1,375 +0,0 @@
# Sprint 3500.0004.0003 - T6: Integration Tests CI Gate
# Runs integration tests on PR and gates merges on failures
name: integration-tests-gate
on:
pull_request:
branches: [main, develop]
paths:
- 'src/**'
- 'src/__Tests/Integration/**'
- 'src/__Tests/__Benchmarks/golden-corpus/**'
push:
branches: [main]
schedule:
- cron: '0 2 * * *' # nightly; required by the schedule-gated determinism job below (time is an assumption)
workflow_dispatch:
inputs:
run_performance:
description: 'Run performance baseline tests'
type: boolean
default: false
run_airgap:
description: 'Run air-gap tests'
type: boolean
default: false
concurrency:
group: integration-${{ github.ref }}
cancel-in-progress: true
jobs:
# ==========================================================================
# T6-AC1: Integration tests run on PR
# ==========================================================================
integration-tests:
name: Integration Tests
runs-on: ubuntu-latest
timeout-minutes: 30
services:
postgres:
image: postgres:16-alpine
env:
POSTGRES_USER: stellaops
POSTGRES_PASSWORD: test-only
POSTGRES_DB: stellaops_test
ports:
- 5432:5432
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Restore dependencies
run: |
# dotnet CLI does not expand ** globs itself; let the shell enumerate projects.
shopt -s globstar
for proj in src/__Tests/Integration/**/*.csproj; do dotnet restore "$proj"; done
- name: Build integration tests
run: |
shopt -s globstar
for proj in src/__Tests/Integration/**/*.csproj; do dotnet build "$proj" --configuration Release --no-restore; done
- name: Run Proof Chain Tests
run: |
dotnet test src/__Tests/Integration/StellaOps.Integration.ProofChain \
--configuration Release \
--no-build \
--logger "trx;LogFileName=proofchain.trx" \
--results-directory ./TestResults
env:
ConnectionStrings__StellaOps: "Host=localhost;Database=stellaops_test;Username=stellaops;Password=test-only"
- name: Run Reachability Tests
run: |
dotnet test src/__Tests/Integration/StellaOps.Integration.Reachability \
--configuration Release \
--no-build \
--logger "trx;LogFileName=reachability.trx" \
--results-directory ./TestResults
- name: Run Unknowns Workflow Tests
run: |
dotnet test src/__Tests/Integration/StellaOps.Integration.Unknowns \
--configuration Release \
--no-build \
--logger "trx;LogFileName=unknowns.trx" \
--results-directory ./TestResults
- name: Run Determinism Tests
run: |
dotnet test src/__Tests/Integration/StellaOps.Integration.Determinism \
--configuration Release \
--no-build \
--logger "trx;LogFileName=determinism.trx" \
--results-directory ./TestResults
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: integration-test-results
path: TestResults/**/*.trx
- name: Publish test summary
uses: dorny/test-reporter@v1
if: always()
with:
name: Integration Test Results
path: TestResults/**/*.trx
reporter: dotnet-trx
# ==========================================================================
# T6-AC2: Corpus validation on release branch
# ==========================================================================
corpus-validation:
name: Golden Corpus Validation
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/main' || github.event_name == 'workflow_dispatch'
timeout-minutes: 15
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Validate corpus manifest
run: |
python3 -c "
import json
import hashlib
import os
manifest_path = 'src/__Tests/__Benchmarks/golden-corpus/corpus-manifest.json'
with open(manifest_path) as f:
manifest = json.load(f)
print(f'Corpus version: {manifest.get(\"corpus_version\", \"unknown\")}')
print(f'Total cases: {manifest.get(\"total_cases\", 0)}')
errors = []
for case in manifest.get('cases', []):
case_path = os.path.join('src/__Tests/__Benchmarks/golden-corpus', case['path'])
if not os.path.isdir(case_path):
errors.append(f'Missing case directory: {case_path}')
else:
required_files = ['case.json', 'expected-score.json']
for f in required_files:
if not os.path.exists(os.path.join(case_path, f)):
errors.append(f'Missing file: {case_path}/{f}')
if errors:
print('\\nValidation errors:')
for e in errors:
print(f' - {e}')
exit(1)
else:
print('\\nCorpus validation passed!')
"
- name: Run corpus scoring tests
run: |
dotnet test src/__Tests/Integration/StellaOps.Integration.Determinism \
--filter "Category=GoldenCorpus" \
--configuration Release \
--logger "trx;LogFileName=corpus.trx" \
--results-directory ./TestResults
# ==========================================================================
# T6-AC3: Determinism tests on nightly
# ==========================================================================
nightly-determinism:
name: Nightly Determinism Check
runs-on: ubuntu-latest
if: github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && github.event.inputs.run_performance == 'true')
timeout-minutes: 45
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Run full determinism suite
run: |
dotnet test src/__Tests/Integration/StellaOps.Integration.Determinism \
--configuration Release \
--logger "trx;LogFileName=determinism-full.trx" \
--results-directory ./TestResults
- name: Run cross-run determinism check
run: |
# Run scoring 3 times and compare hashes
for i in 1 2 3; do
dotnet test src/__Tests/Integration/StellaOps.Integration.Determinism \
--filter "FullyQualifiedName~IdenticalInput_ProducesIdenticalHash" \
--results-directory ./TestResults/run-$i
done
# Compare all results
echo "Comparing determinism across runs..."
- name: Upload determinism results
uses: actions/upload-artifact@v4
with:
name: nightly-determinism-results
path: TestResults/**
# ==========================================================================
# T6-AC4: Test coverage reported to dashboard
# ==========================================================================
coverage-report:
name: Coverage Report
runs-on: ubuntu-latest
needs: [integration-tests]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Run tests with coverage
run: |
# Shell-expand the project glob; dotnet test accepts one project at a time.
shopt -s globstar
for proj in src/__Tests/Integration/**/*.csproj; do
dotnet test "$proj" \
--configuration Release \
--collect:"XPlat Code Coverage" \
--results-directory ./TestResults/Coverage
done
- name: Generate coverage report
uses: danielpalme/ReportGenerator-GitHub-Action@5.2.0
with:
reports: TestResults/Coverage/**/coverage.cobertura.xml
targetdir: TestResults/CoverageReport
reporttypes: 'Html;Cobertura;MarkdownSummary'
- name: Upload coverage report
uses: actions/upload-artifact@v4
with:
name: coverage-report
path: TestResults/CoverageReport/**
- name: Add coverage to PR comment
uses: marocchino/sticky-pull-request-comment@v2
if: github.event_name == 'pull_request'
with:
recreate: true
path: TestResults/CoverageReport/Summary.md
# ==========================================================================
# T6-AC5: Flaky test quarantine process
# ==========================================================================
flaky-test-check:
name: Flaky Test Detection
runs-on: ubuntu-latest
needs: [integration-tests]
if: failure()
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Check for known flaky tests
run: |
# Check if failure is from a known flaky test
QUARANTINE_FILE=".github/flaky-tests-quarantine.json"
if [ -f "$QUARANTINE_FILE" ]; then
echo "Checking against quarantine list..."
# Implementation would compare failed tests against quarantine
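# Sketch (assumed quarantine shape: {"tests": ["Ns.Class.Test", ...]};
# failed-tests.txt is a hypothetical export from an earlier step).
while read -r failed; do
  if jq -e --arg t "$failed" '.tests | index($t)' "$QUARANTINE_FILE" > /dev/null; then
    echo "Known flaky (quarantined): $failed"
  else
    echo "Not quarantined: $failed"
  fi
done < failed-tests.txt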
fi
- name: Create flaky test issue
uses: actions/github-script@v7
if: always()
with:
script: |
// After 2 consecutive failures, create issue for quarantine review
console.log('Checking for flaky test patterns...');
// Implementation would analyze test history
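// Sketch: open a single review issue when none is pending. The
// 'flaky-test' label is an assumed convention, not an existing one.
const { data: existing } = await github.rest.issues.listForRepo({
  owner: context.repo.owner,
  repo: context.repo.repo,
  labels: 'flaky-test',
  state: 'open',
});
if (existing.length === 0) {
  await github.rest.issues.create({
    owner: context.repo.owner,
    repo: context.repo.repo,
    title: `Flaky test review: integration run ${context.runId}`,
    body: 'Integration tests failed consecutively; review history for quarantine candidates.',
    labels: ['flaky-test'],
  });
}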
# ==========================================================================
# Performance Tests (optional, on demand)
# ==========================================================================
performance-tests:
name: Performance Baseline Tests
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' && github.event.inputs.run_performance == 'true'
timeout-minutes: 30
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Run performance tests
run: |
dotnet test src/__Tests/Integration/StellaOps.Integration.Performance \
--configuration Release \
--logger "trx;LogFileName=performance.trx" \
--results-directory ./TestResults
- name: Upload performance report
uses: actions/upload-artifact@v4
with:
name: performance-report
path: |
TestResults/**
src/__Tests/Integration/StellaOps.Integration.Performance/output/**
- name: Check for regressions
run: |
# Check if any test exceeded 20% threshold
if [ -f "src/__Tests/Integration/StellaOps.Integration.Performance/output/performance-report.json" ]; then
python3 -c "
import json
with open('src/__Tests/Integration/StellaOps.Integration.Performance/output/performance-report.json') as f:
report = json.load(f)
regressions = [m for m in report.get('Metrics', []) if m.get('DeltaPercent', 0) > 20]
if regressions:
print('Performance regressions detected!')
for r in regressions:
print(f' {r[\"Name\"]}: +{r[\"DeltaPercent\"]:.1f}%')
exit(1)
print('No performance regressions detected.')
"
fi
# ==========================================================================
# Air-Gap Tests (optional, on demand)
# ==========================================================================
airgap-tests:
name: Air-Gap Integration Tests
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' && github.event.inputs.run_airgap == 'true'
timeout-minutes: 30
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: "10.0.100"
- name: Run air-gap tests
run: |
dotnet test src/__Tests/Integration/StellaOps.Integration.AirGap \
--configuration Release \
--logger "trx;LogFileName=airgap.trx" \
--results-directory ./TestResults
- name: Upload air-gap test results
uses: actions/upload-artifact@v4
with:
name: airgap-test-results
path: TestResults/**

@@ -1,128 +0,0 @@
name: Interop E2E Tests
on:
pull_request:
paths:
- 'src/Scanner/**'
- 'src/Excititor/**'
- 'src/__Tests/interop/**'
schedule:
- cron: '0 6 * * *' # Nightly at 6 AM UTC
workflow_dispatch:
env:
DOTNET_VERSION: '10.0.100'
jobs:
interop-tests:
runs-on: ubuntu-22.04
strategy:
fail-fast: false
matrix:
format: [cyclonedx, spdx]
arch: [amd64]
include:
- format: cyclonedx
format_flag: cyclonedx-json
- format: spdx
format_flag: spdx-json
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install Syft
run: |
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin
syft --version
- name: Install Grype
run: |
curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin
grype --version
- name: Install cosign
run: |
curl -sSfL https://github.com/sigstore/cosign/releases/latest/download/cosign-linux-amd64 -o /usr/local/bin/cosign
chmod +x /usr/local/bin/cosign
cosign version
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
- name: Restore dependencies
run: dotnet restore src/StellaOps.sln
- name: Build Stella CLI
run: dotnet build src/Cli/StellaOps.Cli/StellaOps.Cli.csproj -c Release
- name: Build interop tests
run: dotnet build src/__Tests/interop/StellaOps.Interop.Tests/StellaOps.Interop.Tests.csproj
- name: Run interop tests
run: |
dotnet test src/__Tests/interop/StellaOps.Interop.Tests \
--filter "Format=${{ matrix.format }}" \
--logger "trx;LogFileName=interop-${{ matrix.format }}.trx" \
--logger "console;verbosity=detailed" \
--results-directory ./results \
-- RunConfiguration.TestSessionTimeout=900000
- name: Generate parity report
if: always()
run: |
# TODO: Generate parity report from test results
echo '{"format": "${{ matrix.format }}", "parityPercent": 0}' > ./results/parity-report-${{ matrix.format }}.json
- name: Upload test results
if: always()
uses: actions/upload-artifact@v4
with:
name: interop-test-results-${{ matrix.format }}
path: ./results/
- name: Check parity threshold
if: always()
run: |
PARITY=$(jq '.parityPercent' ./results/parity-report-${{ matrix.format }}.json 2>/dev/null || echo "0")
echo "Parity for ${{ matrix.format }}: ${PARITY}%"
if (( $(echo "$PARITY < 95" | bc -l 2>/dev/null || echo "1") )); then
echo "::warning::Findings parity ${PARITY}% is below 95% threshold for ${{ matrix.format }}"
# Don't fail the build yet - this is initial implementation
# exit 1
fi
summary:
runs-on: ubuntu-22.04
needs: interop-tests
if: always()
steps:
- name: Download all artifacts
uses: actions/download-artifact@v4
with:
path: ./all-results
- name: Generate summary
run: |
echo "## Interop Test Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "| Format | Status |" >> $GITHUB_STEP_SUMMARY
echo "|--------|--------|" >> $GITHUB_STEP_SUMMARY
for format in cyclonedx spdx; do
if [ -f "./all-results/interop-test-results-${format}/parity-report-${format}.json" ]; then
PARITY=$(jq -r '.parityPercent // 0' "./all-results/interop-test-results-${format}/parity-report-${format}.json")
if (( $(echo "$PARITY >= 95" | bc -l 2>/dev/null || echo "0") )); then
STATUS="✅ Pass (${PARITY}%)"
else
STATUS="⚠️ Below threshold (${PARITY}%)"
fi
else
STATUS="❌ No results"
fi
echo "| ${format} | ${STATUS} |" >> $GITHUB_STEP_SUMMARY
done

@@ -1,81 +0,0 @@
name: Ledger OpenAPI CI
on:
workflow_dispatch:
push:
branches: [main]
paths:
- 'api/ledger/**'
- 'devops/ledger/**'
pull_request:
paths:
- 'api/ledger/**'
jobs:
validate-oas:
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
- name: Install tools
run: |
npm install -g @stoplight/spectral-cli
npm install -g @openapitools/openapi-generator-cli
- name: Validate OpenAPI spec
run: |
chmod +x devops/ledger/validate-oas.sh
devops/ledger/validate-oas.sh
- name: Upload validation report
uses: actions/upload-artifact@v4
with:
name: ledger-oas-validation-${{ github.run_number }}
path: |
out/ledger/oas/lint-report.json
out/ledger/oas/validation-report.txt
out/ledger/oas/spec-summary.json
if-no-files-found: warn
check-wellknown:
runs-on: ubuntu-22.04
needs: validate-oas
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Check .well-known/openapi structure
run: |
# Validate .well-known structure if exists
if [ -d ".well-known" ]; then
echo "Checking .well-known/openapi..."
if [ -f ".well-known/openapi.json" ]; then
python3 -c "import json; json.load(open('.well-known/openapi.json'))"
echo ".well-known/openapi.json is valid JSON"
fi
else
echo "[info] .well-known directory not present (OK for dev)"
fi
deprecation-check:
runs-on: ubuntu-22.04
needs: validate-oas
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Check deprecation policy
run: |
if [ -f "devops/ledger/deprecation-policy.yaml" ]; then
echo "Validating deprecation policy..."
python3 -c "import yaml; yaml.safe_load(open('devops/ledger/deprecation-policy.yaml'))"
echo "Deprecation policy is valid"
else
echo "[info] No deprecation policy yet (OK for initial setup)"
fi

@@ -1,101 +0,0 @@
name: Ledger Packs CI
on:
workflow_dispatch:
inputs:
snapshot_id:
description: 'Snapshot ID (leave empty for auto)'
required: false
default: ''
sign:
description: 'Sign pack (1=yes)'
required: false
default: '0'
push:
branches: [main]
paths:
- 'devops/ledger/**'
jobs:
build-pack:
runs-on: ubuntu-22.04
env:
COSIGN_PRIVATE_KEY_B64: ${{ secrets.COSIGN_PRIVATE_KEY_B64 }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup cosign
uses: sigstore/cosign-installer@v3
- name: Configure signing
run: |
if [ -z "${COSIGN_PRIVATE_KEY_B64}" ] || [ "${{ github.event.inputs.sign }}" = "1" ]; then
echo "COSIGN_ALLOW_DEV_KEY=1" >> $GITHUB_ENV
echo "COSIGN_PASSWORD=stellaops-dev" >> $GITHUB_ENV
fi
- name: Build pack
run: |
chmod +x devops/ledger/build-pack.sh
SNAPSHOT_ID="${{ github.event.inputs.snapshot_id }}"
if [ -z "$SNAPSHOT_ID" ]; then
SNAPSHOT_ID="ci-$(date +%Y%m%d%H%M%S)"
fi
SIGN_FLAG=""
if [ "${{ github.event.inputs.sign }}" = "1" ] || [ -n "${COSIGN_PRIVATE_KEY_B64}" ]; then
SIGN_FLAG="--sign"
fi
SNAPSHOT_ID="$SNAPSHOT_ID" devops/ledger/build-pack.sh $SIGN_FLAG
- name: Verify checksums
run: |
cd out/ledger/packs
for f in *.SHA256SUMS; do
if [ -f "$f" ]; then
sha256sum -c "$f"
fi
done
- name: Upload pack
uses: actions/upload-artifact@v4
with:
name: ledger-pack-${{ github.run_number }}
path: |
out/ledger/packs/*.pack.tar.gz
out/ledger/packs/*.SHA256SUMS
out/ledger/packs/*.dsse.json
if-no-files-found: warn
retention-days: 30
verify-pack:
runs-on: ubuntu-22.04
needs: build-pack
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Download pack
uses: actions/download-artifact@v4
with:
name: ledger-pack-${{ github.run_number }}
path: out/ledger/packs/
- name: Verify pack structure
run: |
cd out/ledger/packs
for pack in *.pack.tar.gz; do
if [ -f "$pack" ]; then
echo "Verifying $pack..."
tar -tzf "$pack" | head -20
# Extract and check manifest
tar -xzf "$pack" -C /tmp manifest.json 2>/dev/null || true
if [ -f /tmp/manifest.json ]; then
python3 -c "import json; json.load(open('/tmp/manifest.json'))"
echo "Pack manifest is valid JSON"
fi
fi
done

@@ -1,299 +0,0 @@
name: License Audit
on:
pull_request:
paths:
- '**/*.csproj'
- '**/package.json'
- '**/package-lock.json'
- 'Directory.Build.props'
- 'Directory.Packages.props'
- 'NOTICE.md'
- 'third-party-licenses/**'
- 'docs/legal/**'
- '.gitea/workflows/license-audit.yml'
- '.gitea/scripts/validate/validate-licenses.sh'
push:
branches: [ main ]
paths:
- '**/*.csproj'
- '**/package.json'
- '**/package-lock.json'
- 'Directory.Build.props'
- 'Directory.Packages.props'
schedule:
# Weekly audit every Sunday at 00:00 UTC
- cron: '0 0 * * 0'
workflow_dispatch:
inputs:
full_scan:
description: 'Run full transitive dependency scan'
required: false
default: 'false'
type: boolean
jobs:
nuget-license-audit:
name: NuGet License Audit
runs-on: ubuntu-22.04
env:
DOTNET_NOLOGO: 1
DOTNET_CLI_TELEMETRY_OPTOUT: 1
TZ: UTC
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Setup .NET 10
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100
include-prerelease: true
- name: Cache NuGet packages
uses: actions/cache@v4
with:
path: |
~/.nuget/packages
.nuget/packages
key: license-audit-nuget-${{ runner.os }}-${{ hashFiles('**/*.csproj') }}
- name: Install dotnet-delice
run: dotnet tool install --global dotnet-delice || true
- name: Extract NuGet licenses
run: |
mkdir -p out/license-audit
# List packages from key projects
for proj in \
src/Scanner/StellaOps.Scanner.WebService/StellaOps.Scanner.WebService.csproj \
src/Cli/StellaOps.Cli/StellaOps.Cli.csproj \
src/Authority/StellaOps.Authority/StellaOps.Authority.WebService/StellaOps.Authority.WebService.csproj \
src/Concelier/StellaOps.Concelier.WebService/StellaOps.Concelier.WebService.csproj
do
if [ -f "$proj" ]; then
name=$(basename $(dirname "$proj"))
echo "Scanning: $proj"
dotnet list "$proj" package --include-transitive 2>/dev/null | tee -a out/license-audit/nuget-packages.txt || true
fi
done
- name: Validate against allowlist
run: |
bash .gitea/scripts/validate/validate-licenses.sh nuget out/license-audit/nuget-packages.txt
- name: Upload NuGet license report
uses: actions/upload-artifact@v4
with:
name: nuget-license-report
path: out/license-audit
retention-days: 30
npm-license-audit:
name: npm License Audit
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'npm'
cache-dependency-path: src/Web/StellaOps.Web/package-lock.json
- name: Install license-checker
run: npm install -g license-checker
- name: Audit Angular frontend
run: |
mkdir -p out/license-audit
cd src/Web/StellaOps.Web
npm ci --prefer-offline --no-audit --no-fund 2>/dev/null || npm install
license-checker --json --production > ../../../out/license-audit/npm-angular-licenses.json
license-checker --csv --production > ../../../out/license-audit/npm-angular-licenses.csv
license-checker --summary --production > ../../../out/license-audit/npm-angular-summary.txt
- name: Audit DevPortal
run: |
cd src/DevPortal/StellaOps.DevPortal.Site
if [ -f package-lock.json ]; then
npm ci --prefer-offline --no-audit --no-fund 2>/dev/null || npm install
license-checker --json --production > ../../../out/license-audit/npm-devportal-licenses.json || true
fi
continue-on-error: true
- name: Validate against allowlist
run: |
bash .gitea/scripts/validate/validate-licenses.sh npm out/license-audit/npm-angular-licenses.json
- name: Upload npm license report
uses: actions/upload-artifact@v4
with:
name: npm-license-report
path: out/license-audit
retention-days: 30
vendored-license-check:
name: Vendored Components Check
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Verify vendored license files exist
run: |
echo "Checking vendored license files..."
# Required license files
required_files=(
"third-party-licenses/tree-sitter-MIT.txt"
"third-party-licenses/tree-sitter-ruby-MIT.txt"
"third-party-licenses/AlexMAS.GostCryptography-MIT.txt"
)
missing=0
for file in "${required_files[@]}"; do
if [ ! -f "$file" ]; then
echo "ERROR: Missing required license file: $file"
missing=$((missing + 1))
else
echo "OK: $file"
fi
done
if [ $missing -gt 0 ]; then
echo "ERROR: $missing required license file(s) missing"
exit 1
fi
echo "All vendored license files present."
- name: Verify NOTICE.md is up to date
run: |
echo "Checking NOTICE.md references..."
# Check that vendored components are mentioned in NOTICE.md
for component in "tree-sitter" "AlexMAS.GostCryptography" "CryptoPro"; do
if ! grep -q "$component" NOTICE.md; then
echo "WARNING: $component not mentioned in NOTICE.md"
else
echo "OK: $component referenced in NOTICE.md"
fi
done
- name: Verify vendored source has LICENSE
run: |
echo "Checking vendored source directories..."
# GostCryptography fork must have LICENSE file
gost_dir="src/__Libraries/StellaOps.Cryptography.Plugin.CryptoPro/third_party/AlexMAS.GostCryptography"
if [ -d "$gost_dir" ]; then
if [ ! -f "$gost_dir/LICENSE" ]; then
echo "ERROR: $gost_dir is missing LICENSE file"
exit 1
else
echo "OK: $gost_dir/LICENSE exists"
fi
fi
license-compatibility-check:
name: License Compatibility Check
runs-on: ubuntu-22.04
needs: [nuget-license-audit, npm-license-audit]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Download NuGet report
uses: actions/download-artifact@v4
with:
name: nuget-license-report
path: out/nuget
- name: Download npm report
uses: actions/download-artifact@v4
with:
name: npm-license-report
path: out/npm
- name: Check for incompatible licenses
run: |
echo "Checking for AGPL-3.0-or-later incompatible licenses..."
# Known incompatible licenses (SPDX identifiers)
incompatible=(
"GPL-2.0-only"
"SSPL-1.0"
"BUSL-1.1"
"Commons-Clause"
"Proprietary"
)
found_issues=0
# Check npm report
if [ -f out/npm/npm-angular-licenses.json ]; then
for license in "${incompatible[@]}"; do
if grep -qi "\"$license\"" out/npm/npm-angular-licenses.json; then
echo "ERROR: Incompatible license found in npm dependencies: $license"
found_issues=$((found_issues + 1))
fi
done
fi
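# Sketch: scan the NuGet text report as well (assumption: it is raw
# `dotnet list package` output, which seldom carries SPDX ids, so a
# hit here is a flag for manual review rather than proof).
if [ -f out/nuget/nuget-packages.txt ]; then
  for license in "${incompatible[@]}"; do
    if grep -qi "$license" out/nuget/nuget-packages.txt; then
      echo "ERROR: Incompatible license marker in NuGet report: $license"
      found_issues=$((found_issues + 1))
    fi
  done
fi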
if [ $found_issues -gt 0 ]; then
echo "ERROR: Found $found_issues incompatible license(s)"
exit 1
fi
echo "All licenses compatible with AGPL-3.0-or-later"
- name: Generate combined report
run: |
mkdir -p out/combined
cat > out/combined/license-audit-summary.md << EOF
# License Audit Summary
Generated: $(date -u +%Y-%m-%dT%H:%M:%SZ)
Commit: ${{ github.sha }}
## Status: PASSED
All dependencies use licenses compatible with AGPL-3.0-or-later.
## Allowed Licenses
- MIT
- Apache-2.0
- BSD-2-Clause
- BSD-3-Clause
- ISC
- 0BSD
- PostgreSQL
- MPL-2.0
- CC0-1.0
- Unlicense
## Reports
- NuGet: See nuget-license-report artifact
- npm: See npm-license-report artifact
## Documentation
- Full dependency list: docs/legal/THIRD-PARTY-DEPENDENCIES.md
- Compatibility analysis: docs/legal/LICENSE-COMPATIBILITY.md
EOF
- name: Upload combined report
uses: actions/upload-artifact@v4
with:
name: license-audit-summary
path: out/combined
retention-days: 90

@@ -1,188 +0,0 @@
# .gitea/workflows/lighthouse-ci.yml
# Lighthouse CI for performance and accessibility testing of the StellaOps Web UI
name: Lighthouse CI
on:
push:
branches: [main]
paths:
- 'src/Web/StellaOps.Web/**'
- '.gitea/workflows/lighthouse-ci.yml'
pull_request:
branches: [main, develop]
paths:
- 'src/Web/StellaOps.Web/**'
schedule:
# Run weekly on Sunday at 2 AM UTC
- cron: '0 2 * * 0'
workflow_dispatch:
env:
NODE_VERSION: '20'
LHCI_BUILD_CONTEXT__CURRENT_BRANCH: ${{ github.head_ref || github.ref_name }}
LHCI_BUILD_CONTEXT__COMMIT_SHA: ${{ github.sha }}
jobs:
lighthouse:
name: Lighthouse Audit
runs-on: ubuntu-22.04
defaults:
run:
working-directory: src/Web/StellaOps.Web
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
cache-dependency-path: src/Web/StellaOps.Web/package-lock.json
- name: Install dependencies
run: npm ci
- name: Build production bundle
run: npm run build -- --configuration production
- name: Install Lighthouse CI
run: npm install -g @lhci/cli@0.13.x
- name: Run Lighthouse CI
run: |
lhci autorun \
--collect.staticDistDir=./dist/stella-ops-web/browser \
--collect.numberOfRuns=3 \
--assert.preset=lighthouse:recommended \
--assert.assertions.categories:performance=off \
--assert.assertions.categories:accessibility=off \
--upload.target=filesystem \
--upload.outputDir=./lighthouse-results
- name: Evaluate Lighthouse Results
id: lhci-results
run: |
# Parse the latest Lighthouse report
REPORT=$(ls -t lighthouse-results/*.json | head -1)
if [ -f "$REPORT" ]; then
PERF=$(jq '.categories.performance.score * 100' "$REPORT" | cut -d. -f1)
A11Y=$(jq '.categories.accessibility.score * 100' "$REPORT" | cut -d. -f1)
BP=$(jq '.categories["best-practices"].score * 100' "$REPORT" | cut -d. -f1)
SEO=$(jq '.categories.seo.score * 100' "$REPORT" | cut -d. -f1)
echo "performance=$PERF" >> $GITHUB_OUTPUT
echo "accessibility=$A11Y" >> $GITHUB_OUTPUT
echo "best-practices=$BP" >> $GITHUB_OUTPUT
echo "seo=$SEO" >> $GITHUB_OUTPUT
echo "## Lighthouse Results" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "| Category | Score | Threshold | Status |" >> $GITHUB_STEP_SUMMARY
echo "|----------|-------|-----------|--------|" >> $GITHUB_STEP_SUMMARY
# Performance: target >= 90
if [ "$PERF" -ge 90 ]; then
echo "| Performance | $PERF | >= 90 | :white_check_mark: |" >> $GITHUB_STEP_SUMMARY
else
echo "| Performance | $PERF | >= 90 | :warning: |" >> $GITHUB_STEP_SUMMARY
fi
# Accessibility: target >= 95
if [ "$A11Y" -ge 95 ]; then
echo "| Accessibility | $A11Y | >= 95 | :white_check_mark: |" >> $GITHUB_STEP_SUMMARY
else
echo "| Accessibility | $A11Y | >= 95 | :x: |" >> $GITHUB_STEP_SUMMARY
fi
# Best Practices: target >= 90
if [ "$BP" -ge 90 ]; then
echo "| Best Practices | $BP | >= 90 | :white_check_mark: |" >> $GITHUB_STEP_SUMMARY
else
echo "| Best Practices | $BP | >= 90 | :warning: |" >> $GITHUB_STEP_SUMMARY
fi
# SEO: target >= 90
if [ "$SEO" -ge 90 ]; then
echo "| SEO | $SEO | >= 90 | :white_check_mark: |" >> $GITHUB_STEP_SUMMARY
else
echo "| SEO | $SEO | >= 90 | :warning: |" >> $GITHUB_STEP_SUMMARY
fi
fi
- name: Check Quality Gates
run: |
PERF=${{ steps.lhci-results.outputs.performance }}
A11Y=${{ steps.lhci-results.outputs.accessibility }}
FAILED=0
# Performance gate (warning only, not blocking)
if [ "$PERF" -lt 90 ]; then
echo "::warning::Performance score ($PERF) is below target (90)"
fi
# Accessibility gate (blocking)
if [ "$A11Y" -lt 95 ]; then
echo "::error::Accessibility score ($A11Y) is below required threshold (95)"
FAILED=1
fi
if [ "$FAILED" -eq 1 ]; then
exit 1
fi
- name: Upload Lighthouse Reports
uses: actions/upload-artifact@v4
if: always()
with:
name: lighthouse-reports
path: src/Web/StellaOps.Web/lighthouse-results/
retention-days: 30
axe-accessibility:
name: Axe Accessibility Audit
runs-on: ubuntu-22.04
defaults:
run:
working-directory: src/Web/StellaOps.Web
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
cache-dependency-path: src/Web/StellaOps.Web/package-lock.json
- name: Install dependencies
run: npm ci
- name: Install Playwright browsers
run: npx playwright install --with-deps chromium
- name: Build production bundle
run: npm run build -- --configuration production
- name: Start preview server
run: |
npx serve -s dist/stella-ops-web/browser -l 4200 &
sleep 5
- name: Run Axe accessibility tests
run: |
npm run test:a11y || true
- name: Upload Axe results
uses: actions/upload-artifact@v4
if: always()
with:
name: axe-accessibility-results
path: src/Web/StellaOps.Web/test-results/
retention-days: 30

@@ -1,64 +0,0 @@
name: LNM Backfill CI
on:
workflow_dispatch:
inputs:
mongo_uri:
description: 'Staging Mongo URI (read-only snapshot)'
required: true
type: string
since_commit:
description: 'Git commit to compare (default HEAD)'
required: false
type: string
dry_run:
description: 'Dry run (no writes)'
required: false
default: true
type: boolean
jobs:
lnm-backfill:
runs-on: ubuntu-22.04
env:
DOTNET_VERSION: '10.0.100'
ARTIFACT_DIR: ${{ github.workspace }}/.artifacts
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Set up .NET SDK
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Restore
run: dotnet restore src/Concelier/StellaOps.Concelier.Backfill/StellaOps.Concelier.Backfill.csproj
- name: Run backfill (dry-run supported)
env:
STAGING_MONGO_URI: ${{ inputs.mongo_uri }}
run: |
mkdir -p $ARTIFACT_DIR
EXTRA=()
if [ "${{ inputs.dry_run }}" = "true" ]; then EXTRA+=("--dry-run"); fi
dotnet run --project src/Concelier/StellaOps.Concelier.Backfill/StellaOps.Concelier.Backfill.csproj -- --mode=observations --batch-size=500 --max-conflicts=0 --mongo "$STAGING_MONGO_URI" "${EXTRA[@]}" | tee $ARTIFACT_DIR/backfill-observations.log
dotnet run --project src/Concelier/StellaOps.Concelier.Backfill/StellaOps.Concelier.Backfill.csproj -- --mode=linksets --batch-size=500 --max-conflicts=0 --mongo "$STAGING_MONGO_URI" "${EXTRA[@]}" | tee $ARTIFACT_DIR/backfill-linksets.log
- name: Validate counts
env:
STAGING_MONGO_URI: ${{ inputs.mongo_uri }}
run: |
STAGING_MONGO_URI="$STAGING_MONGO_URI" devops/lnm/backfill-validation.sh
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: lnm-backfill-artifacts
path: ${{ env.ARTIFACT_DIR }}

@@ -1,83 +0,0 @@
name: LNM Migration CI
on:
workflow_dispatch:
inputs:
run_staging:
description: 'Run staging backfill (1=yes)'
required: false
default: '0'
push:
branches: [main]
paths:
- 'src/Concelier/__Libraries/StellaOps.Concelier.Migrations/**'
- 'devops/lnm/**'
jobs:
build-runner:
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100
include-prerelease: true
- name: Setup cosign
uses: sigstore/cosign-installer@v3
- name: Configure signing
run: |
if [ -z "${{ secrets.COSIGN_PRIVATE_KEY_B64 }}" ]; then
echo "COSIGN_ALLOW_DEV_KEY=1" >> $GITHUB_ENV
echo "COSIGN_PASSWORD=stellaops-dev" >> $GITHUB_ENV
fi
env:
COSIGN_PRIVATE_KEY_B64: ${{ secrets.COSIGN_PRIVATE_KEY_B64 }}
- name: Build and package runner
run: |
chmod +x devops/lnm/package-runner.sh
devops/lnm/package-runner.sh
- name: Verify checksums
run: |
cd out/lnm
sha256sum -c SHA256SUMS
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: lnm-migration-runner-${{ github.run_number }}
path: |
out/lnm/lnm-migration-runner.tar.gz
out/lnm/lnm-migration-runner.manifest.json
out/lnm/lnm-migration-runner.dsse.json
out/lnm/SHA256SUMS
if-no-files-found: warn
validate-metrics:
runs-on: ubuntu-22.04
needs: build-runner
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Validate monitoring config
run: |
# Validate alert rules syntax
if [ -f "devops/lnm/alerts/lnm-alerts.yaml" ]; then
echo "Validating alert rules..."
python3 -c "import yaml; yaml.safe_load(open('devops/lnm/alerts/lnm-alerts.yaml'))"
fi
# Validate dashboard JSON
if [ -f "devops/lnm/dashboards/lnm-migration.json" ]; then
echo "Validating dashboard..."
python3 -c "import json; json.load(open('devops/lnm/dashboards/lnm-migration.json'))"
fi
echo "Monitoring config validation complete"

@@ -1,63 +0,0 @@
name: LNM VEX Backfill
on:
workflow_dispatch:
inputs:
mongo_uri:
description: 'Staging Mongo URI'
required: true
type: string
nats_url:
description: 'NATS URL'
required: true
type: string
redis_url:
description: 'Redis URL'
required: true
type: string
dry_run:
description: 'Dry run (no writes)'
required: false
default: true
type: boolean
jobs:
vex-backfill:
runs-on: ubuntu-22.04
env:
DOTNET_VERSION: '10.0.100'
ARTIFACT_DIR: ${{ github.workspace }}/.artifacts
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Set up .NET SDK
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Restore
run: dotnet restore src/Concelier/StellaOps.Concelier.Backfill/StellaOps.Concelier.Backfill.csproj
- name: Run VEX backfill
env:
STAGING_MONGO_URI: ${{ inputs.mongo_uri }}
NATS_URL: ${{ inputs.nats_url }}
REDIS_URL: ${{ inputs.redis_url }}
run: |
mkdir -p $ARTIFACT_DIR
EXTRA=()
if [ "${{ inputs.dry_run }}" = "true" ]; then EXTRA+=("--dry-run"); fi
dotnet run --project src/Concelier/StellaOps.Concelier.Backfill/StellaOps.Concelier.Backfill.csproj -- --mode=vex --batch-size=500 --max-conflicts=0 --mongo "$STAGING_MONGO_URI" --nats "$NATS_URL" --redis "$REDIS_URL" "${EXTRA[@]}" | tee $ARTIFACT_DIR/vex-backfill.log
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: lnm-vex-backfill-artifacts
path: ${{ env.ARTIFACT_DIR }}

@@ -1,125 +0,0 @@
name: Manifest Integrity
on:
push:
branches: [main]
paths:
- 'docs/**/*.schema.json'
- 'docs/contracts/**'
- 'docs/schemas/**'
- 'scripts/packs/**'
pull_request:
paths:
- 'docs/**/*.schema.json'
- 'docs/contracts/**'
- 'docs/schemas/**'
- 'scripts/packs/**'
jobs:
validate-schemas:
name: Validate Schema Integrity
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
- name: Install dependencies
run: npm install -g ajv-cli ajv-formats
- name: Validate JSON schemas
run: |
EXIT_CODE=0
for schema in docs/schemas/*.schema.json; do
echo "Validating $schema..."
if ! ajv compile -s "$schema" --spec=draft2020 2>/dev/null; then
echo "Error: $schema is invalid"
EXIT_CODE=1
fi
done
exit $EXIT_CODE
validate-contracts:
name: Validate Contract Documents
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Check contract structure
run: |
for contract in docs/contracts/*.md; do
echo "Checking $contract..."
# Verify required sections exist
if ! grep -q "^## " "$contract"; then
echo "Warning: $contract missing section headers"
fi
# Check for decision ID
if grep -q "Decision ID" "$contract" && ! grep -q "DECISION-\|CONTRACT-" "$contract"; then
echo "Warning: $contract missing decision ID format"
fi
done
validate-pack-fixtures:
name: Validate Pack Fixtures
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Install dependencies
run: pip install jsonschema
- name: Run fixture validation
run: |
if [ -f .gitea/scripts/test/run-fixtures-check.sh ]; then
chmod +x .gitea/scripts/test/run-fixtures-check.sh
./.gitea/scripts/test/run-fixtures-check.sh
fi
checksum-audit:
name: Audit SHA256SUMS Files
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Validate checksums
run: |
find . -name "SHA256SUMS" -type f | while read f; do
dir=$(dirname "$f")
echo "Validating checksums in $dir..."
cd "$dir"
# Check if all referenced files exist
while read hash file; do
if [ ! -f "$file" ]; then
echo "Warning: $file referenced in SHA256SUMS but not found"
fi
done < SHA256SUMS
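# Sketch: verify the recorded hashes too, skipping entries whose files
# are absent (assumes GNU coreutils sha256sum >= 8.25 for --ignore-missing).
sha256sum -c --ignore-missing SHA256SUMS || echo "Warning: checksum mismatch in $dir"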
cd - > /dev/null
done
merkle-consistency:
name: Verify Merkle Roots
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Check DSSE Merkle roots
run: |
find . -name "*.dsse.json" -type f | while read f; do
echo "Checking Merkle root in $f..."
# Extract and validate Merkle root if present
if jq -e '.payload' "$f" > /dev/null 2>&1; then
PAYLOAD=$(jq -r '.payload' "$f" | base64 -d 2>/dev/null || echo "")
if echo "$PAYLOAD" | jq -e '._stellaops.merkleRoot' > /dev/null 2>&1; then
MERKLE=$(echo "$PAYLOAD" | jq -r '._stellaops.merkleRoot')
echo " Merkle root: $MERKLE"
fi
fi
done

@@ -1,74 +0,0 @@
name: Mirror Thin Bundle Sign & Verify
on:
workflow_dispatch:
schedule:
- cron: '0 6 * * *'
jobs:
mirror-sign:
runs-on: ubuntu-22.04
env:
MIRROR_SIGN_KEY_B64: ${{ secrets.MIRROR_SIGN_KEY_B64 }}
REQUIRE_PROD_SIGNING: 1
OCI: 1
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Fallback to dev signing key when secret is absent (non-prod only)
run: |
if [ -z "${MIRROR_SIGN_KEY_B64}" ]; then
echo "[warn] MIRROR_SIGN_KEY_B64 not set; using repo dev key for non-production signing."
echo "MIRROR_SIGN_KEY_B64=$(base64 -w0 tools/cosign/cosign.dev.key)" >> $GITHUB_ENV
echo "REQUIRE_PROD_SIGNING=0" >> $GITHUB_ENV
fi
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100
include-prerelease: true
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Verify signing prerequisites
run: scripts/mirror/check_signing_prereqs.sh
- name: Run mirror signing
run: |
scripts/mirror/ci-sign.sh
- name: Verify signed bundle
run: |
scripts/mirror/verify_thin_bundle.py out/mirror/thin/mirror-thin-v1.tar.gz
- name: Prepare Export Center handoff (metadata + optional schedule)
run: |
scripts/mirror/export-center-wire.sh
env:
EXPORT_CENTER_BASE_URL: ${{ secrets.EXPORT_CENTER_BASE_URL }}
EXPORT_CENTER_TOKEN: ${{ secrets.EXPORT_CENTER_TOKEN }}
EXPORT_CENTER_TENANT: ${{ secrets.EXPORT_CENTER_TENANT }}
EXPORT_CENTER_PROJECT: ${{ secrets.EXPORT_CENTER_PROJECT }}
EXPORT_CENTER_AUTO_SCHEDULE: ${{ secrets.EXPORT_CENTER_AUTO_SCHEDULE }}
- name: Upload signed artifacts
uses: actions/upload-artifact@v4
with:
name: mirror-thin-v1-signed
path: |
out/mirror/thin/mirror-thin-v1.tar.gz
out/mirror/thin/mirror-thin-v1.manifest.json
out/mirror/thin/mirror-thin-v1.manifest.dsse.json
out/mirror/thin/tuf/
out/mirror/thin/oci/
out/mirror/thin/milestone.json
out/mirror/thin/export-center/export-center-handoff.json
out/mirror/thin/export-center/export-center-targets.json
out/mirror/thin/export-center/schedule-response.json
if-no-files-found: error
retention-days: 14

@@ -1,44 +0,0 @@
name: mock-dev-release
on:
push:
paths:
- devops/releases/2025.09-mock-dev.yaml
- devops/downloads/manifest.json
- devops/mock-release/**
workflow_dispatch:
jobs:
package-mock-release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Package mock dev artefacts
run: |
set -euo pipefail
mkdir -p out/mock-release
cp devops/releases/2025.09-mock-dev.yaml out/mock-release/
cp devops/downloads/manifest.json out/mock-release/
tar -czf out/mock-release/mock-dev-release.tgz -C out/mock-release .
- name: Compose config (dev + mock overlay)
run: |
set -euo pipefail
devops/mock-release/config_check.sh
- name: Helm template (mock overlay)
run: |
set -euo pipefail
helm template mock ./devops/helm/stellaops -f devops/helm/stellaops/values-mock.yaml > /tmp/helm-mock.yaml
ls -lh /tmp/helm-mock.yaml
- name: Upload mock release bundle
uses: actions/upload-artifact@v4
with:
name: mock-dev-release
path: |
out/mock-release/mock-dev-release.tgz
/tmp/compose-mock-config.yaml
/tmp/helm-mock.yaml

@@ -1,405 +0,0 @@
# .gitea/workflows/module-publish.yml
# Per-module NuGet and container publishing to Gitea registry
# Sprint: SPRINT_20251226_004_CICD
name: Module Publish
on:
workflow_dispatch:
inputs:
module:
description: 'Module to publish'
required: true
type: choice
options:
- Authority
- Attestor
- Concelier
- Scanner
- Policy
- Signer
- Excititor
- Gateway
- Scheduler
- Orchestrator
- TaskRunner
- Notify
- CLI
version:
description: 'Semantic version (e.g., 1.2.3)'
required: true
type: string
publish_nuget:
description: 'Publish NuGet packages'
type: boolean
default: true
publish_container:
description: 'Publish container image'
type: boolean
default: true
prerelease:
description: 'Mark as prerelease'
type: boolean
default: false
push:
tags:
- 'module-*-v*' # e.g., module-authority-v1.2.3
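# Example of cutting a release tag for one module (version illustrative):
#   git tag module-authority-v1.2.3
#   git push origin module-authority-v1.2.3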
env:
DOTNET_VERSION: '10.0.100'
DOTNET_NOLOGO: 1
DOTNET_CLI_TELEMETRY_OPTOUT: 1
REGISTRY: git.stella-ops.org
NUGET_SOURCE: https://git.stella-ops.org/api/packages/stella-ops.org/nuget/index.json
jobs:
# ===========================================================================
# PARSE TAG (for tag-triggered builds)
# ===========================================================================
parse-tag:
name: Parse Tag
runs-on: ubuntu-22.04
if: github.event_name == 'push'
outputs:
module: ${{ steps.parse.outputs.module }}
version: ${{ steps.parse.outputs.version }}
steps:
- name: Parse module and version from tag
id: parse
run: |
TAG="${{ github.ref_name }}"
# Expected format: module-{name}-v{version}
# Example: module-authority-v1.2.3
if [[ "$TAG" =~ ^module-([a-zA-Z]+)-v([0-9]+\.[0-9]+\.[0-9]+.*)$ ]]; then
MODULE="${BASH_REMATCH[1]}"
VERSION="${BASH_REMATCH[2]}"
# Normalize to the module names expected by later jobs; plain
# first-letter capitalization would yield "Taskrunner"/"Cli" and
# break the project lookup and CLI routing.
MODULE="$(echo "$MODULE" | tr '[:upper:]' '[:lower:]')"
case "$MODULE" in
cli) MODULE="CLI" ;;
taskrunner) MODULE="TaskRunner" ;;
*) MODULE="$(echo "${MODULE:0:1}" | tr '[:lower:]' '[:upper:]')${MODULE:1}" ;;
esac
echo "module=$MODULE" >> "$GITHUB_OUTPUT"
echo "version=$VERSION" >> "$GITHUB_OUTPUT"
echo "Parsed: module=$MODULE, version=$VERSION"
else
echo "::error::Invalid tag format. Expected: module-{name}-v{version}"
exit 1
fi
# ===========================================================================
# VALIDATE
# ===========================================================================
validate:
name: Validate Inputs
runs-on: ubuntu-22.04
needs: [parse-tag]
if: always() && (needs.parse-tag.result == 'success' || needs.parse-tag.result == 'skipped')
outputs:
module: ${{ steps.resolve.outputs.module }}
version: ${{ steps.resolve.outputs.version }}
publish_nuget: ${{ steps.resolve.outputs.publish_nuget }}
publish_container: ${{ steps.resolve.outputs.publish_container }}
steps:
- name: Resolve inputs
id: resolve
run: |
if [[ "${{ github.event_name }}" == "push" ]]; then
MODULE="${{ needs.parse-tag.outputs.module }}"
VERSION="${{ needs.parse-tag.outputs.version }}"
PUBLISH_NUGET="true"
PUBLISH_CONTAINER="true"
else
MODULE="${{ github.event.inputs.module }}"
VERSION="${{ github.event.inputs.version }}"
PUBLISH_NUGET="${{ github.event.inputs.publish_nuget }}"
PUBLISH_CONTAINER="${{ github.event.inputs.publish_container }}"
fi
echo "module=$MODULE" >> "$GITHUB_OUTPUT"
echo "version=$VERSION" >> "$GITHUB_OUTPUT"
echo "publish_nuget=$PUBLISH_NUGET" >> "$GITHUB_OUTPUT"
echo "publish_container=$PUBLISH_CONTAINER" >> "$GITHUB_OUTPUT"
echo "=== Resolved Configuration ==="
echo "Module: $MODULE"
echo "Version: $VERSION"
echo "Publish NuGet: $PUBLISH_NUGET"
echo "Publish Container: $PUBLISH_CONTAINER"
- name: Validate version format
run: |
VERSION="${{ steps.resolve.outputs.version }}"
if ! [[ "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.]+)?$ ]]; then
echo "::error::Invalid version format. Expected: MAJOR.MINOR.PATCH[-prerelease]"
exit 1
fi
# ===========================================================================
# PUBLISH NUGET
# ===========================================================================
publish-nuget:
name: Publish NuGet
runs-on: ubuntu-22.04
needs: [validate]
if: needs.validate.outputs.publish_nuget == 'true'
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Determine project path
id: path
run: |
MODULE="${{ needs.validate.outputs.module }}"
# Map module names to project paths
case "$MODULE" in
Authority)
PROJECT="src/Authority/StellaOps.Authority.WebService/StellaOps.Authority.WebService.csproj"
;;
Attestor)
PROJECT="src/Attestor/StellaOps.Attestor.WebService/StellaOps.Attestor.WebService.csproj"
;;
Concelier)
PROJECT="src/Concelier/StellaOps.Concelier.WebService/StellaOps.Concelier.WebService.csproj"
;;
Scanner)
PROJECT="src/Scanner/StellaOps.Scanner.WebService/StellaOps.Scanner.WebService.csproj"
;;
Policy)
PROJECT="src/Policy/StellaOps.Policy.Gateway/StellaOps.Policy.Gateway.csproj"
;;
Signer)
PROJECT="src/Signer/StellaOps.Signer.WebService/StellaOps.Signer.WebService.csproj"
;;
Excititor)
PROJECT="src/Excititor/StellaOps.Excititor.WebService/StellaOps.Excititor.WebService.csproj"
;;
Gateway)
PROJECT="src/Gateway/StellaOps.Gateway.WebService/StellaOps.Gateway.WebService.csproj"
;;
Scheduler)
PROJECT="src/Scheduler/StellaOps.Scheduler.WebService/StellaOps.Scheduler.WebService.csproj"
;;
Orchestrator)
PROJECT="src/Orchestrator/StellaOps.Orchestrator.WebService/StellaOps.Orchestrator.WebService.csproj"
;;
TaskRunner)
PROJECT="src/TaskRunner/StellaOps.TaskRunner.WebService/StellaOps.TaskRunner.WebService.csproj"
;;
Notify)
PROJECT="src/Notify/StellaOps.Notify.WebService/StellaOps.Notify.WebService.csproj"
;;
CLI)
PROJECT="src/Cli/StellaOps.Cli/StellaOps.Cli.csproj"
;;
*)
echo "::error::Unknown module: $MODULE"
exit 1
;;
esac
echo "project=$PROJECT" >> "$GITHUB_OUTPUT"
echo "Project path: $PROJECT"
- name: Restore dependencies
run: dotnet restore ${{ steps.path.outputs.project }}
- name: Build
run: |
dotnet build ${{ steps.path.outputs.project }} \
--configuration Release \
--no-restore \
-p:Version=${{ needs.validate.outputs.version }}
- name: Pack NuGet
run: |
dotnet pack ${{ steps.path.outputs.project }} \
--configuration Release \
--no-build \
-p:Version=${{ needs.validate.outputs.version }} \
-p:PackageVersion=${{ needs.validate.outputs.version }} \
--output out/packages
- name: Push to Gitea NuGet registry
run: |
for nupkg in out/packages/*.nupkg; do
echo "Pushing: $nupkg"
dotnet nuget push "$nupkg" \
--source "${{ env.NUGET_SOURCE }}" \
--api-key "${{ secrets.GITEA_TOKEN }}" \
--skip-duplicate
done
- name: Upload NuGet artifacts
uses: actions/upload-artifact@v4
with:
name: nuget-${{ needs.validate.outputs.module }}-${{ needs.validate.outputs.version }}
path: out/packages/*.nupkg
retention-days: 30
# ===========================================================================
# PUBLISH CONTAINER
# ===========================================================================
publish-container:
name: Publish Container
runs-on: ubuntu-22.04
needs: [validate]
if: needs.validate.outputs.publish_container == 'true' && needs.validate.outputs.module != 'CLI'
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Gitea Container Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITEA_TOKEN }}
- name: Determine image name
id: image
run: |
MODULE="${{ needs.validate.outputs.module }}"
VERSION="${{ needs.validate.outputs.version }}"
MODULE_LOWER=$(echo "$MODULE" | tr '[:upper:]' '[:lower:]')
IMAGE="${{ env.REGISTRY }}/stella-ops.org/${MODULE_LOWER}"
echo "name=$IMAGE" >> "$GITHUB_OUTPUT"
echo "tag_version=${IMAGE}:${VERSION}" >> "$GITHUB_OUTPUT"
echo "tag_latest=${IMAGE}:latest" >> "$GITHUB_OUTPUT"
echo "Image: $IMAGE"
echo "Tags: ${VERSION}, latest"
- name: Build and push container
uses: docker/build-push-action@v5
with:
context: .
file: devops/docker/Dockerfile.platform
target: ${{ steps.image.outputs.module_lower }}
push: true
tags: |
${{ steps.image.outputs.tag_version }}
${{ steps.image.outputs.tag_latest }}
cache-from: type=gha
cache-to: type=gha,mode=max
labels: |
org.opencontainers.image.title=StellaOps ${{ needs.validate.outputs.module }}
org.opencontainers.image.version=${{ needs.validate.outputs.version }}
org.opencontainers.image.source=https://git.stella-ops.org/stella-ops.org/git.stella-ops.org
org.opencontainers.image.revision=${{ github.sha }}
# ===========================================================================
# PUBLISH CLI BINARIES (multi-platform)
# ===========================================================================
publish-cli:
name: Publish CLI (${{ matrix.runtime }})
runs-on: ubuntu-22.04
needs: [validate]
if: needs.validate.outputs.module == 'CLI'
strategy:
matrix:
runtime:
- linux-x64
- linux-arm64
- win-x64
- osx-x64
- osx-arm64
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
include-prerelease: true
- name: Install cross-compilation tools
if: matrix.runtime == 'linux-arm64'
run: |
sudo apt-get update
sudo apt-get install -y --no-install-recommends binutils-aarch64-linux-gnu
- name: Publish CLI
run: |
dotnet publish src/Cli/StellaOps.Cli/StellaOps.Cli.csproj \
--configuration Release \
--runtime ${{ matrix.runtime }} \
--self-contained true \
-p:Version=${{ needs.validate.outputs.version }} \
-p:PublishSingleFile=true \
-p:PublishTrimmed=true \
-p:EnableCompressionInSingleFile=true \
--output out/cli/${{ matrix.runtime }}
- name: Create archive
run: |
VERSION="${{ needs.validate.outputs.version }}"
RUNTIME="${{ matrix.runtime }}"
cd out/cli/$RUNTIME
if [[ "$RUNTIME" == win-* ]]; then
zip -r ../stellaops-cli-${VERSION}-${RUNTIME}.zip .
else
tar -czvf ../stellaops-cli-${VERSION}-${RUNTIME}.tar.gz .
fi
- name: Upload CLI artifacts
uses: actions/upload-artifact@v4
with:
name: cli-${{ needs.validate.outputs.version }}-${{ matrix.runtime }}
path: |
out/cli/*.zip
out/cli/*.tar.gz
retention-days: 30
# ===========================================================================
# SUMMARY
# ===========================================================================
summary:
name: Publish Summary
runs-on: ubuntu-22.04
needs: [validate, publish-nuget, publish-container, publish-cli]
if: always()
steps:
- name: Generate Summary
run: |
echo "## Module Publish Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "| Property | Value |" >> $GITHUB_STEP_SUMMARY
echo "|----------|-------|" >> $GITHUB_STEP_SUMMARY
echo "| Module | ${{ needs.validate.outputs.module }} |" >> $GITHUB_STEP_SUMMARY
echo "| Version | ${{ needs.validate.outputs.version }} |" >> $GITHUB_STEP_SUMMARY
echo "| NuGet | ${{ needs.publish-nuget.result || 'skipped' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Container | ${{ needs.publish-container.result || 'skipped' }} |" >> $GITHUB_STEP_SUMMARY
echo "| CLI | ${{ needs.publish-cli.result || 'skipped' }} |" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Registry URLs" >> $GITHUB_STEP_SUMMARY
echo "- NuGet: \`${{ env.NUGET_SOURCE }}\`" >> $GITHUB_STEP_SUMMARY
echo "- Container: \`${{ env.REGISTRY }}/stella-ops.org/${{ needs.validate.outputs.module | lower }}\`" >> $GITHUB_STEP_SUMMARY
- name: Check for failures
if: contains(needs.*.result, 'failure')
run: |
echo "::error::One or more publish jobs failed"
exit 1

@@ -1,102 +0,0 @@
name: Notify Smoke Test
on:
push:
branches: [main]
paths:
- 'src/Notify/**'
- 'src/Notifier/**'
pull_request:
paths:
- 'src/Notify/**'
- 'src/Notifier/**'
workflow_dispatch:
env:
DOTNET_VERSION: '10.0.x'
jobs:
unit-tests:
name: Notify Unit Tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
- name: Restore dependencies
run: dotnet restore src/Notify/
- name: Build
run: dotnet build src/Notify/ --no-restore
- name: Run tests
run: dotnet test src/Notify/ --no-build --verbosity normal
notifier-tests:
name: Notifier Service Tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
- name: Restore dependencies
run: dotnet restore src/Notifier/
- name: Build
run: dotnet build src/Notifier/ --no-restore
- name: Run tests
run: dotnet test src/Notifier/ --no-build --verbosity normal
smoke-test:
name: Notification Smoke Test
runs-on: ubuntu-latest
needs: [unit-tests, notifier-tests]
services:
mongodb:
image: mongo:7.0
ports:
- 27017:27017
steps:
- uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
- name: Build Notifier
run: dotnet build src/Notifier/StellaOps.Notifier/StellaOps.Notifier.WebService/
- name: Start service
run: |
dotnet run --project src/Notifier/StellaOps.Notifier/StellaOps.Notifier.WebService/ &
sleep 10
- name: Health check
run: |
for i in {1..30}; do
if curl -s http://localhost:5000/health > /dev/null; then
echo "Service is healthy"
exit 0
fi
sleep 1
done
echo "Service failed to start"
exit 1
- name: Test notification endpoint
run: |
# Test dry-run notification
curl -X POST http://localhost:5000/api/v1/notifications/test \
-H "Content-Type: application/json" \
-d '{"channel": "log", "message": "Smoke test", "dryRun": true}' \
|| echo "Warning: Notification test endpoint not available"

@@ -1,59 +0,0 @@
name: oas-ci
on:
push:
paths:
- "src/Api/**"
- "scripts/api-*.mjs"
- "package.json"
- "package-lock.json"
pull_request:
paths:
- "src/Api/**"
- "scripts/api-*.mjs"
- "package.json"
- "package-lock.json"
jobs:
oas-validate:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "18"
- name: Install deps
run: npm install --ignore-scripts --no-progress
- name: Compose aggregate OpenAPI
run: npm run api:compose
- name: Lint (spectral)
run: npm run api:lint
- name: Validate examples coverage
run: npm run api:examples
- name: Compat diff (previous commit)
run: |
set -e
if git show HEAD~1:src/Api/StellaOps.Api.OpenApi/stella.yaml > /tmp/stella-prev.yaml 2>/dev/null; then
node scripts/api-compat-diff.mjs /tmp/stella-prev.yaml src/Api/StellaOps.Api.OpenApi/stella.yaml --output text --fail-on-breaking
else
echo "[oas-ci] previous stella.yaml not found; skipping"
fi
- name: Contract tests
run: npm run api:compat:test
- name: Upload aggregate spec
uses: actions/upload-artifact@v4
with:
name: stella-openapi
path: src/Api/StellaOps.Api.OpenApi/stella.yaml

@@ -1,46 +0,0 @@
name: obs-slo
on:
workflow_dispatch:
inputs:
prom_url:
description: "Prometheus base URL"
required: true
default: "http://localhost:9090"
jobs:
slo-eval:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Setup Python (telemetry schema checks)
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Install telemetry schema deps
run: python -m pip install --upgrade pip jsonschema
- name: Run SLO evaluator
env:
PROM_URL: ${{ github.event.inputs.prom_url }}
run: |
chmod +x scripts/observability/slo-evaluator.sh
scripts/observability/slo-evaluator.sh
- name: Telemetry schema/bundle checks
env:
TELEMETRY_BUNDLE_SCHEMA: docs/modules/telemetry/schemas/telemetry-bundle.schema.json
run: |
chmod +x devops/telemetry/tests/ci-run.sh
devops/telemetry/tests/ci-run.sh
- name: Upload SLO results
uses: actions/upload-artifact@v4
with:
name: obs-slo
path: out/obs-slo/**

@@ -1,37 +0,0 @@
name: obs-stream
on:
workflow_dispatch:
inputs:
nats_url:
description: "NATS server URL"
required: false
default: "nats://localhost:4222"
jobs:
stream-validate:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Install nats CLI
run: |
curl -sSL https://github.com/nats-io/natscli/releases/download/v0.1.4/nats-0.1.4-linux-amd64.tar.gz -o /tmp/natscli.tgz
tar -C /tmp -xzf /tmp/natscli.tgz
sudo mv /tmp/nats /usr/local/bin/nats
- name: Validate streaming knobs
env:
NATS_URL: ${{ github.event.inputs.nats_url }}
run: |
chmod +x scripts/observability/streaming-validate.sh
scripts/observability/streaming-validate.sh
- name: Upload stream validation
uses: actions/upload-artifact@v4
with:
name: obs-stream
path: out/obs-stream/**

@@ -1,121 +0,0 @@
name: Offline E2E Tests
on:
pull_request:
paths:
- 'src/AirGap/**'
- 'src/Scanner/**'
- 'src/__Tests/offline/**'
schedule:
- cron: '0 4 * * *' # Nightly at 4 AM UTC
workflow_dispatch:
env:
STELLAOPS_OFFLINE_MODE: 'true'
DOTNET_VERSION: '10.0.100'
jobs:
offline-e2e:
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
- name: Cache NuGet packages
uses: actions/cache@v4
with:
path: ~/.nuget/packages
key: ${{ runner.os }}-nuget-${{ hashFiles('**/*.csproj') }}
restore-keys: |
${{ runner.os }}-nuget-
- name: Download offline bundle
run: |
# In real scenario, bundle would be pre-built and cached
# For now, create minimal fixture structure
mkdir -p ./offline-bundle/{images,feeds,policies,keys,certs,vex}
echo '{}' > ./offline-bundle/manifest.json
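# Sketch of a slightly richer manifest (field names are hypothetical;
# align with the real bundle schema before asserting on them):
echo '{"schemaVersion": 1, "createdAt": "1970-01-01T00:00:00Z", "artifacts": []}' \
  > ./offline-bundle/manifest.json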
- name: Build in isolated environment
run: |
# Build offline test library
dotnet build src/__Libraries/StellaOps.Testing.AirGap/StellaOps.Testing.AirGap.csproj
# Build offline E2E tests
dotnet build src/__Tests/offline/StellaOps.Offline.E2E.Tests/StellaOps.Offline.E2E.Tests.csproj
- name: Run offline E2E tests with network isolation
run: |
# Set offline bundle path
export STELLAOPS_OFFLINE_BUNDLE=$(pwd)/offline-bundle
# Run tests
dotnet test src/__Tests/offline/StellaOps.Offline.E2E.Tests \
--logger "trx;LogFileName=offline-e2e.trx" \
--logger "console;verbosity=detailed" \
--results-directory ./results
- name: Verify no network calls
if: always()
run: |
# Parse test output for any NetworkIsolationViolationException
if [ -f "./results/offline-e2e.trx" ]; then
if grep -q "NetworkIsolationViolation" ./results/offline-e2e.trx; then
echo "::error::Tests attempted network calls in offline mode!"
exit 1
else
echo "✅ No network isolation violations detected"
fi
fi
- name: Upload results
if: always()
uses: actions/upload-artifact@v4
with:
name: offline-e2e-results
path: ./results/
verify-isolation:
runs-on: ubuntu-22.04
needs: offline-e2e
if: always()
steps:
- name: Download results
uses: actions/download-artifact@v4
with:
name: offline-e2e-results
path: ./results
- name: Generate summary
run: |
echo "## Offline E2E Test Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
if [ -f "./results/offline-e2e.trx" ]; then
# Parse test results
TOTAL=$(grep -o 'total="[0-9]*"' ./results/offline-e2e.trx | cut -d'"' -f2 || echo "0")
PASSED=$(grep -o 'passed="[0-9]*"' ./results/offline-e2e.trx | cut -d'"' -f2 || echo "0")
FAILED=$(grep -o 'failed="[0-9]*"' ./results/offline-e2e.trx | cut -d'"' -f2 || echo "0")
echo "| Metric | Value |" >> $GITHUB_STEP_SUMMARY
echo "|--------|-------|" >> $GITHUB_STEP_SUMMARY
echo "| Total Tests | ${TOTAL} |" >> $GITHUB_STEP_SUMMARY
echo "| Passed | ${PASSED} |" >> $GITHUB_STEP_SUMMARY
echo "| Failed | ${FAILED} |" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
if grep -q "NetworkIsolationViolation" ./results/offline-e2e.trx; then
echo "❌ **Network isolation was violated**" >> $GITHUB_STEP_SUMMARY
else
echo "✅ **Network isolation verified - no egress detected**" >> $GITHUB_STEP_SUMMARY
fi
else
echo "⚠️ No test results found" >> $GITHUB_STEP_SUMMARY
fi

@@ -1,186 +0,0 @@
name: Parity Tests
# Parity testing workflow: compares StellaOps against competitor scanners
# (Syft, Grype, Trivy) on a standardized fixture set.
#
# Schedule: Nightly at 02:00 UTC; Weekly full run on Sunday 00:00 UTC
# NOT a PR gate - too slow and has external dependencies
on:
schedule:
# Nightly at 02:00 UTC (quick fixture set)
- cron: '0 2 * * *'
# Weekly on Sunday at 00:00 UTC (full fixture set)
- cron: '0 0 * * 0'
workflow_dispatch:
inputs:
fixture_set:
description: 'Fixture set to use'
required: false
default: 'quick'
type: choice
options:
- quick
- full
enable_drift_detection:
description: 'Enable drift detection analysis'
required: false
default: true
type: boolean
env:
DOTNET_VERSION: '10.0.x'
SYFT_VERSION: '1.9.0'
GRYPE_VERSION: '0.79.3'
TRIVY_VERSION: '0.54.1'
PARITY_RESULTS_PATH: 'bench/results/parity'
jobs:
parity-tests:
name: Competitor Parity Tests
runs-on: ubuntu-latest
timeout-minutes: 120
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: ${{ env.DOTNET_VERSION }}
- name: Install Syft
run: |
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin v${{ env.SYFT_VERSION }}
syft version
- name: Install Grype
run: |
curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin v${{ env.GRYPE_VERSION }}
grype version
- name: Install Trivy
run: |
curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin v${{ env.TRIVY_VERSION }}
trivy --version
- name: Determine fixture set
id: fixtures
run: |
# Weekly runs use full fixture set
if [[ "${{ github.event.schedule }}" == "0 0 * * 0" ]]; then
echo "fixture_set=full" >> $GITHUB_OUTPUT
elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
echo "fixture_set=${{ inputs.fixture_set }}" >> $GITHUB_OUTPUT
else
echo "fixture_set=quick" >> $GITHUB_OUTPUT
fi
- name: Build parity tests
run: |
dotnet build src/__Tests/parity/StellaOps.Parity.Tests/StellaOps.Parity.Tests.csproj -c Release
- name: Run parity tests
id: parity
run: |
mkdir -p ${{ env.PARITY_RESULTS_PATH }}
RUN_ID=$(date -u +%Y%m%dT%H%M%SZ)
echo "run_id=${RUN_ID}" >> $GITHUB_OUTPUT
dotnet test src/__Tests/parity/StellaOps.Parity.Tests/StellaOps.Parity.Tests.csproj \
-c Release \
--no-build \
--logger "trx;LogFileName=parity-results.trx" \
--results-directory ${{ env.PARITY_RESULTS_PATH }} \
-e PARITY_FIXTURE_SET=${{ steps.fixtures.outputs.fixture_set }} \
-e PARITY_RUN_ID=${RUN_ID} \
-e PARITY_OUTPUT_PATH=${{ env.PARITY_RESULTS_PATH }} \
|| true # Don't fail workflow on test failures
- name: Store parity results
run: |
# Copy JSON results to time-series storage
if [ -f "${{ env.PARITY_RESULTS_PATH }}/parity-${{ steps.parity.outputs.run_id }}.json" ]; then
echo "Parity results stored successfully"
cat ${{ env.PARITY_RESULTS_PATH }}/parity-${{ steps.parity.outputs.run_id }}.json | jq .
else
echo "Warning: No parity results file found"
fi
- name: Run drift detection
if: ${{ github.event_name != 'workflow_dispatch' || inputs.enable_drift_detection == true }}
run: |
# Analyze drift from historical results
dotnet run --project src/__Tests/parity/StellaOps.Parity.Tests/StellaOps.Parity.Tests.csproj \
--no-build \
-- analyze-drift \
--results-path ${{ env.PARITY_RESULTS_PATH }} \
--threshold 0.05 \
--trend-days 3 \
|| true
- name: Upload parity results
uses: actions/upload-artifact@v4
with:
name: parity-results-${{ steps.parity.outputs.run_id }}
path: ${{ env.PARITY_RESULTS_PATH }}
retention-days: 90
- name: Export Prometheus metrics
if: ${{ secrets.PROMETHEUS_PUSH_GATEWAY != '' }}
env:
PROMETHEUS_PUSH_GATEWAY: ${{ secrets.PROMETHEUS_PUSH_GATEWAY }}
run: |
# Push metrics to Prometheus Push Gateway if configured
if [ -f "${{ env.PARITY_RESULTS_PATH }}/parity-metrics.txt" ]; then
curl -X POST \
-H "Content-Type: text/plain" \
--data-binary @${{ env.PARITY_RESULTS_PATH }}/parity-metrics.txt \
"${PROMETHEUS_PUSH_GATEWAY}/metrics/job/parity_tests/instance/${{ steps.parity.outputs.run_id }}"
fi
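# For reference, parity-metrics.txt is expected in Prometheus text
# exposition format, e.g. (metric names illustrative):
#   parity_vuln_recall{scanner="stellaops"} 0.94
#   parity_latency_p95_ms{scanner="grype"} 812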
- name: Generate comparison report
run: |
echo "## Parity Test Results - ${{ steps.parity.outputs.run_id }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Fixture Set:** ${{ steps.fixtures.outputs.fixture_set }}" >> $GITHUB_STEP_SUMMARY
echo "**Competitor Versions:**" >> $GITHUB_STEP_SUMMARY
echo "- Syft: ${{ env.SYFT_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- Grype: ${{ env.GRYPE_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- Trivy: ${{ env.TRIVY_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
if [ -f "${{ env.PARITY_RESULTS_PATH }}/parity-${{ steps.parity.outputs.run_id }}.json" ]; then
echo "### Metrics Summary" >> $GITHUB_STEP_SUMMARY
jq -r '
"| Metric | StellaOps | Grype | Trivy |",
"|--------|-----------|-------|-------|",
"| SBOM Packages | \(.sbomMetrics.stellaOpsPackageCount) | \(.sbomMetrics.syftPackageCount) | - |",
"| Vulnerability Recall | \(.vulnMetrics.recall | . * 100 | round / 100)% | - | - |",
"| Vulnerability F1 | \(.vulnMetrics.f1Score | . * 100 | round / 100)% | - | - |",
"| Latency P95 (ms) | \(.latencyMetrics.stellaOpsP95Ms | round) | \(.latencyMetrics.grypeP95Ms | round) | \(.latencyMetrics.trivyP95Ms | round) |"
' ${{ env.PARITY_RESULTS_PATH }}/parity-${{ steps.parity.outputs.run_id }}.json >> $GITHUB_STEP_SUMMARY || echo "Could not parse results" >> $GITHUB_STEP_SUMMARY
fi
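# The jq projection above implies a result document shaped roughly like:
#   { "sbomMetrics":    { "stellaOpsPackageCount": 123, "syftPackageCount": 120 },
#     "vulnMetrics":    { "recall": 0.93, "f1Score": 0.91 },
#     "latencyMetrics": { "stellaOpsP95Ms": 1840.2, "grypeP95Ms": 2100.7, "trivyP95Ms": 1990.1 } }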
- name: Alert on critical drift
if: failure()
uses: slackapi/slack-github-action@v1.25.0
with:
payload: |
{
"text": "⚠️ Parity test drift detected",
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*Parity Test Alert*\nDrift detected in competitor comparison metrics.\n<${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|View Results>"
}
}
]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
continue-on-error: true

View File

@@ -1,70 +0,0 @@
name: Policy Lint & Smoke
on:
pull_request:
paths:
- 'docs/policy/**'
- 'docs/examples/policies/**'
- 'src/Cli/**'
- '.gitea/workflows/policy-lint.yml'
push:
branches: [ main ]
paths:
- 'docs/policy/**'
- 'docs/examples/policies/**'
- 'src/Cli/**'
- '.gitea/workflows/policy-lint.yml'
jobs:
policy-lint:
runs-on: ubuntu-22.04
env:
DOTNET_NOLOGO: 1
DOTNET_CLI_TELEMETRY_OPTOUT: 1
DOTNET_SYSTEM_GLOBALIZATION_INVARIANT: 1
TZ: UTC
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Setup .NET 10 RC
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100
include-prerelease: true
- name: Cache NuGet packages
uses: actions/cache@v4
with:
path: |
~/.nuget/packages
.nuget/packages
key: policy-lint-nuget-${{ runner.os }}-${{ hashFiles('**/*.csproj') }}
- name: Restore CLI
run: |
dotnet restore src/Cli/StellaOps.Cli/StellaOps.Cli.csproj --configfile nuget.config
- name: Lint policies (deterministic)
run: |
mkdir -p out/policy-lint
dotnet run --project src/Cli/StellaOps.Cli/StellaOps.Cli.csproj -- \
policy lint docs/examples/policies/*.stella \
--format json --no-color \
> out/policy-lint/lint.json
- name: Smoke simulate entrypoint
run: |
dotnet run --project src/Cli/StellaOps.Cli/StellaOps.Cli.csproj -- policy simulate --help > out/policy-lint/simulate-help.txt
- name: Upload lint artifacts
uses: actions/upload-artifact@v4
with:
name: policy-lint
path: out/policy-lint
retention-days: 7

View File

@@ -1,89 +0,0 @@
name: Policy Simulation
on:
pull_request:
paths:
- 'docs/policy/**'
- 'docs/examples/policies/**'
- 'scripts/policy/**'
- '.gitea/workflows/policy-simulate.yml'
push:
branches: [ main ]
paths:
- 'docs/policy/**'
- 'docs/examples/policies/**'
- 'scripts/policy/**'
- '.gitea/workflows/policy-simulate.yml'
jobs:
policy-simulate:
runs-on: ubuntu-22.04
env:
DOTNET_NOLOGO: 1
DOTNET_CLI_TELEMETRY_OPTOUT: 1
DOTNET_SYSTEM_GLOBALIZATION_INVARIANT: 1
TZ: UTC
THRESHOLD: 0
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Setup .NET 10 RC
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100
include-prerelease: true
- name: Install Cosign
uses: sigstore/cosign-installer@v3.4.0
- name: Cache NuGet packages
uses: actions/cache@v4
with:
path: |
~/.nuget/packages
.nuget/packages
key: policy-sim-nuget-${{ runner.os }}-${{ hashFiles('**/*.csproj') }}
- name: Restore CLI
run: |
dotnet restore src/Cli/StellaOps.Cli/StellaOps.Cli.csproj --configfile nuget.config
- name: Generate policy signing key (ephemeral)
run: |
OUT_DIR=out/policy-sign/keys PREFIX=ci-policy COSIGN_PASSWORD= scripts/policy/rotate-key.sh
- name: Sign sample policy blob
run: |
export COSIGN_KEY_B64=$(base64 -w0 out/policy-sign/keys/ci-policy-cosign.key)
COSIGN_PASSWORD= \
.gitea/scripts/sign/sign-policy.sh --file docs/examples/policies/baseline.stella --out-dir out/policy-sign
- name: Attest and verify sample policy blob
run: |
export COSIGN_KEY_B64=$(base64 -w0 out/policy-sign/keys/ci-policy-cosign.key)
COSIGN_PASSWORD= \
scripts/policy/attest-verify.sh --file docs/examples/policies/baseline.stella --out-dir out/policy-sign
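# Signing chain exercised above: rotate-key.sh mints an ephemeral cosign
# keypair, sign-policy.sh signs the policy blob with it, and attest-verify.sh
# round-trips an attestation to prove the key material verifies before the
# simulation artifacts are produced.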
- name: Run batch policy simulation
run: |
scripts/policy/batch-simulate.sh
- name: Upload simulation artifacts
uses: actions/upload-artifact@v4
with:
name: policy-simulation
path: out/policy-sim
retention-days: 7
- name: Upload signing artifacts
uses: actions/upload-artifact@v4
with:
name: policy-signing
path: out/policy-sign
retention-days: 7

View File

@@ -25,9 +25,6 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Resolve staging credentials
id: staging
run: |

View File

@@ -1,24 +0,0 @@
name: provenance-check
on:
workflow_dispatch: {}
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Task Pack offline bundle fixtures
run: python3 .gitea/scripts/test/run-fixtures-check.sh
- name: Emit provenance summary
run: |
mkdir -p out/provenance
echo "run_at=$(date -u +"%Y-%m-%dT%H:%M:%SZ")" > out/provenance/summary.txt
- name: Upload provenance summary
uses: actions/upload-artifact@v4
with:
name: provenance-summary
path: out/provenance/**

View File

@@ -1,306 +0,0 @@
name: Reachability Benchmark
# Sprint: SPRINT_3500_0003_0001
# Task: CORPUS-009 - Create Gitea workflow for reachability benchmark
# Task: CORPUS-010 - Configure nightly + per-PR benchmark runs
on:
workflow_dispatch:
inputs:
baseline_version:
description: 'Baseline version to compare against'
required: false
default: 'latest'
verbose:
description: 'Enable verbose output'
required: false
type: boolean
default: false
push:
branches: [ main ]
paths:
- 'datasets/reachability/**'
- 'src/Scanner/__Libraries/StellaOps.Scanner.Benchmarks/**'
- 'bench/reachability-benchmark/**'
- '.gitea/workflows/reachability-bench.yaml'
pull_request:
paths:
- 'datasets/reachability/**'
- 'src/Scanner/__Libraries/StellaOps.Scanner.Benchmarks/**'
- 'bench/reachability-benchmark/**'
schedule:
# Nightly at 02:00 UTC
- cron: '0 2 * * *'
jobs:
benchmark:
runs-on: ubuntu-22.04
env:
DOTNET_NOLOGO: 1
DOTNET_CLI_TELEMETRY_OPTOUT: 1
DOTNET_SYSTEM_GLOBALIZATION_INVARIANT: 1
TZ: UTC
STELLAOPS_OFFLINE: 'true'
STELLAOPS_DETERMINISTIC: 'true'
outputs:
precision: ${{ steps.metrics.outputs.precision }}
recall: ${{ steps.metrics.outputs.recall }}
f1: ${{ steps.metrics.outputs.f1 }}
pr_auc: ${{ steps.metrics.outputs.pr_auc }}
regression: ${{ steps.compare.outputs.regression }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup .NET 10
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100
include-prerelease: true
- name: Cache NuGet packages
uses: actions/cache@v4
with:
path: ~/.nuget/packages
key: ${{ runner.os }}-nuget-${{ hashFiles('**/*.csproj') }}
restore-keys: |
${{ runner.os }}-nuget-
- name: Restore benchmark project
run: |
dotnet restore src/Scanner/__Libraries/StellaOps.Scanner.Benchmarks/StellaOps.Scanner.Benchmarks.csproj \
--configfile nuget.config
- name: Build benchmark project
run: |
dotnet build src/Scanner/__Libraries/StellaOps.Scanner.Benchmarks/StellaOps.Scanner.Benchmarks.csproj \
-c Release \
--no-restore
- name: Validate corpus integrity
run: |
echo "::group::Validating corpus index"
if [ ! -f datasets/reachability/corpus.json ]; then
echo "::error::corpus.json not found"
exit 1
fi
python3 -c "import json; data = json.load(open('datasets/reachability/corpus.json')); print(f'Corpus contains {len(data.get(\"samples\", []))} samples')"
echo "::endgroup::"
- name: Run benchmark
id: benchmark
run: |
echo "::group::Running reachability benchmark"
mkdir -p bench/results
# Run the corpus benchmark
dotnet run \
--project src/Scanner/__Libraries/StellaOps.Scanner.Benchmarks/StellaOps.Scanner.Benchmarks.csproj \
-c Release \
--no-build \
-- corpus run \
--corpus datasets/reachability/corpus.json \
--output bench/results/benchmark-${{ github.sha }}.json \
--format json \
${{ inputs.verbose && '--verbose' || '' }}
echo "::endgroup::"
- name: Extract metrics
id: metrics
run: |
echo "::group::Extracting metrics"
RESULT_FILE="bench/results/benchmark-${{ github.sha }}.json"
if [ -f "$RESULT_FILE" ]; then
PRECISION=$(jq -r '.metrics.precision // 0' "$RESULT_FILE")
RECALL=$(jq -r '.metrics.recall // 0' "$RESULT_FILE")
F1=$(jq -r '.metrics.f1 // 0' "$RESULT_FILE")
PR_AUC=$(jq -r '.metrics.pr_auc // 0' "$RESULT_FILE")
echo "precision=$PRECISION" >> $GITHUB_OUTPUT
echo "recall=$RECALL" >> $GITHUB_OUTPUT
echo "f1=$F1" >> $GITHUB_OUTPUT
echo "pr_auc=$PR_AUC" >> $GITHUB_OUTPUT
echo "Precision: $PRECISION"
echo "Recall: $RECALL"
echo "F1: $F1"
echo "PR-AUC: $PR_AUC"
else
echo "::error::Benchmark result file not found"
exit 1
fi
echo "::endgroup::"
- name: Get baseline
id: baseline
run: |
echo "::group::Loading baseline"
BASELINE_VERSION="${{ inputs.baseline_version || 'latest' }}"
if [ "$BASELINE_VERSION" = "latest" ]; then
BASELINE_FILE=$(ls -t bench/baselines/*.json 2>/dev/null | head -1)
else
BASELINE_FILE="bench/baselines/$BASELINE_VERSION.json"
fi
if [ -f "$BASELINE_FILE" ]; then
echo "baseline_file=$BASELINE_FILE" >> $GITHUB_OUTPUT
echo "Using baseline: $BASELINE_FILE"
else
echo "::warning::No baseline found, skipping comparison"
echo "baseline_file=" >> $GITHUB_OUTPUT
fi
echo "::endgroup::"
- name: Compare to baseline
id: compare
if: steps.baseline.outputs.baseline_file != ''
run: |
echo "::group::Comparing to baseline"
BASELINE_FILE="${{ steps.baseline.outputs.baseline_file }}"
RESULT_FILE="bench/results/benchmark-${{ github.sha }}.json"
# Extract baseline metrics
BASELINE_PRECISION=$(jq -r '.metrics.precision // 0' "$BASELINE_FILE")
BASELINE_RECALL=$(jq -r '.metrics.recall // 0' "$BASELINE_FILE")
BASELINE_PR_AUC=$(jq -r '.metrics.pr_auc // 0' "$BASELINE_FILE")
# Extract current metrics
CURRENT_PRECISION=$(jq -r '.metrics.precision // 0' "$RESULT_FILE")
CURRENT_RECALL=$(jq -r '.metrics.recall // 0' "$RESULT_FILE")
CURRENT_PR_AUC=$(jq -r '.metrics.pr_auc // 0' "$RESULT_FILE")
# Calculate deltas
PRECISION_DELTA=$(echo "$CURRENT_PRECISION - $BASELINE_PRECISION" | bc -l)
RECALL_DELTA=$(echo "$CURRENT_RECALL - $BASELINE_RECALL" | bc -l)
PR_AUC_DELTA=$(echo "$CURRENT_PR_AUC - $BASELINE_PR_AUC" | bc -l)
echo "Precision delta: $PRECISION_DELTA"
echo "Recall delta: $RECALL_DELTA"
echo "PR-AUC delta: $PR_AUC_DELTA"
# Check for regression (PR-AUC drop > 2%)
REGRESSION_THRESHOLD=-0.02
if (( $(echo "$PR_AUC_DELTA < $REGRESSION_THRESHOLD" | bc -l) )); then
echo "::error::PR-AUC regression detected: $PR_AUC_DELTA (threshold: $REGRESSION_THRESHOLD)"
echo "regression=true" >> $GITHUB_OUTPUT
else
echo "regression=false" >> $GITHUB_OUTPUT
fi
echo "::endgroup::"
- name: Generate markdown report
run: |
echo "::group::Generating report"
RESULT_FILE="bench/results/benchmark-${{ github.sha }}.json"
REPORT_FILE="bench/results/benchmark-${{ github.sha }}.md"
cat > "$REPORT_FILE" << 'EOF'
# Reachability Benchmark Report
**Commit:** ${{ github.sha }}
**Run:** ${{ github.run_number }}
**Date:** $(date -u +"%Y-%m-%dT%H:%M:%SZ")
## Metrics
| Metric | Value |
|--------|-------|
| Precision | ${{ steps.metrics.outputs.precision }} |
| Recall | ${{ steps.metrics.outputs.recall }} |
| F1 Score | ${{ steps.metrics.outputs.f1 }} |
| PR-AUC | ${{ steps.metrics.outputs.pr_auc }} |
## Comparison
${{ steps.compare.outputs.regression == 'true' && '⚠️ **REGRESSION DETECTED**' || '✅ No regression' }}
EOF
echo "Report generated: $REPORT_FILE"
echo "::endgroup::"
- name: Upload results
uses: actions/upload-artifact@v4
with:
name: benchmark-results-${{ github.sha }}
path: |
bench/results/benchmark-${{ github.sha }}.json
bench/results/benchmark-${{ github.sha }}.md
retention-days: 90
- name: Fail on regression
if: steps.compare.outputs.regression == 'true' && github.event_name == 'pull_request'
run: |
echo "::error::Benchmark regression detected. PR-AUC dropped below threshold."
exit 1
update-baseline:
needs: benchmark
if: github.event_name == 'push' && github.ref == 'refs/heads/main' && needs.benchmark.outputs.regression != 'true'
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Download results
uses: actions/download-artifact@v4
with:
name: benchmark-results-${{ github.sha }}
path: bench/results/
- name: Update baseline (nightly only)
if: github.event_name == 'schedule'
run: |
DATE=$(date +%Y%m%d)
cp bench/results/benchmark-${{ github.sha }}.json bench/baselines/baseline-$DATE.json
echo "Updated baseline to baseline-$DATE.json"
notify-pr:
needs: benchmark
if: github.event_name == 'pull_request'
runs-on: ubuntu-22.04
permissions:
pull-requests: write
steps:
- name: Comment on PR
uses: actions/github-script@v7
with:
script: |
const precision = '${{ needs.benchmark.outputs.precision }}';
const recall = '${{ needs.benchmark.outputs.recall }}';
const f1 = '${{ needs.benchmark.outputs.f1 }}';
const prAuc = '${{ needs.benchmark.outputs.pr_auc }}';
const regression = '${{ needs.benchmark.outputs.regression }}' === 'true';
const status = regression ? '⚠️ REGRESSION' : '✅ PASS';
const body = `## Reachability Benchmark Results ${status}
| Metric | Value |
|--------|-------|
| Precision | ${precision} |
| Recall | ${recall} |
| F1 Score | ${f1} |
| PR-AUC | ${prAuc} |
${regression ? '### ⚠️ Regression Detected\nPR-AUC dropped below threshold. Please review changes.' : ''}
<details>
<summary>Details</summary>
- Commit: \`${{ github.sha }}\`
- Run: [#${{ github.run_number }}](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})
</details>`;
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: body
});

View File

@@ -1,267 +0,0 @@
name: Reachability Corpus Validation
on:
workflow_dispatch:
push:
branches: [ main ]
paths:
- 'src/__Tests/reachability/corpus/**'
- 'src/__Tests/reachability/fixtures/**'
- 'src/__Tests/reachability/StellaOps.Reachability.FixtureTests/**'
- 'scripts/reachability/**'
- '.gitea/workflows/reachability-corpus-ci.yml'
pull_request:
paths:
- 'src/__Tests/reachability/corpus/**'
- 'src/__Tests/reachability/fixtures/**'
- 'src/__Tests/reachability/StellaOps.Reachability.FixtureTests/**'
- 'scripts/reachability/**'
- '.gitea/workflows/reachability-corpus-ci.yml'
jobs:
validate-corpus:
runs-on: ubuntu-22.04
env:
DOTNET_NOLOGO: 1
DOTNET_CLI_TELEMETRY_OPTOUT: 1
DOTNET_SYSTEM_GLOBALIZATION_INVARIANT: 1
TZ: UTC
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup .NET 10 RC
uses: actions/setup-dotnet@v4
with:
dotnet-version: 10.0.100
include-prerelease: true
- name: Verify corpus manifest integrity
run: |
echo "Verifying corpus manifest..."
cd src/__Tests/reachability/corpus
if [ ! -f manifest.json ]; then
echo "::error::Corpus manifest.json not found"
exit 1
fi
echo "Manifest exists, checking JSON validity..."
python3 -c "import json; json.load(open('manifest.json'))"
echo "Manifest is valid JSON"
- name: Verify reachbench index integrity
run: |
echo "Verifying reachbench fixtures..."
cd src/__Tests/reachability/fixtures/reachbench-2025-expanded
if [ ! -f INDEX.json ]; then
echo "::error::Reachbench INDEX.json not found"
exit 1
fi
echo "INDEX exists, checking JSON validity..."
python3 -c "import json; json.load(open('INDEX.json'))"
echo "INDEX is valid JSON"
- name: Restore test project
run: dotnet restore src/__Tests/reachability/StellaOps.Reachability.FixtureTests/StellaOps.Reachability.FixtureTests.csproj --configfile nuget.config
- name: Build test project
run: dotnet build src/__Tests/reachability/StellaOps.Reachability.FixtureTests/StellaOps.Reachability.FixtureTests.csproj -c Release --no-restore
- name: Run corpus fixture tests
run: |
dotnet test src/__Tests/reachability/StellaOps.Reachability.FixtureTests/StellaOps.Reachability.FixtureTests.csproj \
-c Release \
--no-build \
--logger "trx;LogFileName=corpus-results.trx" \
--results-directory ./TestResults \
--filter "FullyQualifiedName~CorpusFixtureTests"
- name: Run reachbench fixture tests
run: |
dotnet test src/__Tests/reachability/StellaOps.Reachability.FixtureTests/StellaOps.Reachability.FixtureTests.csproj \
-c Release \
--no-build \
--logger "trx;LogFileName=reachbench-results.trx" \
--results-directory ./TestResults \
--filter "FullyQualifiedName~ReachbenchFixtureTests"
- name: Verify deterministic hashes
run: |
echo "Verifying SHA-256 hashes in corpus manifest..."
chmod +x scripts/reachability/verify_corpus_hashes.sh || true
if [ -f scripts/reachability/verify_corpus_hashes.sh ]; then
scripts/reachability/verify_corpus_hashes.sh
else
echo "Hash verification script not found, using inline verification..."
cd src/__Tests/reachability/corpus
python3 << 'EOF'
import json
import hashlib
import sys
import os
with open('manifest.json') as f:
manifest = json.load(f)
errors = []
for entry in manifest:
case_id = entry['id']
lang = entry['language']
case_dir = os.path.join(lang, case_id)
for filename, expected_hash in entry['files'].items():
filepath = os.path.join(case_dir, filename)
if not os.path.exists(filepath):
errors.append(f"{case_id}: missing {filename}")
continue
with open(filepath, 'rb') as f:
actual_hash = hashlib.sha256(f.read()).hexdigest()
if actual_hash != expected_hash:
errors.append(f"{case_id}: {filename} hash mismatch (expected {expected_hash}, got {actual_hash})")
if errors:
for err in errors:
print(f"::error::{err}")
sys.exit(1)
print(f"All {len(manifest)} corpus entries verified")
EOF
fi
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: corpus-test-results-${{ github.run_number }}
path: ./TestResults/*.trx
retention-days: 14
validate-ground-truths:
runs-on: ubuntu-22.04
env:
TZ: UTC
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Validate ground-truth schema version
run: |
echo "Validating ground-truth files..."
cd src/__Tests/reachability
python3 << 'EOF'
import json
import os
import sys
EXPECTED_SCHEMA = "reachbench.reachgraph.truth/v1"
ALLOWED_VARIANTS = {"reachable", "unreachable"}
errors = []
# Validate corpus ground-truths
corpus_manifest = 'corpus/manifest.json'
if os.path.exists(corpus_manifest):
with open(corpus_manifest) as f:
manifest = json.load(f)
for entry in manifest:
case_id = entry['id']
lang = entry['language']
truth_path = os.path.join('corpus', lang, case_id, 'ground-truth.json')
if not os.path.exists(truth_path):
errors.append(f"corpus/{case_id}: missing ground-truth.json")
continue
with open(truth_path) as f:
truth = json.load(f)
if truth.get('schema_version') != EXPECTED_SCHEMA:
errors.append(f"corpus/{case_id}: wrong schema_version")
if truth.get('variant') not in ALLOWED_VARIANTS:
errors.append(f"corpus/{case_id}: invalid variant '{truth.get('variant')}'")
if not isinstance(truth.get('paths'), list):
errors.append(f"corpus/{case_id}: paths must be an array")
# Validate reachbench ground-truths
reachbench_index = 'fixtures/reachbench-2025-expanded/INDEX.json'
if os.path.exists(reachbench_index):
with open(reachbench_index) as f:
index = json.load(f)
for case in index.get('cases', []):
case_id = case['id']
case_path = case.get('path', os.path.join('cases', case_id))
for variant in ['reachable', 'unreachable']:
truth_path = os.path.join('fixtures/reachbench-2025-expanded', case_path, 'images', variant, 'reachgraph.truth.json')
if not os.path.exists(truth_path):
errors.append(f"reachbench/{case_id}/{variant}: missing reachgraph.truth.json")
continue
with open(truth_path) as f:
truth = json.load(f)
if not truth.get('schema_version'):
errors.append(f"reachbench/{case_id}/{variant}: missing schema_version")
if not isinstance(truth.get('paths'), list):
errors.append(f"reachbench/{case_id}/{variant}: paths must be an array")
if errors:
for err in errors:
print(f"::error::{err}")
sys.exit(1)
print("All ground-truth files validated successfully")
EOF
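# Ground-truth contract enforced above: corpus entries must carry
# schema_version "reachbench.reachgraph.truth/v1", a variant of "reachable" or
# "unreachable", and a JSON array under "paths"; reachbench truths need any
# non-empty schema_version plus the same "paths" array.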
determinism-check:
runs-on: ubuntu-22.04
env:
TZ: UTC
needs: validate-corpus
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Verify JSON determinism (sorted keys, no trailing whitespace)
run: |
echo "Checking JSON determinism..."
cd src/__Tests/reachability
python3 << 'EOF'
import json
import os
import sys
def check_json_sorted(filepath):
"""Check if JSON has sorted keys (deterministic)."""
with open(filepath) as f:
content = f.read()
parsed = json.loads(content)
reserialized = json.dumps(parsed, sort_keys=True, indent=2)
# Normalize line endings
content_normalized = content.replace('\r\n', '\n').strip()
reserialized_normalized = reserialized.strip()
return content_normalized == reserialized_normalized
errors = []
json_files = []
# Collect JSON files from corpus
for root, dirs, files in os.walk('corpus'):
for f in files:
if f.endswith('.json'):
json_files.append(os.path.join(root, f))
# Check determinism
non_deterministic = []
for filepath in json_files:
try:
if not check_json_sorted(filepath):
non_deterministic.append(filepath)
except json.JSONDecodeError as e:
errors.append(f"{filepath}: invalid JSON - {e}")
if non_deterministic:
print(f"::warning::Found {len(non_deterministic)} non-deterministic JSON files (keys not sorted or whitespace differs)")
for f in non_deterministic[:10]:
print(f" - {f}")
if len(non_deterministic) > 10:
print(f" ... and {len(non_deterministic) - 10} more")
if errors:
for err in errors:
print(f"::error::{err}")
sys.exit(1)
print(f"Checked {len(json_files)} JSON files")
EOF
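# To normalize a flagged file locally (sketch; the path is hypothetical):
#   python3 -c "import json; p='corpus/example.json'; \
#     json.dump(json.load(open(p)), open(p,'w'), sort_keys=True, indent=2)"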

View File

@@ -1,399 +0,0 @@
# .gitea/workflows/release-keyless-sign.yml
# Keyless signing for StellaOps release artifacts
#
# This workflow signs release artifacts using keyless signing (Fulcio).
# It demonstrates dogfooding of the keyless signing feature.
#
# Triggers:
# - After release bundle is published
# - Manual trigger for re-signing
#
# Artifacts signed:
# - Container images
# - CLI binaries
# - SBOM documents
# - Release manifest
name: Release Keyless Signing
on:
release:
types: [published]
workflow_dispatch:
inputs:
version:
description: 'Release version to sign (e.g., 2025.12.0)'
required: true
type: string
dry_run:
description: 'Dry run (skip actual signing)'
required: false
default: false
type: boolean
env:
STELLAOPS_URL: "https://api.stella-ops.internal"
REGISTRY: registry.stella-ops.org
jobs:
sign-images:
runs-on: ubuntu-22.04
permissions:
id-token: write
contents: read
packages: write
outputs:
scanner-attestation: ${{ steps.sign-scanner.outputs.attestation-digest }}
cli-attestation: ${{ steps.sign-cli.outputs.attestation-digest }}
gateway-attestation: ${{ steps.sign-gateway.outputs.attestation-digest }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Determine Version
id: version
run: |
if [[ -n "${{ github.event.inputs.version }}" ]]; then
VERSION="${{ github.event.inputs.version }}"
else
VERSION="${{ github.event.release.tag_name }}"
VERSION="${VERSION#v}"
fi
echo "version=${VERSION}" >> $GITHUB_OUTPUT
echo "Release version: ${VERSION}"
- name: Install StellaOps CLI
run: |
curl -sL https://get.stella-ops.org/cli | sh
echo "$HOME/.stellaops/bin" >> $GITHUB_PATH
- name: Log in to Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ secrets.REGISTRY_USERNAME }}
password: ${{ secrets.REGISTRY_PASSWORD }}
- name: Get OIDC Token
id: oidc
run: |
OIDC_TOKEN="${ACTIONS_ID_TOKEN}"
if [[ -z "$OIDC_TOKEN" ]]; then
echo "::error::OIDC token not available"
exit 1
fi
echo "::add-mask::${OIDC_TOKEN}"
echo "token=${OIDC_TOKEN}" >> $GITHUB_OUTPUT
- name: Sign Scanner Image
id: sign-scanner
if: ${{ github.event.inputs.dry_run != 'true' }}
env:
STELLAOPS_OIDC_TOKEN: ${{ steps.oidc.outputs.token }}
run: |
VERSION="${{ steps.version.outputs.version }}"
IMAGE="${REGISTRY}/stellaops/scanner:${VERSION}"
echo "Signing scanner image: ${IMAGE}"
DIGEST=$(docker manifest inspect "${IMAGE}" -v | jq -r '.Descriptor.digest')
RESULT=$(stella attest sign \
--keyless \
--artifact "${DIGEST}" \
--type image \
--rekor \
--output json)
ATTESTATION=$(echo "$RESULT" | jq -r '.attestationDigest')
REKOR=$(echo "$RESULT" | jq -r '.rekorUuid')
echo "attestation-digest=${ATTESTATION}" >> $GITHUB_OUTPUT
echo "rekor-uuid=${REKOR}" >> $GITHUB_OUTPUT
# Push attestation to registry
stella attest push \
--attestation "${ATTESTATION}" \
--registry "stellaops/scanner"
- name: Sign CLI Image
id: sign-cli
if: ${{ github.event.inputs.dry_run != 'true' }}
env:
STELLAOPS_OIDC_TOKEN: ${{ steps.oidc.outputs.token }}
run: |
VERSION="${{ steps.version.outputs.version }}"
IMAGE="${REGISTRY}/stellaops/cli:${VERSION}"
echo "Signing CLI image: ${IMAGE}"
DIGEST=$(docker manifest inspect "${IMAGE}" -v | jq -r '.Descriptor.digest')
RESULT=$(stella attest sign \
--keyless \
--artifact "${DIGEST}" \
--type image \
--rekor \
--output json)
ATTESTATION=$(echo "$RESULT" | jq -r '.attestationDigest')
echo "attestation-digest=${ATTESTATION}" >> $GITHUB_OUTPUT
stella attest push \
--attestation "${ATTESTATION}" \
--registry "stellaops/cli"
- name: Sign Gateway Image
id: sign-gateway
if: ${{ github.event.inputs.dry_run != 'true' }}
env:
STELLAOPS_OIDC_TOKEN: ${{ steps.oidc.outputs.token }}
run: |
VERSION="${{ steps.version.outputs.version }}"
IMAGE="${REGISTRY}/stellaops/gateway:${VERSION}"
echo "Signing gateway image: ${IMAGE}"
DIGEST=$(docker manifest inspect "${IMAGE}" -v | jq -r '.Descriptor.digest')
RESULT=$(stella attest sign \
--keyless \
--artifact "${DIGEST}" \
--type image \
--rekor \
--output json)
ATTESTATION=$(echo "$RESULT" | jq -r '.attestationDigest')
echo "attestation-digest=${ATTESTATION}" >> $GITHUB_OUTPUT
stella attest push \
--attestation "${ATTESTATION}" \
--registry "stellaops/gateway"
sign-binaries:
runs-on: ubuntu-22.04
permissions:
id-token: write
contents: read
outputs:
cli-linux-x64: ${{ steps.sign-cli-linux-x64.outputs.attestation-digest }}
cli-linux-arm64: ${{ steps.sign-cli-linux-arm64.outputs.attestation-digest }}
cli-darwin-x64: ${{ steps.sign-cli-darwin-x64.outputs.attestation-digest }}
cli-darwin-arm64: ${{ steps.sign-cli-darwin-arm64.outputs.attestation-digest }}
cli-windows-x64: ${{ steps.sign-cli-windows-x64.outputs.attestation-digest }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Determine Version
id: version
run: |
if [[ -n "${{ github.event.inputs.version }}" ]]; then
VERSION="${{ github.event.inputs.version }}"
else
VERSION="${{ github.event.release.tag_name }}"
VERSION="${VERSION#v}"
fi
echo "version=${VERSION}" >> $GITHUB_OUTPUT
- name: Install StellaOps CLI
run: |
curl -sL https://get.stella-ops.org/cli | sh
echo "$HOME/.stellaops/bin" >> $GITHUB_PATH
- name: Download Release Artifacts
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
VERSION="${{ steps.version.outputs.version }}"
mkdir -p artifacts
# Download CLI binaries
gh release download "v${VERSION}" \
--pattern "stellaops-cli-*" \
--dir artifacts \
|| echo "No CLI binaries found"
- name: Get OIDC Token
id: oidc
run: |
OIDC_TOKEN="${ACTIONS_ID_TOKEN}"
echo "::add-mask::${OIDC_TOKEN}"
echo "token=${OIDC_TOKEN}" >> $GITHUB_OUTPUT
- name: Sign CLI Binary (linux-x64)
id: sign-cli-linux-x64
if: ${{ github.event.inputs.dry_run != 'true' }}
env:
STELLAOPS_OIDC_TOKEN: ${{ steps.oidc.outputs.token }}
run: |
BINARY="artifacts/stellaops-cli-linux-x64"
if [[ -f "$BINARY" ]]; then
DIGEST="sha256:$(sha256sum "$BINARY" | cut -d' ' -f1)"
RESULT=$(stella attest sign \
--keyless \
--artifact "${DIGEST}" \
--type binary \
--rekor \
--output json)
ATTESTATION=$(echo "$RESULT" | jq -r '.attestationDigest')
echo "attestation-digest=${ATTESTATION}" >> $GITHUB_OUTPUT
fi
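# Note: for raw binaries the digest is computed locally; sha256sum prints
# "HASH  FILE", so cut -d' ' -f1 extracts the bare hash, which is prefixed
# with sha256: to form the digest string passed to --artifact.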
- name: Sign CLI Binary (linux-arm64)
id: sign-cli-linux-arm64
if: ${{ github.event.inputs.dry_run != 'true' }}
env:
STELLAOPS_OIDC_TOKEN: ${{ steps.oidc.outputs.token }}
run: |
BINARY="artifacts/stellaops-cli-linux-arm64"
if [[ -f "$BINARY" ]]; then
DIGEST="sha256:$(sha256sum "$BINARY" | cut -d' ' -f1)"
RESULT=$(stella attest sign \
--keyless \
--artifact "${DIGEST}" \
--type binary \
--rekor \
--output json)
ATTESTATION=$(echo "$RESULT" | jq -r '.attestationDigest')
echo "attestation-digest=${ATTESTATION}" >> $GITHUB_OUTPUT
fi
- name: Sign CLI Binary (darwin-x64)
id: sign-cli-darwin-x64
if: ${{ github.event.inputs.dry_run != 'true' }}
env:
STELLAOPS_OIDC_TOKEN: ${{ steps.oidc.outputs.token }}
run: |
BINARY="artifacts/stellaops-cli-darwin-x64"
if [[ -f "$BINARY" ]]; then
DIGEST="sha256:$(sha256sum "$BINARY" | cut -d' ' -f1)"
RESULT=$(stella attest sign \
--keyless \
--artifact "${DIGEST}" \
--type binary \
--rekor \
--output json)
ATTESTATION=$(echo "$RESULT" | jq -r '.attestationDigest')
echo "attestation-digest=${ATTESTATION}" >> $GITHUB_OUTPUT
fi
- name: Sign CLI Binary (darwin-arm64)
id: sign-cli-darwin-arm64
if: ${{ github.event.inputs.dry_run != 'true' }}
env:
STELLAOPS_OIDC_TOKEN: ${{ steps.oidc.outputs.token }}
run: |
BINARY="artifacts/stellaops-cli-darwin-arm64"
if [[ -f "$BINARY" ]]; then
DIGEST="sha256:$(sha256sum "$BINARY" | cut -d' ' -f1)"
RESULT=$(stella attest sign \
--keyless \
--artifact "${DIGEST}" \
--type binary \
--rekor \
--output json)
ATTESTATION=$(echo "$RESULT" | jq -r '.attestationDigest')
echo "attestation-digest=${ATTESTATION}" >> $GITHUB_OUTPUT
fi
- name: Sign CLI Binary (windows-x64)
id: sign-cli-windows-x64
if: ${{ github.event.inputs.dry_run != 'true' }}
env:
STELLAOPS_OIDC_TOKEN: ${{ steps.oidc.outputs.token }}
run: |
BINARY="artifacts/stellaops-cli-windows-x64.exe"
if [[ -f "$BINARY" ]]; then
DIGEST="sha256:$(sha256sum "$BINARY" | cut -d' ' -f1)"
RESULT=$(stella attest sign \
--keyless \
--artifact "${DIGEST}" \
--type binary \
--rekor \
--output json)
ATTESTATION=$(echo "$RESULT" | jq -r '.attestationDigest')
echo "attestation-digest=${ATTESTATION}" >> $GITHUB_OUTPUT
fi
verify-signatures:
needs: [sign-images, sign-binaries]
runs-on: ubuntu-22.04
permissions:
contents: read
packages: read
steps:
- name: Install StellaOps CLI
run: |
curl -sL https://get.stella-ops.org/cli | sh
echo "$HOME/.stellaops/bin" >> $GITHUB_PATH
- name: Determine Version
id: version
run: |
if [[ -n "${{ github.event.inputs.version }}" ]]; then
VERSION="${{ github.event.inputs.version }}"
else
VERSION="${{ github.event.release.tag_name }}"
VERSION="${VERSION#v}"
fi
echo "version=${VERSION}" >> $GITHUB_OUTPUT
- name: Verify Scanner Image
if: ${{ github.event.inputs.dry_run != 'true' }}
run: |
VERSION="${{ steps.version.outputs.version }}"
IMAGE="${REGISTRY}/stellaops/scanner:${VERSION}"
DIGEST=$(docker manifest inspect "${IMAGE}" -v | jq -r '.Descriptor.digest')
stella attest verify \
--artifact "${DIGEST}" \
--certificate-identity "stella-ops.org/git.stella-ops.org:ref:refs/tags/v${VERSION}" \
--certificate-oidc-issuer "https://git.stella-ops.org" \
--require-rekor
- name: Summary
run: |
VERSION="${{ steps.version.outputs.version }}"
cat >> $GITHUB_STEP_SUMMARY << EOF
## Release v${VERSION} Signed
### Container Images
| Image | Attestation |
|-------|-------------|
| scanner | \`${{ needs.sign-images.outputs.scanner-attestation }}\` |
| cli | \`${{ needs.sign-images.outputs.cli-attestation }}\` |
| gateway | \`${{ needs.sign-images.outputs.gateway-attestation }}\` |
### CLI Binaries
| Platform | Attestation |
|----------|-------------|
| linux-x64 | \`${{ needs.sign-binaries.outputs.cli-linux-x64 }}\` |
| linux-arm64 | \`${{ needs.sign-binaries.outputs.cli-linux-arm64 }}\` |
| darwin-x64 | \`${{ needs.sign-binaries.outputs.cli-darwin-x64 }}\` |
| darwin-arm64 | \`${{ needs.sign-binaries.outputs.cli-darwin-arm64 }}\` |
| windows-x64 | \`${{ needs.sign-binaries.outputs.cli-windows-x64 }}\` |
### Verification
\`\`\`bash
stella attest verify \\
--artifact "sha256:..." \\
--certificate-identity "stella-ops.org/git.stella-ops.org:ref:refs/tags/v${VERSION}" \\
--certificate-oidc-issuer "https://git.stella-ops.org"
\`\`\`
EOF

View File

@@ -1,19 +0,0 @@
name: release-manifest-verify
on:
push:
paths:
- devops/releases/2025.09-stable.yaml
- devops/releases/2025.09-airgap.yaml
- devops/downloads/manifest.json
- devops/release/check_release_manifest.py
workflow_dispatch:
jobs:
verify:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Validate release & downloads manifests
run: |
python devops/release/check_release_manifest.py

Some files were not shown because too many files have changed in this diff.