Some checks failed
Docs CI / lint-and-preview (push) Has been cancelled
Signals CI & Image / signals-ci (push) Has been cancelled
Policy Lint & Smoke / policy-lint (push) Has been cancelled
Policy Simulation / policy-simulate (push) Has been cancelled
SDK Publish & Sign / sdk-publish (push) Has been cancelled
AOC Guard CI / aoc-guard (push) Has been cancelled
AOC Guard CI / aoc-verify (push) Has been cancelled
Concelier Attestation Tests / attestation-tests (push) Has been cancelled
devportal-offline / build-offline (push) Has been cancelled
scripts/devops/cleanup-workspace.sh (new file, 46 lines)
@@ -0,0 +1,46 @@
#!/usr/bin/env bash
set -euo pipefail
# Cleans common build/test artifacts to reclaim disk space in this repo.
# Defaults to a safe set; pass SAFE_ONLY=0 to include bin/obj.

DRY_RUN=${DRY_RUN:-0}
SAFE_ONLY=${SAFE_ONLY:-1}

log() { printf "[cleanup] %s\n" "$*"; }
run() {
  if [[ "$DRY_RUN" == "1" ]]; then
    log "DRY_RUN: $*"
  else
    eval "$@"
  fi
}

ROOT="$(git rev-parse --show-toplevel 2>/dev/null || pwd)"
cd "$ROOT"

paths=(
  "out"
  "ops/devops/artifacts"
  "ops/devops/ci-110-runner/artifacts"
  "ops/devops/sealed-mode-ci/artifacts"
  "TestResults"
  "tests/TestResults"
  "local-nugets/packages"
  ".nuget/packages"
)

if [[ "$SAFE_ONLY" != "1" ]]; then
  while IFS= read -r dir; do
    paths+=("$dir")
  done < <(find . -maxdepth 4 -type d \( -name bin -o -name obj -o -name TestResults \) 2>/dev/null)
fi

log "Safe only: $SAFE_ONLY ; Dry run: $DRY_RUN"
for p in "${paths[@]}"; do
  if [[ -d "$p" ]]; then
    log "Removing $p"
    run "rm -rf '$p'"
  fi
done

log "Done."
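A usage sketch based on the toggles above (the script cd's to the git top level, so it can be run from any subdirectory):

  # preview what would be removed, including bin/obj/TestResults found by the deep scan
  DRY_RUN=1 SAFE_ONLY=0 scripts/devops/cleanup-workspace.sh

  # delete only the safe default set
  scripts/devops/cleanup-workspace.sh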
scripts/orchestrator/probe.sh (new file, 51 lines)
@@ -0,0 +1,51 @@
#!/usr/bin/env bash
set -euo pipefail
# Synthetic probe for orchestrator infra (postgres, mongo, nats).
# Runs lightweight checks and writes a status file under out/orchestrator-probe/.

COMPOSE_FILE=${COMPOSE_FILE:-ops/devops/orchestrator/docker-compose.orchestrator.yml}
STATE_DIR=${STATE_DIR:-out/orchestrator-probe}

mkdir -p "$STATE_DIR"

log() { printf "[probe] %s\n" "$*"; }
require() { command -v "$1" >/dev/null 2>&1 || { echo "missing $1" >&2; exit 1; }; }

require docker

timestamp() { date -u +%Y-%m-%dT%H:%M:%SZ; }

log "compose file: $COMPOSE_FILE"

PG_OK=0
MONGO_OK=0
NATS_OK=0

if docker compose -f "$COMPOSE_FILE" ps orchestrator-postgres >/dev/null 2>&1; then
  if docker compose -f "$COMPOSE_FILE" exec -T orchestrator-postgres psql -U orch -tAc "select 1" | grep -q 1; then
    PG_OK=1
  fi
fi

if docker compose -f "$COMPOSE_FILE" ps orchestrator-mongo >/dev/null 2>&1; then
  if docker compose -f "$COMPOSE_FILE" exec -T orchestrator-mongo mongosh --quiet --eval "db.adminCommand('ping').ok" | grep -q 1; then
    MONGO_OK=1
  fi
fi

if docker compose -f "$COMPOSE_FILE" ps orchestrator-nats >/dev/null 2>&1; then
  if docker compose -f "$COMPOSE_FILE" exec -T orchestrator-nats nats --server localhost:4222 ping >/dev/null 2>&1; then
    # publish & request to ensure traffic path works
    docker compose -f "$COMPOSE_FILE" exec -T orchestrator-nats nats --server localhost:4222 pub probe.ping "ok" >/dev/null 2>&1 || true
    NATS_OK=1
  fi
fi

cat > "$STATE_DIR/status.txt" <<EOF
timestamp=$(timestamp)
postgres_ok=$PG_OK
mongo_ok=$MONGO_OK
nats_ok=$NATS_OK
EOF

log "probe complete (pg=$PG_OK mongo=$MONGO_OK nats=$NATS_OK)"
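A hedged usage sketch, assuming the orchestrator compose stack is already running:

  docker compose -f ops/devops/orchestrator/docker-compose.orchestrator.yml up -d
  scripts/orchestrator/probe.sh
  cat out/orchestrator-probe/status.txt   # keys: timestamp, postgres_ok, mongo_ok, nats_ok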
scripts/orchestrator/replay-smoke.sh (new file, 17 lines)
@@ -0,0 +1,17 @@
#!/usr/bin/env bash
set -euo pipefail
# Replay smoke: restart infra and rerun baseline smoke to validate persistence/readiness.

COMPOSE_FILE=${COMPOSE_FILE:-ops/devops/orchestrator/docker-compose.orchestrator.yml}
STATE_DIR=${STATE_DIR:-out/orchestrator-smoke}

log() { printf "[replay-smoke] %s\n" "$*"; }

log "restarting orchestrator infra (compose: $COMPOSE_FILE)"
docker compose -f "$COMPOSE_FILE" down
docker compose -f "$COMPOSE_FILE" up -d

log "running baseline smoke"
COMPOSE_FILE="$COMPOSE_FILE" STATE_DIR="$STATE_DIR" scripts/orchestrator/smoke.sh

log "replay smoke done; readiness at $STATE_DIR/readiness.txt"
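Usage sketch; note the script delegates to scripts/orchestrator/smoke.sh, which is not part of this commit and must already exist:

  STATE_DIR=out/orchestrator-smoke scripts/orchestrator/replay-smoke.sh
  cat out/orchestrator-smoke/readiness.txt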
scripts/policy/attest-verify.sh (new file, 85 lines)
@@ -0,0 +1,85 @@
#!/usr/bin/env bash
set -euo pipefail
# Create and verify a DSSE attestation for a policy blob using cosign.
# Intended for CI and offline use; works with base64 inlined keys.

usage() {
  cat <<'USAGE'
Usage: attest-verify.sh --file <path> [--predicate <json>] [--type stella.policy] [--out-dir out/policy-sign]
Env:
  COSIGN_KEY_B64   base64-encoded PEM private key (if not using COSIGN_KEY path)
  COSIGN_PASSWORD  passphrase for the private key (optional)
USAGE
}

FILE=""
PREDICATE=""
TYPE="stella.policy"
OUT_DIR="out/policy-sign"

while [[ $# -gt 0 ]]; do
  case "$1" in
    --file) FILE="$2"; shift 2;;
    --predicate) PREDICATE="$2"; shift 2;;
    --type) TYPE="$2"; shift 2;;
    --out-dir) OUT_DIR="$2"; shift 2;;
    -h|--help) usage; exit 0;;
    *) echo "Unknown arg: $1" >&2; usage; exit 1;;
  esac
done

if [[ -z "$FILE" ]]; then echo "--file is required" >&2; exit 1; fi
if [[ ! -f "$FILE" ]]; then echo "file not found: $FILE" >&2; exit 1; fi

if ! command -v cosign >/dev/null 2>&1; then
  echo "cosign is required on PATH" >&2
  exit 1
fi

mkdir -p "$OUT_DIR"
BASENAME=$(basename "$FILE")
KEY_PATH=${COSIGN_KEY:-"$OUT_DIR/cosign.key"}
PUB_OUT="$OUT_DIR/cosign.pub"
BUNDLE="$OUT_DIR/${BASENAME}.attestation.sigstore"

if [[ -n "${COSIGN_KEY_B64:-}" ]]; then
  printf "%s" "$COSIGN_KEY_B64" | base64 -d > "$KEY_PATH"
  chmod 600 "$KEY_PATH"
fi

if [[ ! -f "$KEY_PATH" ]]; then
  echo "Missing signing key; set COSIGN_KEY_B64 or COSIGN_KEY path" >&2
  exit 1
fi

export COSIGN_PASSWORD=${COSIGN_PASSWORD:-}

if [[ -z "$PREDICATE" ]]; then
  PREDICATE="$OUT_DIR/${BASENAME}.predicate.json"
  sha256sum "$FILE" | awk '{print $1}' > "$OUT_DIR/${BASENAME}.sha256"
  cat > "$PREDICATE" <<EOF
{
  "file": "$FILE",
  "sha256": "$(cat "$OUT_DIR/${BASENAME}.sha256")",
  "createdAt": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")",
  "type": "$TYPE"
}
EOF
fi

cosign public-key --key "$KEY_PATH" > "$PUB_OUT"

cosign attest-blob \
  --predicate "$PREDICATE" \
  --type "$TYPE" \
  --bundle "$BUNDLE" \
  --key "$KEY_PATH" \
  "$FILE"

cosign verify-blob-attestation \
  --key "$PUB_OUT" \
  --type "$TYPE" \
  --bundle "$BUNDLE" \
  "$FILE"

printf "Attestation bundle -> %s\nVerified with -> %s\n" "$BUNDLE" "$PUB_OUT"
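Example invocation (a sketch; the key path and passphrase are placeholders, and the policy path is borrowed from the sample set used by batch-simulate.sh):

  export COSIGN_KEY_B64="$(base64 < out/policy-sign/keys/policy-cosign.key | tr -d '\n')"
  export COSIGN_PASSWORD=changeit   # placeholder passphrase
  scripts/policy/attest-verify.sh --file docs/examples/policies/baseline.stella --type stella.policy --out-dir out/policy-sign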
scripts/policy/batch-simulate.sh (new file, 49 lines)
@@ -0,0 +1,49 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT=$(cd "$(dirname "$0")/../.." && pwd)  # repo root (this script lives under scripts/policy/)
CLI_PROJECT="$ROOT/Cli/StellaOps.Cli/StellaOps.Cli.csproj"
POLICY_FILES=("docs/examples/policies/baseline.stella" "docs/examples/policies/internal-only.stella" "docs/examples/policies/serverless.stella")
SBOM_FILE="docs/examples/policies/sample-sbom.json"
OUT_DIR="${OUT_DIR:-out/policy-sim}"
THRESHOLD=${THRESHOLD:-0}

usage() {
  cat <<'USAGE'
Batch policy simulate harness (DEVOPS-POLICY-27-002)
- Runs stella policy simulate against sample policies and a sample SBOM
- Fails if violation count exceeds THRESHOLD (default 0)

Env/flags:
  OUT_DIR=out/policy-sim
  THRESHOLD=0
  SBOM_FILE=docs/examples/policies/sample-sbom.json
USAGE
}

if [[ ${1:-} == "-h" || ${1:-} == "--help" ]]; then usage; exit 0; fi
mkdir -p "$OUT_DIR"

violations=0
for policy in "${POLICY_FILES[@]}"; do
  name=$(basename "$policy" .stella)
  report="$OUT_DIR/${name}-simulate.json"
  dotnet run --project "$CLI_PROJECT" -- policy simulate --policy "$policy" --sbom "$SBOM_FILE" --format json --no-color > "$report"
  # count violations if field exists
  count=$(python - "$report" <<'PY'
import json,sys
with open(sys.argv[1]) as f:
    data=json.load(f)
viol = 0
if isinstance(data, dict):
    viol = len(data.get("violations", [])) if isinstance(data.get("violations", []), list) else 0
print(viol)
PY
)
  echo "[$name] violations=$count" | tee -a "$OUT_DIR/summary.txt"
  violations=$((violations + count))
done

echo "total_violations=$violations" | tee -a "$OUT_DIR/summary.txt"
if (( violations > THRESHOLD )); then
  echo "Violation threshold exceeded ($violations > $THRESHOLD)" >&2
  exit 1
fi
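Usage sketch (requires the .NET SDK plus the sample policies and SBOM under docs/examples/policies; run from the repo root so the relative paths resolve):

  # allow up to 3 violations across the sample policies before failing
  THRESHOLD=3 OUT_DIR=out/policy-sim scripts/policy/batch-simulate.sh
  cat out/policy-sim/summary.txt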
scripts/policy/rotate-key.sh (new file, 34 lines)
@@ -0,0 +1,34 @@
#!/usr/bin/env bash
set -euo pipefail
# Generates a new cosign keypair for policy signing.
# Outputs PEMs in out/policy-sign/keys and base64 ready for CI secrets.

OUT_DIR=${OUT_DIR:-out/policy-sign/keys}
PREFIX=${PREFIX:-policy-cosign}
PASSWORD=${COSIGN_PASSWORD:-}

mkdir -p "$OUT_DIR"
KEY_PREFIX="$OUT_DIR/$PREFIX"

if ! command -v cosign >/dev/null 2>&1; then
  echo "cosign is required on PATH" >&2
  exit 1
fi

export COSIGN_PASSWORD="$PASSWORD"
cosign version >/dev/null
cosign generate-key-pair --output-key-prefix "$KEY_PREFIX" >/dev/null

BASE64_PRIV=$(base64 < "${KEY_PREFIX}.key" | tr -d '\n')
BASE64_PUB=$(base64 < "${KEY_PREFIX}.pub" | tr -d '\n')

cat > "$OUT_DIR/README.txt" <<EOF
Key prefix: $KEY_PREFIX
Private key (base64): $BASE64_PRIV
Public key (base64): $BASE64_PUB
Set secrets:
  POLICY_COSIGN_KEY_B64=$BASE64_PRIV
  POLICY_COSIGN_PUB_B64=$BASE64_PUB
EOF

printf "Generated keys under %s\n" "$OUT_DIR"
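Usage sketch; the generated README.txt holds the base64 values to paste into the POLICY_COSIGN_KEY_B64 / POLICY_COSIGN_PUB_B64 secrets (the passphrase below is a placeholder):

  COSIGN_PASSWORD=changeit scripts/policy/rotate-key.sh
  cat out/policy-sign/keys/README.txt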
scripts/policy/sign-policy.sh (new file, 50 lines)
@@ -0,0 +1,50 @@
#!/usr/bin/env bash
set -euo pipefail
# Signs a policy file with cosign and verifies it. Intended for CI and offline use.
# Requires COSIGN_KEY_B64 (private key PEM base64) or KMS envs; optional COSIGN_PASSWORD.

usage() {
  cat <<'USAGE'
Usage: sign-policy.sh --file <path> [--out-dir out/policy-sign]
Env:
  COSIGN_KEY_B64          base64-encoded PEM private key (if not using KMS)
  COSIGN_PASSWORD         passphrase for the key (can be empty for test keys)
  COSIGN_PUBLIC_KEY_PATH  optional path to write public key for verify step
USAGE
}

FILE=""
OUT_DIR="out/policy-sign"

while [[ $# -gt 0 ]]; do
  case "$1" in
    --file) FILE="$2"; shift 2;;
    --out-dir) OUT_DIR="$2"; shift 2;;
    -h|--help) usage; exit 0;;
    *) echo "Unknown arg: $1" >&2; usage; exit 1;;
  esac
done

if [[ -z "$FILE" ]]; then echo "--file is required" >&2; exit 1; fi
if [[ ! -f "$FILE" ]]; then echo "file not found: $FILE" >&2; exit 1; fi

mkdir -p "$OUT_DIR"
BASENAME=$(basename "$FILE")
SIG="$OUT_DIR/${BASENAME}.sig"
PUB_OUT="${COSIGN_PUBLIC_KEY_PATH:-$OUT_DIR/cosign.pub}"

if [[ -n "${COSIGN_KEY_B64:-}" ]]; then
  KEYFILE="$OUT_DIR/cosign.key"
  printf "%s" "$COSIGN_KEY_B64" | base64 -d > "$KEYFILE"
  chmod 600 "$KEYFILE"
  export COSIGN_KEY="$KEYFILE"
fi

export COSIGN_PASSWORD=${COSIGN_PASSWORD:-}
cosign version >/dev/null

# COSIGN_KEY must be a key file path (from COSIGN_KEY_B64 above) or a KMS URI set by the caller.
cosign sign-blob --key "$COSIGN_KEY" --output-signature "$SIG" "$FILE"
cosign public-key --key "$COSIGN_KEY" > "$PUB_OUT"
cosign verify-blob --key "$PUB_OUT" --signature "$SIG" "$FILE"

printf "Signed %s -> %s\nPublic key -> %s\n" "$FILE" "$SIG" "$PUB_OUT"
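Example invocation (a sketch, assuming a key produced by rotate-key.sh above; the passphrase is a placeholder):

  export COSIGN_KEY_B64="$(base64 < out/policy-sign/keys/policy-cosign.key | tr -d '\n')"
  export COSIGN_PASSWORD=changeit
  scripts/policy/sign-policy.sh --file docs/examples/policies/baseline.stella --out-dir out/policy-sign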
scripts/sdk/generate-cert.sh (new file, 34 lines)
@@ -0,0 +1,34 @@
#!/usr/bin/env bash
set -euo pipefail
# Generates an offline-friendly code-signing certificate (self-signed) for NuGet package signing.

OUT_DIR=${OUT_DIR:-out/sdk-signing}
SUBJECT=${SUBJECT:-"/CN=StellaOps SDK Signing/O=StellaOps"}
DAYS=${DAYS:-3650}
PFX_NAME=${PFX_NAME:-sdk-signing.pfx}
PASSWORD=${PASSWORD:-""}

mkdir -p "$OUT_DIR"

PRIV="$OUT_DIR/sdk-signing.key"
CRT="$OUT_DIR/sdk-signing.crt"
PFX="$OUT_DIR/$PFX_NAME"

openssl req -x509 -newkey rsa:4096 -sha256 -days "$DAYS" \
  -nodes -subj "$SUBJECT" -keyout "$PRIV" -out "$CRT"

openssl pkcs12 -export -out "$PFX" -inkey "$PRIV" -in "$CRT" -passout pass:"$PASSWORD"

BASE64_PFX=$(base64 < "$PFX" | tr -d '\n')

cat > "$OUT_DIR/README.txt" <<EOF
PFX file: $PFX
Password: ${PASSWORD:-<empty>}
Base64:
$BASE64_PFX
Secrets to set:
  SDK_SIGNING_CERT_B64=$BASE64_PFX
  SDK_SIGNING_CERT_PASSWORD=$PASSWORD
EOF

printf "Generated signing cert -> %s (base64 in README)\n" "$PFX"
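Usage sketch (the subject, password, and validity below are illustrative overrides of the script defaults):

  PASSWORD=changeit SUBJECT="/CN=Example SDK Signing/O=Example" DAYS=730 scripts/sdk/generate-cert.sh
  cat out/sdk-signing/README.txt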
scripts/sdk/publish.sh (new file, 36 lines)
@@ -0,0 +1,36 @@
#!/usr/bin/env bash
set -euo pipefail
# Publishes signed NuGet packages to a configured feed (file or HTTP).

PACKAGES_GLOB=${PACKAGES_GLOB:-"out/sdk/*.nupkg"}
SOURCE=${SDK_NUGET_SOURCE:-"local-nugets/packages"}
API_KEY=${SDK_NUGET_API_KEY:-""}

mapfile -t packages < <(ls $PACKAGES_GLOB 2>/dev/null || true)
if [[ ${#packages[@]} -eq 0 ]]; then
  echo "No packages found under glob '$PACKAGES_GLOB'; nothing to publish."
  exit 0
fi

publish_file() {
  local pkg="$1"
  mkdir -p "$SOURCE"
  cp "$pkg" "$SOURCE"/
}

publish_http() {
  local pkg="$1"
  dotnet nuget push "$pkg" --source "$SOURCE" --api-key "$API_KEY" --skip-duplicate
}

if [[ "$SOURCE" =~ ^https?:// ]]; then
  if [[ -z "$API_KEY" ]]; then
    echo "SDK_NUGET_API_KEY is required for HTTP source $SOURCE" >&2
    exit 1
  fi
  for pkg in "${packages[@]}"; do publish_http "$pkg"; done
else
  for pkg in "${packages[@]}"; do publish_file "$pkg"; done
fi

echo "Published ${#packages[@]} package(s) to $SOURCE"
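Usage sketch for both feed types (the HTTPS feed URL and API key are placeholders):

  # offline/file feed (default)
  scripts/sdk/publish.sh

  # HTTP feed
  SDK_NUGET_SOURCE=https://nuget.example.internal/v3/index.json SDK_NUGET_API_KEY=<api-key> scripts/sdk/publish.sh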
scripts/sdk/sign-packages.sh (new file, 43 lines)
@@ -0,0 +1,43 @@
#!/usr/bin/env bash
set -euo pipefail
# Signs NuGet packages using a PKCS#12 (PFX) certificate.

PACKAGES_GLOB=${PACKAGES_GLOB:-"out/sdk/*.nupkg"}
OUT_DIR=${OUT_DIR:-out/sdk}
TIMESTAMP_URL=${TIMESTAMP_URL:-""}  # optional; keep empty for offline

PFX_PATH=${PFX_PATH:-""}
PFX_B64=${SDK_SIGNING_CERT_B64:-}
PFX_PASSWORD=${SDK_SIGNING_CERT_PASSWORD:-}

mkdir -p "$OUT_DIR"

if [[ -z "$PFX_PATH" ]]; then
  if [[ -z "$PFX_B64" ]]; then
    echo "No signing cert provided (SDK_SIGNING_CERT_B64/PFX_PATH); skipping signing."
    exit 0
  fi
  PFX_PATH="$OUT_DIR/sdk-signing.pfx"
  printf "%s" "$PFX_B64" | base64 -d > "$PFX_PATH"
fi

mapfile -t packages < <(ls $PACKAGES_GLOB 2>/dev/null || true)
if [[ ${#packages[@]} -eq 0 ]]; then
  echo "No packages found under glob '$PACKAGES_GLOB'; nothing to sign."
  exit 0
fi

for pkg in "${packages[@]}"; do
  echo "Signing $pkg"
  ts_args=()
  if [[ -n "$TIMESTAMP_URL" ]]; then
    ts_args=(--timestamp-url "$TIMESTAMP_URL")
  fi
  dotnet nuget sign "$pkg" \
    --certificate-path "$PFX_PATH" \
    --certificate-password "$PFX_PASSWORD" \
    --hash-algorithm sha256 \
    "${ts_args[@]}"
done

echo "Signed ${#packages[@]} package(s)."
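Usage sketch wiring in the certificate produced by generate-cert.sh (timestamping is left off for offline use; the password is a placeholder and must match the PFX):

  export SDK_SIGNING_CERT_B64="$(base64 < out/sdk-signing/sdk-signing.pfx | tr -d '\n')"
  export SDK_SIGNING_CERT_PASSWORD=changeit
  scripts/sdk/sign-packages.sh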
scripts/signals/build.sh (new file, 15 lines)
@@ -0,0 +1,15 @@
#!/usr/bin/env bash
set -euo pipefail
# Build Signals image and export a tarball for offline use.

ROOT=${ROOT:-$(git rev-parse --show-toplevel)}
OUT_DIR=${OUT_DIR:-$ROOT/out/signals}
IMAGE_TAG=${IMAGE_TAG:-stellaops/signals:local}
DOCKERFILE=${DOCKERFILE:-ops/devops/signals/Dockerfile}

mkdir -p "$OUT_DIR"

docker build -f "$DOCKERFILE" -t "$IMAGE_TAG" "$ROOT"
docker save "$IMAGE_TAG" -o "$OUT_DIR/signals-image.tar"

printf "Image %s saved to %s/signals-image.tar\n" "$IMAGE_TAG" "$OUT_DIR"
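Usage sketch for the offline hand-off; docker load imports the saved tarball on the target host:

  IMAGE_TAG=stellaops/signals:local scripts/signals/build.sh
  docker load -i out/signals/signals-image.tar   # run on the air-gapped host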