feat: Implement Filesystem and MongoDB provenance writers for PackRun execution context
- Added `FilesystemPackRunProvenanceWriter` to write provenance manifests to the filesystem.
- Introduced `MongoPackRunArtifactReader` to read artifacts from MongoDB.
- Created `MongoPackRunProvenanceWriter` to store provenance manifests in MongoDB.
- Developed unit tests for the filesystem and MongoDB provenance writers.
- Established `ITimelineEventStore` and `ITimelineIngestionService` interfaces for timeline event handling.
- Implemented `TimelineIngestionService` to validate and persist timeline events with hashing.
- Created PostgreSQL schema and migration scripts for timeline indexing.
- Added dependency injection support for timeline indexer services.
- Developed tests for timeline ingestion and schema validation.
ops/devops/airgap/README.md (new file, 13 lines)
@@ -0,0 +1,13 @@
# Air-gap Egress Guard Rails

Artifacts supporting `DEVOPS-AIRGAP-56-001`:

- `k8s-deny-egress.yaml` — NetworkPolicy template that denies all egress for pods labeled `sealed=true`, except optional in-cluster DNS when enabled.
- `compose-egress-guard.sh` — Idempotent iptables guard for Docker/compose using the `DOCKER-USER` chain to drop all outbound traffic from a compose project network while allowing loopback and RFC1918 intra-cluster ranges.
- `verify-egress-block.sh` — Verification harness that runs curl probes from Docker or Kubernetes and reports JSON results; exits non-zero if any target is reachable.
- `bundle_stage_import.py` — Deterministic bundle staging helper: validates a sha256 manifest, copies bundles to a staging dir as `<sha256>-<basename>`, and emits `staging-report.json` for evidence.
- `stage-bundle.sh` — Thin wrapper around `bundle_stage_import.py` with positional args.
- `build_bootstrap_pack.py` — Builds a Bootstrap Pack from images/charts/extras listed in a JSON config, writing `bootstrap-manifest.json` + `checksums.sha256` deterministically.
- `build_bootstrap_pack.sh` — Wrapper for the bootstrap pack builder.

See also `ops/devops/sealed-mode-ci/` for the full sealed-mode compose harness and `egress_probe.py`, which `verify-egress-block.sh` wraps.
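Taken together, the pieces compose into a single sealed-mode workflow. A minimal sketch of that sequence follows; the project name `stella`, the bundle paths, and the output directories are illustrative placeholders, not values fixed by these scripts.

# 1) Apply egress guard rails (compose flavour) and verify nothing leaks.
COMPOSE_PROJECT=stella ./compose-egress-guard.sh
./verify-egress-block.sh docker stella_default out/airgap-probe.json

# 2) Stage verified bundles for import.
./stage-bundle.sh bundles.json /mnt/usb/bundles out/staging mirror/

# 3) Build the Bootstrap Pack for offline transfer.
./build_bootstrap_pack.sh bootstrap.json out/bootstrap-pack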
ops/devops/airgap/build_bootstrap_pack.py (new file, 174 lines)
@@ -0,0 +1,174 @@
#!/usr/bin/env python3
"""Build a deterministic Bootstrap Pack bundle for sealed/offline transfer.

- Reads a JSON config listing artefacts to include (images, Helm charts, extras).
- Copies artefacts into an output directory with preserved basenames.
- Generates `bootstrap-manifest.json` and `checksums.sha256` with sha256 hashes
  and sizes for evidence/verification.
- Intended to satisfy DEVOPS-AIRGAP-56-003.

Config schema (JSON):
  {
    "name": "bootstrap-pack",
    "images": ["release/containers/taskrunner.tar", "release/containers/orchestrator.tar"],
    "charts": ["deploy/helm/stella.tgz"],
    "extras": ["docs/24_OFFLINE_KIT.md"]
  }

Usage:
  build_bootstrap_pack.py --config bootstrap.json --output out/bootstrap-pack
  build_bootstrap_pack.py --self-test
"""
from __future__ import annotations

import argparse
import hashlib
import json
import shutil
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Tuple

DEFAULT_NAME = "bootstrap-pack"


def sha256_file(path: Path) -> Tuple[str, int]:
    """Return (hex digest, size in bytes), streaming the file in 1 MiB chunks."""
    h = hashlib.sha256()
    size = 0
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest(), size


def load_config(path: Path) -> Dict:
    with path.open("r", encoding="utf-8") as handle:
        cfg = json.load(handle)
    if not isinstance(cfg, dict):
        raise ValueError("config must be a JSON object")
    return cfg


def ensure_list(cfg: Dict, key: str) -> List[str]:
    value = cfg.get(key, [])
    if value is None:
        return []
    if not isinstance(value, list):
        raise ValueError(f"config.{key} must be a list")
    return [str(x) for x in value]


def copy_item(src: Path, dest_root: Path, rel_dir: str) -> Tuple[str, str, int]:
    dest_dir = dest_root / rel_dir
    dest_dir.mkdir(parents=True, exist_ok=True)
    dest_path = dest_dir / src.name
    shutil.copy2(src, dest_path)
    digest, size = sha256_file(dest_path)
    rel_path = dest_path.relative_to(dest_root).as_posix()
    return rel_path, digest, size


def build_pack(config_path: Path, output_dir: Path) -> Dict:
    cfg = load_config(config_path)
    name = cfg.get("name", DEFAULT_NAME)
    images = ensure_list(cfg, "images")
    charts = ensure_list(cfg, "charts")
    extras = ensure_list(cfg, "extras")

    output_dir.mkdir(parents=True, exist_ok=True)
    items = []

    def process_list(paths: List[str], kind: str, rel_dir: str):
        # Sorted iteration keeps manifest ordering deterministic across runs.
        for raw in sorted(paths):
            src = Path(raw).expanduser().resolve()
            if not src.exists():
                items.append({
                    "type": kind,
                    "source": raw,
                    "status": "missing"
                })
                continue
            rel_path, digest, size = copy_item(src, output_dir, rel_dir)
            items.append({
                "type": kind,
                "source": raw,
                "path": rel_path,
                "sha256": digest,
                "size": size,
                "status": "ok",
            })

    process_list(images, "image", "images")
    process_list(charts, "chart", "charts")
    process_list(extras, "extra", "extras")

    manifest = {
        "name": name,
        "created": datetime.now(timezone.utc).isoformat(),
        "items": items,
    }

    # checksums file (only for ok items); two spaces keep it `sha256sum -c` compatible
    checksum_lines = [f"{item['sha256']}  {item['path']}" for item in items if item.get("status") == "ok"]
    (output_dir / "checksums.sha256").write_text("\n".join(checksum_lines) + ("\n" if checksum_lines else ""), encoding="utf-8")
    (output_dir / "bootstrap-manifest.json").write_text(json.dumps(manifest, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
    return manifest


def parse_args(argv: List[str]) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--config", type=Path, help="Path to bootstrap pack config JSON")
    parser.add_argument("--output", type=Path, help="Output directory for the pack")
    parser.add_argument("--self-test", action="store_true", help="Run internal self-test and exit")
    return parser.parse_args(argv)


def self_test() -> int:
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        tmpdir = Path(tmp)
        files = []
        for name, content in [("img1.tar", b"image-one"), ("chart1.tgz", b"chart-one"), ("readme.txt", b"hello")]:
            p = tmpdir / name
            p.write_bytes(content)
            files.append(p)
        cfg = {
            "images": [str(files[0])],
            "charts": [str(files[1])],
            "extras": [str(files[2])],
        }
        cfg_path = tmpdir / "bootstrap.json"
        cfg_path.write_text(json.dumps(cfg), encoding="utf-8")
        outdir = tmpdir / "out"
        manifest = build_pack(cfg_path, outdir)
        assert all(item.get("status") == "ok" for item in manifest["items"]), manifest
        for rel in ["images/img1.tar", "charts/chart1.tgz", "extras/readme.txt", "checksums.sha256", "bootstrap-manifest.json"]:
            assert (outdir / rel).exists(), f"missing {rel}"
        print("self-test passed")
    return 0


def main(argv: List[str]) -> int:
    args = parse_args(argv)
    if args.self_test:
        return self_test()
    if not (args.config and args.output):
        print("--config and --output are required unless --self-test", file=sys.stderr)
        return 2
    manifest = build_pack(args.config, args.output)
    missing = [i for i in manifest["items"] if i.get("status") == "missing"]
    if missing:
        print("Pack built with missing items:")
        for item in missing:
            print(f"  - {item['source']}")
        return 1
    print(f"Bootstrap pack written to {args.output}")
    return 0


if __name__ == "__main__":  # pragma: no cover
    sys.exit(main(sys.argv[1:]))
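A quick usage sketch, assuming image/chart tarballs already exist at the listed paths (the file names here are placeholders): write a config, build the pack, then independently re-verify the emitted evidence. The `sha256sum -c` step relies on the two-space hash/path format in `checksums.sha256`, and `jq` is assumed to be available.

cat > bootstrap.json <<'EOF'
{
  "name": "bootstrap-pack",
  "images": ["release/containers/taskrunner.tar"],
  "charts": ["deploy/helm/stella.tgz"],
  "extras": ["docs/24_OFFLINE_KIT.md"]
}
EOF
python3 build_bootstrap_pack.py --config bootstrap.json --output out/bootstrap-pack
# Independent verification of the evidence files:
(cd out/bootstrap-pack && sha256sum -c checksums.sha256)
# List anything that was not copied cleanly:
jq '.items[] | select(.status != "ok")' out/bootstrap-pack/bootstrap-manifest.json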
ops/devops/airgap/build_bootstrap_pack.sh (new file, 10 lines)
@@ -0,0 +1,10 @@
#!/usr/bin/env bash
# Thin wrapper for build_bootstrap_pack.py
# Usage: ./build_bootstrap_pack.sh config.json out/bootstrap-pack
set -euo pipefail
if [[ $# -lt 2 ]]; then
  echo "Usage: $0 <config.json> <output-dir>" >&2
  exit 2
fi
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
python3 "$SCRIPT_DIR/build_bootstrap_pack.py" --config "$1" --output "$2"
ops/devops/airgap/bundle_stage_import.py (new file, 169 lines)
@@ -0,0 +1,169 @@
#!/usr/bin/env python3
"""Bundle staging helper for sealed-mode imports.

Validates bundle files against a manifest and stages them into a target directory
with deterministic names (`<sha256>-<basename>`). Emits a JSON report detailing
success/failure per file for evidence capture.

Manifest format (JSON):
  [
    {"file": "bundle1.tar.gz", "sha256": "..."},
    {"file": "bundle2.ndjson", "sha256": "..."}
  ]

Usage:
  bundle_stage_import.py --manifest bundles.json --root /path/to/files --out staging
  bundle_stage_import.py --manifest bundles.json --root . --out staging --prefix mirror/
  bundle_stage_import.py --self-test
"""
from __future__ import annotations

import argparse
import hashlib
import json
import shutil
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List


def sha256_file(path: Path) -> str:
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            h.update(chunk)
    return h.hexdigest()


def load_manifest(path: Path) -> List[Dict[str, str]]:
    with path.open("r", encoding="utf-8") as handle:
        data = json.load(handle)
    if not isinstance(data, list):
        raise ValueError("Manifest must be a list of objects")
    normalized = []
    for idx, entry in enumerate(data):
        if not isinstance(entry, dict):
            raise ValueError(f"Manifest entry {idx} is not an object")
        file = entry.get("file")
        digest = entry.get("sha256")
        if not file or not digest:
            raise ValueError(f"Manifest entry {idx} missing file or sha256")
        normalized.append({"file": str(file), "sha256": str(digest).lower()})
    return normalized


def stage_file(src: Path, digest: str, out_dir: Path, prefix: str) -> Path:
    dest_name = f"{digest}-{src.name}"
    dest_rel = Path(prefix) / dest_name if prefix else Path(dest_name)
    dest_path = out_dir / dest_rel
    dest_path.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy2(src, dest_path)
    return dest_rel


def process(manifest: Path, root: Path, out_dir: Path, prefix: str) -> Dict:
    items = load_manifest(manifest)
    results = []
    success = True
    for entry in items:
        rel = Path(entry["file"])
        src = (root / rel).resolve()
        expected = entry["sha256"].lower()
        status = "ok"
        actual = None
        staged = None
        message = ""
        if not src.exists():
            status = "missing"
            message = "file not found"
            success = False
        else:
            actual = sha256_file(src)
            if actual != expected:
                status = "checksum-mismatch"
                message = "sha256 mismatch"
                success = False
            else:
                staged = str(stage_file(src, expected, out_dir, prefix))
        results.append(
            {
                "file": str(rel),
                "expectedSha256": expected,
                "actualSha256": actual,
                "status": status,
                "stagedPath": staged,
                "message": message,
            }
        )
    report = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "root": str(root),
        "output": str(out_dir),
        "prefix": prefix,
        "summary": {
            "total": len(results),
            "success": success,
            "ok": sum(1 for r in results if r["status"] == "ok"),
            "missing": sum(1 for r in results if r["status"] == "missing"),
            "checksumMismatch": sum(1 for r in results if r["status"] == "checksum-mismatch"),
        },
        "items": results,
    }
    return report


def parse_args(argv: List[str]) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--manifest", type=Path, help="Path to bundle manifest JSON")
    parser.add_argument("--root", type=Path, help="Root directory containing bundle files")
    parser.add_argument("--out", type=Path, help="Output directory for staged bundles and report")
    parser.add_argument("--prefix", default="", help="Optional prefix within output dir (e.g., mirror/)")
    parser.add_argument("--report", type=Path, help="Override report path (defaults to <out>/staging-report.json)")
    parser.add_argument("--self-test", action="store_true", help="Run internal self-test and exit")
    return parser.parse_args(argv)


def write_report(report: Dict, report_path: Path) -> None:
    report_path.parent.mkdir(parents=True, exist_ok=True)
    with report_path.open("w", encoding="utf-8") as handle:
        json.dump(report, handle, ensure_ascii=False, indent=2)
        handle.write("\n")


def self_test() -> int:
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        tmpdir = Path(tmp)
        sample = tmpdir / "sample.bin"
        sample.write_bytes(b"offline-bundle")
        digest = sha256_file(sample)
        manifest = tmpdir / "manifest.json"
        manifest.write_text(json.dumps([{"file": "sample.bin", "sha256": digest}]), encoding="utf-8")
        out = tmpdir / "out"
        report = process(manifest, tmpdir, out, prefix="mirror/")
        assert report["summary"]["success"] is True, report
        staged = out / report["items"][0]["stagedPath"]
        assert staged.exists(), f"staged file missing: {staged}"
        print("self-test passed")
    return 0


def main(argv: List[str]) -> int:
    args = parse_args(argv)
    if args.self_test:
        return self_test()
    if not (args.manifest and args.root and args.out):
        print("--manifest, --root, and --out are required unless --self-test", file=sys.stderr)
        return 2
    report = process(args.manifest, args.root, args.out, args.prefix)
    report_path = args.report or args.out / "staging-report.json"
    write_report(report, report_path)
    print(f"Staged bundles → {args.out} (report {report_path})")
    return 0 if report["summary"]["success"] else 1


if __name__ == "__main__":  # pragma: no cover
    sys.exit(main(sys.argv[1:]))
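The manifest this tool consumes can be produced with standard utilities. A minimal sketch, assuming `sha256sum` and `jq` are available and `*.tar.gz` under `/mnt/usb/bundles` as placeholder inputs:

(cd /mnt/usb/bundles && for f in *.tar.gz; do
  # Emit one {"file": ..., "sha256": ...} object per bundle.
  printf '{"file":"%s","sha256":"%s"}\n' "$f" "$(sha256sum "$f" | awk '{print $1}')"
done | jq -s '.') > bundles.json
# Then stage them via the wrapper:
./stage-bundle.sh bundles.json /mnt/usb/bundles out/staging mirror/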
ops/devops/airgap/compose-egress-guard.sh (new file, 54 lines)
@@ -0,0 +1,54 @@
#!/usr/bin/env bash
# Enforce deny-all egress for a Docker/Compose project using the DOCKER-USER chain.
# Usage: COMPOSE_PROJECT=stella ./compose-egress-guard.sh
# Optional env: ALLOW_RFC1918=true to allow east-west traffic inside 10/172/192 ranges.
set -euo pipefail

PROJECT=${COMPOSE_PROJECT:-stella}
ALLOW_RFC1918=${ALLOW_RFC1918:-true}
NETWORK=${COMPOSE_NETWORK:-${PROJECT}_default}

chain=STELLAOPS_SEALED_${PROJECT^^}
ipset_name=${PROJECT}_cidrs

# Accept rules live in the project chain, ahead of its terminal DROP.
insert_accept() {
  local dest=$1
  iptables -C "$chain" -d "$dest" -j ACCEPT 2>/dev/null || iptables -I "$chain" -d "$dest" -j ACCEPT
}

# 1) Ensure DOCKER-USER exists
iptables -nL DOCKER-USER >/dev/null 2>&1 || iptables -N DOCKER-USER

# 2) Create dedicated chain per project for clarity
iptables -nL "$chain" >/dev/null 2>&1 || iptables -N "$chain"

# 2b) Populate ipset with compose network CIDRs (if available)
if command -v ipset >/dev/null; then
  ipset create "$ipset_name" hash:net -exist
  cidrs=$(docker network inspect "$NETWORK" -f '{{range .IPAM.Config}}{{.Subnet}} {{end}}')
  for cidr in $cidrs; do
    ipset add "$ipset_name" "$cidr" 2>/dev/null || true
  done
fi

# 3) Allow loopback and optional RFC1918 intra-cluster ranges, then drop everything else
insert_accept 127.0.0.0/8
if [[ "$ALLOW_RFC1918" == "true" ]]; then
  insert_accept 10.0.0.0/8
  insert_accept 172.16.0.0/12
  insert_accept 192.168.0.0/16
fi
iptables -C "$chain" -j DROP 2>/dev/null || iptables -A "$chain" -j DROP

# 4) Hook the chain into DOCKER-USER for traffic originating from this project network
iptables -C DOCKER-USER -m addrtype --src-type LOCAL -j RETURN 2>/dev/null ||
  iptables -I DOCKER-USER -m addrtype --src-type LOCAL -j RETURN
if command -v ipset >/dev/null && ipset list "$ipset_name" >/dev/null 2>&1; then
  iptables -C DOCKER-USER -m set --match-set "$ipset_name" src -j "$chain" 2>/dev/null ||
    iptables -I DOCKER-USER -m set --match-set "$ipset_name" src -j "$chain"
else
  # Fallback: match by source subnet from docker inspect (first subnet only)
  first_cidr=$(docker network inspect "$NETWORK" -f '{{(index .IPAM.Config 0).Subnet}}')
  iptables -C DOCKER-USER -s "$first_cidr" -j "$chain" 2>/dev/null ||
    iptables -I DOCKER-USER -s "$first_cidr" -j "$chain"
fi

echo "Applied compose egress guard via DOCKER-USER -> $chain" >&2
iptables -vnL "$chain"
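After applying the guard, a one-off container on the project network should fail to reach the outside world. A minimal smoke check, assuming project `stella` (so chain `STELLAOPS_SEALED_STELLA` and network `stella_default`):

docker run --rm --network stella_default curlimages/curl:8.6.0 \
  curl -fsS --max-time 5 https://example.com \
  && echo "UNEXPECTED: egress open" >&2 \
  || echo "egress blocked as expected"
# Inspect packet/byte counters on the project chain:
iptables -vnL STELLAOPS_SEALED_STELLA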
ops/devops/airgap/k8s-deny-egress.yaml (new file, 42 lines)
@@ -0,0 +1,42 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: sealed-deny-all-egress
  namespace: default
  labels:
    stellaops.dev/owner: devops
    stellaops.dev/purpose: sealed-mode
spec:
  podSelector:
    matchLabels:
      sealed: "true"
  policyTypes:
    - Egress
  egress: []
---
# Optional patch to allow in-cluster DNS while still blocking external egress.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: sealed-allow-dns
  namespace: default
  labels:
    stellaops.dev/owner: devops
    stellaops.dev/purpose: sealed-mode
spec:
  podSelector:
    matchLabels:
      sealed: "true"
  policyTypes:
    - Egress
  egress:
    - to:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: kube-system
          podSelector:
            matchLabels:
              k8s-app: kube-dns
      ports:
        - protocol: UDP
          port: 53
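A short sketch of applying the policies and exercising them; `my-app` is a placeholder pod name, and the namespace must match the one in the manifests (`default` as shipped):

kubectl apply -f ops/devops/airgap/k8s-deny-egress.yaml
kubectl label pod my-app sealed=true --overwrite
# DNS should still resolve (when the optional policy is applied) while external egress fails:
./ops/devops/airgap/verify-egress-block.sh k8s default out/k8s-probe.json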
ops/devops/airgap/stage-bundle.sh (new file, 14 lines)
@@ -0,0 +1,14 @@
#!/usr/bin/env bash
# Wrapper for bundle_stage_import.py with sane defaults.
# Usage: ./stage-bundle.sh manifest.json /path/to/files out/staging [prefix]
set -euo pipefail
if [[ $# -lt 3 ]]; then
  echo "Usage: $0 <manifest.json> <root> <out-dir> [prefix]" >&2
  exit 2
fi
manifest=$1
root=$2
out=$3
prefix=${4:-}
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
python3 "$SCRIPT_DIR/bundle_stage_import.py" --manifest "$manifest" --root "$root" --out "$out" --prefix "$prefix"
ops/devops/airgap/verify-egress-block.sh (new file, 88 lines)
@@ -0,0 +1,88 @@
#!/usr/bin/env bash
# Verification harness for sealed-mode egress: Docker/Compose or Kubernetes.
# Examples:
#   ./verify-egress-block.sh docker stella_default out/airgap-probe.json
#   ./verify-egress-block.sh k8s default out/k8s-probe.json
set -euo pipefail

mode=${1:-}
context=${2:-}
out=${3:-}

if [[ -z "$mode" || -z "$context" || -z "$out" ]]; then
  echo "Usage: $0 <docker|k8s> <network|namespace> <output.json> [target ...]" >&2
  exit 2
fi
shift 3
TARGETS=("$@")

# Repo root is three levels up from ops/devops/airgap/.
ROOT=$(cd "$(dirname "$0")/../../.." && pwd)
PROBE_PY="$ROOT/ops/devops/sealed-mode-ci/egress_probe.py"

case "$mode" in
  docker)
    network="$context"
    python3 "$PROBE_PY" --network "$network" --output "$out" "${TARGETS[@]}"
    ;;
  k8s|kubernetes)
    ns="$context"
    targets=("${TARGETS[@]}")
    if [[ ${#targets[@]} -eq 0 ]]; then
      targets=("https://example.com" "https://www.cloudflare.com" "https://releases.stella-ops.org/healthz")
    fi
    image="curlimages/curl:8.6.0"
    tmpfile=$(mktemp)
    # Probe variables ($url, $rc) are escaped so they expand inside the pod,
    # not while the manifest is generated.
    cat > "$tmpfile" <<MANIFEST
apiVersion: v1
kind: Pod
metadata:
  name: sealed-egress-probe
  namespace: ${ns}
  labels:
    sealed: "true"
    stellaops.dev/purpose: sealed-mode
spec:
  restartPolicy: Never
  containers:
    - name: curl
      image: ${image}
      command: ["/bin/sh","-c"]
      args:
        - >
          set -eu;
          rc=0;
          for url in ${targets[@]}; do
          echo "PROBE \$url";
          if curl -fsS --max-time 8 "\$url"; then
          echo "UNEXPECTED_SUCCESS \$url";
          rc=1;
          else
          echo "BLOCKED \$url";
          fi;
          done;
          exit \$rc;
      securityContext:
        runAsNonRoot: true
        readOnlyRootFilesystem: true
MANIFEST
    kubectl apply -f "$tmpfile" >/dev/null
    kubectl wait --for=condition=Ready pod/sealed-egress-probe -n "$ns" --timeout=30s >/dev/null 2>&1 || true
    set +e
    # Pods expose completion via status.phase, not a "Succeeded" condition.
    kubectl wait --for=jsonpath='{.status.phase}'=Succeeded pod/sealed-egress-probe -n "$ns" --timeout=60s
    pod_rc=$?
    # Capture logs after completion so the evidence file is complete.
    kubectl logs -n "$ns" sealed-egress-probe > "$out.log" 2>&1
    kubectl get pod/sealed-egress-probe -n "$ns" -o json > "$out"
    kubectl delete pod/sealed-egress-probe -n "$ns" >/dev/null 2>&1 || true
    set -e
    if [[ $pod_rc -ne 0 ]]; then
      echo "Egress check failed; see $out and $out.log" >&2
      exit 1
    fi
    ;;
  *)
    echo "Unknown mode: $mode" >&2
    exit 2
    ;;
esac

echo "Egress verification complete → $out"