CI/CD consolidation
devops/offline/airgap/README.md (new file)

# Air-gap Egress Guard Rails

Artifacts supporting `DEVOPS-AIRGAP-56-001`:

- `k8s-deny-egress.yaml` — NetworkPolicy template that denies all egress for pods labeled `sealed=true`, except optional in-cluster DNS when enabled.
- `compose-egress-guard.sh` — Idempotent iptables guard for Docker/compose using the `DOCKER-USER` chain to drop all outbound traffic from a compose project network while allowing loopback and RFC1918 intra-cluster ranges.
- `verify-egress-block.sh` — Verification harness that runs curl probes from Docker or Kubernetes and reports JSON results; exits non-zero if any target is reachable.
- `bundle_stage_import.py` — Deterministic bundle staging helper: validates a sha256 manifest, copies bundles to the staging dir as `<sha256>-<basename>`, and emits `staging-report.json` for evidence.
- `stage-bundle.sh` — Thin wrapper around `bundle_stage_import.py` with positional args.
- `build_bootstrap_pack.py` — Builds a Bootstrap Pack from images/charts/extras listed in a JSON config, writing `bootstrap-manifest.json` + `checksums.sha256` deterministically.
- `build_bootstrap_pack.sh` — Wrapper for the bootstrap pack builder.
- `build_mirror_bundle.py` — Generates a mirror bundle manifest + checksums with dual-control approvals; optional cosign signing. Outputs `mirror-bundle-manifest.json`, `checksums.sha256`, and an optional signature/cert.
- `compose-syslog-smtp.yaml` + `syslog-ng.conf` — Local SMTP relay + syslog-ng stack for sealed-mode notifications; run via `scripts/devops/run-smtp-syslog.sh`.
- `health_syslog_smtp.sh` — Brings up the syslog/SMTP stack via docker compose and probes the SMTP banner plus the syslog TCP/UDP listeners.
- `compose-observability.yaml` — Sealed-mode observability stack (Prometheus, Grafana, Tempo, Loki) with offline configs and healthchecks.
- `health_observability.sh` — Starts the observability stack and probes Prometheus/Grafana/Tempo/Loki readiness.
- `observability-offline-compose.yml` + `otel-offline.yaml` + `promtail-config.yaml` — Sealed-mode observability stack (Loki, Promtail, OTEL collector with file exporters) to satisfy DEVOPS-AIRGAP-58-002.

See also `ops/devops/sealed-mode-ci/` for the full sealed-mode compose harness and `egress_probe.py`, which this verification script wraps.
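A typical sealed-transfer flow strings these tools together. The sketch below assumes the example paths and a compose project named `stella`; all of them are illustrative:

```bash
# 1) Stage incoming bundles against their checksum manifest (evidence in staging-report.json).
./stage-bundle.sh bundles.json /media/usb/incoming out/staging

# 2) Build a deterministic Bootstrap Pack from a JSON config.
./build_bootstrap_pack.sh bootstrap.json out/bootstrap-pack

# 3) Prove the sealed network cannot reach the outside world.
./verify-egress-block.sh docker stella_default out/airgap-probe.json
```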
devops/offline/airgap/build_bootstrap_pack.py (new file)

#!/usr/bin/env python3
"""Build a deterministic Bootstrap Pack bundle for sealed/offline transfer.

- Reads a JSON config listing artefacts to include (images, Helm charts, extras).
- Copies artefacts into an output directory with preserved basenames.
- Generates `bootstrap-manifest.json` and `checksums.sha256` with sha256 hashes
  and sizes for evidence/verification.
- Intended to satisfy DEVOPS-AIRGAP-56-003.

Config schema (JSON):
    {
      "name": "bootstrap-pack",
      "images": ["release/containers/taskrunner.tar", "release/containers/orchestrator.tar"],
      "charts": ["deploy/helm/stella.tgz"],
      "extras": ["docs/24_OFFLINE_KIT.md"]
    }

Usage:
    build_bootstrap_pack.py --config bootstrap.json --output out/bootstrap-pack
    build_bootstrap_pack.py --self-test
"""
from __future__ import annotations

import argparse
import hashlib
import json
import shutil
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Tuple

DEFAULT_NAME = "bootstrap-pack"


def sha256_file(path: Path) -> Tuple[str, int]:
    h = hashlib.sha256()
    size = 0
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest(), size


def load_config(path: Path) -> Dict:
    with path.open("r", encoding="utf-8") as handle:
        cfg = json.load(handle)
    if not isinstance(cfg, dict):
        raise ValueError("config must be a JSON object")
    return cfg


def ensure_list(cfg: Dict, key: str) -> List[str]:
    value = cfg.get(key, [])
    if value is None:
        return []
    if not isinstance(value, list):
        raise ValueError(f"config.{key} must be a list")
    return [str(x) for x in value]


def copy_item(src: Path, dest_root: Path, rel_dir: str) -> Tuple[str, str, int]:
    dest_dir = dest_root / rel_dir
    dest_dir.mkdir(parents=True, exist_ok=True)
    dest_path = dest_dir / src.name
    shutil.copy2(src, dest_path)
    digest, size = sha256_file(dest_path)
    rel_path = dest_path.relative_to(dest_root).as_posix()
    return rel_path, digest, size


def build_pack(config_path: Path, output_dir: Path) -> Dict:
    cfg = load_config(config_path)
    name = cfg.get("name", DEFAULT_NAME)
    images = ensure_list(cfg, "images")
    charts = ensure_list(cfg, "charts")
    extras = ensure_list(cfg, "extras")

    output_dir.mkdir(parents=True, exist_ok=True)
    items = []

    def process_list(paths: List[str], kind: str, rel_dir: str):
        for raw in sorted(paths):
            src = Path(raw).expanduser().resolve()
            if not src.exists():
                items.append({
                    "type": kind,
                    "source": raw,
                    "status": "missing"
                })
                continue
            rel_path, digest, size = copy_item(src, output_dir, rel_dir)
            items.append({
                "type": kind,
                "source": raw,
                "path": rel_path,
                "sha256": digest,
                "size": size,
                "status": "ok",
            })

    process_list(images, "image", "images")
    process_list(charts, "chart", "charts")
    process_list(extras, "extra", "extras")

    manifest = {
        "name": name,
        "created": datetime.now(timezone.utc).isoformat(),
        "items": items,
    }

    # checksums file (only for ok items); two-space separator keeps `sha256sum -c` happy
    checksum_lines = [f"{item['sha256']}  {item['path']}" for item in items if item.get("status") == "ok"]
    (output_dir / "checksums.sha256").write_text("\n".join(checksum_lines) + ("\n" if checksum_lines else ""), encoding="utf-8")
    (output_dir / "bootstrap-manifest.json").write_text(json.dumps(manifest, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
    return manifest


def parse_args(argv: List[str]) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--config", type=Path, help="Path to bootstrap pack config JSON")
    parser.add_argument("--output", type=Path, help="Output directory for the pack")
    parser.add_argument("--self-test", action="store_true", help="Run internal self-test and exit")
    return parser.parse_args(argv)


def self_test() -> int:
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        tmpdir = Path(tmp)
        files = []
        for name, content in [("img1.tar", b"image-one"), ("chart1.tgz", b"chart-one"), ("readme.txt", b"hello")]:
            p = tmpdir / name
            p.write_bytes(content)
            files.append(p)
        cfg = {
            "images": [str(files[0])],
            "charts": [str(files[1])],
            "extras": [str(files[2])],
        }
        cfg_path = tmpdir / "bootstrap.json"
        cfg_path.write_text(json.dumps(cfg), encoding="utf-8")
        outdir = tmpdir / "out"
        manifest = build_pack(cfg_path, outdir)
        assert all(item.get("status") == "ok" for item in manifest["items"]), manifest
        for rel in ["images/img1.tar", "charts/chart1.tgz", "extras/readme.txt", "checksums.sha256", "bootstrap-manifest.json"]:
            assert (outdir / rel).exists(), f"missing {rel}"
    print("self-test passed")
    return 0


def main(argv: List[str]) -> int:
    args = parse_args(argv)
    if args.self_test:
        return self_test()
    if not (args.config and args.output):
        print("--config and --output are required unless --self-test", file=sys.stderr)
        return 2
    manifest = build_pack(args.config, args.output)
    missing = [i for i in manifest["items"] if i.get("status") == "missing"]
    if missing:
        print("Pack built with missing items:")
        for item in missing:
            print(f" - {item['source']}")
        return 1
    print(f"Bootstrap pack written to {args.output}")
    return 0


if __name__ == "__main__":  # pragma: no cover
    sys.exit(main(sys.argv[1:]))
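A quick smoke of the builder, as a sketch — the artefact paths in the config are placeholders that must exist locally:

```bash
cat > bootstrap.json <<'JSON'
{
  "name": "bootstrap-pack",
  "images": ["release/containers/taskrunner.tar"],
  "charts": ["deploy/helm/stella.tgz"],
  "extras": ["docs/24_OFFLINE_KIT.md"]
}
JSON
python3 build_bootstrap_pack.py --config bootstrap.json --output out/bootstrap-pack
# checksums.sha256 paths are relative to the pack root, so verify from inside it.
(cd out/bootstrap-pack && sha256sum -c checksums.sha256)
```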
devops/offline/airgap/build_bootstrap_pack.sh (new file)

#!/usr/bin/env bash
# Thin wrapper for build_bootstrap_pack.py
# Usage: ./build_bootstrap_pack.sh config.json out/bootstrap-pack
set -euo pipefail
if [[ $# -lt 2 ]]; then
  echo "Usage: $0 <config.json> <output-dir>" >&2
  exit 2
fi
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
python3 "$SCRIPT_DIR/build_bootstrap_pack.py" --config "$1" --output "$2"
devops/offline/airgap/build_mirror_bundle.py (new file)

#!/usr/bin/env python3
"""Automate mirror bundle manifest + checksums with dual-control approvals.

Implements DEVOPS-AIRGAP-57-001.

Features:
- Deterministic manifest (`mirror-bundle-manifest.json`) with sha256/size per file.
- `checksums.sha256` for quick verification.
- Dual-control approvals recorded via `--approver` (min 2 required to mark approved).
- Optional cosign signing of the manifest via `--cosign-key` (sign-blob); writes
  `mirror-bundle-manifest.json.sig` and `mirror-bundle-manifest.json.pem` when available.
- Offline-friendly: purely local file reads; no network access.

Usage:
    build_mirror_bundle.py --root /path/to/bundles --output out/mirror \
        --approver alice@example.com --approver bob@example.com

    build_mirror_bundle.py --self-test
"""
from __future__ import annotations

import argparse
import hashlib
import json
import shutil
import subprocess
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Optional


def sha256_file(path: Path) -> Dict[str, int | str]:
    h = hashlib.sha256()
    size = 0
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            h.update(chunk)
            size += len(chunk)
    return {"sha256": h.hexdigest(), "size": size}


def find_files(root: Path) -> List[Path]:
    files: List[Path] = []
    for p in sorted(root.rglob("*")):
        if p.is_file():
            files.append(p)
    return files


def write_checksums(items: List[Dict], output_dir: Path) -> None:
    lines = [f"{item['sha256']}  {item['path']}" for item in items]
    (output_dir / "checksums.sha256").write_text("\n".join(lines) + ("\n" if lines else ""), encoding="utf-8")


def maybe_sign(manifest_path: Path, key: Optional[str]) -> Dict[str, str]:
    if not key:
        return {"status": "skipped", "reason": "no key provided"}
    if shutil.which("cosign") is None:
        return {"status": "skipped", "reason": "cosign not found"}
    sig = manifest_path.with_suffix(manifest_path.suffix + ".sig")
    pem = manifest_path.with_suffix(manifest_path.suffix + ".pem")
    try:
        subprocess.run(
            ["cosign", "sign-blob", "--key", key, "--output-signature", str(sig), "--output-certificate", str(pem), str(manifest_path)],
            check=True,
            capture_output=True,
            text=True,
        )
        return {
            "status": "signed",
            "signature": sig.name,
            "certificate": pem.name,
        }
    except subprocess.CalledProcessError as exc:  # pragma: no cover
        return {"status": "failed", "reason": exc.stderr or str(exc)}


def build_manifest(root: Path, output_dir: Path, approvers: List[str], cosign_key: Optional[str]) -> Dict:
    files = find_files(root)
    items: List[Dict] = []
    for p in files:
        rel = p.relative_to(root).as_posix()
        info = sha256_file(p)
        items.append({"path": rel, **info})
    manifest = {
        "created": datetime.now(timezone.utc).isoformat(),
        "root": str(root),
        "total": len(items),
        "items": items,
        "approvals": sorted(set(approvers)),
        "approvalStatus": "approved" if len(set(approvers)) >= 2 else "pending",
    }
    output_dir.mkdir(parents=True, exist_ok=True)
    manifest_path = output_dir / "mirror-bundle-manifest.json"
    manifest_path.write_text(json.dumps(manifest, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
    write_checksums(items, output_dir)
    signing = maybe_sign(manifest_path, cosign_key)
    manifest["signing"] = signing
    # Persist signing status in manifest for traceability
    manifest_path.write_text(json.dumps(manifest, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
    return manifest


def parse_args(argv: List[str]) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--root", type=Path, help="Root directory containing bundle files")
    parser.add_argument("--output", type=Path, help="Output directory for manifest + checksums")
    parser.add_argument("--approver", action="append", default=[], help="Approver identity (email or handle); provide twice for dual-control")
    parser.add_argument("--cosign-key", help="Path or KMS URI for cosign signing key (optional)")
    parser.add_argument("--self-test", action="store_true", help="Run internal self-test and exit")
    return parser.parse_args(argv)


def self_test() -> int:
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        tmpdir = Path(tmp)
        root = tmpdir / "bundles"
        root.mkdir()
        (root / "a.txt").write_text("hello", encoding="utf-8")
        (root / "b.bin").write_bytes(b"world")
        out = tmpdir / "out"
        manifest = build_manifest(root, out, ["alice", "bob"], cosign_key=None)
        assert manifest["approvalStatus"] == "approved"
        assert (out / "mirror-bundle-manifest.json").exists()
        assert (out / "checksums.sha256").exists()
    print("self-test passed")
    return 0


def main(argv: List[str]) -> int:
    args = parse_args(argv)
    if args.self_test:
        return self_test()
    if not (args.root and args.output):
        print("--root and --output are required unless --self-test", file=sys.stderr)
        return 2
    manifest = build_manifest(args.root.resolve(), args.output.resolve(), args.approver, args.cosign_key)
    if manifest["approvalStatus"] != "approved":
        print("Manifest generated but approvalStatus=pending (need >=2 distinct approvers).", file=sys.stderr)
        return 1
    missing = [i for i in manifest["items"] if not (args.root / i["path"]).exists()]
    if missing:
        print(f"Missing files in manifest: {missing}", file=sys.stderr)
        return 1
    print(f"Mirror bundle manifest written to {args.output}")
    return 0


if __name__ == "__main__":  # pragma: no cover
    sys.exit(main(sys.argv[1:]))
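Dual-control in practice — the identities below are illustrative; the tool exits 1 and marks `approvalStatus=pending` until two distinct approvers are recorded:

```bash
# One approver → pending, exit code 1
python3 build_mirror_bundle.py --root bundles/ --output out/mirror \
  --approver alice@example.com

# Two distinct approvers → approved, exit code 0 (add --cosign-key to sign)
python3 build_mirror_bundle.py --root bundles/ --output out/mirror \
  --approver alice@example.com --approver bob@example.com
```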
devops/offline/airgap/bundle_stage_import.py (new file)

#!/usr/bin/env python3
"""Bundle staging helper for sealed-mode imports.

Validates bundle files against a manifest and stages them into a target directory
with deterministic names (`<sha256>-<basename>`). Emits a JSON report detailing
success/failure per file for evidence capture.

Manifest format (JSON):
    [
      {"file": "bundle1.tar.gz", "sha256": "..."},
      {"file": "bundle2.ndjson", "sha256": "..."}
    ]

Usage:
    bundle_stage_import.py --manifest bundles.json --root /path/to/files --out staging
    bundle_stage_import.py --manifest bundles.json --root . --out staging --prefix mirror/
    bundle_stage_import.py --self-test
"""
from __future__ import annotations

import argparse
import hashlib
import json
import shutil
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List


def sha256_file(path: Path) -> str:
    h = hashlib.sha256()
    with path.open('rb') as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            h.update(chunk)
    return h.hexdigest()


def load_manifest(path: Path) -> List[Dict[str, str]]:
    with path.open('r', encoding='utf-8') as handle:
        data = json.load(handle)
    if not isinstance(data, list):
        raise ValueError("Manifest must be a list of objects")
    normalized = []
    for idx, entry in enumerate(data):
        if not isinstance(entry, dict):
            raise ValueError(f"Manifest entry {idx} is not an object")
        file = entry.get("file")
        digest = entry.get("sha256")
        if not file or not digest:
            raise ValueError(f"Manifest entry {idx} missing file or sha256")
        normalized.append({"file": str(file), "sha256": str(digest).lower()})
    return normalized


def stage_file(src: Path, digest: str, out_dir: Path, prefix: str) -> Path:
    dest_name = f"{digest}-{src.name}"
    dest_rel = Path(prefix) / dest_name if prefix else Path(dest_name)
    dest_path = out_dir / dest_rel
    dest_path.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy2(src, dest_path)
    return dest_rel


def process(manifest: Path, root: Path, out_dir: Path, prefix: str) -> Dict:
    items = load_manifest(manifest)
    results = []
    success = True
    for entry in items:
        rel = Path(entry["file"])
        src = (root / rel).resolve()
        expected = entry["sha256"].lower()
        status = "ok"
        actual = None
        staged = None
        message = ""
        if not src.exists():
            status = "missing"
            message = "file not found"
            success = False
        else:
            actual = sha256_file(src)
            if actual != expected:
                status = "checksum-mismatch"
                message = "sha256 mismatch"
                success = False
            else:
                staged = str(stage_file(src, expected, out_dir, prefix))
        results.append(
            {
                "file": str(rel),
                "expectedSha256": expected,
                "actualSha256": actual,
                "status": status,
                "stagedPath": staged,
                "message": message,
            }
        )
    report = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "root": str(root),
        "output": str(out_dir),
        "prefix": prefix,
        "summary": {
            "total": len(results),
            "success": success,
            "ok": sum(1 for r in results if r["status"] == "ok"),
            "missing": sum(1 for r in results if r["status"] == "missing"),
            "checksumMismatch": sum(1 for r in results if r["status"] == "checksum-mismatch"),
        },
        "items": results,
    }
    return report


def parse_args(argv: List[str]) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--manifest", type=Path, help="Path to bundle manifest JSON")
    parser.add_argument("--root", type=Path, help="Root directory containing bundle files")
    parser.add_argument("--out", type=Path, help="Output directory for staged bundles and report")
    parser.add_argument("--prefix", default="", help="Optional prefix within output dir (e.g., mirror/)")
    parser.add_argument("--report", type=Path, help="Override report path (defaults to <out>/staging-report.json)")
    parser.add_argument("--self-test", action="store_true", help="Run internal self-test and exit")
    return parser.parse_args(argv)


def write_report(report: Dict, report_path: Path) -> None:
    report_path.parent.mkdir(parents=True, exist_ok=True)
    with report_path.open('w', encoding='utf-8') as handle:
        json.dump(report, handle, ensure_ascii=False, indent=2)
        handle.write("\n")


def self_test() -> int:
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        tmpdir = Path(tmp)
        sample = tmpdir / "sample.bin"
        sample.write_bytes(b"offline-bundle")
        digest = sha256_file(sample)
        manifest = tmpdir / "manifest.json"
        manifest.write_text(json.dumps([{"file": "sample.bin", "sha256": digest}]), encoding='utf-8')
        out = tmpdir / "out"
        report = process(manifest, tmpdir, out, prefix="mirror/")
        assert report["summary"]["success"] is True, report
        staged = out / report["items"][0]["stagedPath"]
        assert staged.exists(), f"staged file missing: {staged}"
    print("self-test passed")
    return 0


def main(argv: List[str]) -> int:
    args = parse_args(argv)
    if args.self_test:
        return self_test()
    if not (args.manifest and args.root and args.out):
        print("--manifest, --root, and --out are required unless --self-test", file=sys.stderr)
        return 2
    report = process(args.manifest, args.root, args.out, args.prefix)
    report_path = args.report or args.out / "staging-report.json"
    write_report(report, report_path)
    print(f"Staged bundles → {args.out} (report {report_path})")
    return 0 if report["summary"]["success"] else 1


if __name__ == "__main__":  # pragma: no cover
    sys.exit(main(sys.argv[1:]))
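On the connected side, the expected manifest can be produced with standard tooling before transfer; a sketch using GNU find and jq, with the directory name illustrative:

```bash
# Emit [{"file": ..., "sha256": ...}] for every file under ./outgoing
(cd outgoing && find . -type f -printf '%P\n' | while read -r f; do
  printf '{"file":"%s","sha256":"%s"}\n' "$f" "$(sha256sum "$f" | cut -d' ' -f1)"
done | jq -s '.') > bundles.json
```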
devops/offline/airgap/compose-egress-guard.sh (new file)

#!/usr/bin/env bash
# Enforce deny-all egress for a Docker/Compose project using DOCKER-USER chain.
# Usage: COMPOSE_PROJECT=stella ./compose-egress-guard.sh
# Optional env: ALLOW_RFC1918=true to allow east-west traffic inside 10/172/192 ranges.
set -euo pipefail

PROJECT=${COMPOSE_PROJECT:-stella}
ALLOW_RFC1918=${ALLOW_RFC1918:-true}
NETWORK=${COMPOSE_NETWORK:-${PROJECT}_default}

chain=STELLAOPS_SEALED_${PROJECT^^}
ipset_name=${PROJECT}_cidrs

insert_accept() {
  local dest=$1
  iptables -C DOCKER-USER -d "$dest" -j ACCEPT 2>/dev/null || iptables -I DOCKER-USER -d "$dest" -j ACCEPT
}

# 1) Ensure DOCKER-USER exists
iptables -nL DOCKER-USER >/dev/null 2>&1 || iptables -N DOCKER-USER

# 2) Create dedicated chain per project for clarity
iptables -nL "$chain" >/dev/null 2>&1 || iptables -N "$chain"

# 2b) Populate ipset with compose network CIDRs (if available)
if command -v ipset >/dev/null; then
  ipset list "$ipset_name" >/dev/null 2>&1 || ipset create "$ipset_name" hash:net -exist
  cidrs=$(docker network inspect "$NETWORK" -f '{{range .IPAM.Config}}{{.Subnet}} {{end}}')
  for cidr in $cidrs; do
    ipset add "$ipset_name" "$cidr" 2>/dev/null || true
  done
fi

# 3) Allow loopback and optional RFC1918 intra-cluster ranges, then drop everything else
insert_accept 127.0.0.0/8
if [[ "$ALLOW_RFC1918" == "true" ]]; then
  insert_accept 10.0.0.0/8
  insert_accept 172.16.0.0/12
  insert_accept 192.168.0.0/16
fi
iptables -C "$chain" -j DROP 2>/dev/null || iptables -A "$chain" -j DROP

# 4) Hook chain into DOCKER-USER for containers in this project network (match on source)
iptables -C DOCKER-USER -m addrtype --src-type LOCAL -j RETURN 2>/dev/null || true
if command -v ipset >/dev/null && ipset list "$ipset_name" >/dev/null 2>&1; then
  iptables -C DOCKER-USER -m set --match-set "$ipset_name" src -j "$chain" 2>/dev/null || iptables -I DOCKER-USER -m set --match-set "$ipset_name" src -j "$chain"
else
  # Fallback: match by source subnet from docker inspect (first subnet only)
  first_cidr=$(docker network inspect "$NETWORK" -f '{{(index .IPAM.Config 0).Subnet}}')
  iptables -C DOCKER-USER -s "$first_cidr" -j "$chain" 2>/dev/null || iptables -I DOCKER-USER -s "$first_cidr" -j "$chain"
fi

echo "Applied compose egress guard via DOCKER-USER -> $chain" >&2
iptables -vnL "$chain"
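A typical apply-then-verify loop, assuming a compose project named `stella` (so network `stella_default`) and illustrative probe targets:

```bash
sudo COMPOSE_PROJECT=stella ./compose-egress-guard.sh
./verify-egress-block.sh docker stella_default out/airgap-probe.json \
  https://example.com https://deb.debian.org
```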
devops/offline/airgap/compose-observability.yaml (new file)

version: "3.9"

services:
  prometheus:
    image: prom/prometheus:v2.53.0
    container_name: prometheus
    command:
      - --config.file=/etc/prometheus/prometheus.yml
    volumes:
      - ./observability/prometheus.yml:/etc/prometheus/prometheus.yml:ro
    ports:
      - "9090:9090"
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://localhost:9090/-/ready"]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 10s
    restart: unless-stopped

  loki:
    image: grafana/loki:3.0.0
    container_name: loki
    command: ["-config.file=/etc/loki/config.yaml"]
    volumes:
      - ./observability/loki-config.yaml:/etc/loki/config.yaml:ro
      - ./observability/data/loki:/loki
    ports:
      - "3100:3100"
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://localhost:3100/ready"]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 15s
    restart: unless-stopped

  tempo:
    image: grafana/tempo:2.4.1
    container_name: tempo
    command: ["-config.file=/etc/tempo/tempo.yaml"]
    volumes:
      - ./observability/tempo-config.yaml:/etc/tempo/tempo.yaml:ro
      - ./observability/data/tempo:/var/tempo
    ports:
      - "3200:3200"
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://localhost:3200/ready"]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 15s
    restart: unless-stopped

  grafana:
    image: grafana/grafana:10.4.2
    container_name: grafana
    environment:
      - GF_AUTH_ANONYMOUS_ENABLED=true
      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_SECURITY_ADMIN_USER=admin
    volumes:
      - ./observability/grafana/provisioning/datasources:/etc/grafana/provisioning/datasources:ro
    ports:
      - "3000:3000"
    depends_on:
      - prometheus
      - loki
      - tempo
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://localhost:3000/api/health"]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 20s
    restart: unless-stopped
devops/offline/airgap/compose-syslog-smtp.yaml (new file)

version: '3.8'
services:
  smtp:
    image: bytemark/smtp
    restart: unless-stopped
    environment:
      - MAILNAME=sealed.local
    networks: [sealed]
    ports:
      - "2525:25"
  syslog:
    image: balabit/syslog-ng:4.7.1
    restart: unless-stopped
    command: ["syslog-ng", "-F", "--no-caps"]
    networks: [sealed]
    ports:
      - "5514:514/udp"
      - "5515:601/tcp"
    volumes:
      - ./syslog-ng.conf:/etc/syslog-ng/syslog-ng.conf:ro
networks:
  sealed:
    driver: bridge
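Hand-testing the listeners once the stack is up — host ports come from the compose file, and `logger` here is the util-linux client:

```bash
docker compose -f compose-syslog-smtp.yaml up -d
logger -n 127.0.0.1 -P 5514 -d "sealed-mode udp test"   # syslog via UDP
logger -n 127.0.0.1 -P 5515 -T "sealed-mode tcp test"   # syslog via TCP
printf 'QUIT\r\n' | nc -w2 127.0.0.1 2525               # SMTP banner check
```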
devops/offline/airgap/health_observability.sh (new file)

#!/usr/bin/env bash
set -euo pipefail

# Health check for compose-observability.yaml (DEVOPS-AIRGAP-58-002)

COMPOSE_FILE="$(cd "$(dirname "$0")" && pwd)/compose-observability.yaml"

echo "Starting observability stack (Prometheus/Grafana/Tempo/Loki) and waiting for container health..."
docker compose -f "$COMPOSE_FILE" up -d --wait || true

docker compose -f "$COMPOSE_FILE" ps

echo "Probing Prometheus /-/ready"
curl -sf http://127.0.0.1:9090/-/ready

echo "Probing Grafana /api/health"
curl -sf http://127.0.0.1:3000/api/health

echo "Probing Loki /ready"
curl -sf http://127.0.0.1:3100/ready

echo "Probing Tempo /ready"
curl -sf http://127.0.0.1:3200/ready

echo "All probes succeeded."
devops/offline/airgap/health_syslog_smtp.sh (new file)

#!/usr/bin/env bash
set -euo pipefail
# Health check for compose-syslog-smtp.yaml (DEVOPS-AIRGAP-58-001)
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
COMPOSE_FILE="${COMPOSE_FILE:-$SCRIPT_DIR/compose-syslog-smtp.yaml}"
SMTP_PORT=${SMTP_PORT:-2525}
SYSLOG_TCP=${SYSLOG_TCP:-5515}
SYSLOG_UDP=${SYSLOG_UDP:-5514}

export COMPOSE_FILE
# ensure stack up
if ! docker compose ps >/dev/null 2>&1; then
  docker compose up -d
fi
sleep 2

# probe smtp banner
if ! timeout 5 bash -lc "echo QUIT | nc -w2 127.0.0.1 ${SMTP_PORT}" >/dev/null 2>&1; then
  echo "smtp service not responding on ${SMTP_PORT}" >&2
  exit 1
fi
# probe syslog tcp
if ! echo "test" | nc -w2 127.0.0.1 "${SYSLOG_TCP}" >/dev/null 2>&1; then
  echo "syslog tcp not responding on ${SYSLOG_TCP}" >&2
  exit 1
fi
# probe syslog udp
if ! echo "test" | nc -w2 -u 127.0.0.1 "${SYSLOG_UDP}" >/dev/null 2>&1; then
  echo "syslog udp not responding on ${SYSLOG_UDP}" >&2
  exit 1
fi

echo "smtp/syslog stack healthy"
devops/offline/airgap/import-bundle.sh (new file)

#!/usr/bin/env bash
# Import air-gap bundle into isolated environment
# Usage: ./import-bundle.sh <bundle-dir> [registry]
# Example: ./import-bundle.sh /media/usb/stellaops-bundle localhost:5000

set -euo pipefail

BUNDLE_DIR="${1:?Bundle directory required}"
REGISTRY="${2:-localhost:5000}"

echo "==> Importing air-gap bundle from ${BUNDLE_DIR}"

# Verify bundle structure
if [[ ! -f "${BUNDLE_DIR}/manifest.json" ]]; then
  echo "ERROR: manifest.json not found in bundle" >&2
  exit 1
fi

# Verify checksums first
echo "==> Verifying checksums..."
cd "${BUNDLE_DIR}"
for sha_file in *.sha256; do
  if [[ -f "${sha_file}" ]]; then
    echo "  Checking ${sha_file}..."
    sha256sum -c "${sha_file}" || { echo "CHECKSUM FAILED: ${sha_file}" >&2; exit 1; }
  fi
done

# Load container images (unmatched globs fall through the -f guard)
echo "==> Loading container images..."
for tarball in images/*.tar images/*.tar.gz; do
  if [[ -f "${tarball}" ]]; then
    echo "  Loading ${tarball}..."
    docker load -i "${tarball}"
  fi
done

# Re-tag and push to local registry
echo "==> Pushing images to ${REGISTRY}..."
IMAGES=$(jq -r '.images[]?.name // empty' manifest.json 2>/dev/null || true)
for IMAGE in ${IMAGES}; do
  LOCAL_TAG="${REGISTRY}/${IMAGE##*/}"
  echo "  ${IMAGE} -> ${LOCAL_TAG}"
  docker tag "${IMAGE}" "${LOCAL_TAG}" 2>/dev/null || true
  docker push "${LOCAL_TAG}" 2>/dev/null || echo "  (push skipped - registry may be unavailable)"
done

# Import Helm charts
echo "==> Importing Helm charts..."
if [[ -d "${BUNDLE_DIR}/charts" ]]; then
  for chart in "${BUNDLE_DIR}"/charts/*.tgz; do
    if [[ -f "${chart}" ]]; then
      echo "  Pushing ${chart}..."
      helm push "${chart}" "oci://${REGISTRY}/charts" 2>/dev/null || \
        echo "  (OCI push skipped - copying to local)"
    fi
  done
fi

# Import NuGet packages
echo "==> Importing NuGet packages..."
if [[ -d "${BUNDLE_DIR}/nugets" ]]; then
  NUGET_CACHE="${HOME}/.nuget/packages"
  mkdir -p "${NUGET_CACHE}"
  for nupkg in "${BUNDLE_DIR}"/nugets/*.nupkg; do
    if [[ -f "${nupkg}" ]]; then
      PKG_NAME=$(basename "${nupkg}" .nupkg)
      echo "  Caching ${PKG_NAME}..."
      # Extract to NuGet cache structure
      unzip -q -o "${nupkg}" -d "${NUGET_CACHE}/${PKG_NAME,,}" 2>/dev/null || true
    fi
  done
fi

# Import npm packages
echo "==> Importing npm packages..."
if [[ -d "${BUNDLE_DIR}/npm" ]]; then
  NPM_CACHE="${HOME}/.npm/_cacache"
  mkdir -p "${NPM_CACHE}"
  if [[ -f "${BUNDLE_DIR}/npm/cache.tar.gz" ]]; then
    tar -xzf "${BUNDLE_DIR}/npm/cache.tar.gz" -C "${HOME}/.npm" 2>/dev/null || true
  fi
fi

# Import advisory feeds
echo "==> Importing advisory feeds..."
if [[ -d "${BUNDLE_DIR}/feeds" ]]; then
  FEEDS_DIR="/var/lib/stellaops/feeds"
  sudo mkdir -p "${FEEDS_DIR}" 2>/dev/null || mkdir -p "${FEEDS_DIR}"
  for feed in "${BUNDLE_DIR}"/feeds/*.ndjson.gz; do
    if [[ -f "${feed}" ]]; then
      FEED_NAME=$(basename "${feed}")
      echo "  Installing ${FEED_NAME}..."
      cp "${feed}" "${FEEDS_DIR}/" 2>/dev/null || sudo cp "${feed}" "${FEEDS_DIR}/"
    fi
  done
fi

# Import symbol bundles
echo "==> Importing symbol bundles..."
if [[ -d "${BUNDLE_DIR}/symbols" ]]; then
  SYMBOLS_DIR="/var/lib/stellaops/symbols"
  sudo mkdir -p "${SYMBOLS_DIR}" 2>/dev/null || mkdir -p "${SYMBOLS_DIR}"
  for bundle in "${BUNDLE_DIR}"/symbols/*.zip; do
    if [[ -f "${bundle}" ]]; then
      echo "  Extracting ${bundle}..."
      unzip -q -o "${bundle}" -d "${SYMBOLS_DIR}" 2>/dev/null || true
    fi
  done
fi

# Generate import report
echo "==> Generating import report..."
cat > "${BUNDLE_DIR}/import-report.json" <<EOF
{
  "importedAt": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
  "registry": "${REGISTRY}",
  "bundleDir": "${BUNDLE_DIR}",
  "status": "success"
}
EOF

echo "==> Import complete"
echo "  Registry: ${REGISTRY}"
echo "  Report: ${BUNDLE_DIR}/import-report.json"
echo ""
echo "Next steps:"
echo "  1. Update Helm values with registry: ${REGISTRY}"
echo "  2. Deploy: helm install stellaops deploy/helm/stellaops -f values-airgap.yaml"
echo "  3. Verify: kubectl get pods -n stellaops"
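An example run against a mounted bundle, assuming a throwaway local registry (container name and bundle path illustrative):

```bash
docker run -d -p 5000:5000 --name registry registry:2
./import-bundle.sh /media/usb/stellaops-bundle localhost:5000
jq . /media/usb/stellaops-bundle/import-report.json
```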
devops/offline/airgap/k8s-deny-egress.yaml (new file)

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: sealed-deny-all-egress
  namespace: default
  labels:
    stellaops.dev/owner: devops
    stellaops.dev/purpose: sealed-mode
spec:
  podSelector:
    matchLabels:
      sealed: "true"
  policyTypes:
    - Egress
  egress: []
---
# Optional patch to allow in-cluster DNS while still blocking external egress.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: sealed-allow-dns
  namespace: default
  labels:
    stellaops.dev/owner: devops
    stellaops.dev/purpose: sealed-mode
spec:
  podSelector:
    matchLabels:
      sealed: "true"
  policyTypes:
    - Egress
  egress:
    - to:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: kube-system
          podSelector:
            matchLabels:
              k8s-app: kube-dns
      ports:
        - protocol: UDP
          port: 53
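Applying the policies and spot-checking them with a labeled probe pod (pod name illustrative):

```bash
kubectl apply -f k8s-deny-egress.yaml
kubectl run probe --image=curlimages/curl:8.6.0 --labels=sealed=true --restart=Never \
  -- sh -c 'curl -fsS --max-time 5 https://example.com && echo REACHABLE || echo BLOCKED'
kubectl logs probe   # expect BLOCKED once the pod completes
```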
devops/offline/airgap/observability-offline-compose.yml (new file)

version: '3.8'
services:
  loki:
    image: grafana/loki:3.0.1
    command: ["-config.file=/etc/loki/local-config.yaml"]
    volumes:
      - loki-data:/loki
    networks: [sealed]
  promtail:
    image: grafana/promtail:3.0.1
    command: ["-config.file=/etc/promtail/config.yml"]
    volumes:
      - promtail-data:/var/log
      - ./promtail-config.yaml:/etc/promtail/config.yml:ro
    networks: [sealed]
  otel:
    image: otel/opentelemetry-collector-contrib:0.97.0
    command: ["--config=/etc/otel/otel-offline.yaml"]
    volumes:
      - ./otel-offline.yaml:/etc/otel/otel-offline.yaml:ro
      - otel-data:/var/otel
    ports:
      - "4317:4317"
      - "4318:4318"
    networks: [sealed]
networks:
  sealed:
    driver: bridge
volumes:
  loki-data:
  promtail-data:
  otel-data:
devops/offline/airgap/observability/grafana/provisioning/datasources/ (new file; Grafana datasource provisioning)

apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    url: http://prometheus:9090
    isDefault: true
  - name: Loki
    type: loki
    access: proxy
    url: http://loki:3100
  - name: Tempo
    type: tempo
    access: proxy
    url: http://tempo:3200
devops/offline/airgap/observability/loki-config.yaml (new file)

server:
  http_listen_port: 3100
  log_level: warn

common:
  ring:
    instance_addr: loki
    kvstore:
      store: inmemory
  replication_factor: 1

table_manager:
  retention_deletes_enabled: true
  retention_period: 168h

schema_config:
  configs:
    - from: 2024-01-01
      store: boltdb-shipper
      object_store: filesystem
      schema: v13
      index:
        prefix: index_
        period: 24h

storage_config:
  filesystem:
    directory: /loki/chunks
  boltdb_shipper:
    active_index_directory: /loki/index
    cache_location: /loki/cache
    shared_store: filesystem

limits_config:
  retention_period: 168h
devops/offline/airgap/observability/prometheus.yml (new file)

global:
  scrape_interval: 15s
  evaluation_interval: 15s

scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets: ['prometheus:9090']
  - job_name: loki
    static_configs:
      - targets: ['loki:3100']
  - job_name: tempo
    static_configs:
      - targets: ['tempo:3200']
devops/offline/airgap/observability/tempo-config.yaml (new file)

server:
  http_listen_port: 3200
  log_level: warn

distributor:
  receivers:
    jaeger:
      protocols:
        thrift_http:
    otlp:
      protocols:
        http:
        grpc:
    zipkin:

storage:
  trace:
    backend: local
    wal:
      path: /var/tempo/wal
    local:
      path: /var/tempo/traces

compactor:
  compaction:
    block_retention: 168h
devops/offline/airgap/otel-offline.yaml (new file)

receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: 'self'
          static_configs:
            - targets: ['localhost:8888']
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
processors:
  batch:
    timeout: 1s
    send_batch_size: 512
exporters:
  file/metrics:
    path: /var/otel/metrics.prom
  file/traces:
    path: /var/otel/traces.ndjson
  loki/offline:
    endpoint: http://loki:3100/loki/api/v1/push
    labels:
      job: sealed-observability
    tenant_id: "sealed"
service:
  telemetry:
    logs:
      level: info
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [file/metrics]
    traces:
      receivers: [otlp]
      processors: [batch]
      exporters: [file/traces]
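A minimal OTLP/HTTP smoke against the collector's 4318 port — an empty `resourceSpans` payload is enough to confirm the receiver answers:

```bash
curl -sf -X POST http://127.0.0.1:4318/v1/traces \
  -H 'Content-Type: application/json' \
  -d '{"resourceSpans":[]}'
```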
devops/offline/airgap/promtail-config.yaml (new file)

server:
  http_listen_port: 9080
  grpc_listen_port: 0
positions:
  filename: /tmp/positions.yaml
clients:
  - url: http://loki:3100/loki/api/v1/push
scrape_configs:
  - job_name: promtail
    static_configs:
      - targets: [localhost]
        labels:
          job: promtail
          __path__: /var/log/*.log
devops/offline/airgap/sealed-ci-smoke.sh (new file)

#!/usr/bin/env bash
set -euo pipefail
# Simple sealed-mode CI smoke: block egress, resolve mock DNS, assert services start.
ROOT=${ROOT:-$(cd "$(dirname "$0")/../../.." && pwd)}
LOGDIR=${LOGDIR:-$ROOT/out/airgap-smoke}
mkdir -p "$LOGDIR"

# 1) Start mock DNS (returns 0.0.0.0 for everything)
DNS_PORT=${DNS_PORT:-53535}
python - <<PY &
import socketserver
from dnslib import DNSRecord, RR, A

class Handler(socketserver.BaseRequestHandler):
    def handle(self):
        data, sock = self.request
        request = DNSRecord.parse(data)
        reply = request.reply()
        reply.add_answer(RR(request.q.qname, rdata=A('0.0.0.0')))
        sock.sendto(reply.pack(), self.client_address)

# Serve in the foreground; the whole interpreter is already backgrounded by the shell.
with socketserver.UDPServer(('0.0.0.0', ${DNS_PORT}), Handler) as server:
    server.serve_forever()
PY

# 2) Block egress except loopback
iptables -I OUTPUT -d 127.0.0.0/8 -j ACCEPT
iptables -I OUTPUT -d 0.0.0.0/8 -j ACCEPT
iptables -A OUTPUT -j DROP

# 3) Placeholder: capture environment info (replace with service start once wired)
pushd "$ROOT" >/dev/null
DOTNET_SYSTEM_NET_HTTP_SOCKETSHTTPHANDLER_HTTP2SUPPORT=false \
DOTNET_CLI_TELEMETRY_OPTOUT=1 \
DNS_SERVER=127.0.0.1:${DNS_PORT} \
dotnet --info > "$LOGDIR/dotnet-info.txt"
popd >/dev/null

echo "sealed CI smoke complete; logs at $LOGDIR"
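`dnslib` is a third-party package, so it must be staged ahead of time on a connected host; one way (wheel directory illustrative):

```bash
pip download dnslib -d vendor/wheels                        # connected side
pip install --no-index --find-links vendor/wheels dnslib    # sealed side
```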
devops/offline/airgap/stage-bundle.sh (new file)

#!/usr/bin/env bash
# Wrapper for bundle_stage_import.py with sane defaults.
# Usage: ./stage-bundle.sh manifest.json /path/to/files out/staging [prefix]
set -euo pipefail
if [[ $# -lt 3 ]]; then
  echo "Usage: $0 <manifest.json> <root> <out-dir> [prefix]" >&2
  exit 2
fi
manifest=$1
root=$2
out=$3
prefix=${4:-}
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
python3 "$SCRIPT_DIR/bundle_stage_import.py" --manifest "$manifest" --root "$root" --out "$out" --prefix "$prefix"
devops/offline/airgap/syslog-ng.conf (new file)

@version: 4.7
@include "scl.conf"

options {
  time-reopen(10);
  log-msg-size(8192);
  ts-format(iso);
};

source s_net {
  tcp(port(601));
  udp(port(514));
};

destination d_file {
  file("/var/log/syslog-ng/sealed.log" create-dirs(yes) perm(0644));
};

log { source(s_net); destination(d_file); };
devops/offline/airgap/verify-egress-block.sh (new file)

#!/usr/bin/env bash
# Verification harness for sealed-mode egress: Docker/Compose or Kubernetes.
# Examples:
#   ./verify-egress-block.sh docker stella_default out/airgap-probe.json
#   ./verify-egress-block.sh k8s default out/k8s-probe.json
set -euo pipefail

mode=${1:-}
context=${2:-}
out=${3:-}

if [[ -z "$mode" || -z "$context" || -z "$out" ]]; then
  echo "Usage: $0 <docker|k8s> <network|namespace> <output.json> [target ...]" >&2
  exit 2
fi
shift 3
TARGETS=("$@")

ROOT=$(cd "$(dirname "$0")/../../.." && pwd)
PROBE_PY="$ROOT/ops/devops/sealed-mode-ci/egress_probe.py"

case "$mode" in
  docker)
    network="$context"
    python3 "$PROBE_PY" --network "$network" --output "$out" "${TARGETS[@]}"
    ;;
  k8s|kubernetes)
    ns="$context"
    targets=("${TARGETS[@]}")
    if [[ ${#targets[@]} -eq 0 ]]; then
      targets=("https://example.com" "https://www.cloudflare.com" "https://releases.stella-ops.org/healthz")
    fi
    image="curlimages/curl:8.6.0"
    tmpfile=$(mktemp)
    # The target list is expanded by this shell; \$-escaped vars run inside the pod.
    cat > "$tmpfile" <<MANIFEST
apiVersion: v1
kind: Pod
metadata:
  name: sealed-egress-probe
  namespace: ${ns}
  labels:
    sealed: "true"
    stellaops.dev/purpose: sealed-mode
spec:
  restartPolicy: Never
  containers:
    - name: curl
      image: ${image}
      command: ["/bin/sh","-c"]
      args:
        - >
          set -eu;
          rc=0;
          for url in ${targets[@]}; do
            echo "PROBE \$url";
            if curl -fsS --max-time 8 "\$url"; then
              echo "UNEXPECTED_SUCCESS \$url";
              rc=1;
            else
              echo "BLOCKED \$url";
            fi;
          done;
          exit \$rc;
      securityContext:
        runAsNonRoot: true
        readOnlyRootFilesystem: true
MANIFEST
    kubectl apply -f "$tmpfile" >/dev/null
    kubectl wait --for=condition=Ready pod/sealed-egress-probe -n "$ns" --timeout=30s >/dev/null 2>&1 || true
    set +e
    kubectl wait --for=jsonpath='{.status.phase}'=Succeeded pod/sealed-egress-probe -n "$ns" --timeout=60s
    pod_rc=$?
    kubectl logs -n "$ns" sealed-egress-probe > "$out.log" 2>&1
    kubectl get pod/sealed-egress-probe -n "$ns" -o json > "$out"
    kubectl delete pod/sealed-egress-probe -n "$ns" >/dev/null 2>&1 || true
    set -e
    if [[ $pod_rc -ne 0 ]]; then
      echo "Egress check failed; see $out and $out.log" >&2
      exit 1
    fi
    ;;
  *)
    echo "Unknown mode: $mode" >&2
    exit 2
    ;;
esac

echo "Egress verification complete → $out"