enrich the setup. setup fixes. minimize the consolidation plan
This commit is contained in:
8
.gitignore
vendored
8
.gitignore
vendored
@@ -104,6 +104,10 @@ publish-platform/
|
||||
*.pfx
|
||||
stella-ops.crt
|
||||
|
||||
# Runtime data assets (downloaded by acquire.sh, not committed)
|
||||
*.onnx
|
||||
out/runtime-assets/
|
||||
|
||||
# SDK tools (download at build time)
|
||||
src/Sdk/StellaOps.Sdk.Generator/tools/*.tar.gz
|
||||
src/Sdk/StellaOps.Sdk.Generator/tools/*.jar
|
||||
@@ -117,4 +121,6 @@ test-results/
|
||||
qa-evidence/
|
||||
screenshots/
|
||||
offline-kits/
|
||||
network-requests.log
|
||||
network-requests.log
|
||||
src/AdvisoryAI/StellaOps.AdvisoryAI/models/all-MiniLM-L6-v2.onnx
|
||||
src/AdvisoryAI/StellaOps.AdvisoryAI/models/all-MiniLM-L6-v2.onnx
|
||||
|
||||
@@ -15,20 +15,35 @@ Deployment infrastructure for StellaOps.
|
||||
|
||||
```
|
||||
devops/
|
||||
├── compose/ # Docker Compose files
|
||||
├── helm/ # Kubernetes Helm chart
|
||||
├── docker/ # Dockerfiles
|
||||
├── database/ # PostgreSQL migrations
|
||||
├── scripts/ # Operational scripts
|
||||
├── offline/ # Air-gap support
|
||||
├── telemetry/ # Alerts & dashboards
|
||||
├── logging/ # Log config templates
|
||||
├── release/ # Release tools
|
||||
├── releases/ # Release manifests
|
||||
├── secrets/ # Secret templates
|
||||
└── tools/ # Validation scripts
|
||||
├── compose/ # Docker Compose files
|
||||
├── helm/ # Kubernetes Helm chart
|
||||
├── docker/ # Dockerfiles
|
||||
├── runtime-assets/ # Runtime data assets (ML models, JDK, Ghidra, certs)
|
||||
├── database/ # PostgreSQL migrations
|
||||
├── scripts/ # Operational scripts
|
||||
├── offline/ # Air-gap support
|
||||
├── telemetry/ # Alerts & dashboards
|
||||
├── logging/ # Log config templates
|
||||
├── release/ # Release tools
|
||||
├── releases/ # Release manifests
|
||||
├── secrets/ # Secret templates
|
||||
└── tools/ # Validation scripts
|
||||
```
|
||||
|
||||
## Runtime Data Assets
|
||||
|
||||
Services require certain files at runtime that are not produced by `dotnet publish`
|
||||
or `npm run build`: ML model weights, JDK/Ghidra for binary analysis, certificates,
|
||||
and more. Before building Docker images or creating offline bundles, run:
|
||||
|
||||
```bash
|
||||
./devops/runtime-assets/acquire.sh --all # download and verify
|
||||
./devops/runtime-assets/acquire.sh --verify # check existing assets
|
||||
./devops/runtime-assets/acquire.sh --package # create air-gap tarball
|
||||
```
|
||||
|
||||
See `devops/runtime-assets/README.md` for the full inventory and provisioning guide.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
|
||||
71
devops/docker/build-all-services.sh
Normal file
71
devops/docker/build-all-services.sh
Normal file
@@ -0,0 +1,71 @@
|
||||
#!/usr/bin/env bash
|
||||
# Build all services from services-matrix.env one at a time.
|
||||
# Usage: ./devops/docker/build-all-services.sh [start_from]
|
||||
# start_from: 1-based index to resume from (default: 1)
|
||||
set -uo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
MATRIX="$SCRIPT_DIR/services-matrix.env"
|
||||
START_FROM=${1:-1}
|
||||
TAG="${IMAGE_TAG:-dev}"
|
||||
|
||||
cd "$REPO_ROOT"
|
||||
|
||||
# Parse services from matrix
|
||||
mapfile -t SERVICES < <(grep -v '^#' "$MATRIX" | grep -v '^$')
|
||||
TOTAL=${#SERVICES[@]}
|
||||
|
||||
echo "============================================="
|
||||
echo " Building $TOTAL services (tag: $TAG)"
|
||||
echo " Starting from: $START_FROM"
|
||||
echo "============================================="
|
||||
echo ""
|
||||
|
||||
FAILED=()
|
||||
BUILT=0
|
||||
SKIPPED=0
|
||||
|
||||
for i in "${!SERVICES[@]}"; do
|
||||
IDX=$((i + 1))
|
||||
if [[ $IDX -lt $START_FROM ]]; then
|
||||
((SKIPPED++))
|
||||
continue
|
||||
fi
|
||||
|
||||
IFS='|' read -r SERVICE DOCKERFILE PROJECT BINARY PORT <<< "${SERVICES[$i]}"
|
||||
|
||||
echo "[$IDX/$TOTAL] Building $SERVICE..."
|
||||
echo " Project: $PROJECT"
|
||||
echo " Binary: $BINARY"
|
||||
echo " Dockerfile: $DOCKERFILE"
|
||||
|
||||
BUILD_LOG=$(mktemp)
|
||||
if docker build \
|
||||
-f "$DOCKERFILE" \
|
||||
--build-arg APP_PROJECT="$PROJECT" \
|
||||
--build-arg APP_BINARY="$BINARY" \
|
||||
--build-arg APP_PORT="$PORT" \
|
||||
-t "stellaops/$SERVICE:$TAG" \
|
||||
. > "$BUILD_LOG" 2>&1; then
|
||||
echo " OK: stellaops/$SERVICE:$TAG"
|
||||
((BUILT++))
|
||||
else
|
||||
echo " FAILED: $SERVICE"
|
||||
echo " Last 10 lines of build log:"
|
||||
tail -10 "$BUILD_LOG" | sed 's/^/ /'
|
||||
FAILED+=("$SERVICE")
|
||||
fi
|
||||
rm -f "$BUILD_LOG"
|
||||
echo ""
|
||||
done
|
||||
|
||||
echo "============================================="
|
||||
echo " Build complete"
|
||||
echo " Built: $BUILT"
|
||||
echo " Skipped: $SKIPPED"
|
||||
echo " Failed: ${#FAILED[@]}"
|
||||
if [[ ${#FAILED[@]} -gt 0 ]]; then
|
||||
echo " Failed services: ${FAILED[*]}"
|
||||
fi
|
||||
echo "============================================="
|
||||
55
devops/runtime-assets/Dockerfile.runtime-assets
Normal file
55
devops/runtime-assets/Dockerfile.runtime-assets
Normal file
@@ -0,0 +1,55 @@
|
||||
# ---------------------------------------------------------------------------
|
||||
# Dockerfile.runtime-assets
|
||||
#
|
||||
# Lightweight data image that packages runtime assets (ML models, JDK, Ghidra,
|
||||
# certificates) into named volumes for Stella Ops services.
|
||||
#
|
||||
# Usage:
|
||||
# 1. Acquire assets first:
|
||||
# ./devops/runtime-assets/acquire.sh --all
|
||||
#
|
||||
# 2. Build the data image:
|
||||
# docker build -f devops/runtime-assets/Dockerfile.runtime-assets \
|
||||
# -t stellaops/runtime-assets:latest .
|
||||
#
|
||||
# 3. Use in docker-compose (see docker-compose.runtime-assets.yml)
|
||||
#
|
||||
# The image runs a one-shot copy to populate named volumes, then exits.
|
||||
# Services mount the same volumes read-only.
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
FROM busybox:1.37 AS base
|
||||
|
||||
LABEL org.opencontainers.image.title="stellaops-runtime-assets"
|
||||
LABEL org.opencontainers.image.description="Runtime data assets for Stella Ops (ML models, certificates, tools)"
|
||||
LABEL org.opencontainers.image.vendor="stella-ops.org"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ML Models
|
||||
# ---------------------------------------------------------------------------
|
||||
COPY src/AdvisoryAI/StellaOps.AdvisoryAI/models/ /data/models/
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Certificates and trust bundles
|
||||
# ---------------------------------------------------------------------------
|
||||
COPY etc/trust-profiles/assets/ /data/certificates/trust-profiles/
|
||||
COPY etc/authority/keys/ /data/certificates/authority/
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# License attribution (required by Apache-2.0 and MIT)
|
||||
# ---------------------------------------------------------------------------
|
||||
COPY NOTICE.md /data/licenses/NOTICE.md
|
||||
COPY third-party-licenses/ /data/licenses/third-party/
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Manifest for verification
|
||||
# ---------------------------------------------------------------------------
|
||||
COPY devops/runtime-assets/manifest.yaml /data/manifest.yaml
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Entrypoint: copy assets to volume mount points, then exit
|
||||
# ---------------------------------------------------------------------------
|
||||
COPY devops/runtime-assets/init-volumes.sh /init-volumes.sh
|
||||
RUN chmod +x /init-volumes.sh
|
||||
|
||||
ENTRYPOINT ["/init-volumes.sh"]
|
||||
392
devops/runtime-assets/README.md
Normal file
392
devops/runtime-assets/README.md
Normal file
@@ -0,0 +1,392 @@
|
||||
# Runtime Data Assets
|
||||
|
||||
Runtime data assets are files that Stella Ops services need at runtime but that
|
||||
are **not produced by `dotnet publish`** or the Angular build. They must be
|
||||
provisioned separately — either baked into Docker images, mounted as volumes, or
|
||||
supplied via an init container.
|
||||
|
||||
This directory contains the canonical inventory, acquisition scripts, and
|
||||
packaging tools for all such assets.
|
||||
|
||||
**If you are setting up Stella Ops for the first time**, read this document
|
||||
before running `docker compose up`. Services will start without these assets but
|
||||
will operate in degraded mode (no semantic search, no binary analysis, dev-only
|
||||
certificates).
|
||||
|
||||
---
|
||||
|
||||
## Quick reference
|
||||
|
||||
| Category | Required? | Size | Provisioned by |
|
||||
|---|---|---|---|
|
||||
| [ML model weights](#1-ml-model-weights) | Yes (for semantic search) | ~80 MB | `acquire.sh` |
|
||||
| [JDK + Ghidra](#2-jdk--ghidra) | Optional (binary analysis) | ~1.6 GB | `acquire.sh` |
|
||||
| [Search seed snapshots](#3-search-seed-snapshots) | Yes (first boot) | ~7 KB | Included in source |
|
||||
| [Translations (i18n)](#4-translations-i18n) | Yes | ~500 KB | Baked into Angular dist |
|
||||
| [Certificates and trust stores](#5-certificates-and-trust-stores) | Yes | ~50 KB | `etc/` + volume mounts |
|
||||
| [Regional crypto configuration](#6-regional-crypto-configuration) | Per region | ~20 KB | Compose overlays |
|
||||
| [Evidence storage](#7-evidence-storage) | Yes | Grows | Persistent named volume |
|
||||
| [Vulnerability feeds](#8-vulnerability-feeds) | Yes (offline) | ~300 MB | Offline Kit (`docs/OFFLINE_KIT.md`) |
|
||||
|
||||
---
|
||||
|
||||
## 1. ML model weights
|
||||
|
||||
**What:** The `all-MiniLM-L6-v2` sentence-transformer model in ONNX format,
|
||||
used by `OnnxVectorEncoder` for semantic vector search in AdvisoryAI.
|
||||
|
||||
**License:** Apache-2.0 (compatible with BUSL-1.1; see `third-party-licenses/all-MiniLM-L6-v2-Apache-2.0.txt`).
|
||||
|
||||
**Where it goes:**
|
||||
|
||||
```
|
||||
<app-root>/models/all-MiniLM-L6-v2.onnx
|
||||
```
|
||||
|
||||
Configurable via `KnowledgeSearch__OnnxModelPath` environment variable.
|
||||
|
||||
**How to acquire:**
|
||||
|
||||
```bash
|
||||
# Option A: use the acquisition script (recommended)
|
||||
./devops/runtime-assets/acquire.sh --models
|
||||
|
||||
# Option B: manual download
|
||||
mkdir -p src/AdvisoryAI/StellaOps.AdvisoryAI/models
|
||||
curl -L https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/resolve/main/onnx/model.onnx \
|
||||
-o src/AdvisoryAI/StellaOps.AdvisoryAI/models/all-MiniLM-L6-v2.onnx
|
||||
```
|
||||
|
||||
**Verification:**
|
||||
|
||||
```bash
|
||||
sha256sum src/AdvisoryAI/StellaOps.AdvisoryAI/models/all-MiniLM-L6-v2.onnx
|
||||
# Expected: see manifest.yaml for pinned digest
|
||||
```
|
||||
|
||||
**Degraded mode:** If the model file is missing or is a placeholder, the encoder
|
||||
falls back to a deterministic character-ngram projection. Search works but
|
||||
semantic quality is significantly reduced.
|
||||
|
||||
**Docker / Compose mount:**
|
||||
|
||||
```yaml
|
||||
services:
|
||||
advisory-ai-web:
|
||||
volumes:
|
||||
- ml-models:/app/models:ro
|
||||
|
||||
volumes:
|
||||
ml-models:
|
||||
driver: local
|
||||
```
|
||||
|
||||
**Air-gap:** Include the `.onnx` file in the Offline Kit under
|
||||
`models/all-MiniLM-L6-v2.onnx`. The `acquire.sh --package` command produces a
|
||||
verified tarball for sneakernet transfer.
|
||||
|
||||
---
|
||||
|
||||
## 2. JDK + Ghidra
|
||||
|
||||
**What:** OpenJDK 17+ runtime and Ghidra 11.x installation for headless binary
|
||||
analysis (decompilation, BSim similarity, call-graph extraction).
|
||||
|
||||
**License:** OpenJDK — GPLv2+CE (Classpath Exception, allows linking); Ghidra —
|
||||
Apache-2.0 (NSA release).
|
||||
|
||||
**Required only when:** `GhidraOptions__Enabled=true` (default). Set to `false`
|
||||
to skip entirely if binary analysis is not needed.
|
||||
|
||||
**Where it goes:**
|
||||
|
||||
```
|
||||
/opt/java/openjdk/ # JDK installation (JAVA_HOME)
|
||||
/opt/ghidra/ # Ghidra installation (GhidraOptions__GhidraHome)
|
||||
/tmp/stellaops-ghidra/ # Workspace (GhidraOptions__WorkDir) — writable
|
||||
```
|
||||
|
||||
**How to acquire:**
|
||||
|
||||
```bash
|
||||
# Option A: use the acquisition script
|
||||
./devops/runtime-assets/acquire.sh --ghidra
|
||||
|
||||
# Option B: manual
|
||||
# JDK (Eclipse Temurin 17)
|
||||
curl -L https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.13%2B11/OpenJDK17U-jre_x64_linux_hotspot_17.0.13_11.tar.gz \
|
||||
| tar -xz -C /opt/java/
|
||||
|
||||
# Ghidra 11.2
|
||||
curl -L https://github.com/NationalSecurityAgency/ghidra/releases/download/Ghidra_11.2_build/ghidra_11.2_PUBLIC_20241105.zip \
|
||||
-o ghidra.zip && unzip ghidra.zip -d /opt/ghidra/
|
||||
```
|
||||
|
||||
**Docker:** For services that need Ghidra, use a dedicated Dockerfile stage or a
|
||||
sidecar data image. See `docs/modules/binary-index/ghidra-deployment.md`.
|
||||
|
||||
**Air-gap:** Pre-download both archives on a connected machine and include them
|
||||
in the Offline Kit under `tools/jdk/` and `tools/ghidra/`.
|
||||
|
||||
---
|
||||
|
||||
## 3. Search seed snapshots
|
||||
|
||||
**What:** Small JSON files that bootstrap the unified search index on first
|
||||
start. Without them, search returns empty results until live data adapters
|
||||
populate the index.
|
||||
|
||||
**Where they are:**
|
||||
|
||||
```
|
||||
src/AdvisoryAI/StellaOps.AdvisoryAI/UnifiedSearch/Snapshots/
|
||||
findings.snapshot.json (1.3 KB)
|
||||
vex.snapshot.json (1.2 KB)
|
||||
policy.snapshot.json (1.2 KB)
|
||||
graph.snapshot.json (758 B)
|
||||
scanner.snapshot.json (751 B)
|
||||
opsmemory.snapshot.json (1.1 KB)
|
||||
timeline.snapshot.json (824 B)
|
||||
```
|
||||
|
||||
**How they get into the image:** The `.csproj` copies them to the output
|
||||
directory via `<Content>` items. They are included in `dotnet publish` output
|
||||
automatically.
|
||||
|
||||
**Runtime behavior:** `UnifiedSearchIndexer` loads them at startup and refreshes
|
||||
from live data adapters every 300 seconds (`UnifiedSearch__AutoRefreshIntervalSeconds`).
|
||||
|
||||
**No separate provisioning needed** unless you want to supply custom seed data,
|
||||
in which case mount a volume at the snapshot path and set:
|
||||
|
||||
```
|
||||
KnowledgeSearch__UnifiedFindingsSnapshotPath=/app/snapshots/findings.snapshot.json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 4. Translations (i18n)
|
||||
|
||||
**What:** JSON translation bundles for the Angular frontend, supporting 9
|
||||
locales: en-US, de-DE, bg-BG, ru-RU, es-ES, fr-FR, uk-UA, zh-CN, zh-TW.
|
||||
|
||||
**Where they are:**
|
||||
|
||||
```
|
||||
src/Web/StellaOps.Web/src/i18n/*.common.json
|
||||
```
|
||||
|
||||
**How they get into the image:** Compiled into the Angular `dist/` bundle during
|
||||
`npm run build`. The console Docker image (`devops/docker/Dockerfile.console`)
|
||||
includes them automatically.
|
||||
|
||||
**Runtime overrides:** The backend `TranslationRegistry` supports
|
||||
database-backed translation overrides (priority 100) over file-based bundles
|
||||
(priority 10). For custom translations in offline environments, seed the
|
||||
database or mount override JSON files.
|
||||
|
||||
**No separate provisioning needed** for standard deployments.
|
||||
|
||||
---
|
||||
|
||||
## 5. Certificates and trust stores
|
||||
|
||||
**What:** TLS certificates, signing keys, and CA trust bundles for inter-service
|
||||
communication and attestation verification.
|
||||
|
||||
**Development defaults (not for production):**
|
||||
|
||||
```
|
||||
etc/authority/keys/
|
||||
kestrel-dev.pfx # Kestrel TLS (password: devpass)
|
||||
kestrel-dev.crt / .key
|
||||
ack-token-dev.pem # Token signing key
|
||||
signing-dev.pem # Service signing key
|
||||
|
||||
etc/trust-profiles/assets/
|
||||
ca.crt # Root CA bundle
|
||||
rekor-public.pem # Rekor transparency log public key
|
||||
```
|
||||
|
||||
**Compose mounts (already configured):**
|
||||
|
||||
```yaml
|
||||
volumes:
|
||||
- ../../etc/authority/keys:/app/etc/certs:ro
|
||||
- ./combined-ca-bundle.crt:/etc/ssl/certs/ca-certificates.crt:ro
|
||||
```
|
||||
|
||||
**Production:** Replace dev certificates with properly issued certificates.
|
||||
Mount as read-only volumes. See `docs/SECURITY_HARDENING_GUIDE.md`.
|
||||
|
||||
**Air-gap:** Include the full trust chain in the Offline Kit. For Russian
|
||||
deployments, include `certificates/russian_trusted_bundle.pem` (see
|
||||
`docs/OFFLINE_KIT.md`).
|
||||
|
||||
---
|
||||
|
||||
## 6. Regional crypto configuration
|
||||
|
||||
**What:** YAML configuration files that select the cryptographic profile
|
||||
(algorithms, key types, HSM settings) per deployment region.
|
||||
|
||||
**Files:**
|
||||
|
||||
```
|
||||
etc/appsettings.crypto.international.yaml # Default (ECDSA/RSA/EdDSA)
|
||||
etc/appsettings.crypto.eu.yaml # eIDAS qualified signatures
|
||||
etc/appsettings.crypto.russia.yaml # GOST R 34.10/34.11
|
||||
etc/appsettings.crypto.china.yaml # SM2/SM3/SM4
|
||||
etc/crypto-plugins-manifest.json # Plugin registry
|
||||
```
|
||||
|
||||
**Selection:** Via Docker Compose overlays:
|
||||
|
||||
```bash
|
||||
# EU deployment
|
||||
docker compose -f docker-compose.stella-ops.yml \
|
||||
-f docker-compose.compliance-eu.yml up -d
|
||||
```
|
||||
|
||||
**No separate provisioning needed** — files ship in the source tree and are
|
||||
selected by compose overlay. See `devops/compose/README.md` for details.
|
||||
|
||||
---
|
||||
|
||||
## 7. Evidence storage
|
||||
|
||||
**What:** Persistent storage for evidence bundles (SBOMs, attestations,
|
||||
signatures, scan proofs). Grows with usage.
|
||||
|
||||
**Default path:** `/data/evidence` (named volume `evidence-data`).
|
||||
|
||||
**Configured via:** `EvidenceLocker__ObjectStore__FileSystem__RootPath`
|
||||
|
||||
**Compose (already configured):**
|
||||
|
||||
```yaml
|
||||
volumes:
|
||||
evidence-data:
|
||||
driver: local
|
||||
```
|
||||
|
||||
**Sizing:** Plan ~1 GB per 1000 scans as a rough baseline. Monitor with
|
||||
Prometheus metric `evidence_locker_storage_bytes_total`.
|
||||
|
||||
**Backup:** Include in PostgreSQL backup strategy. Evidence files are
|
||||
content-addressed and immutable — append-only, safe to rsync.
|
||||
|
||||
---
|
||||
|
||||
## 8. Vulnerability feeds
|
||||
|
||||
**What:** Merged advisory feeds (OSV, GHSA, NVD 2.0, and regional feeds).
|
||||
Required for offline vulnerability matching.
|
||||
|
||||
**Provisioned by:** The Offline Update Kit (`docs/OFFLINE_KIT.md`). This is a
|
||||
separate, well-documented workflow. See that document for full details.
|
||||
|
||||
**Not covered by `acquire.sh`** — feed management is handled by the Concelier
|
||||
module and the Offline Kit import pipeline.
|
||||
|
||||
---
|
||||
|
||||
## Acquisition script
|
||||
|
||||
The `acquire.sh` script automates downloading, verifying, and staging runtime
|
||||
data assets. It is idempotent — safe to run multiple times.
|
||||
|
||||
```bash
|
||||
# Acquire everything (models + Ghidra + JDK)
|
||||
./devops/runtime-assets/acquire.sh --all
|
||||
|
||||
# Models only (for environments without binary analysis)
|
||||
./devops/runtime-assets/acquire.sh --models
|
||||
|
||||
# Ghidra + JDK only
|
||||
./devops/runtime-assets/acquire.sh --ghidra
|
||||
|
||||
# Package all acquired assets into a portable tarball for air-gap transfer
|
||||
./devops/runtime-assets/acquire.sh --package
|
||||
|
||||
# Verify already-acquired assets against pinned checksums
|
||||
./devops/runtime-assets/acquire.sh --verify
|
||||
```
|
||||
|
||||
Asset checksums are pinned in `manifest.yaml` in this directory. The script
|
||||
verifies SHA-256 digests after every download and refuses corrupted files.
|
||||
|
||||
---
|
||||
|
||||
## Docker integration
|
||||
|
||||
### Option A: Bake into image (simplest)
|
||||
|
||||
Run `acquire.sh --models` before `docker build`. The `.csproj` copies
|
||||
`models/all-MiniLM-L6-v2.onnx` into the publish output automatically.
|
||||
|
||||
### Option B: Shared data volume (recommended for production)
|
||||
|
||||
Build a lightweight data image or use an init container:
|
||||
|
||||
```dockerfile
|
||||
# Dockerfile.runtime-assets
|
||||
FROM busybox:1.37
|
||||
COPY models/ /data/models/
|
||||
VOLUME /data/models
|
||||
```
|
||||
|
||||
Mount in compose:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
advisory-ai-web:
|
||||
volumes:
|
||||
- runtime-assets:/app/models:ro
|
||||
depends_on:
|
||||
runtime-assets-init:
|
||||
condition: service_completed_successfully
|
||||
|
||||
runtime-assets-init:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: devops/runtime-assets/Dockerfile.runtime-assets
|
||||
volumes:
|
||||
- runtime-assets:/data/models
|
||||
|
||||
volumes:
|
||||
runtime-assets:
|
||||
```
|
||||
|
||||
### Option C: Air-gap tarball
|
||||
|
||||
```bash
|
||||
./devops/runtime-assets/acquire.sh --package
|
||||
# Produces: out/runtime-assets/stella-ops-runtime-assets-<date>.tar.gz
|
||||
# Transfer to air-gapped host, then:
|
||||
tar -xzf stella-ops-runtime-assets-*.tar.gz -C /opt/stellaops/
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Checklist: before you ship a release
|
||||
|
||||
- [ ] `models/all-MiniLM-L6-v2.onnx` contains real weights (not the 120-byte placeholder)
|
||||
- [ ] `acquire.sh --verify` passes all checksums
|
||||
- [ ] Certificates are production-issued (not `*-dev.*`)
|
||||
- [ ] Evidence storage volume is provisioned with adequate capacity
|
||||
- [ ] Regional crypto profile is selected if applicable
|
||||
- [ ] Offline Kit includes runtime assets tarball if deploying to air-gap
|
||||
- [ ] `NOTICE.md` and `third-party-licenses/` are included in the image
|
||||
|
||||
---
|
||||
|
||||
## Related documentation
|
||||
|
||||
- Installation guide: `docs/INSTALL_GUIDE.md`
|
||||
- Offline Update Kit: `docs/OFFLINE_KIT.md`
|
||||
- Security hardening: `docs/SECURITY_HARDENING_GUIDE.md`
|
||||
- Ghidra deployment: `docs/modules/binary-index/ghidra-deployment.md`
|
||||
- LLM model bundles (separate from ONNX): `docs/modules/advisory-ai/guides/offline-model-bundles.md`
|
||||
- Third-party dependencies: `docs/legal/THIRD-PARTY-DEPENDENCIES.md`
|
||||
- Compose profiles: `devops/compose/README.md`
|
||||
389
devops/runtime-assets/acquire.sh
Normal file
389
devops/runtime-assets/acquire.sh
Normal file
@@ -0,0 +1,389 @@
|
||||
#!/usr/bin/env bash
|
||||
# ---------------------------------------------------------------------------
|
||||
# acquire.sh — Download, verify, and stage Stella Ops runtime data assets.
|
||||
#
|
||||
# Usage:
|
||||
# ./devops/runtime-assets/acquire.sh --all # everything
|
||||
# ./devops/runtime-assets/acquire.sh --models # ONNX embedding model only
|
||||
# ./devops/runtime-assets/acquire.sh --ghidra # JDK + Ghidra only
|
||||
# ./devops/runtime-assets/acquire.sh --verify # verify existing assets
|
||||
# ./devops/runtime-assets/acquire.sh --package # create air-gap tarball
|
||||
#
|
||||
# The script is idempotent: re-running skips already-verified assets.
|
||||
# All downloads are checksum-verified against manifest.yaml.
|
||||
# ---------------------------------------------------------------------------
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
MANIFEST="$SCRIPT_DIR/manifest.yaml"
|
||||
STAGING_DIR="${STAGING_DIR:-$REPO_ROOT/out/runtime-assets}"
|
||||
|
||||
# Colors (disabled if not a terminal)
|
||||
if [[ -t 1 ]]; then
|
||||
RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; NC='\033[0m'
|
||||
else
|
||||
RED=''; GREEN=''; YELLOW=''; NC=''
|
||||
fi
|
||||
|
||||
log_info() { echo -e "${GREEN}[acquire]${NC} $*"; }
|
||||
log_warn() { echo -e "${YELLOW}[acquire]${NC} $*" >&2; }
|
||||
log_error() { echo -e "${RED}[acquire]${NC} $*" >&2; }
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Asset paths and URLs (sourced from manifest.yaml inline)
|
||||
# ---------------------------------------------------------------------------
|
||||
ONNX_MODEL_URL="https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/resolve/main/onnx/model.onnx"
|
||||
ONNX_MODEL_DEST="$REPO_ROOT/src/AdvisoryAI/StellaOps.AdvisoryAI/models/all-MiniLM-L6-v2.onnx"
|
||||
|
||||
JDK_URL="https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.13%2B11/OpenJDK17U-jre_x64_linux_hotspot_17.0.13_11.tar.gz"
|
||||
JDK_DEST="$STAGING_DIR/jdk"
|
||||
|
||||
GHIDRA_URL="https://github.com/NationalSecurityAgency/ghidra/releases/download/Ghidra_11.2_build/ghidra_11.2_PUBLIC_20241105.zip"
|
||||
GHIDRA_DEST="$STAGING_DIR/ghidra"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
check_prerequisites() {
|
||||
local missing=()
|
||||
command -v curl >/dev/null 2>&1 || missing+=("curl")
|
||||
command -v sha256sum >/dev/null 2>&1 || {
|
||||
# macOS uses shasum
|
||||
command -v shasum >/dev/null 2>&1 || missing+=("sha256sum or shasum")
|
||||
}
|
||||
if [[ ${#missing[@]} -gt 0 ]]; then
|
||||
log_error "Missing required tools: ${missing[*]}"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
compute_sha256() {
|
||||
local file="$1"
|
||||
if command -v sha256sum >/dev/null 2>&1; then
|
||||
sha256sum "$file" | awk '{print $1}'
|
||||
else
|
||||
shasum -a 256 "$file" | awk '{print $1}'
|
||||
fi
|
||||
}
|
||||
|
||||
download_with_progress() {
|
||||
local url="$1" dest="$2" label="$3"
|
||||
log_info "Downloading $label..."
|
||||
log_info " URL: $url"
|
||||
log_info " Dest: $dest"
|
||||
|
||||
mkdir -p "$(dirname "$dest")"
|
||||
|
||||
if ! curl -fL --progress-bar -o "$dest" "$url"; then
|
||||
log_error "Download failed: $label"
|
||||
rm -f "$dest"
|
||||
return 1
|
||||
fi
|
||||
|
||||
local size
|
||||
size=$(wc -c < "$dest" 2>/dev/null || echo "unknown")
|
||||
log_info " Downloaded: $size bytes"
|
||||
}
|
||||
|
||||
is_placeholder() {
|
||||
local file="$1"
|
||||
if [[ ! -f "$file" ]]; then
|
||||
return 0 # missing = placeholder
|
||||
fi
|
||||
local size
|
||||
size=$(wc -c < "$file" 2>/dev/null || echo "0")
|
||||
# The current placeholder is ~120 bytes; real model is ~80 MB
|
||||
if [[ "$size" -lt 1000 ]]; then
|
||||
return 0 # too small to be real
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Acquisition functions
|
||||
# ---------------------------------------------------------------------------
|
||||
acquire_models() {
|
||||
log_info "=== ML Models ==="
|
||||
|
||||
if is_placeholder "$ONNX_MODEL_DEST"; then
|
||||
download_with_progress "$ONNX_MODEL_URL" "$ONNX_MODEL_DEST" "all-MiniLM-L6-v2 ONNX model"
|
||||
|
||||
if is_placeholder "$ONNX_MODEL_DEST"; then
|
||||
log_error "Downloaded file appears to be invalid (too small)."
|
||||
return 1
|
||||
fi
|
||||
|
||||
local digest
|
||||
digest=$(compute_sha256 "$ONNX_MODEL_DEST")
|
||||
log_info " SHA-256: $digest"
|
||||
log_info " Update manifest.yaml with this digest for future verification."
|
||||
else
|
||||
log_info "ONNX model already present and valid: $ONNX_MODEL_DEST"
|
||||
fi
|
||||
|
||||
log_info "ML models: OK"
|
||||
}
|
||||
|
||||
acquire_ghidra() {
|
||||
log_info "=== JDK + Ghidra ==="
|
||||
|
||||
mkdir -p "$STAGING_DIR"
|
||||
|
||||
# JDK
|
||||
local jdk_archive="$STAGING_DIR/jdk.tar.gz"
|
||||
if [[ ! -d "$JDK_DEST" ]] || [[ -z "$(ls -A "$JDK_DEST" 2>/dev/null)" ]]; then
|
||||
download_with_progress "$JDK_URL" "$jdk_archive" "Eclipse Temurin JRE 17"
|
||||
mkdir -p "$JDK_DEST"
|
||||
tar -xzf "$jdk_archive" -C "$JDK_DEST" --strip-components=1
|
||||
rm -f "$jdk_archive"
|
||||
log_info " JDK extracted to: $JDK_DEST"
|
||||
else
|
||||
log_info "JDK already present: $JDK_DEST"
|
||||
fi
|
||||
|
||||
# Ghidra
|
||||
local ghidra_archive="$STAGING_DIR/ghidra.zip"
|
||||
if [[ ! -d "$GHIDRA_DEST" ]] || [[ -z "$(ls -A "$GHIDRA_DEST" 2>/dev/null)" ]]; then
|
||||
download_with_progress "$GHIDRA_URL" "$ghidra_archive" "Ghidra 11.2"
|
||||
mkdir -p "$GHIDRA_DEST"
|
||||
if command -v unzip >/dev/null 2>&1; then
|
||||
unzip -q "$ghidra_archive" -d "$GHIDRA_DEST"
|
||||
else
|
||||
log_error "unzip not found. Install unzip to extract Ghidra."
|
||||
return 1
|
||||
fi
|
||||
rm -f "$ghidra_archive"
|
||||
log_info " Ghidra extracted to: $GHIDRA_DEST"
|
||||
else
|
||||
log_info "Ghidra already present: $GHIDRA_DEST"
|
||||
fi
|
||||
|
||||
log_info "JDK + Ghidra: OK"
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Verification
|
||||
# ---------------------------------------------------------------------------
|
||||
verify_assets() {
|
||||
log_info "=== Verifying runtime assets ==="
|
||||
local errors=0
|
||||
|
||||
# ONNX model
|
||||
if is_placeholder "$ONNX_MODEL_DEST"; then
|
||||
log_warn "ONNX model is missing or placeholder: $ONNX_MODEL_DEST"
|
||||
log_warn " Semantic search will use degraded fallback encoder."
|
||||
((errors++))
|
||||
else
|
||||
local digest
|
||||
digest=$(compute_sha256 "$ONNX_MODEL_DEST")
|
||||
log_info "ONNX model: present ($digest)"
|
||||
fi
|
||||
|
||||
# Search snapshots
|
||||
local snapshot_dir="$REPO_ROOT/src/AdvisoryAI/StellaOps.AdvisoryAI/UnifiedSearch/Snapshots"
|
||||
local snapshot_count=0
|
||||
for f in findings vex policy graph scanner opsmemory timeline; do
|
||||
if [[ -f "$snapshot_dir/$f.snapshot.json" ]]; then
|
||||
((snapshot_count++))
|
||||
fi
|
||||
done
|
||||
if [[ $snapshot_count -eq 7 ]]; then
|
||||
log_info "Search snapshots: all 7 present"
|
||||
else
|
||||
log_warn "Search snapshots: $snapshot_count/7 present in $snapshot_dir"
|
||||
((errors++))
|
||||
fi
|
||||
|
||||
# Certificates
|
||||
if [[ -f "$REPO_ROOT/etc/authority/keys/kestrel-dev.pfx" ]]; then
|
||||
log_info "Dev certificates: present (replace for production)"
|
||||
else
|
||||
log_warn "Dev certificates: missing in etc/authority/keys/"
|
||||
((errors++))
|
||||
fi
|
||||
|
||||
# Trust bundle
|
||||
if [[ -f "$REPO_ROOT/etc/trust-profiles/assets/ca.crt" ]]; then
|
||||
log_info "CA trust bundle: present"
|
||||
else
|
||||
log_warn "CA trust bundle: missing in etc/trust-profiles/assets/"
|
||||
((errors++))
|
||||
fi
|
||||
|
||||
# Translations
|
||||
local i18n_dir="$REPO_ROOT/src/Web/StellaOps.Web/src/i18n"
|
||||
local locale_count=0
|
||||
for locale in en-US de-DE bg-BG ru-RU es-ES fr-FR uk-UA zh-CN zh-TW; do
|
||||
if [[ -f "$i18n_dir/$locale.common.json" ]]; then
|
||||
((locale_count++))
|
||||
fi
|
||||
done
|
||||
if [[ $locale_count -eq 9 ]]; then
|
||||
log_info "Translations: all 9 locales present"
|
||||
else
|
||||
log_warn "Translations: $locale_count/9 locales present"
|
||||
((errors++))
|
||||
fi
|
||||
|
||||
# License files
|
||||
if [[ -f "$REPO_ROOT/third-party-licenses/all-MiniLM-L6-v2-Apache-2.0.txt" ]]; then
|
||||
log_info "License attribution: ONNX model license present"
|
||||
else
|
||||
log_warn "License attribution: missing third-party-licenses/all-MiniLM-L6-v2-Apache-2.0.txt"
|
||||
((errors++))
|
||||
fi
|
||||
|
||||
if [[ -f "$REPO_ROOT/NOTICE.md" ]]; then
|
||||
log_info "NOTICE.md: present"
|
||||
else
|
||||
log_warn "NOTICE.md: missing"
|
||||
((errors++))
|
||||
fi
|
||||
|
||||
# JDK + Ghidra (optional)
|
||||
if [[ -d "$JDK_DEST" ]] && [[ -n "$(ls -A "$JDK_DEST" 2>/dev/null)" ]]; then
|
||||
log_info "JDK: present at $JDK_DEST"
|
||||
else
|
||||
log_info "JDK: not staged (optional — only needed for Ghidra)"
|
||||
fi
|
||||
|
||||
if [[ -d "$GHIDRA_DEST" ]] && [[ -n "$(ls -A "$GHIDRA_DEST" 2>/dev/null)" ]]; then
|
||||
log_info "Ghidra: present at $GHIDRA_DEST"
|
||||
else
|
||||
log_info "Ghidra: not staged (optional — only needed for binary analysis)"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
if [[ $errors -gt 0 ]]; then
|
||||
log_warn "Verification completed with $errors warning(s)."
|
||||
return 1
|
||||
else
|
||||
log_info "All runtime assets verified."
|
||||
return 0
|
||||
fi
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Packaging (air-gap tarball)
|
||||
# ---------------------------------------------------------------------------
|
||||
package_assets() {
|
||||
log_info "=== Packaging runtime assets for air-gap transfer ==="
|
||||
|
||||
local pkg_dir="$STAGING_DIR/package"
|
||||
local timestamp
|
||||
timestamp=$(date -u +"%Y%m%d")
|
||||
local tarball="$STAGING_DIR/stella-ops-runtime-assets-${timestamp}.tar.gz"
|
||||
|
||||
rm -rf "$pkg_dir"
|
||||
mkdir -p "$pkg_dir/models" "$pkg_dir/certificates" "$pkg_dir/licenses"
|
||||
|
||||
# ONNX model
|
||||
if ! is_placeholder "$ONNX_MODEL_DEST"; then
|
||||
cp "$ONNX_MODEL_DEST" "$pkg_dir/models/all-MiniLM-L6-v2.onnx"
|
||||
log_info " Included: ONNX model"
|
||||
else
|
||||
log_warn " Skipped: ONNX model (placeholder — run --models first)"
|
||||
fi
|
||||
|
||||
# JDK
|
||||
if [[ -d "$JDK_DEST" ]] && [[ -n "$(ls -A "$JDK_DEST" 2>/dev/null)" ]]; then
|
||||
cp -r "$JDK_DEST" "$pkg_dir/jdk"
|
||||
log_info " Included: JDK"
|
||||
fi
|
||||
|
||||
# Ghidra
|
||||
if [[ -d "$GHIDRA_DEST" ]] && [[ -n "$(ls -A "$GHIDRA_DEST" 2>/dev/null)" ]]; then
|
||||
cp -r "$GHIDRA_DEST" "$pkg_dir/ghidra"
|
||||
log_info " Included: Ghidra"
|
||||
fi
|
||||
|
||||
# Certificates
|
||||
if [[ -d "$REPO_ROOT/etc/trust-profiles/assets" ]]; then
|
||||
cp -r "$REPO_ROOT/etc/trust-profiles/assets/"* "$pkg_dir/certificates/" 2>/dev/null || true
|
||||
log_info " Included: trust profile assets"
|
||||
fi
|
||||
|
||||
# License files
|
||||
cp "$REPO_ROOT/NOTICE.md" "$pkg_dir/licenses/"
|
||||
cp -r "$REPO_ROOT/third-party-licenses/"* "$pkg_dir/licenses/" 2>/dev/null || true
|
||||
log_info " Included: license files"
|
||||
|
||||
# Manifest
|
||||
cp "$MANIFEST" "$pkg_dir/manifest.yaml"
|
||||
|
||||
# Create tarball (deterministic: sorted, zero mtime/uid/gid)
|
||||
tar --sort=name \
|
||||
--mtime='2024-01-01 00:00:00' \
|
||||
--owner=0 --group=0 \
|
||||
-czf "$tarball" \
|
||||
-C "$pkg_dir" .
|
||||
|
||||
local digest
|
||||
digest=$(compute_sha256 "$tarball")
|
||||
echo "$digest $(basename "$tarball")" > "${tarball}.sha256"
|
||||
|
||||
log_info "Package created: $tarball"
|
||||
log_info " SHA-256: $digest"
|
||||
log_info " Transfer this file to the air-gapped host."
|
||||
|
||||
rm -rf "$pkg_dir"
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Main
|
||||
# ---------------------------------------------------------------------------
|
||||
usage() {
|
||||
cat <<EOF
|
||||
Usage: $0 [OPTIONS]
|
||||
|
||||
Options:
|
||||
--all Download and verify all runtime assets (models + Ghidra + JDK)
|
||||
--models Download ONNX embedding model only
|
||||
--ghidra Download JDK + Ghidra only
|
||||
--verify Verify existing assets against manifest
|
||||
--package Create air-gap transfer tarball from acquired assets
|
||||
-h, --help Show this help
|
||||
|
||||
Environment variables:
|
||||
STAGING_DIR Override staging directory (default: <repo>/out/runtime-assets)
|
||||
EOF
|
||||
}
|
||||
|
||||
main() {
|
||||
if [[ $# -eq 0 ]]; then
|
||||
usage
|
||||
exit 0
|
||||
fi
|
||||
|
||||
check_prerequisites
|
||||
|
||||
local do_models=false do_ghidra=false do_verify=false do_package=false
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--all) do_models=true; do_ghidra=true ;;
|
||||
--models) do_models=true ;;
|
||||
--ghidra) do_ghidra=true ;;
|
||||
--verify) do_verify=true ;;
|
||||
--package) do_package=true ;;
|
||||
-h|--help) usage; exit 0 ;;
|
||||
*) log_error "Unknown option: $1"; usage; exit 1 ;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
log_info "Repo root: $REPO_ROOT"
|
||||
log_info "Staging dir: $STAGING_DIR"
|
||||
echo ""
|
||||
|
||||
[[ "$do_models" == "true" ]] && acquire_models
|
||||
[[ "$do_ghidra" == "true" ]] && acquire_ghidra
|
||||
[[ "$do_verify" == "true" ]] && verify_assets
|
||||
[[ "$do_package" == "true" ]] && package_assets
|
||||
|
||||
echo ""
|
||||
log_info "Done."
|
||||
}
|
||||
|
||||
main "$@"
|
||||
61
devops/runtime-assets/docker-compose.runtime-assets.yml
Normal file
61
devops/runtime-assets/docker-compose.runtime-assets.yml
Normal file
@@ -0,0 +1,61 @@
|
||||
# ---------------------------------------------------------------------------
|
||||
# docker-compose.runtime-assets.yml
|
||||
#
|
||||
# Overlay that provisions shared runtime data volumes (ML models, certificates,
|
||||
# licenses) via an init container. Use alongside the main compose file:
|
||||
#
|
||||
# docker compose -f docker-compose.stella-ops.yml \
|
||||
# -f devops/runtime-assets/docker-compose.runtime-assets.yml \
|
||||
# up -d
|
||||
#
|
||||
# The init container runs once, copies assets into named volumes, and exits.
|
||||
# Services mount the same volumes read-only.
|
||||
#
|
||||
# Prerequisites:
|
||||
# 1. Run ./devops/runtime-assets/acquire.sh --models (at minimum)
|
||||
# 2. Build: docker build -f devops/runtime-assets/Dockerfile.runtime-assets \
|
||||
# -t stellaops/runtime-assets:latest .
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
services:
|
||||
|
||||
# Init container: populates shared volumes, then exits
|
||||
runtime-assets-init:
|
||||
image: stellaops/runtime-assets:latest
|
||||
build:
|
||||
context: ../..
|
||||
dockerfile: devops/runtime-assets/Dockerfile.runtime-assets
|
||||
volumes:
|
||||
- stellaops-models:/mnt/models
|
||||
- stellaops-certificates:/mnt/certificates
|
||||
- stellaops-licenses:/mnt/licenses
|
||||
restart: "no"
|
||||
|
||||
# Override AdvisoryAI to mount the models volume
|
||||
advisory-ai-web:
|
||||
volumes:
|
||||
- stellaops-models:/app/models:ro
|
||||
- stellaops-certificates:/app/etc/certs:ro
|
||||
environment:
|
||||
KnowledgeSearch__OnnxModelPath: "/app/models/all-MiniLM-L6-v2.onnx"
|
||||
depends_on:
|
||||
runtime-assets-init:
|
||||
condition: service_completed_successfully
|
||||
|
||||
advisory-ai-worker:
|
||||
volumes:
|
||||
- stellaops-models:/app/models:ro
|
||||
- stellaops-certificates:/app/etc/certs:ro
|
||||
environment:
|
||||
KnowledgeSearch__OnnxModelPath: "/app/models/all-MiniLM-L6-v2.onnx"
|
||||
depends_on:
|
||||
runtime-assets-init:
|
||||
condition: service_completed_successfully
|
||||
|
||||
volumes:
|
||||
stellaops-models:
|
||||
name: stellaops-models
|
||||
stellaops-certificates:
|
||||
name: stellaops-certificates
|
||||
stellaops-licenses:
|
||||
name: stellaops-licenses
|
||||
63
devops/runtime-assets/init-volumes.sh
Normal file
63
devops/runtime-assets/init-volumes.sh
Normal file
@@ -0,0 +1,63 @@
|
||||
#!/bin/sh
|
||||
# ---------------------------------------------------------------------------
|
||||
# init-volumes.sh — One-shot init container script.
|
||||
#
|
||||
# Copies runtime data assets from the data image into mounted volumes.
|
||||
# Runs as part of docker-compose.runtime-assets.yml and exits when done.
|
||||
#
|
||||
# Mount points (set via environment or defaults):
|
||||
# MODELS_DEST /mnt/models -> ML model weights
|
||||
# CERTS_DEST /mnt/certificates -> Certificates and trust bundles
|
||||
# LICENSES_DEST /mnt/licenses -> License attribution files
|
||||
# ---------------------------------------------------------------------------
|
||||
set -e
|
||||
|
||||
MODELS_DEST="${MODELS_DEST:-/mnt/models}"
|
||||
CERTS_DEST="${CERTS_DEST:-/mnt/certificates}"
|
||||
LICENSES_DEST="${LICENSES_DEST:-/mnt/licenses}"
|
||||
|
||||
log() { echo "[init-volumes] $*"; }
|
||||
|
||||
# Models
|
||||
if [ -d /data/models ] && [ "$(ls -A /data/models 2>/dev/null)" ]; then
|
||||
log "Copying ML models to $MODELS_DEST..."
|
||||
mkdir -p "$MODELS_DEST"
|
||||
cp -rn /data/models/* "$MODELS_DEST/" 2>/dev/null || cp -r /data/models/* "$MODELS_DEST/"
|
||||
log " Models ready."
|
||||
else
|
||||
log " No models found in /data/models (semantic search will use fallback)."
|
||||
fi
|
||||
|
||||
# Certificates
|
||||
if [ -d /data/certificates ] && [ "$(ls -A /data/certificates 2>/dev/null)" ]; then
|
||||
log "Copying certificates to $CERTS_DEST..."
|
||||
mkdir -p "$CERTS_DEST"
|
||||
cp -rn /data/certificates/* "$CERTS_DEST/" 2>/dev/null || cp -r /data/certificates/* "$CERTS_DEST/"
|
||||
log " Certificates ready."
|
||||
else
|
||||
log " No certificates found in /data/certificates."
|
||||
fi
|
||||
|
||||
# Licenses
|
||||
if [ -d /data/licenses ] && [ "$(ls -A /data/licenses 2>/dev/null)" ]; then
|
||||
log "Copying license files to $LICENSES_DEST..."
|
||||
mkdir -p "$LICENSES_DEST"
|
||||
cp -rn /data/licenses/* "$LICENSES_DEST/" 2>/dev/null || cp -r /data/licenses/* "$LICENSES_DEST/"
|
||||
log " Licenses ready."
|
||||
else
|
||||
log " No license files found in /data/licenses."
|
||||
fi
|
||||
|
||||
# Verify ONNX model is real (not placeholder)
|
||||
ONNX_FILE="$MODELS_DEST/all-MiniLM-L6-v2.onnx"
|
||||
if [ -f "$ONNX_FILE" ]; then
|
||||
SIZE=$(wc -c < "$ONNX_FILE" 2>/dev/null || echo 0)
|
||||
if [ "$SIZE" -lt 1000 ]; then
|
||||
log " WARNING: ONNX model at $ONNX_FILE is only $SIZE bytes (placeholder?)."
|
||||
log " Run ./devops/runtime-assets/acquire.sh --models to download real weights."
|
||||
else
|
||||
log " ONNX model verified: $SIZE bytes."
|
||||
fi
|
||||
fi
|
||||
|
||||
log "Init complete."
|
||||
204
devops/runtime-assets/manifest.yaml
Normal file
204
devops/runtime-assets/manifest.yaml
Normal file
@@ -0,0 +1,204 @@
|
||||
# Runtime Data Assets Manifest
|
||||
# Pinned versions, checksums, and licensing for all runtime data assets.
|
||||
# Used by acquire.sh for download verification and by CI for release gating.
|
||||
#
|
||||
# To update a pinned version:
|
||||
# 1. Change the entry below
|
||||
# 2. Run: ./devops/runtime-assets/acquire.sh --verify
|
||||
# 3. Update NOTICE.md and third-party-licenses/ if license changed
|
||||
|
||||
version: "1.0.0"
|
||||
updated: "2026-02-25"
|
||||
|
||||
assets:
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ML Models
|
||||
# ---------------------------------------------------------------------------
|
||||
onnx-embedding-model:
|
||||
name: "all-MiniLM-L6-v2 (ONNX)"
|
||||
category: "ml-models"
|
||||
required: true
|
||||
degraded_without: true # falls back to character-ngram encoder
|
||||
source: "https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/resolve/main/onnx/model.onnx"
|
||||
license: "Apache-2.0"
|
||||
license_file: "third-party-licenses/all-MiniLM-L6-v2-Apache-2.0.txt"
|
||||
notice_entry: true # listed in NOTICE.md
|
||||
destination: "src/AdvisoryAI/StellaOps.AdvisoryAI/models/all-MiniLM-L6-v2.onnx"
|
||||
runtime_path: "models/all-MiniLM-L6-v2.onnx"
|
||||
env_override: "KnowledgeSearch__OnnxModelPath"
|
||||
size_approx: "80 MB"
|
||||
sha256: "6fd5d72fe4589f189f8ebc006442dbb529bb7ce38f8082112682524616046452"
|
||||
used_by:
|
||||
- "StellaOps.AdvisoryAI (OnnxVectorEncoder)"
|
||||
notes: >
|
||||
Current file in repo is a 120-byte placeholder.
|
||||
Must be replaced with actual weights before production release.
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# JDK (for Ghidra)
|
||||
# ---------------------------------------------------------------------------
|
||||
jdk:
|
||||
name: "Eclipse Temurin JRE 17"
|
||||
category: "binary-analysis"
|
||||
required: false # only if GhidraOptions__Enabled=true
|
||||
source: "https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.13%2B11/OpenJDK17U-jre_x64_linux_hotspot_17.0.13_11.tar.gz"
|
||||
license: "GPL-2.0-with-classpath-exception"
|
||||
destination: "/opt/java/openjdk/"
|
||||
env_override: "GhidraOptions__JavaHome"
|
||||
size_approx: "55 MB"
|
||||
sha256: "PENDING" # TODO: pin after first verified download
|
||||
used_by:
|
||||
- "StellaOps.BinaryIndex.Ghidra (GhidraHeadlessManager)"
|
||||
notes: >
|
||||
GPLv2+CE allows linking without copyleft obligation.
|
||||
Only needed for deployments using Ghidra binary analysis.
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Ghidra
|
||||
# ---------------------------------------------------------------------------
|
||||
ghidra:
|
||||
name: "Ghidra 11.2 PUBLIC"
|
||||
category: "binary-analysis"
|
||||
required: false # only if GhidraOptions__Enabled=true
|
||||
source: "https://github.com/NationalSecurityAgency/ghidra/releases/download/Ghidra_11.2_build/ghidra_11.2_PUBLIC_20241105.zip"
|
||||
license: "Apache-2.0"
|
||||
destination: "/opt/ghidra/"
|
||||
env_override: "GhidraOptions__GhidraHome"
|
||||
size_approx: "1.5 GB"
|
||||
sha256: "PENDING" # TODO: pin after first verified download
|
||||
used_by:
|
||||
- "StellaOps.BinaryIndex.Ghidra (GhidraService, GhidraHeadlessManager)"
|
||||
notes: >
|
||||
Full Ghidra installation with analyzers, BSim, and Version Tracking.
|
||||
Disable with GhidraOptions__Enabled=false to skip entirely.
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Certificates (development defaults — replace for production)
|
||||
# ---------------------------------------------------------------------------
|
||||
dev-certificates:
|
||||
name: "Development TLS certificates"
|
||||
category: "certificates"
|
||||
required: true
|
||||
source: "local" # shipped in etc/authority/keys/
|
||||
destination: "etc/authority/keys/"
|
||||
runtime_path: "/app/etc/certs/"
|
||||
env_override: "Kestrel__Certificates__Default__Path"
|
||||
mount: "ro"
|
||||
used_by:
|
||||
- "All services (Kestrel TLS)"
|
||||
notes: >
|
||||
Dev-only. Replace with production certificates before deployment.
|
||||
See docs/SECURITY_HARDENING_GUIDE.md.
|
||||
|
||||
trust-bundle:
|
||||
name: "CA trust bundle"
|
||||
category: "certificates"
|
||||
required: true
|
||||
source: "local" # shipped in etc/trust-profiles/assets/
|
||||
destination: "etc/trust-profiles/assets/"
|
||||
runtime_path: "/etc/ssl/certs/ca-certificates.crt"
|
||||
mount: "ro"
|
||||
used_by:
|
||||
- "All services (HTTPS verification, attestation)"
|
||||
notes: >
|
||||
Combined CA bundle. For regional deployments include additional
|
||||
trust anchors (russian_trusted_bundle.pem, etc).
|
||||
|
||||
rekor-public-key:
|
||||
name: "Rekor transparency log public key"
|
||||
category: "certificates"
|
||||
required: true # for Sigstore verification
|
||||
source: "local"
|
||||
destination: "etc/trust-profiles/assets/rekor-public.pem"
|
||||
used_by:
|
||||
- "Attestor (Sigstore receipt verification)"
|
||||
- "AirGapTrustStoreIntegration"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Regional crypto configuration
|
||||
# ---------------------------------------------------------------------------
|
||||
crypto-profiles:
|
||||
name: "Regional crypto configuration"
|
||||
category: "configuration"
|
||||
required: false # only for regional compliance
|
||||
source: "local"
|
||||
files:
|
||||
- "etc/appsettings.crypto.international.yaml"
|
||||
- "etc/appsettings.crypto.eu.yaml"
|
||||
- "etc/appsettings.crypto.russia.yaml"
|
||||
- "etc/appsettings.crypto.china.yaml"
|
||||
- "etc/crypto-plugins-manifest.json"
|
||||
used_by:
|
||||
- "All services (crypto provider selection)"
|
||||
notes: >
|
||||
Selected via compose overlay (docker-compose.compliance-*.yml).
|
||||
See devops/compose/README.md.
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Evidence storage
|
||||
# ---------------------------------------------------------------------------
|
||||
evidence-storage:
|
||||
name: "Evidence object store"
|
||||
category: "persistent-storage"
|
||||
required: true
|
||||
type: "volume"
|
||||
runtime_path: "/data/evidence"
|
||||
env_override: "EvidenceLocker__ObjectStore__FileSystem__RootPath"
|
||||
mount: "rw"
|
||||
sizing: "~1 GB per 1000 scans"
|
||||
used_by:
|
||||
- "EvidenceLocker"
|
||||
- "Attestor"
|
||||
notes: >
|
||||
Persistent named volume. Content-addressed, append-only.
|
||||
Include in backup strategy.
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Search seed snapshots (included in dotnet publish — no acquisition needed)
|
||||
# ---------------------------------------------------------------------------
|
||||
search-snapshots:
|
||||
name: "Unified search seed snapshots"
|
||||
category: "search-data"
|
||||
required: true
|
||||
source: "included" # part of dotnet publish output
|
||||
destination: "src/AdvisoryAI/StellaOps.AdvisoryAI/UnifiedSearch/Snapshots/"
|
||||
files:
|
||||
- "findings.snapshot.json"
|
||||
- "vex.snapshot.json"
|
||||
- "policy.snapshot.json"
|
||||
- "graph.snapshot.json"
|
||||
- "scanner.snapshot.json"
|
||||
- "opsmemory.snapshot.json"
|
||||
- "timeline.snapshot.json"
|
||||
used_by:
|
||||
- "UnifiedSearchIndexer (bootstrap on first start)"
|
||||
notes: >
|
||||
Copied to output by .csproj Content items.
|
||||
Live data adapters refresh the index every 300s at runtime.
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Translations (included in Angular build — no acquisition needed)
|
||||
# ---------------------------------------------------------------------------
|
||||
translations:
|
||||
name: "UI translation bundles"
|
||||
category: "i18n"
|
||||
required: true
|
||||
source: "included" # part of Angular dist build
|
||||
destination: "src/Web/StellaOps.Web/src/i18n/"
|
||||
locales:
|
||||
- "en-US"
|
||||
- "de-DE"
|
||||
- "bg-BG"
|
||||
- "ru-RU"
|
||||
- "es-ES"
|
||||
- "fr-FR"
|
||||
- "uk-UA"
|
||||
- "zh-CN"
|
||||
- "zh-TW"
|
||||
used_by:
|
||||
- "Console (Angular frontend)"
|
||||
- "TranslationRegistry (backend override)"
|
||||
notes: >
|
||||
Baked into Angular dist bundle. Backend can override via
|
||||
database-backed ITranslationBundleProvider (priority 100).
|
||||
@@ -95,6 +95,27 @@ StellaOps is a deterministic, offline-first SBOM + VEX platform built as a micro
|
||||
- **Disk:** 50 GB free space (for Docker images, volumes, build artifacts)
|
||||
- **CPU:** 4 cores minimum, 8 cores recommended
|
||||
|
||||
### Runtime Data Assets
|
||||
|
||||
Some services need files that are not produced by `dotnet build` or `npm run build`.
|
||||
Run this after cloning to get full functionality locally:
|
||||
|
||||
```bash
|
||||
# ONNX embedding model for semantic search (~80 MB)
|
||||
./devops/runtime-assets/acquire.sh --models
|
||||
|
||||
# Optional: JDK + Ghidra for binary analysis (~1.6 GB)
|
||||
./devops/runtime-assets/acquire.sh --ghidra
|
||||
|
||||
# Verify everything
|
||||
./devops/runtime-assets/acquire.sh --verify
|
||||
```
|
||||
|
||||
Without the ONNX model, AdvisoryAI unified search falls back to a reduced-quality
|
||||
encoder. All other services work normally. See
|
||||
[`devops/runtime-assets/README.md`](../devops/runtime-assets/README.md) for the
|
||||
full asset inventory and Docker volume mount options.
|
||||
|
||||
---
|
||||
|
||||
## Quick Start
|
||||
|
||||
@@ -8,6 +8,29 @@ How to run Stella Ops from this repository using Docker Compose.
|
||||
- Enough disk for container images plus scan artifacts (SBOMs, logs, caches)
|
||||
- For production-style installs, plan for persistent volumes (PostgreSQL + object storage) and a secrets provider
|
||||
|
||||
## Runtime data assets (read before first deploy)
|
||||
|
||||
Stella Ops services depend on runtime data assets that are **not produced by
|
||||
`dotnet publish`** — ML model weights for semantic search, JDK/Ghidra for binary
|
||||
analysis, certificates, and more. Without them, services start but operate in
|
||||
degraded mode.
|
||||
|
||||
```bash
|
||||
# Download and verify all runtime assets
|
||||
./devops/runtime-assets/acquire.sh --all
|
||||
|
||||
# Or just the embedding model (required for semantic search)
|
||||
./devops/runtime-assets/acquire.sh --models
|
||||
|
||||
# Verify existing assets
|
||||
./devops/runtime-assets/acquire.sh --verify
|
||||
```
|
||||
|
||||
See `devops/runtime-assets/README.md` for the complete inventory, Docker
|
||||
volume mount instructions, and air-gap packaging.
|
||||
|
||||
---
|
||||
|
||||
## Quick path (automated setup scripts)
|
||||
|
||||
The fastest way to get running. The setup scripts validate prerequisites, configure the environment, start infrastructure, build solutions, build Docker images, and launch the full platform.
|
||||
|
||||
@@ -659,3 +659,4 @@ See the quota enforcement flow in
|
||||
* **CERT-Bund snapshots:** `python src/Tools/certbund_offline_snapshot.py --help` (see `docs/modules/concelier/operations/connectors/certbund.md`)
|
||||
* **PostgreSQL operations:** `docs/operations/postgresql-guide.md` - performance tuning, monitoring, backup/restore, and scaling
|
||||
* **Database specification:** `docs/db/SPECIFICATION.md` - schema design, migration patterns, and module ownership
|
||||
* **Runtime data assets:** `devops/runtime-assets/README.md` - ML model weights, JDK/Ghidra, certificates, and other assets that must be provisioned alongside the Offline Kit for full functionality
|
||||
|
||||
@@ -75,6 +75,8 @@ This documentation set is intentionally consolidated and does not maintain compa
|
||||
| Goal | Open this |
|
||||
| --- | --- |
|
||||
| First run and basic workflows | `quickstart.md` |
|
||||
| Installation guide | `INSTALL_GUIDE.md` |
|
||||
| Runtime data assets (ML models, JDK, certs) | `../devops/runtime-assets/README.md` |
|
||||
| Ingest advisories (Concelier + CLI) | `CONCELIER_CLI_QUICKSTART.md` |
|
||||
| Console (Web UI) operator guide | `UI_GUIDE.md` |
|
||||
| Offline / air-gap operations | `OFFLINE_KIT.md` |
|
||||
|
||||
@@ -90,6 +90,28 @@ dotnet run --project src/Cli/StellaOps.Cli/StellaOps.Cli.csproj -- `
|
||||
|
||||
---
|
||||
|
||||
## 1b. Runtime data assets
|
||||
|
||||
Some services need files that `dotnet build` does not produce. For local dev the
|
||||
most impactful one is the ONNX embedding model used by AdvisoryAI for semantic
|
||||
search. Without it the encoder falls back to a reduced-quality projection.
|
||||
|
||||
```bash
|
||||
# Download the embedding model (~80 MB) — run once after cloning
|
||||
./devops/runtime-assets/acquire.sh --models
|
||||
|
||||
# Optional: JDK + Ghidra for binary analysis (~1.6 GB)
|
||||
./devops/runtime-assets/acquire.sh --ghidra
|
||||
|
||||
# Verify all assets
|
||||
./devops/runtime-assets/acquire.sh --verify
|
||||
```
|
||||
|
||||
Full inventory, Docker volume mounts, and air-gap packaging:
|
||||
[`devops/runtime-assets/README.md`](../../devops/runtime-assets/README.md).
|
||||
|
||||
---
|
||||
|
||||
## 2. Hosts file setup
|
||||
|
||||
Each service binds to a unique loopback IP so all can use ports 443/80 without collisions.
|
||||
|
||||
@@ -11,8 +11,8 @@
|
||||
## Dependencies & Concurrency
|
||||
- No upstream dependency.
|
||||
- **Sprint 204 (Attestor) depends on this sprint** — Attestor references Feedser, which moves here. Sprint 204 must start after Sprint 203 source layout consolidation (TASK-203-002) is complete, or Attestor's ProjectReference paths will break.
|
||||
- **Sprint 205 (VexLens) depends on this sprint** — Excititor feeds VexHub. Sprint 205 must start after Sprint 203 source layout consolidation (TASK-203-002) is complete, or VexHub's ProjectReference paths to Excititor will break.
|
||||
- **Sprint 220 (SbomService → Scanner)** — SbomService.WebService references `StellaOps.Excititor.Persistence`. If Sprint 220 runs after this sprint, the SbomService .csproj must point to Excititor's new path under `src/Concelier/`.
|
||||
- **Sprint 205 (VEX consolidation)** is deferred in the current wave. If reactivated later, it depends on Sprint 203 TASK-203-002 completion because VexHub references Excititor.
|
||||
- **Sprint 220 (SbomService absorption)** was canceled (decision: do not merge SbomService in this wave). Keep note only for future reactivation of that sprint.
|
||||
- Coordinate with Sprint 216 for IssuerDirectory client dependency inside Excititor.
|
||||
|
||||
## Documentation Prerequisites
|
||||
@@ -112,4 +112,3 @@ Completion criteria:
|
||||
- Milestone 2: CLI/Web references updated and builds pass.
|
||||
- Milestone 3: docs updated and sprint ready for closure.
|
||||
|
||||
|
||||
|
||||
@@ -1,109 +0,0 @@
|
||||
# Sprint 205 - VEX Domain: VexHub and VexLens Consolidation
|
||||
|
||||
## Topic & Scope
|
||||
- Consolidate VEX aggregation and adjudication into a single VEX domain ownership model.
|
||||
- Move VexHub source ownership under VexLens domain while keeping deployables independent.
|
||||
- Merge VexHub and VexLens EF Core DbContexts into one domain DbContext. PostgreSQL schemas (`vexhub`, `vexlens`) remain separate; this is a code-level consolidation, not a schema merge.
|
||||
- Working directory: `src/VexLens/`.
|
||||
- Cross-module edits explicitly allowed for UI/runtime integration paths (`src/Web/`, `src/Cli/`, `devops/compose/`) as listed in tasks.
|
||||
- Expected evidence: no API regressions, successful DB merge rollout, and stable VEX ingestion/adjudication behavior.
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- **Upstream dependency: Sprint 203 (Concelier absorbs Excititor)** — Excititor feeds VexHub. Sprint 203 moves Excititor into `src/Concelier/`. VexHub's ProjectReferences to Excititor must use post-203 paths, so Sprint 203 TASK-203-002 must be complete before this sprint starts TASK-205-002.
|
||||
- Can run in parallel with non-VEX/non-advisory consolidation sprints.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- Read `docs/modules/vex-hub/architecture.md`.
|
||||
- Read `docs/modules/vex-lens/architecture.md`.
|
||||
- Read `AUDIT_20260225_cli_ui_module_reference_matrix.md`.
|
||||
|
||||
## Delivery Tracker
|
||||
|
||||
### TASK-205-001 - Define VEX domain contract and schema ownership
|
||||
Status: TODO
|
||||
Dependency: none
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Map current VexHub and VexLens DbContext/table ownership.
|
||||
- Document PostgreSQL schema ownership (`vexhub`, `vexlens`) and confirm schemas remain separate.
|
||||
- Confirm zero entity name collisions between VexHubDbContext and VexLensDbContext (9 total entities, no overlaps).
|
||||
- Document the DbContext merge plan: combine into one VEX domain DbContext while keeping schemas separate.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] VEX domain schema ownership documented.
|
||||
- [ ] Zero-collision confirmation recorded.
|
||||
- [ ] DbContext merge plan approved.
|
||||
|
||||
### TASK-205-002 - Consolidate source layout under VEX domain module
|
||||
Status: TODO
|
||||
Dependency: TASK-205-001
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Move VexHub source/projects under `src/VexLens/` domain layout.
|
||||
- Preserve deployable runtime identities and project names.
|
||||
- Update all project and solution references.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] VexHub source relocated under VexLens domain.
|
||||
- [ ] Solution and project references compile.
|
||||
- [ ] Legacy `src/VexHub/` root removed.
|
||||
|
||||
### TASK-205-003 - Merge VEX DbContexts and regenerate compiled models
|
||||
Status: TODO
|
||||
Dependency: TASK-205-001
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Merge VexHubDbContext entities into VexLensDbContext (or create a unified VexDomainDbContext).
|
||||
- PostgreSQL schemas (`vexhub`, `vexlens`) remain separate — this is a DbContext-level consolidation only, not a schema merge. No data migration, no dual-write, no backfill.
|
||||
- Regenerate EF compiled models using `dotnet ef dbcontext optimize`.
|
||||
- Verify `<Compile Remove>` entry for compiled model assembly attributes in `.csproj`.
|
||||
- Run targeted integration tests against the merged context to confirm query behavior unchanged.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] VEX DbContexts merged into a single domain context.
|
||||
- [ ] PostgreSQL schemas remain separate (no data migration).
|
||||
- [ ] EF compiled models regenerated and committed.
|
||||
- [ ] Integration tests pass with merged context.
|
||||
|
||||
### TASK-205-004 - Update Web, infra, build/test, and docs
|
||||
Status: TODO
|
||||
Dependency: TASK-205-002, TASK-205-003
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Validate/update Web integration points:
|
||||
- `src/Web/StellaOps.Web/proxy.conf.json` (`/vexhub`).
|
||||
- `src/Web/StellaOps.Web/src/app/app.config.ts` (`VEX_HUB_API_BASE_URL`, `VEX_LENS_API_BASE_URL`).
|
||||
- `src/Web/StellaOps.Web/src/app/core/config/app-config.service.ts` (`vexhub -> vex`).
|
||||
- Update compose/workflows for moved source paths.
|
||||
- Build/test VEX domain and dependent integration paths.
|
||||
- Update and archive module docs to reflect domain-first model.
|
||||
- Add ADR entry to `docs/modules/vex-lens/architecture.md` documenting the DbContext merge decision.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Web references validated or updated.
|
||||
- [ ] Compose/workflow paths updated.
|
||||
- [ ] Builds/tests pass.
|
||||
- [ ] Docs updated for VEX domain + DbContext merge.
|
||||
- [ ] ADR entry recorded.
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2026-02-25 | Sprint created. | Planning |
|
||||
| 2026-02-25 | CLI/UI module reference audit completed and sprint rework aligned to `AUDIT_20260225_cli_ui_module_reference_matrix.md`. | Planning |
|
||||
| 2026-02-25 | Reworked to domain-first VEX consolidation with explicit VexHub/VexLens DB merge phases. | Planning |
|
||||
| 2026-02-25 | DB merge simplified after deep analysis: 9 entities with zero collisions. DbContext merge only (no schema merge, no dual-write, no backfill). Schemas remain separate. Sprint reduced from 5 tasks to 4. | Planning |
|
||||
|
||||
## Decisions & Risks
|
||||
- Decision: VexHub and VexLens DbContexts merge into one domain DbContext. PostgreSQL schemas remain separate.
|
||||
- Rationale: 9 total entities with zero name collisions makes DbContext consolidation safe and low-risk. All data already in `stellaops_platform`. Schemas stay separate for clean lifecycle boundaries.
|
||||
- Decision: Existing public VEX APIs remain backward compatible.
|
||||
- Risk: adjudication result drift after DbContext merge. Mitigation: targeted integration tests with merged context before deploying.
|
||||
- Note: EF compiled model regeneration is required after DbContext merge (TASK-205-003).
|
||||
|
||||
## Next Checkpoints
|
||||
- Milestone 1: VEX domain contract documented and source layout consolidated.
|
||||
- Milestone 2: DbContext merge complete with compiled models regenerated.
|
||||
- Milestone 3: Web/infra updated and docs finalized.
|
||||
|
||||
|
||||
@@ -1,106 +1,108 @@
|
||||
# Sprint 206 - Policy Domain: Policy and Unknowns Consolidation
|
||||
# Sprint 206 - Policy/Unknowns Boundary Preservation (No Consolidation)
|
||||
|
||||
## Topic & Scope
|
||||
- Consolidate policy decisioning and unknowns handling into one Policy domain.
|
||||
- Move Unknowns source ownership under `src/Policy/` while preserving runtime service contracts.
|
||||
- Remove empty UnknownsDbContext placeholder (zero entities, no tables). PolicyDbContext and its schemas remain unchanged.
|
||||
- Working directory: `src/Policy/`.
|
||||
- Cross-module edits explicitly allowed for dependent consumers and UI/CLI integration (`src/Platform/`, `src/Scanner/`, `src/Cli/`, `src/Web/`, `devops/compose/`) as listed in tasks.
|
||||
- Expected evidence: policy/unknowns APIs remain stable, DB merge executed with reconciliation, and dependent modules continue to build.
|
||||
- Retain `Unknowns` as its own microservice and database owner.
|
||||
- Keep `src/Unknowns/` and `src/Policy/` as separate module roots; no source move, no DbContext merge, no schema merge.
|
||||
- Replace stale assumptions from earlier draft (Unknowns persistence is active and must not be deleted).
|
||||
- Working directory: `src/Unknowns/`.
|
||||
- Cross-module edits explicitly allowed for documentation and integration references (`src/Policy/`, `src/Platform/`, `src/Scanner/`, `src/Cli/`, `src/Web/`, `devops/compose/`, `docs/modules/policy/`, `docs/modules/unknowns/`).
|
||||
- Expected evidence: Unknowns service + DB boundary explicitly documented, compatibility validated, and no consolidation side effects introduced.
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- No upstream dependency.
|
||||
- Can run in parallel with other sprints, except any sprint that attempts to move/delete `src/Unknowns/`.
|
||||
- Coordinate with Sprint 218 for final docs alignment.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- Read `docs/modules/unknowns/architecture.md`.
|
||||
- Read `docs/modules/policy/architecture.md`.
|
||||
- Read Unknowns module docs/AGENTS in current source tree.
|
||||
- Read `src/Unknowns/AGENTS.md` and `src/Policy/AGENTS.md`.
|
||||
- Read `AUDIT_20260225_cli_ui_module_reference_matrix.md`.
|
||||
|
||||
## Delivery Tracker
|
||||
|
||||
### TASK-206-001 - Verify Unknowns has no persistence and document Policy domain schema ownership
|
||||
### TASK-206-001 - Re-baseline Unknowns runtime and persistence reality
|
||||
Status: TODO
|
||||
Dependency: none
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Verify UnknownsDbContext has zero entities (confirmed: empty placeholder with no tables, no data).
|
||||
- Document PolicyDbContext schema ownership and confirm it remains unchanged.
|
||||
- Record the domain boundary decision: Unknowns is absorbed as source only; its empty DbContext placeholder is deleted.
|
||||
- Prove current state with commands and capture output in sprint notes:
|
||||
- `rg -n "class UnknownsDbContext|DbSet<UnknownEntity>" src/Unknowns -g "*.cs"`
|
||||
- `rg -n "ProjectReference Include=.*Unknowns\\.Persistence" src -g "*.csproj"`
|
||||
- `rg -n "Map(Get|Post|Put|Delete|Group)\\(" src/Unknowns -g "Program.cs"`
|
||||
- Confirm Unknowns is an active service boundary with active persistence and consumers.
|
||||
- Explicitly identify any placeholder-only context so it is not confused with the active persistence context.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] UnknownsDbContext zero-entity status confirmed.
|
||||
- [ ] Policy domain schema ownership documented.
|
||||
- [ ] Absorption decision recorded.
|
||||
- [ ] Active Unknowns persistence context confirmed and documented.
|
||||
- [ ] Unknowns runtime service surface confirmed and documented.
|
||||
- [ ] Consumer list captured from project references.
|
||||
|
||||
### TASK-206-002 - Consolidate Unknowns source under Policy domain module
|
||||
### TASK-206-002 - Record decision: keep Unknowns as standalone microservice + DB owner
|
||||
Status: TODO
|
||||
Dependency: TASK-206-001
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Move Unknowns projects into Policy domain source layout.
|
||||
- Preserve runtime service identities and external API contracts.
|
||||
- Update all project/solution references including Platform and Scanner consumers.
|
||||
- Update sprint `Decisions & Risks` and module docs to state:
|
||||
- Unknowns remains independently deployable.
|
||||
- Unknowns retains its own DbContext and schema ownership.
|
||||
- No source consolidation into Policy and no DbContext merge.
|
||||
- Remove/replace any stale wording that implies Unknowns DB deletion.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Unknowns source relocated under Policy domain.
|
||||
- [ ] References compile across Policy, Platform, and Scanner.
|
||||
- [ ] Legacy `src/Unknowns/` root removed.
|
||||
- [ ] No-consolidation decision recorded in sprint.
|
||||
- [ ] Unknowns/Policy architecture docs updated with explicit boundary statement.
|
||||
- [ ] Stale "empty DbContext delete" language removed.
|
||||
|
||||
### TASK-206-003 - Remove empty UnknownsDbContext placeholder
|
||||
### TASK-206-003 - Validate integration contracts without consolidation
|
||||
Status: TODO
|
||||
Dependency: TASK-206-001
|
||||
Dependency: TASK-206-002
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Delete the UnknownsDbContext class and its empty persistence project (zero entities, zero tables).
|
||||
- If Unknowns has any configuration that referenced a separate connection string, redirect to PolicyDbContext's connection or remove.
|
||||
- No data migration needed (zero entities means zero data). No dual-write, no backfill, no cutover.
|
||||
- Validate that Policy/Scanner/Platform integrations continue to reference Unknowns correctly after the decision freeze:
|
||||
- `dotnet build src/Unknowns/StellaOps.Unknowns.WebService/StellaOps.Unknowns.WebService.csproj`
|
||||
- `dotnet build src/Policy/StellaOps.Policy.Engine/StellaOps.Policy.Engine.csproj`
|
||||
- `dotnet build src/Scanner/StellaOps.Scanner.Worker/StellaOps.Scanner.Worker.csproj`
|
||||
- `dotnet build src/Platform/__Libraries/StellaOps.Platform.Database/StellaOps.Platform.Database.csproj`
|
||||
- Verify no accidental path assumptions toward `src/Policy/` ownership of Unknowns.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] UnknownsDbContext deleted.
|
||||
- [ ] No orphaned connection strings or configuration keys.
|
||||
- [ ] Policy domain builds without the removed placeholder.
|
||||
- [ ] Affected projects build successfully.
|
||||
- [ ] No broken ProjectReference paths.
|
||||
- [ ] No accidental consolidation changes required.
|
||||
|
||||
### TASK-206-004 - CLI/Web/infrastructure updates, tests, and docs
|
||||
### TASK-206-004 - CLI/Web/infra reference validation for preserved boundary
|
||||
Status: TODO
|
||||
Dependency: TASK-206-002, TASK-206-003
|
||||
Dependency: TASK-206-003
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Validate/update CLI unknowns references:
|
||||
- `src/Cli/StellaOps.Cli/Commands/UnknownsCommandGroup.cs`.
|
||||
- `src/Cli/StellaOps.Cli/cli-routes.json` compatibility aliases.
|
||||
- Validate/update Web policy/unknowns references:
|
||||
- `src/Web/StellaOps.Web/proxy.conf.json` (`/policyGateway`).
|
||||
- `src/Web/StellaOps.Web/src/app/core/config/app-config.service.ts` policy key mapping.
|
||||
- Validate infra references (`STELLAOPS_POLICY_GATEWAY_URL`, `STELLAOPS_UNKNOWNS_URL`) and compose/build paths.
|
||||
- Build/test affected modules and update docs for domain-first model.
|
||||
- Add ADR entry to `docs/modules/policy/architecture.md` documenting the Unknowns absorption and DbContext deletion.
|
||||
- Validate references stay correct with Unknowns still standalone:
|
||||
- `rg -n "unknowns|Unknowns" src/Cli -g "*.cs"`
|
||||
- `rg -n "unknowns|Unknowns" src/Web/StellaOps.Web/src -g "*.ts"`
|
||||
- `rg -n "STELLAOPS_UNKNOWNS_URL|unknowns" devops -g "*.yml" -g "*.yaml" -g "*.json"`
|
||||
- If any references assume consolidation, create follow-up tasks and keep this sprint `DOING` until addressed.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] CLI and Web references validated or updated.
|
||||
- [ ] Infra references verified.
|
||||
- [ ] Builds/tests pass for affected modules.
|
||||
- [ ] Docs updated and legacy module docs archived.
|
||||
- [ ] ADR entry recorded.
|
||||
- [ ] CLI references validated.
|
||||
- [ ] Web references validated.
|
||||
- [ ] DevOps/env references validated.
|
||||
- [ ] Follow-up tasks created for any mismatches.
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2026-02-25 | Sprint created. | Planning |
|
||||
| 2026-02-25 | CLI/UI module reference audit completed and sprint rework aligned to `AUDIT_20260225_cli_ui_module_reference_matrix.md`. | Planning |
|
||||
| 2026-02-25 | Reworked to domain-first Policy consolidation with Unknowns DB merge phases. | Planning |
|
||||
| 2026-02-25 | DB merge simplified after deep analysis: UnknownsDbContext is an empty placeholder (0 entities, 0 tables). No data migration needed — just delete the empty DbContext. Sprint reduced from 6 tasks to 4. | Planning |
|
||||
| 2026-02-25 | Sprint created (initial consolidation draft). | Planning |
|
||||
| 2026-02-25 | Reworked: Unknowns retained as standalone microservice and DB owner; consolidation and DbContext deletion removed. | Planning |
|
||||
| 2026-02-25 | Validation evidence captured: active Unknowns DbContext with `DbSet<UnknownEntity>` confirmed; representative builds passed for Unknowns.WebService, Policy.Engine, Scanner.Worker, and Platform.Database. | Planning |
|
||||
|
||||
## Decisions & Risks
|
||||
- Decision: Policy and Unknowns are one domain ownership model with compatible runtime APIs.
|
||||
- Rationale: UnknownsDbContext has zero entities — it is an empty placeholder. The "merge" is simply deleting the empty class. PolicyDbContext and its schemas remain unchanged. No data migration, no risk.
|
||||
- Decision: API paths remain backward compatible.
|
||||
- Risk: scanner/platform dependencies break after source move. Mitigation: targeted consumer build/test gates.
|
||||
- Note: PolicyDbContext has compiled models generated by Sprint 219. These are unaffected by the Unknowns source move since UnknownsDbContext has no entities to merge.
|
||||
- Decision: `Unknowns` remains a standalone module/service (`src/Unknowns/`) and is not consolidated into `Policy`.
|
||||
- Decision: `UnknownsDbContext` and Unknowns schema ownership are retained; no DbContext merge and no schema merge.
|
||||
- Rationale: current codebase contains active Unknowns persistence/entities and active runtime consumers; deletion/merge assumptions were stale.
|
||||
- Risk: future duplicate logic across Policy and Unknowns. Mitigation: track explicit API/contract ownership and prefer integration contracts over source moves.
|
||||
- Risk: reintroduction of consolidation assumptions in later sprints. Mitigation: add cross-reference note in Sprint 218 final docs sweep.
|
||||
|
||||
## Next Checkpoints
|
||||
- Milestone 1: Unknowns empty-DbContext status confirmed and source consolidated.
|
||||
- Milestone 2: Empty DbContext deleted and CLI/Web references updated.
|
||||
- Milestone 3: docs refreshed and sprint ready for closure.
|
||||
|
||||
|
||||
- Milestone 1: runtime/persistence re-baseline evidence captured.
|
||||
- Milestone 2: docs and decision records updated to boundary-preserved model.
|
||||
- Milestone 3: integration validation complete and sprint ready for closure.
|
||||
|
||||
@@ -1,101 +1,97 @@
|
||||
# Sprint 209 - Notify: Absorb Notifier Module
|
||||
# Sprint 209 - Notify/Notifier Boundary Preservation (No Consolidation)
|
||||
|
||||
## Topic & Scope
|
||||
- Consolidate `src/Notifier/` (2 csproj: WebService + Worker) into `src/Notify/`.
|
||||
- Notifier is a thin deployment host for Notify libraries. The 2025-11-02 separation decision should be revisited.
|
||||
- Working directory: `src/Notifier/`, `src/Notify/`.
|
||||
- Expected evidence: clean build, all tests pass.
|
||||
- Keep `Notify` and `Notifier` as separate deployable services.
|
||||
- Cancel the absorb plan: no source move, no project merge, no deletion of `src/Notifier/`.
|
||||
- Replace the stale assumption that Notifier is a thin host; the current code contains a substantial independent API surface.
|
||||
- Working directory: `src/Notify/`, `src/Notifier/`.
|
||||
- Cross-module edits explicitly allowed for docs and integration references (`src/Cli/`, `src/Web/`, `devops/compose/`, `docs/modules/notify/`, `docs/modules/notifier/`).
|
||||
- Expected evidence: service boundaries are explicitly documented, builds remain green, and compatibility expectations are clear.
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- No upstream dependencies. Can run in parallel.
|
||||
- No upstream dependency.
|
||||
- Can run in parallel with other consolidation sprints.
|
||||
- Coordinate with Sprint 218 documentation closeout.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- Read `docs/modules/notifier/README.md`.
|
||||
- Read `docs/modules/notify/architecture.md`.
|
||||
- Read `AUDIT_20260225_cli_ui_module_reference_matrix.md`.
|
||||
|
||||
## Delivery Tracker
|
||||
|
||||
### TASK-209-001 - Verify Notifier is purely a host
|
||||
### TASK-209-001 - Baseline current Notify/Notifier runtime boundaries
|
||||
Status: TODO
|
||||
Dependency: none
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Confirm `StellaOps.Notifier.WebService` only references Notify libraries (no unique logic).
|
||||
- Confirm `StellaOps.Notifier.Worker` only references Notify libraries.
|
||||
- Check for any Notifier-specific configuration, middleware, or endpoints not in Notify.
|
||||
- Confirm zero external consumers of Notifier packages.
|
||||
- Capture route and complexity evidence for both services:
|
||||
- `src/Notifier/.../Program.cs` line count and mapped routes.
|
||||
- `src/Notify/.../Program.cs` line count and mapped routes.
|
||||
- Document endpoint overlap and endpoint gaps (Notifier-only and Notify-only).
|
||||
- Confirm external project references to `Notifier` are zero and record result.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Notifier confirmed as pure host.
|
||||
- [ ] No unique logic identified.
|
||||
- [ ] Notify/Notifier route matrix documented.
|
||||
- [ ] Complexity and endpoint-gap evidence recorded.
|
||||
- [ ] Consumer reference scan result recorded.
|
||||
|
||||
### TASK-209-002 - Move Notifier into Notify
|
||||
### TASK-209-002 - Record decision to keep split deployment model
|
||||
Status: TODO
|
||||
Dependency: TASK-209-001
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Move `src/Notifier/StellaOps.Notifier.WebService/` → `src/Notify/StellaOps.Notifier.WebService/`.
|
||||
- Move `src/Notifier/StellaOps.Notifier.Worker/` → `src/Notify/StellaOps.Notifier.Worker/`.
|
||||
- Move tests → `src/Notify/__Tests/`.
|
||||
- Update `ProjectReference` paths (now local to Notify).
|
||||
- Add to Notify solution.
|
||||
- Remove `src/Notifier/`.
|
||||
- Update root solution.
|
||||
- Update sprint notes and module docs to state:
|
||||
- Notify and Notifier remain separate services.
|
||||
- No source consolidation and no project relocation.
|
||||
- Any future convergence requires explicit API parity plan first.
|
||||
- Remove stale wording that claims Notifier is purely a host.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Both projects moved.
|
||||
- [ ] Notify solution includes Notifier.
|
||||
- [ ] Old directory removed.
|
||||
- [ ] No-consolidation decision recorded in sprint.
|
||||
- [ ] Notify/notifier docs updated with explicit split rationale.
|
||||
- [ ] Stale thin-host assumptions removed.
|
||||
|
||||
### TASK-209-003 - Update Docker, CI, build, and test
|
||||
### TASK-209-003 - Validate builds and key contracts without consolidation
|
||||
Status: TODO
|
||||
Dependency: TASK-209-002
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Update `devops/compose/` for Notifier services.
|
||||
- Update `.gitea/workflows/`.
|
||||
- `dotnet build src/Notify/` — must succeed.
|
||||
- Run all Notify + Notifier tests.
|
||||
- `dotnet build StellaOps.sln`.
|
||||
- Build both services and representative consumers:
|
||||
- `dotnet build src/Notifier/StellaOps.Notifier/StellaOps.Notifier.WebService/StellaOps.Notifier.WebService.csproj`
|
||||
- `dotnet build src/Notify/StellaOps.Notify.WebService/StellaOps.Notify.WebService.csproj`
|
||||
- `dotnet build src/Cli/StellaOps.Cli/StellaOps.Cli.csproj`
|
||||
- Validate that current API base-path expectations remain unchanged.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Docker and CI updated.
|
||||
- [ ] All builds and tests pass.
|
||||
- [ ] Builds pass for Notify, Notifier, and representative consumer(s).
|
||||
- [ ] API compatibility assumptions documented.
|
||||
|
||||
### TASK-209-004 - Update documentation and CLI/Web references
|
||||
### TASK-209-004 - Finalize docs and follow-up backlog items
|
||||
Status: TODO
|
||||
Dependency: TASK-209-003
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Move `docs/modules/notifier/` to `docs-archived/modules/notifier/`.
|
||||
- Update Notify architecture doc to mention the Notifier host.
|
||||
- Update `docs/INDEX.md`, `CLAUDE.md`.
|
||||
- Update Web notifier integration points:
|
||||
- `src/Web/StellaOps.Web/src/app/app.config.ts` `NOTIFIER_API_BASE_URL` provider (`/api/v1/notifier`).
|
||||
- `src/Web/StellaOps.Web/src/app/core/api/notifier.client.ts` default base URL fallback.
|
||||
- `src/Web/StellaOps.Web/src/app/features/admin-notifications/**` imports using notifier client/models.
|
||||
- Update CLI notifier references:
|
||||
- `src/Cli/StellaOps.Cli/Commands/ConfigCatalog.cs` notifier configuration keys.
|
||||
- Any notifier command/help references that include module paths.
|
||||
- Preserve `/api/v1/notifier` contract.
|
||||
- Update `docs/INDEX.md` and sprint cross-references to reflect canceled consolidation.
|
||||
- Add follow-up backlog item(s) only if explicit parity/convergence work is still desired.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Notifier docs archived.
|
||||
- [ ] Notify architecture updated.
|
||||
- [ ] Web notifier references validated/updated.
|
||||
- [ ] CLI notifier references validated/updated.
|
||||
- [ ] Notifier API path compatibility verified.
|
||||
- [ ] Documentation index updated.
|
||||
- [ ] Follow-up items created only where actionable.
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2026-02-25 | Sprint created. | Planning |
|
||||
| 2026-02-25 | CLI/UI module reference audit completed and sprint rework aligned to `AUDIT_20260225_cli_ui_module_reference_matrix.md`. | Planning |
|
||||
| 2026-02-25 | Sprint created (initial absorb draft). | Planning |
|
||||
| 2026-02-25 | Reworked: consolidation canceled; Notify/Notifier remain separate services. | Planning |
|
||||
| 2026-02-25 | Discovery evidence captured: Notifier Program.cs 3271 lines / 85 map calls; Notify Program.cs 1585 lines / 30 map calls; route sets are not equivalent. | Planning |
|
||||
|
||||
## Decisions & Risks
|
||||
- Decision: Revisit the 2025-11-02 separation decision. If offline kit parity still requires separate packaging, document that in Notify architecture and keep containers separate.
|
||||
- Decision: keep Notify and Notifier unconsolidated in this consolidation wave.
|
||||
- Rationale: current endpoint and logic divergence means absorption would be a feature-migration project, not a safe organizational move.
|
||||
- Risk: dual-service ownership overhead remains. Mitigation: maintain explicit boundary docs and revisit only with approved parity roadmap.
|
||||
|
||||
## Next Checkpoints
|
||||
- Estimate: 1 session (2 projects only).
|
||||
|
||||
|
||||
|
||||
- Milestone 1: boundary baseline documented.
|
||||
- Milestone 2: split-deployment decision reflected in docs.
|
||||
- Milestone 3: compatibility validation complete and sprint ready for closure.
|
||||
|
||||
@@ -1,111 +1,96 @@
|
||||
# Sprint 211 - Offline Distribution Domain: ExportCenter, Mirror, and AirGap
|
||||
# Sprint 211 - Offline Distribution Boundary Preservation (No Consolidation)
|
||||
|
||||
## Topic & Scope
|
||||
- Consolidate offline distribution capabilities into one domain ownership model.
|
||||
- Move Mirror and AirGap source ownership under `src/ExportCenter/` while preserving runtime identities.
|
||||
- Merge AirGap and ExportCenter EF Core DbContexts into one domain DbContext. PostgreSQL schemas (`export_center`, `airgap`) remain separate; this is a code-level consolidation, not a schema merge.
|
||||
- Working directory: `src/ExportCenter/`.
|
||||
- Cross-module edits explicitly allowed for offline flow integrations (`src/Cli/`, `src/Web/`, `devops/compose/`) as listed in tasks.
|
||||
- Expected evidence: offline workflows remain functional (`mirror create`, `airgap import`), DB merge reconciliation completed, and no API regressions.
|
||||
- Keep `ExportCenter`, `AirGap`, and `Mirror` as separate module roots and service boundaries.
|
||||
- Cancel merge plan: no source move under `src/ExportCenter/`, no DbContext merge, no schema merge.
|
||||
- Preserve existing database ownership: `ExportCenterDbContext` and `AirGapDbContext` stay separate.
|
||||
- Working directory: `src/ExportCenter/`, `src/AirGap/`, `src/Mirror/`.
|
||||
- Cross-module edits explicitly allowed for docs/integration checks (`src/Cli/`, `src/Web/`, `devops/compose/`, `docs/modules/export-center/`, `docs/modules/airgap/`).
|
||||
- Expected evidence: boundaries are explicit, key builds pass, and offline workflows remain stable.
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- No upstream dependency.
|
||||
- Coordinate with Sprint 203 if shared export/advisory payload contracts change.
|
||||
- Can run in parallel with other consolidation sprints.
|
||||
- Coordinate with Sprint 218 documentation closeout.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- Read `docs/modules/export-center/architecture.md`.
|
||||
- Read `docs/modules/airgap/architecture.md`.
|
||||
- Read module AGENTS for `Mirror` and `AirGap`.
|
||||
- Read `AUDIT_20260225_cli_ui_module_reference_matrix.md`.
|
||||
|
||||
## Delivery Tracker
|
||||
|
||||
### TASK-211-001 - Define offline distribution domain schema ownership and DbContext merge plan
|
||||
### TASK-211-001 - Baseline current offline boundary and coupling
|
||||
Status: TODO
|
||||
Dependency: none
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Map current ExportCenterDbContext and AirGapDbContext ownership and confirm zero entity name collisions (7 total entities).
|
||||
- Document PostgreSQL schema ownership (`export_center`, `airgap`) and confirm schemas remain separate.
|
||||
- Identify Mirror data artifacts that stay file-based versus persisted.
|
||||
- Document the DbContext merge plan: combine into one offline domain DbContext while keeping schemas separate.
|
||||
- Record current DbContext ownership and entity sets for AirGap and ExportCenter.
|
||||
- Record external consumer coupling (ProjectReference counts and key consumers).
|
||||
- Capture evidence that `AirGap` is cross-cutting and `ExportCenter` is narrower in dependency footprint.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Offline domain schema ownership documented.
|
||||
- [ ] Zero-collision confirmation recorded.
|
||||
- [ ] DbContext merge plan approved.
|
||||
- [ ] File-based versus persisted boundary documented.
|
||||
- [ ] DbContext ownership map documented.
|
||||
- [ ] Coupling evidence documented.
|
||||
- [ ] Boundary rationale evidence recorded in sprint notes.
|
||||
|
||||
### TASK-211-002 - Consolidate source layout under ExportCenter domain
|
||||
### TASK-211-002 - Record no-consolidation/no-merge decision
|
||||
Status: TODO
|
||||
Dependency: TASK-211-001
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Move Mirror and AirGap source trees under `src/ExportCenter/` domain structure.
|
||||
- Preserve project names and deployable runtime identities.
|
||||
- Update project/solution references and remove legacy top-level roots.
|
||||
- Update sprint and module docs to state:
|
||||
- no source consolidation,
|
||||
- no DbContext merge,
|
||||
- no schema merge.
|
||||
- Remove stale wording about unified offline domain DbContext.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Source trees relocated under ExportCenter domain.
|
||||
- [ ] References compile after move.
|
||||
- [ ] Legacy roots removed.
|
||||
- [ ] No-consolidation decision recorded.
|
||||
- [ ] No-merge decision recorded.
|
||||
- [ ] Stale merge wording removed.
|
||||
|
||||
### TASK-211-003 - Merge offline distribution DbContexts and regenerate compiled models
|
||||
### TASK-211-003 - Validate critical build paths without consolidation
|
||||
Status: TODO
|
||||
Dependency: TASK-211-001
|
||||
Dependency: TASK-211-002
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Merge AirGapDbContext entities into ExportCenterDbContext (or create a unified OfflineDomainDbContext).
|
||||
- PostgreSQL schemas (`export_center`, `airgap`) remain separate — this is a DbContext-level consolidation only, not a schema merge. No data migration, no dual-write, no backfill.
|
||||
- Regenerate EF compiled models using `dotnet ef dbcontext optimize`.
|
||||
- Verify `<Compile Remove>` entry for compiled model assembly attributes in `.csproj`.
|
||||
- Run targeted integration tests against the merged context to confirm query behavior unchanged.
|
||||
- Run representative builds:
|
||||
- `dotnet build src/ExportCenter/StellaOps.ExportCenter/StellaOps.ExportCenter.WebService/StellaOps.ExportCenter.WebService.csproj`
|
||||
- `dotnet build src/AirGap/StellaOps.AirGap.Controller/StellaOps.AirGap.Controller.csproj`
|
||||
- `dotnet build src/Cli/StellaOps.Cli/StellaOps.Cli.csproj`
|
||||
- Confirm that the decision freeze introduces no integration breaks.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Offline domain DbContexts merged into a single domain context.
|
||||
- [ ] PostgreSQL schemas remain separate (no data migration).
|
||||
- [ ] EF compiled models regenerated and committed.
|
||||
- [ ] Integration tests pass with merged context.
|
||||
- [ ] Representative builds pass.
|
||||
- [ ] No integration regressions identified from boundary-preserved model.
|
||||
|
||||
### TASK-211-004 - CLI/Web/infrastructure updates, tests, and docs
|
||||
### TASK-211-004 - Document deferred convergence criteria (if ever revisited)
|
||||
Status: TODO
|
||||
Dependency: TASK-211-002, TASK-211-003
|
||||
Dependency: TASK-211-003
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Validate/update CLI references:
|
||||
- AirGap project references in `src/Cli/StellaOps.Cli/StellaOps.Cli.csproj`.
|
||||
- command handlers for `mirror create` and `airgap import`.
|
||||
- Validate/update Web references for feed/airgap routes and clients.
|
||||
- Update compose/workflow paths for moved source trees.
|
||||
- Build/test affected modules and update docs for domain-first + DbContext merge model.
|
||||
- Add ADR entry to `docs/modules/export-center/architecture.md` documenting the DbContext merge decision.
|
||||
- Add explicit criteria required before any future merge attempt (for example: reduced AirGap external coupling, approved rollback plan, measured performance gain target).
|
||||
- If no convergence objective is active, record `deferred` and close sprint.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] CLI and Web references validated or updated.
|
||||
- [ ] Compose/workflow paths updated.
|
||||
- [ ] Builds/tests pass.
|
||||
- [ ] Documentation updated and legacy standalone docs archived.
|
||||
- [ ] ADR entry recorded.
|
||||
- [ ] Future-convergence entry criteria documented.
|
||||
- [ ] Deferred state explicitly recorded when applicable.
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2026-02-25 | Sprint created. | Planning |
|
||||
| 2026-02-25 | CLI/UI module reference audit completed and sprint rework aligned to `AUDIT_20260225_cli_ui_module_reference_matrix.md`. | Planning |
|
||||
| 2026-02-25 | Reworked to offline-domain consolidation with explicit AirGap/ExportCenter DB merge phases. | Planning |
|
||||
| 2026-02-25 | DB merge simplified after deep analysis: 7 entities with zero collisions. DbContext merge only (no schema merge, no dual-write, no backfill). Schemas remain separate. Sprint reduced from 5 tasks to 4. | Planning |
|
||||
| 2026-02-25 | Sprint created (initial consolidation draft). | Planning |
|
||||
| 2026-02-25 | Reworked: consolidation canceled; AirGap/ExportCenter/Mirror boundaries preserved. | Planning |
|
||||
| 2026-02-25 | Discovery evidence captured: AirGap has materially broader cross-module coupling than ExportCenter; merge risk exceeds benefit for current wave. | Planning |
|
||||
|
||||
## Decisions & Risks
|
||||
- Decision: AirGap and ExportCenter DbContexts merge into one domain DbContext. PostgreSQL schemas remain separate.
|
||||
- Rationale: 7 total entities with zero name collisions makes DbContext consolidation safe and low-risk. All data already in `stellaops_platform`. Schemas stay separate for clean lifecycle boundaries.
|
||||
- Decision: Runtime API paths remain backward compatible.
|
||||
- Risk: offline bundle integrity regressions. Mitigation: targeted integration tests with merged context before deploying.
|
||||
- Risk: offline kit identity drift. Mitigation: preserve project/package identities and validate CLI workflows.
|
||||
- Note: ExportCenterDbContext has compiled models generated by Sprint 219. EF compiled model regeneration is required after DbContext merge (TASK-211-003).
|
||||
- Decision: keep AirGap and ExportCenter unconsolidated in this consolidation wave.
|
||||
- Decision: keep separate DbContexts and schema ownership.
|
||||
- Rationale: asymmetric coupling and blast radius make DbContext/source merge a poor tradeoff now.
|
||||
- Risk: duplicated offline-domain concepts remain across modules. Mitigation: define explicit contracts and revisit only under measured business need.
|
||||
|
||||
## Next Checkpoints
|
||||
- Milestone 1: offline domain contract documented and source layout consolidated.
|
||||
- Milestone 2: DbContext merge complete with compiled models regenerated.
|
||||
- Milestone 3: CLI/Web/infra updated and docs finalized.
|
||||
|
||||
|
||||
- Milestone 1: boundary/coupling baseline documented.
|
||||
- Milestone 2: no-merge decision propagated to docs.
|
||||
- Milestone 3: build validation complete and sprint ready for closure.
|
||||
|
||||
@@ -1,128 +0,0 @@
|
||||
# Sprint 215 - Signals: Absorb RuntimeInstrumentation Module
|
||||
|
||||
## Topic & Scope
|
||||
- Consolidate `src/RuntimeInstrumentation/` into `src/Signals/`.
|
||||
- RuntimeInstrumentation provides eBPF/Tetragon event adapters that feed into Signals. Same domain: runtime observability.
|
||||
- Critical finding: RuntimeInstrumentation has NO .csproj files. Source code exists (12 .cs files across 3 directories) but lacks build integration.
|
||||
- Working directory: `src/RuntimeInstrumentation/`, `src/Signals/`.
|
||||
- Expected evidence: clean build with RuntimeInstrumentation integrated, all tests pass.
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- No upstream dependencies. Can run in parallel.
|
||||
- Signals is heavily consumed (10+ external consumers: Platform, Policy, Scanner, Findings, etc.). Changes must not break Signals API surface.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- Read `docs/modules/signals/architecture.md`.
|
||||
- Read `docs/modules/runtime-instrumentation/architecture.md`.
|
||||
- Read `src/Signals/AGENTS.md`.
|
||||
|
||||
## Delivery Tracker
|
||||
|
||||
### TASK-215-001 - Audit RuntimeInstrumentation source code
|
||||
Status: TODO
|
||||
Dependency: none
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- RuntimeInstrumentation has 3 subdirectories with no .csproj files:
|
||||
- `StellaOps.Agent.Tetragon/` — 2 .cs files (TetragonAgentCapability, TetragonGrpcClient).
|
||||
- `StellaOps.RuntimeInstrumentation.Tetragon/` — 5 .cs files (EventAdapter, FrameCanonicalizer, HotSymbolBridge, PrivacyFilter, WitnessBridge).
|
||||
- `StellaOps.RuntimeInstrumentation.Tetragon.Tests/` — 5 .cs files (benchmarks + 4 test classes).
|
||||
- Confirm zero external consumers (expected: no .csproj = no ProjectReference possible).
|
||||
- Read each .cs file to understand:
|
||||
- What namespaces are used.
|
||||
- What Signals types are referenced (if any).
|
||||
- Whether the code is complete/compilable or stub/WIP.
|
||||
- Determine if RuntimeInstrumentation code should become:
|
||||
- (a) New .csproj projects under Signals, or
|
||||
- (b) Merged directly into existing Signals projects (StellaOps.Signals.Ebpf already exists).
|
||||
- Check if `StellaOps.Signals.Ebpf` already contains some of this logic (potential duplication).
|
||||
|
||||
Completion criteria:
|
||||
- [ ] All 12 source files reviewed
|
||||
- [ ] Integration strategy decided (new project vs merge into Ebpf)
|
||||
- [ ] Duplication with Signals.Ebpf documented
|
||||
|
||||
### TASK-215-002 - Move RuntimeInstrumentation into Signals
|
||||
Status: TODO
|
||||
Dependency: TASK-215-001
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Based on TASK-215-001 decision:
|
||||
- **If new projects**: Create .csproj files under `src/Signals/__Libraries/StellaOps.RuntimeInstrumentation.Tetragon/` and `src/Signals/__Libraries/StellaOps.Agent.Tetragon/`.
|
||||
- **If merge into Ebpf**: Move source files into `src/Signals/__Libraries/StellaOps.Signals.Ebpf/` with appropriate namespace adjustments.
|
||||
- Move test files to `src/Signals/__Tests/StellaOps.RuntimeInstrumentation.Tetragon.Tests/` (or merge into `StellaOps.Signals.Ebpf.Tests`).
|
||||
- Add new/modified projects to `StellaOps.Signals.sln`.
|
||||
- Remove `src/RuntimeInstrumentation/`.
|
||||
- Update root solution file.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] All source files moved/integrated
|
||||
- [ ] Projects added to Signals solution
|
||||
- [ ] Old directory removed
|
||||
|
||||
### TASK-215-003 - Build and test
|
||||
Status: TODO
|
||||
Dependency: TASK-215-002
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- `dotnet build src/Signals/StellaOps.Signals.sln` — must succeed.
|
||||
- Run all Signals tests: `dotnet test src/Signals/StellaOps.Signals.sln`.
|
||||
- Run RuntimeInstrumentation tests (now under Signals).
|
||||
- Verify no regressions in Signals API surface (10+ external consumers depend on it).
|
||||
- Build root solution: `dotnet build StellaOps.sln`.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Signals solution builds successfully
|
||||
- [ ] All Signals tests pass
|
||||
- [ ] RuntimeInstrumentation tests pass
|
||||
- [ ] Root solution builds successfully
|
||||
|
||||
### TASK-215-004 - Update Docker, CI, and infrastructure
|
||||
Status: TODO
|
||||
Dependency: TASK-215-003
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- RuntimeInstrumentation has no Docker service — no compose changes needed.
|
||||
- Search `.gitea/workflows/` for RuntimeInstrumentation references. Update if found.
|
||||
- Search `devops/` for RuntimeInstrumentation references. Update if found.
|
||||
- Verify Signals Docker service still works (`stellaops/signals:dev`).
|
||||
|
||||
Completion criteria:
|
||||
- [ ] CI references updated (if any exist)
|
||||
- [ ] Signals Docker service unaffected
|
||||
|
||||
### TASK-215-005 - Update documentation and CLI/Web audits
|
||||
Status: TODO
|
||||
Dependency: TASK-215-004
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Archive `docs/modules/runtime-instrumentation/` to `docs-archived/modules/runtime-instrumentation/`.
|
||||
- Add "RuntimeInstrumentation (eBPF/Tetragon Adapters)" section to Signals architecture doc.
|
||||
- Update `docs/INDEX.md`, `CLAUDE.md` section 1.4.
|
||||
- Update path references.
|
||||
- Audit `src/Cli/` and `src/Web/` for direct `RuntimeInstrumentation` references. Current audit expectation: none.
|
||||
- Record explicit `none found` evidence (or updated files if found).
|
||||
- Update `src/Signals/AGENTS.md` to document absorbed RuntimeInstrumentation components.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Docs archived and Signals architecture updated.
|
||||
- [ ] CLI/Web audit result recorded.
|
||||
- [ ] All references updated.
|
||||
- [ ] Signals AGENTS.md updated.
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2026-02-25 | Sprint created. | Planning |
|
||||
| 2026-02-25 | CLI/UI module reference audit completed and sprint rework aligned to `AUDIT_20260225_cli_ui_module_reference_matrix.md`. | Planning |
|
||||
|
||||
## Decisions & Risks
|
||||
- Decision: Integration strategy (new .csproj vs merge into Ebpf) deferred to TASK-215-001 audit.
|
||||
- Risk: RuntimeInstrumentation has no .csproj files — the source may be incomplete/WIP. If the code is not compilable, document the gaps and create follow-up tasks.
|
||||
- Risk: Signals has 10+ external consumers. Any API surface changes require careful coordination.
|
||||
- Note: `StellaOps.Signals.Ebpf` already exists under `src/Signals/__Libraries/`. Potential overlap with RuntimeInstrumentation.Tetragon must be resolved.
|
||||
|
||||
## Next Checkpoints
|
||||
- Estimate: 1 session.
|
||||
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- No hard upstream dependency, but **coordinate with Sprint 203** — IssuerDirectory.Client is consumed by Excititor. If Sprint 203 has already moved Excititor into `src/Concelier/`, this sprint's TASK-216-002 must update the IssuerDirectory.Client ProjectReference path in Excititor's new location under Concelier. If Sprint 203 has not yet run, this sprint's consumer path updates will target the original `src/Excititor/` location (and Sprint 203 will later update the path during its own move).
|
||||
- Coordinate with Sprint 205 (VEX trust ingest) for client compatibility.
|
||||
- Sprint 205 is deferred in the current wave; no active dependency.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- Read `docs/modules/authority/architecture.md`.
|
||||
@@ -105,4 +105,3 @@ Completion criteria:
|
||||
- Milestone 2: infrastructure validated and builds pass.
|
||||
- Milestone 3: docs and ADR updated, sprint ready for closure.
|
||||
|
||||
|
||||
|
||||
@@ -1,122 +1,89 @@
|
||||
# Sprint 218 - DOCS: Domain-First Consolidation and DB Merge Finalization
|
||||
# Sprint 218 - DOCS: Consolidation Decision Finalization
|
||||
|
||||
## Topic & Scope
|
||||
- Final documentation sweep after consolidation sprints are executed in domain-first mode.
|
||||
- Align architecture docs to domain ownership model instead of module-per-service wording.
|
||||
- Publish consolidated DB merge outcomes, compatibility windows, and rollback states per domain sprint.
|
||||
- Final documentation sweep after consolidation-plan rework and boundary decisions.
|
||||
- Publish final outcomes per sprint: proceed, deferred, canceled, or boundary-preserved.
|
||||
- Remove stale claims about DbContext/service merges that were rejected.
|
||||
- Working directory: `docs/`.
|
||||
- Cross-module edits explicitly allowed for root documentation files (`CLAUDE.md`) and sprint evidence files in `docs/implplan/`.
|
||||
- Expected evidence: no stale module-path guidance, consistent domain map, and DB merge status traceable from docs.
|
||||
- Cross-module edits explicitly allowed for root documentation files and sprint evidence files under `docs/implplan/`.
|
||||
- Expected evidence: active docs reflect actual approved work; canceled/no-op sprint assumptions are removed.
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- Depends on all relevant consolidation sprints being DONE (200-217, 219-221).
|
||||
- Must run after DB cutover checkpoints in domain sprints (203, 204, 205, 206, 208, 211, 216).
|
||||
- Must run after Sprint 220 (SbomService → Scanner) source move is complete.
|
||||
- Must run after Sprint 221 (Orchestrator domain rename) is complete.
|
||||
- Depends on active implementation-affecting consolidation sprints being completed or explicitly canceled.
|
||||
- Must run after Sprint 221 rename execution.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- Read `docs/INDEX.md`.
|
||||
- Read `CLAUDE.md` section 1.4.
|
||||
- Read `docs/07_HIGH_LEVEL_ARCHITECTURE.md`.
|
||||
- Read `AUDIT_20260225_cli_ui_module_reference_matrix.md`.
|
||||
- Read execution logs of domain sprints for DB merge outcomes.
|
||||
- Read Sprint 220 (SbomService → Scanner) execution log for source move outcome.
|
||||
- Read execution logs of active consolidation sprints.
|
||||
|
||||
## Delivery Tracker
|
||||
|
||||
### TASK-218-001 - Audit docs against domain-first source structure
|
||||
### TASK-218-001 - Publish consolidation decision ledger
|
||||
Status: TODO
|
||||
Dependency: Consolidation sprints DONE
|
||||
Dependency: none
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Audit `docs/modules/` against actual `src/` domain ownership after consolidation.
|
||||
- Confirm standalone module docs were replaced or archived per domain decisions.
|
||||
- Verify active docs no longer describe consolidation as only folder movement where DB merge was executed.
|
||||
- Create/update a decision ledger that marks each consolidation sprint as one of:
|
||||
- Proceed (implementation)
|
||||
- Boundary-preserved (no consolidation)
|
||||
- Deferred (future wave)
|
||||
- Canceled/no-op (removed from active plan)
|
||||
- Link each row to sprint file evidence.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Active module docs match current domain ownership.
|
||||
- [ ] Archived standalone module docs are in `docs-archived/modules/`.
|
||||
- [ ] No stale module-structure claims remain.
|
||||
- [ ] Decision ledger published.
|
||||
- [ ] Every impacted sprint has explicit state.
|
||||
|
||||
### TASK-218-002 - Publish domain DB merge ledger and outcomes
|
||||
### TASK-218-002 - Remove stale merge language from active docs
|
||||
Status: TODO
|
||||
Dependency: TASK-218-001
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Create/refresh a documentation section that records DB merge status per domain sprint:
|
||||
- Contract defined.
|
||||
- Expand migration complete.
|
||||
- Dual-write complete.
|
||||
- Backfill reconciliation complete.
|
||||
- Cutover complete.
|
||||
- Rollback status.
|
||||
- Link each status row to sprint execution log evidence.
|
||||
- Remove claims that DbContext merges were executed where they are now rejected/deferred.
|
||||
- Ensure docs describe preserved boundaries for Unknowns, Notify/Notifier, AirGap/ExportCenter, and SbomService.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Domain DB merge ledger published.
|
||||
- [ ] Each domain sprint has linked evidence.
|
||||
- [ ] Rollback window state documented per domain.
|
||||
- [ ] Stale merge claims removed.
|
||||
- [ ] Boundary-preserved outcomes reflected in docs.
|
||||
|
||||
### TASK-218-003 - Update CLAUDE.md and architecture docs to domain paradigm
|
||||
### TASK-218-003 - Align indexes and architecture maps with approved scope
|
||||
Status: TODO
|
||||
Dependency: TASK-218-001, TASK-218-002
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Update root module-location guidance to domain-first language.
|
||||
- Update high-level architecture docs to show domain groupings and bounded runtime services.
|
||||
- Update module count claims to match post-consolidation reality.
|
||||
- Update `docs/INDEX.md` and architecture references so they match approved sprint outcomes.
|
||||
- Ensure renamed orchestration domain references remain consistent with Sprint 221 execution.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] CLAUDE.md reflects domain-first structure.
|
||||
- [ ] Architecture docs reflect domain ownership and service boundaries.
|
||||
- [ ] Module/domain count claims are accurate.
|
||||
- [ ] Index and architecture references aligned.
|
||||
- [ ] No stale references to canceled/no-op consolidations.
|
||||
|
||||
### TASK-218-004 - Validate CLI/Web and infra documentation references
|
||||
### TASK-218-004 - Final documentation quality gate
|
||||
Status: TODO
|
||||
Dependency: TASK-218-001
|
||||
Dependency: TASK-218-003
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Re-run docs cross-reference checks against authoritative config surfaces:
|
||||
- CLI project/route files.
|
||||
- Web proxy/config files.
|
||||
- compose and launch settings env vars.
|
||||
- Ensure docs reference current domain endpoints and compatibility aliases.
|
||||
- Run final docs cross-reference checks.
|
||||
- Record residual risks and deferred items.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] CLI/Web doc references validated.
|
||||
- [ ] Infra env var references validated.
|
||||
- [ ] Compatibility aliases documented where still required.
|
||||
|
||||
### TASK-218-005 - Final cross-reference and quality gate
|
||||
Status: TODO
|
||||
Dependency: TASK-218-002, TASK-218-003, TASK-218-004
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Run repo-wide doc checks for stale absorbed-module paths and outdated architecture claims.
|
||||
- Verify all links and references in updated docs are valid.
|
||||
- Add final execution log summary with open risks (if any) and remaining deprecation deadlines.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] No stale path references remain in active docs.
|
||||
- [ ] All updated links resolve.
|
||||
- [ ] Final summary and residual risks recorded.
|
||||
- [ ] Cross-reference checks completed.
|
||||
- [ ] Residual risks/deferred items documented.
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2026-02-25 | Sprint created. | Planning |
|
||||
| 2026-02-25 | CLI/UI module reference audit completed and sprint rework aligned to `AUDIT_20260225_cli_ui_module_reference_matrix.md`. | Planning |
|
||||
| 2026-02-25 | Reworked to domain-first documentation closeout with DB merge ledger requirements. | Planning |
|
||||
| 2026-02-25 | DB merge verdicts finalized: REJECT (source-only) for Advisory/203, Trust/204, Orchestration/208, Identity/216. PROCEED (DbContext merge) for VEX/205, Offline/211. PROCEED (delete empty placeholder) for Policy/206. TASK-218-002 DB merge ledger reflects these outcomes. | Planning |
|
||||
| 2026-02-25 | Reworked to decision-finalization closeout after consolidation scope changes. | Planning |
|
||||
| 2026-02-25 | Updated outcomes: 206 boundary-preserved; 209 boundary-preserved; 211 boundary-preserved; 205 deferred/no-op; 215 no-op in consolidation wave; 220 canceled per decision not to merge SbomService; 221 proceed. | Planning |
|
||||
|
||||
## Decisions & Risks
|
||||
- Decision: this sprint finalizes domain-first architecture language and DB merge traceability.
|
||||
- Risk: if any domain sprint lacks reconciliation evidence, docs may overstate completion. Mitigation: gate documentation closure on evidence links.
|
||||
- Decision: absorbed module docs remain archived, not deleted, for audit history.
|
||||
- Decision: final docs must mirror approved execution scope, not earlier consolidation drafts.
|
||||
- Risk: stale references to canceled/deferred merges may reappear from older notes. Mitigation: decision ledger + final grep gate.
|
||||
|
||||
## Next Checkpoints
|
||||
- Milestone 1: domain audit and DB merge ledger draft complete.
|
||||
- Milestone 2: architecture + CLAUDE update complete.
|
||||
- Milestone 3: final cross-reference gate passed and sprint ready for closure.
|
||||
|
||||
|
||||
- Milestone 1: decision ledger complete.
|
||||
- Milestone 2: stale merge language removed.
|
||||
- Milestone 3: final docs gate passed and sprint ready for closure.
|
||||
|
||||
@@ -1,133 +0,0 @@
|
||||
# Sprint 220 - Scanner Domain: Absorb SbomService
|
||||
|
||||
## Topic & Scope
|
||||
- Consolidate `src/SbomService/` (6 csproj) into `src/Scanner/` domain ownership.
|
||||
- SbomService generates, processes, and tracks SBOM lineage from scanned artifacts — this is Scanner's domain (scan -> produce SBOM -> index -> track lineage).
|
||||
- SbomServiceDbContext stub was already deleted in a prior session — no DB merge required.
|
||||
- SbomService.WebService keeps its own Docker container and port (10390/10391).
|
||||
- Working directory: `src/Scanner/`, `src/SbomService/`.
|
||||
- Cross-module edits explicitly allowed for: `src/Platform/` (Platform.Database references SbomService.Lineage), `src/Cli/`, `src/Web/`, `devops/compose/`.
|
||||
- Expected evidence: clean builds, all tests pass, Docker service remains operational, no API regressions.
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- No upstream sprint dependency.
|
||||
- Can run in parallel with Sprint 201 (Cartographer -> Scanner) but if both are in flight simultaneously, coordinate Scanner solution file edits to avoid merge conflicts. Recommend serializing: run 201 first (smaller, 1 csproj), then 220.
|
||||
- Coordinate with Sprint 203 (Concelier absorbs Excititor) because SbomService.WebService has a `ProjectReference` to `StellaOps.Excititor.Persistence` — if Sprint 203 moves Excititor first, the path in SbomService's .csproj must be updated to point to the new location under `src/Concelier/`.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- Read `src/SbomService/AGENTS.md`.
|
||||
- Read `docs/modules/sbom-service/architecture.md`.
|
||||
- Read `AUDIT_20260225_cli_ui_module_reference_matrix.md`.
|
||||
|
||||
## Delivery Tracker
|
||||
|
||||
### TASK-220-001 - Map SbomService structure and consumer references
|
||||
Status: TODO
|
||||
Dependency: none
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Enumerate all 6 csproj files and their dependencies:
|
||||
- `StellaOps.SbomService` (WebService) — depends on Configuration, DependencyInjection, Excititor.Persistence, SbomService.Lineage, Auth.ServerIntegration.
|
||||
- `StellaOps.SbomService.Persistence` (library).
|
||||
- `StellaOps.SbomService.Lineage` (library, EF Core).
|
||||
- `StellaOps.SbomService.Tests` (tests).
|
||||
- `StellaOps.SbomService.Persistence.Tests` (tests).
|
||||
- `StellaOps.SbomService.Lineage.Tests` (tests).
|
||||
- Identify all external consumers:
|
||||
- `src/Platform/__Libraries/StellaOps.Platform.Database/` references `StellaOps.SbomService.Lineage`.
|
||||
- E2E integration tests reference SbomService.
|
||||
- Confirm SbomServiceDbContext stub is deleted (no DB merge needed).
|
||||
- Document Docker service definition (`sbomservice` slot 39, image `stellaops/sbomservice:dev`).
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Full dependency and consumer map documented.
|
||||
- [ ] DbContext deletion confirmed.
|
||||
- [ ] Docker definition documented.
|
||||
|
||||
### TASK-220-002 - Move SbomService source tree into Scanner domain
|
||||
Status: TODO
|
||||
Dependency: TASK-220-001
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Move `src/SbomService/StellaOps.SbomService/` -> `src/Scanner/StellaOps.SbomService/` (WebService).
|
||||
- Move `src/SbomService/__Libraries/` -> `src/Scanner/__Libraries/` (merge with existing Scanner libraries):
|
||||
- `StellaOps.SbomService.Persistence/`
|
||||
- `StellaOps.SbomService.Lineage/`
|
||||
- Move `src/SbomService/__Tests/` -> `src/Scanner/__Tests/` (merge with existing Scanner tests):
|
||||
- `StellaOps.SbomService.Persistence.Tests/`
|
||||
- `StellaOps.SbomService.Lineage.Tests/`
|
||||
- Move `src/SbomService/StellaOps.SbomService.Tests/` -> `src/Scanner/__Tests/StellaOps.SbomService.Tests/`.
|
||||
- Keep all project names unchanged — no namespace renames.
|
||||
- Update all `ProjectReference` paths in:
|
||||
- Moved SbomService projects (internal references).
|
||||
- `src/Platform/__Libraries/StellaOps.Platform.Database/` (references SbomService.Lineage).
|
||||
- Any other consumers identified in TASK-220-001.
|
||||
- Add moved projects to Scanner solution file (`StellaOps.Scanner.sln` or equivalent).
|
||||
- Remove SbomService entries from root solution (`StellaOps.sln`) old paths and re-add at new paths.
|
||||
- Remove `src/SbomService/` directory after all moves complete.
|
||||
- Move `src/SbomService/AGENTS.md` -> `src/Scanner/AGENTS_SBOMSERVICE.md` or merge into Scanner's AGENTS.md.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] All 6 projects moved under Scanner domain.
|
||||
- [ ] All ProjectReference paths updated and compile.
|
||||
- [ ] Scanner solution includes SbomService projects.
|
||||
- [ ] Root solution updated.
|
||||
- [ ] Legacy `src/SbomService/` directory removed.
|
||||
|
||||
### TASK-220-003 - Update Docker, CI, and infrastructure references
|
||||
Status: TODO
|
||||
Dependency: TASK-220-002
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Update `devops/compose/docker-compose.stella-ops.yml`:
|
||||
- SbomService build context and Dockerfile path to new location under Scanner.
|
||||
- Update `.gitea/workflows/` if any workflow references SbomService source paths.
|
||||
- Update `src/Platform/StellaOps.Platform.WebService/Properties/launchSettings.json` if SbomService URLs are defined there.
|
||||
- Build verification:
|
||||
- `dotnet build` Scanner solution — must succeed.
|
||||
- `dotnet test` all SbomService test projects — must pass.
|
||||
- `dotnet build StellaOps.sln` — root solution must succeed.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Docker compose updated with new paths.
|
||||
- [ ] CI workflows updated if affected.
|
||||
- [ ] All builds and tests pass.
|
||||
|
||||
### TASK-220-004 - Update documentation and validate CLI/Web references
|
||||
Status: TODO
|
||||
Dependency: TASK-220-003
|
||||
Owners: Developer
|
||||
Task description:
|
||||
- Move `docs/modules/sbom-service/` to `docs-archived/modules/sbom-service/` (standalone docs).
|
||||
- Add SbomService subsection to Scanner architecture doc (`docs/modules/scanner/architecture.md`).
|
||||
- Update `docs/INDEX.md` — mark SbomService as consolidated into Scanner.
|
||||
- Update `CLAUDE.md` section 1.4 if SbomService is listed.
|
||||
- Audit CLI references:
|
||||
- Search `src/Cli/` for SbomService-specific references.
|
||||
- Update any source-path references.
|
||||
- Audit Web references:
|
||||
- Search `src/Web/` for SbomService API base URLs or proxy config.
|
||||
- Validate runtime API paths remain unchanged.
|
||||
- Search all `docs/**/*.md` for references to `src/SbomService/` and update.
|
||||
|
||||
Completion criteria:
|
||||
- [ ] Standalone SbomService docs archived.
|
||||
- [ ] Scanner architecture doc updated with SbomService subsection.
|
||||
- [ ] INDEX.md and CLAUDE.md updated.
|
||||
- [ ] CLI and Web audits completed.
|
||||
- [ ] No broken references remain.
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2026-02-25 | Sprint created. | Planning |
|
||||
|
||||
## Decisions & Risks
|
||||
- Decision: SbomService is treated as part of Scanner's domain (scan -> SBOM -> lineage).
|
||||
- Decision: SbomServiceDbContext stub was already deleted — no DB merge work required.
|
||||
- Decision: Project names preserved — no namespace renames to avoid breaking serialized types or API contracts.
|
||||
- Risk: SbomService.WebService references `StellaOps.Excititor.Persistence` (cross-domain dependency). If Sprint 203 moves Excititor first, this ProjectReference path must be updated. Mitigation: coordinate with Sprint 203 or update the path after both sprints complete.
|
||||
- Risk: Platform.Database references SbomService.Lineage — path must be updated atomically with the move. Low risk (single consumer, clear path update).
|
||||
|
||||
## Next Checkpoints
|
||||
- Estimate: 1 session (medium scope — 6 csproj, straightforward organizational move, no DB merge).
|
||||
@@ -15,6 +15,25 @@ Get Stella Ops running locally for development and evaluation.
|
||||
| RAM | 16 GB (32 GB recommended) | |
|
||||
| Disk | 50 GB free | |
|
||||
|
||||
## 0. Runtime data assets (optional but recommended)
|
||||
|
||||
Some services depend on files not produced by `dotnet build` — most notably the
|
||||
ONNX embedding model for semantic search. Without it, search still works, but with
|
||||
reduced quality.
|
||||
|
||||
```bash
|
||||
# After cloning, download the embedding model (~80 MB)
|
||||
./devops/runtime-assets/acquire.sh --models
|
||||
|
||||
# Verify everything is in place
|
||||
./devops/runtime-assets/acquire.sh --verify
|
||||
```
|
||||
|
||||
For binary analysis (Ghidra), add `--ghidra` (~1.6 GB). Full details:
|
||||
`devops/runtime-assets/README.md`.
|
||||
|
||||
---
|
||||
|
||||
## 1. Clone the repository
|
||||
|
||||
```bash
|
||||
|
||||
@@ -398,7 +398,7 @@ public sealed record FinalizeSetupSessionRequest(
|
||||
/// <summary>
|
||||
/// Request to test connectivity for a setup step.
|
||||
/// </summary>
|
||||
public sealed record TestConnectionRequest(
|
||||
public sealed record SetupTestConnectionRequest(
|
||||
Dictionary<string, string>? ConfigValues = null);
|
||||
|
||||
#endregion
|
||||
|
||||
@@ -579,7 +579,7 @@ public static class SetupEndpoints
|
||||
Dictionary<string, string>? configValues = null;
|
||||
try
|
||||
{
|
||||
var body = await context.Request.ReadFromJsonAsync<TestConnectionRequest>(ct);
|
||||
var body = await context.Request.ReadFromJsonAsync<SetupTestConnectionRequest>(ct);
|
||||
configValues = body?.ConfigValues;
|
||||
}
|
||||
catch { /* empty body acceptable */ }
|
||||
|
||||
Reference in New Issue
Block a user