up
This commit is contained in:
23
.dockerignore
Normal file
23
.dockerignore
Normal file
@@ -0,0 +1,23 @@
|
||||
.git
|
||||
.gitignore
|
||||
.gitea
|
||||
.venv
|
||||
bin
|
||||
obj
|
||||
**/bin
|
||||
**/obj
|
||||
local-nugets
|
||||
.nuget
|
||||
**/node_modules
|
||||
**/dist
|
||||
**/coverage
|
||||
**/*.user
|
||||
**/*.suo
|
||||
**/*.cache
|
||||
**/.vscode
|
||||
**/.idea
|
||||
**/.DS_Store
|
||||
**/TestResults
|
||||
**/out
|
||||
**/packages
|
||||
/tmp
|
||||
68
.gitea/workflows/icscisa-kisa-refresh.yml
Normal file
68
.gitea/workflows/icscisa-kisa-refresh.yml
Normal file
@@ -0,0 +1,68 @@
|
||||
name: ICS/KISA Feed Refresh
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 2 * * MON'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
live_fetch:
|
||||
description: 'Attempt live RSS fetch (fallback to samples on failure)'
|
||||
required: false
|
||||
default: true
|
||||
type: boolean
|
||||
offline_snapshot:
|
||||
description: 'Force offline samples only (no network)'
|
||||
required: false
|
||||
default: false
|
||||
type: boolean
|
||||
|
||||
jobs:
|
||||
refresh:
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
contents: read
|
||||
env:
|
||||
ICSCISA_FEED_URL: ${{ secrets.ICSCISA_FEED_URL }}
|
||||
KISA_FEED_URL: ${{ secrets.KISA_FEED_URL }}
|
||||
FEED_GATEWAY_HOST: concelier-webservice
|
||||
FEED_GATEWAY_SCHEME: http
|
||||
LIVE_FETCH: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.live_fetch || 'true' }}
|
||||
OFFLINE_SNAPSHOT: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.offline_snapshot || 'false' }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set run metadata
|
||||
id: meta
|
||||
run: |
|
||||
RUN_DATE=$(date -u +%Y%m%d)
|
||||
RUN_ID="icscisa-kisa-$(date -u +%Y%m%dT%H%M%SZ)"
|
||||
echo "run_date=$RUN_DATE" >> $GITHUB_OUTPUT
|
||||
echo "run_id=$RUN_ID" >> $GITHUB_OUTPUT
|
||||
echo "RUN_DATE=$RUN_DATE" >> $GITHUB_ENV
|
||||
echo "RUN_ID=$RUN_ID" >> $GITHUB_ENV
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
- name: Run ICS/KISA refresh
|
||||
run: |
|
||||
python scripts/feeds/run_icscisa_kisa_refresh.py \
|
||||
--out-dir out/feeds/icscisa-kisa \
|
||||
--run-date "${{ steps.meta.outputs.run_date }}" \
|
||||
--run-id "${{ steps.meta.outputs.run_id }}"
|
||||
|
||||
- name: Show fetch log
|
||||
run: cat out/feeds/icscisa-kisa/${{ steps.meta.outputs.run_date }}/fetch.log
|
||||
|
||||
- name: Upload refresh artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: icscisa-kisa-${{ steps.meta.outputs.run_date }}
|
||||
path: out/feeds/icscisa-kisa/${{ steps.meta.outputs.run_date }}
|
||||
if-no-files-found: error
|
||||
retention-days: 21
|
||||
@@ -46,6 +46,16 @@ jobs:
|
||||
run: |
|
||||
scripts/mirror/verify_thin_bundle.py out/mirror/thin/mirror-thin-v1.tar.gz
|
||||
|
||||
- name: Prepare Export Center handoff (metadata + optional schedule)
|
||||
run: |
|
||||
scripts/mirror/export-center-wire.sh
|
||||
env:
|
||||
EXPORT_CENTER_BASE_URL: ${{ secrets.EXPORT_CENTER_BASE_URL }}
|
||||
EXPORT_CENTER_TOKEN: ${{ secrets.EXPORT_CENTER_TOKEN }}
|
||||
EXPORT_CENTER_TENANT: ${{ secrets.EXPORT_CENTER_TENANT }}
|
||||
EXPORT_CENTER_PROJECT: ${{ secrets.EXPORT_CENTER_PROJECT }}
|
||||
EXPORT_CENTER_AUTO_SCHEDULE: ${{ secrets.EXPORT_CENTER_AUTO_SCHEDULE }}
|
||||
|
||||
- name: Upload signed artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
@@ -57,5 +67,8 @@ jobs:
|
||||
out/mirror/thin/tuf/
|
||||
out/mirror/thin/oci/
|
||||
out/mirror/thin/milestone.json
|
||||
out/mirror/thin/export-center/export-center-handoff.json
|
||||
out/mirror/thin/export-center/export-center-targets.json
|
||||
out/mirror/thin/export-center/schedule-response.json
|
||||
if-no-files-found: error
|
||||
retention-days: 14
|
||||
|
||||
@@ -28,6 +28,8 @@ jobs:
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
OUT_DIR: ${{ github.event.inputs.out_dir || 'evidence-locker/signals/2025-12-01' }}
|
||||
COSIGN_ALLOW_DEV_KEY: ${{ github.event.inputs.allow_dev_key || '0' }}
|
||||
CI_EVIDENCE_LOCKER_TOKEN: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN || vars.CI_EVIDENCE_LOCKER_TOKEN }}
|
||||
EVIDENCE_LOCKER_URL: ${{ secrets.EVIDENCE_LOCKER_URL || vars.EVIDENCE_LOCKER_URL }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
@@ -90,9 +92,9 @@ jobs:
|
||||
retention-days: 90
|
||||
|
||||
- name: Push to Evidence Locker
|
||||
if: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN != '' && env.EVIDENCE_LOCKER_URL != '' }}
|
||||
if: ${{ env.CI_EVIDENCE_LOCKER_TOKEN != '' && env.EVIDENCE_LOCKER_URL != '' }}
|
||||
env:
|
||||
TOKEN: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN }}
|
||||
TOKEN: ${{ env.CI_EVIDENCE_LOCKER_TOKEN }}
|
||||
URL: ${{ env.EVIDENCE_LOCKER_URL }}
|
||||
run: |
|
||||
tar -cf /tmp/signals-dsse.tar -C "$OUT_DIR" .
|
||||
@@ -102,7 +104,7 @@ jobs:
|
||||
echo "Pushed to Evidence Locker"
|
||||
|
||||
- name: Evidence Locker skip notice
|
||||
if: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN == '' || env.EVIDENCE_LOCKER_URL == '' }}
|
||||
if: ${{ env.CI_EVIDENCE_LOCKER_TOKEN == '' || env.EVIDENCE_LOCKER_URL == '' }}
|
||||
run: |
|
||||
echo "::notice::Evidence Locker push skipped (CI_EVIDENCE_LOCKER_TOKEN or EVIDENCE_LOCKER_URL not set)"
|
||||
echo "Artifacts available as workflow artifact for manual ingestion"
|
||||
|
||||
@@ -2,6 +2,14 @@ name: signals-evidence-locker
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
out_dir:
|
||||
description: "Output directory containing signed artifacts"
|
||||
required: false
|
||||
default: "evidence-locker/signals/2025-12-05"
|
||||
allow_dev_key:
|
||||
description: "Allow dev key fallback (1=yes, 0=no)"
|
||||
required: false
|
||||
default: "0"
|
||||
retention_target:
|
||||
description: "Retention days target"
|
||||
required: false
|
||||
@@ -12,7 +20,12 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
MODULE_ROOT: docs/modules/signals
|
||||
OUT_DIR: evidence-locker/signals/2025-12-05
|
||||
OUT_DIR: ${{ github.event.inputs.out_dir || 'evidence-locker/signals/2025-12-05' }}
|
||||
COSIGN_ALLOW_DEV_KEY: ${{ github.event.inputs.allow_dev_key || '0' }}
|
||||
COSIGN_PRIVATE_KEY_B64: ${{ secrets.COSIGN_PRIVATE_KEY_B64 }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
EVIDENCE_LOCKER_URL: ${{ secrets.EVIDENCE_LOCKER_URL || vars.EVIDENCE_LOCKER_URL }}
|
||||
CI_EVIDENCE_LOCKER_TOKEN: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN || vars.CI_EVIDENCE_LOCKER_TOKEN }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
@@ -20,6 +33,21 @@ jobs:
|
||||
- name: Task Pack offline bundle fixtures
|
||||
run: python3 scripts/packs/run-fixtures-check.sh
|
||||
|
||||
- name: Install cosign
|
||||
uses: sigstore/cosign-installer@v3
|
||||
with:
|
||||
cosign-release: 'v2.2.4'
|
||||
|
||||
- name: Verify artifacts exist
|
||||
run: |
|
||||
cd "$MODULE_ROOT"
|
||||
sha256sum -c SHA256SUMS
|
||||
|
||||
- name: Sign signals artifacts
|
||||
run: |
|
||||
chmod +x tools/cosign/sign-signals.sh
|
||||
OUT_DIR="${OUT_DIR}" tools/cosign/sign-signals.sh
|
||||
|
||||
- name: Build deterministic signals evidence tar
|
||||
run: |
|
||||
set -euo pipefail
|
||||
@@ -52,16 +80,17 @@ jobs:
|
||||
/tmp/signals-evidence.tar.sha256
|
||||
|
||||
- name: Push to Evidence Locker
|
||||
if: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN != '' && env.EVIDENCE_LOCKER_URL != '' }}
|
||||
if: ${{ env.CI_EVIDENCE_LOCKER_TOKEN != '' && env.EVIDENCE_LOCKER_URL != '' }}
|
||||
env:
|
||||
TOKEN: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN }}
|
||||
TOKEN: ${{ env.CI_EVIDENCE_LOCKER_TOKEN }}
|
||||
URL: ${{ env.EVIDENCE_LOCKER_URL }}
|
||||
run: |
|
||||
curl -f -X PUT "$URL/signals/2025-12-05/signals-evidence.tar" \
|
||||
upload_path="${OUT_DIR#evidence-locker/}"
|
||||
curl -f -X PUT "$URL/${upload_path}/signals-evidence.tar" \
|
||||
-H "Authorization: Bearer $TOKEN" \
|
||||
--data-binary @/tmp/signals-evidence.tar
|
||||
|
||||
- name: Skip push (missing secret or URL)
|
||||
if: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN == '' || env.EVIDENCE_LOCKER_URL == '' }}
|
||||
if: ${{ env.CI_EVIDENCE_LOCKER_TOKEN == '' || env.EVIDENCE_LOCKER_URL == '' }}
|
||||
run: |
|
||||
echo "Locker push skipped: set CI_EVIDENCE_LOCKER_TOKEN and EVIDENCE_LOCKER_URL to enable." >&2
|
||||
|
||||
117
.gitea/workflows/signals-reachability.yml
Normal file
117
.gitea/workflows/signals-reachability.yml
Normal file
@@ -0,0 +1,117 @@
|
||||
name: Signals Reachability Scoring & Events
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
allow_dev_key:
|
||||
description: "Allow dev signing key fallback (1=yes, 0=no)"
|
||||
required: false
|
||||
default: "0"
|
||||
evidence_out_dir:
|
||||
description: "Evidence output dir for signing/upload"
|
||||
required: false
|
||||
default: "evidence-locker/signals/2025-12-05"
|
||||
push:
|
||||
branches: [ main ]
|
||||
paths:
|
||||
- 'src/Signals/**'
|
||||
- 'scripts/signals/reachability-smoke.sh'
|
||||
- '.gitea/workflows/signals-reachability.yml'
|
||||
- 'tools/cosign/sign-signals.sh'
|
||||
|
||||
jobs:
|
||||
reachability-smoke:
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
DOTNET_NOLOGO: 1
|
||||
DOTNET_CLI_TELEMETRY_OPTOUT: 1
|
||||
DOTNET_SYSTEM_GLOBALIZATION_INVARIANT: 1
|
||||
TZ: UTC
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Task Pack offline bundle fixtures
|
||||
run: python3 scripts/packs/run-fixtures-check.sh
|
||||
|
||||
- name: Setup .NET 10 RC
|
||||
uses: actions/setup-dotnet@v4
|
||||
with:
|
||||
dotnet-version: 10.0.100
|
||||
include-prerelease: true
|
||||
|
||||
- name: Restore
|
||||
run: dotnet restore src/Signals/StellaOps.Signals.sln --configfile nuget.config
|
||||
|
||||
- name: Build
|
||||
run: dotnet build src/Signals/StellaOps.Signals.sln -c Release --no-restore
|
||||
|
||||
- name: Reachability scoring + cache/events smoke
|
||||
run: |
|
||||
chmod +x scripts/signals/reachability-smoke.sh
|
||||
scripts/signals/reachability-smoke.sh
|
||||
|
||||
sign-and-upload:
|
||||
runs-on: ubuntu-22.04
|
||||
needs: reachability-smoke
|
||||
env:
|
||||
COSIGN_PRIVATE_KEY_B64: ${{ secrets.COSIGN_PRIVATE_KEY_B64 }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
COSIGN_ALLOW_DEV_KEY: ${{ github.event.inputs.allow_dev_key || '0' }}
|
||||
OUT_DIR: ${{ github.event.inputs.evidence_out_dir || 'evidence-locker/signals/2025-12-05' }}
|
||||
CI_EVIDENCE_LOCKER_TOKEN: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN || vars.CI_EVIDENCE_LOCKER_TOKEN }}
|
||||
EVIDENCE_LOCKER_URL: ${{ secrets.EVIDENCE_LOCKER_URL || vars.EVIDENCE_LOCKER_URL }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Task Pack offline bundle fixtures
|
||||
run: python3 scripts/packs/run-fixtures-check.sh
|
||||
|
||||
- name: Install cosign
|
||||
uses: sigstore/cosign-installer@v3
|
||||
with:
|
||||
cosign-release: 'v2.2.4'
|
||||
|
||||
- name: Verify artifacts exist
|
||||
run: |
|
||||
cd docs/modules/signals
|
||||
sha256sum -c SHA256SUMS
|
||||
|
||||
- name: Sign signals artifacts
|
||||
run: |
|
||||
chmod +x tools/cosign/sign-signals.sh
|
||||
OUT_DIR="${OUT_DIR}" tools/cosign/sign-signals.sh
|
||||
|
||||
- name: Upload signed artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: signals-evidence-${{ github.run_number }}
|
||||
path: |
|
||||
${{ env.OUT_DIR }}/*.sigstore.json
|
||||
${{ env.OUT_DIR }}/*.dsse
|
||||
${{ env.OUT_DIR }}/SHA256SUMS
|
||||
if-no-files-found: error
|
||||
retention-days: 30
|
||||
|
||||
- name: Push to Evidence Locker
|
||||
if: ${{ env.CI_EVIDENCE_LOCKER_TOKEN != '' && env.EVIDENCE_LOCKER_URL != '' }}
|
||||
env:
|
||||
TOKEN: ${{ env.CI_EVIDENCE_LOCKER_TOKEN }}
|
||||
URL: ${{ env.EVIDENCE_LOCKER_URL }}
|
||||
run: |
|
||||
tar -cf /tmp/signals-evidence.tar -C "$OUT_DIR" .
|
||||
sha256sum /tmp/signals-evidence.tar
|
||||
curl -f -X PUT "$URL/signals/$(date -u +%Y-%m-%d)/signals-evidence.tar" \
|
||||
-H "Authorization: Bearer $TOKEN" \
|
||||
--data-binary @/tmp/signals-evidence.tar
|
||||
echo "Uploaded to Evidence Locker"
|
||||
|
||||
- name: Evidence Locker skip notice
|
||||
if: ${{ env.CI_EVIDENCE_LOCKER_TOKEN == '' || env.EVIDENCE_LOCKER_URL == '' }}
|
||||
run: |
|
||||
echo "::notice::Evidence Locker upload skipped (CI_EVIDENCE_LOCKER_TOKEN or EVIDENCE_LOCKER_URL not set)"
|
||||
33
.gitea/workflows/sm-remote-ci.yml
Normal file
33
.gitea/workflows/sm-remote-ci.yml
Normal file
@@ -0,0 +1,33 @@
|
||||
name: sm-remote-ci
|
||||
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- "src/SmRemote/**"
|
||||
- "src/__Libraries/StellaOps.Cryptography.Plugin.SmRemote/**"
|
||||
- "src/__Libraries/StellaOps.Cryptography.Plugin.SmRemote.Tests/**"
|
||||
- "ops/sm-remote/**"
|
||||
- ".gitea/workflows/sm-remote-ci.yml"
|
||||
pull_request:
|
||||
paths:
|
||||
- "src/SmRemote/**"
|
||||
- "src/__Libraries/StellaOps.Cryptography.Plugin.SmRemote/**"
|
||||
- "src/__Libraries/StellaOps.Cryptography.Plugin.SmRemote.Tests/**"
|
||||
- "ops/sm-remote/**"
|
||||
|
||||
jobs:
|
||||
build-and-test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: Setup .NET
|
||||
uses: actions/setup-dotnet@v4
|
||||
with:
|
||||
dotnet-version: 10.0.x
|
||||
- name: Restore
|
||||
run: dotnet restore src/__Libraries/StellaOps.Cryptography.Plugin.SmRemote.Tests/StellaOps.Cryptography.Plugin.SmRemote.Tests.csproj
|
||||
- name: Test
|
||||
run: dotnet test src/__Libraries/StellaOps.Cryptography.Plugin.SmRemote.Tests/StellaOps.Cryptography.Plugin.SmRemote.Tests.csproj --no-build --verbosity normal
|
||||
- name: Publish service
|
||||
run: dotnet publish src/SmRemote/StellaOps.SmRemote.Service/StellaOps.SmRemote.Service.csproj -c Release -o out/sm-remote
|
||||
@@ -1,9 +1,9 @@
|
||||
# Stella Ops Compose Profiles
|
||||
|
||||
These Compose bundles ship the minimum services required to exercise the scanner pipeline plus control-plane dependencies. Every profile is pinned to immutable image digests sourced from `deploy/releases/*.yaml` and is linted via `docker compose config` in CI.
|
||||
|
||||
## Layout
|
||||
|
||||
# Stella Ops Compose Profiles
|
||||
|
||||
These Compose bundles ship the minimum services required to exercise the scanner pipeline plus control-plane dependencies. Every profile is pinned to immutable image digests sourced from `deploy/releases/*.yaml` and is linted via `docker compose config` in CI.
|
||||
|
||||
## Layout
|
||||
|
||||
| Path | Purpose |
|
||||
| ---- | ------- |
|
||||
| `docker-compose.dev.yaml` | Edge/nightly stack tuned for laptops and iterative work. |
|
||||
@@ -19,9 +19,9 @@ These Compose bundles ship the minimum services required to exercise the scanner
|
||||
| `scripts/reset.sh` | Stops the stack and removes Mongo/MinIO/Redis volumes after explicit confirmation. |
|
||||
| `scripts/quickstart.sh` | Helper to validate config and start dev stack; set `USE_MOCK=1` to include `docker-compose.mock.yaml` overlay. |
|
||||
| `docker-compose.mock.yaml` | Dev-only overlay with placeholder digests for missing services (orchestrator, policy-registry, packs, task-runner, VEX/Vuln stack). Use only with mock release manifest `deploy/releases/2025.09-mock-dev.yaml`. |
|
||||
|
||||
## Usage
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
cp env/dev.env.example dev.env
|
||||
docker compose --env-file dev.env -f docker-compose.dev.yaml config
|
||||
@@ -30,6 +30,8 @@ docker compose --env-file dev.env -f docker-compose.dev.yaml up -d
|
||||
|
||||
The stage and airgap variants behave the same way—swap the file names accordingly. All profiles expose 443/8443 for the UI and REST APIs, and they share a `stellaops` Docker network scoped to the compose project.
|
||||
|
||||
> **Surface.Secrets:** set `SCANNER_SURFACE_SECRETS_PROVIDER`/`SCANNER_SURFACE_SECRETS_ROOT` in your `.env` and point `SURFACE_SECRETS_HOST_PATH` to the decrypted bundle path (default `./offline/surface-secrets`). The stack mounts that path read-only into Scanner Web/Worker so `secret://` references resolve without embedding plaintext.
|
||||
|
||||
> **Graph Explorer reminder:** If you enable Cartographer or Graph API containers alongside these profiles, update `etc/authority.yaml` so the `cartographer-service` client is marked with `properties.serviceIdentity: "cartographer"` and carries a tenant hint. The Authority host now refuses `graph:write` tokens without that marker, so apply the configuration change before rolling out the updated images.
|
||||
|
||||
### Telemetry collector overlay
|
||||
@@ -116,7 +118,7 @@ USE_MOCK=1 ./scripts/quickstart.sh env/dev.env.example
|
||||
```
|
||||
|
||||
The overlay pins the missing services (orchestrator, policy-registry, packs-registry, task-runner, VEX/Vuln stack) to mock digests from `deploy/releases/2025.09-mock-dev.yaml` and starts their real entrypoints so integration flows can be exercised end-to-end. Replace the mock pins with production digests once releases publish; keep the mock overlay dev-only.
|
||||
|
||||
|
||||
Keep digests synchronized between Compose, Helm, and the release manifest to preserve reproducibility guarantees. `deploy/tools/validate-profiles.sh` performs a quick audit.
|
||||
|
||||
### GPU toggle for Advisory AI
|
||||
|
||||
@@ -1,31 +1,34 @@
|
||||
x-release-labels: &release-labels
|
||||
com.stellaops.release.version: "2025.09.2-airgap"
|
||||
com.stellaops.release.channel: "airgap"
|
||||
com.stellaops.profile: "airgap"
|
||||
|
||||
networks:
|
||||
stellaops:
|
||||
driver: bridge
|
||||
|
||||
volumes:
|
||||
mongo-data:
|
||||
minio-data:
|
||||
rustfs-data:
|
||||
x-release-labels: &release-labels
|
||||
com.stellaops.release.version: "2025.09.2-airgap"
|
||||
com.stellaops.release.channel: "airgap"
|
||||
com.stellaops.profile: "airgap"
|
||||
|
||||
networks:
|
||||
stellaops:
|
||||
driver: bridge
|
||||
|
||||
volumes:
|
||||
mongo-data:
|
||||
minio-data:
|
||||
rustfs-data:
|
||||
concelier-jobs:
|
||||
nats-data:
|
||||
scanner-surface-cache:
|
||||
postgres-data:
|
||||
|
||||
services:
|
||||
advisory-ai-queue:
|
||||
advisory-ai-plans:
|
||||
advisory-ai-outputs:
|
||||
|
||||
services:
|
||||
mongo:
|
||||
image: docker.io/library/mongo@sha256:c258b26dbb7774f97f52aff52231ca5f228273a84329c5f5e451c3739457db49
|
||||
command: ["mongod", "--bind_ip_all"]
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
MONGO_INITDB_ROOT_USERNAME: "${MONGO_INITDB_ROOT_USERNAME}"
|
||||
MONGO_INITDB_ROOT_PASSWORD: "${MONGO_INITDB_ROOT_PASSWORD}"
|
||||
volumes:
|
||||
- mongo-data:/data/db
|
||||
environment:
|
||||
MONGO_INITDB_ROOT_USERNAME: "${MONGO_INITDB_ROOT_USERNAME}"
|
||||
MONGO_INITDB_ROOT_PASSWORD: "${MONGO_INITDB_ROOT_PASSWORD}"
|
||||
volumes:
|
||||
- mongo-data:/data/db
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
@@ -45,313 +48,319 @@ services:
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
minio:
|
||||
image: docker.io/minio/minio@sha256:14cea493d9a34af32f524e538b8346cf79f3321eff8e708c1e2960462bd8936e
|
||||
command: ["server", "/data", "--console-address", ":9001"]
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
MINIO_ROOT_USER: "${MINIO_ROOT_USER}"
|
||||
MINIO_ROOT_PASSWORD: "${MINIO_ROOT_PASSWORD}"
|
||||
volumes:
|
||||
- minio-data:/data
|
||||
ports:
|
||||
- "${MINIO_CONSOLE_PORT:-29001}:9001"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
rustfs:
|
||||
image: registry.stella-ops.org/stellaops/rustfs:2025.10.0-edge
|
||||
command: ["serve", "--listen", "0.0.0.0:8080", "--root", "/data"]
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
RUSTFS__LOG__LEVEL: info
|
||||
RUSTFS__STORAGE__PATH: /data
|
||||
volumes:
|
||||
- rustfs-data:/data
|
||||
ports:
|
||||
- "${RUSTFS_HTTP_PORT:-8080}:8080"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
nats:
|
||||
image: docker.io/library/nats@sha256:c82559e4476289481a8a5196e675ebfe67eea81d95e5161e3e78eccfe766608e
|
||||
command:
|
||||
- "-js"
|
||||
- "-sd"
|
||||
- /data
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "${NATS_CLIENT_PORT:-24222}:4222"
|
||||
volumes:
|
||||
- nats-data:/data
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
authority:
|
||||
image: registry.stella-ops.org/stellaops/authority@sha256:5551a3269b7008cd5aceecf45df018c67459ed519557ccbe48b093b926a39bcc
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- mongo
|
||||
environment:
|
||||
STELLAOPS_AUTHORITY__ISSUER: "${AUTHORITY_ISSUER}"
|
||||
STELLAOPS_AUTHORITY__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: "/app/plugins"
|
||||
STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: "/app/etc/authority.plugins"
|
||||
volumes:
|
||||
- ../../etc/authority.yaml:/etc/authority.yaml:ro
|
||||
- ../../etc/authority.plugins:/app/etc/authority.plugins:ro
|
||||
ports:
|
||||
- "${AUTHORITY_PORT:-8440}:8440"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
signer:
|
||||
image: registry.stella-ops.org/stellaops/signer@sha256:ddbbd664a42846cea6b40fca6465bc679b30f72851158f300d01a8571c5478fc
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- authority
|
||||
environment:
|
||||
SIGNER__AUTHORITY__BASEURL: "https://authority:8440"
|
||||
SIGNER__POE__INTROSPECTURL: "${SIGNER_POE_INTROSPECT_URL}"
|
||||
SIGNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
ports:
|
||||
- "${SIGNER_PORT:-8441}:8441"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
attestor:
|
||||
image: registry.stella-ops.org/stellaops/attestor@sha256:1ff0a3124d66d3a2702d8e421df40fbd98cc75cb605d95510598ebbae1433c50
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- signer
|
||||
environment:
|
||||
ATTESTOR__SIGNER__BASEURL: "https://signer:8441"
|
||||
ATTESTOR__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
ports:
|
||||
- "${ATTESTOR_PORT:-8442}:8442"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
issuer-directory:
|
||||
image: registry.stella-ops.org/stellaops/issuer-directory-web:2025.10.0-edge
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- mongo
|
||||
- authority
|
||||
environment:
|
||||
ISSUERDIRECTORY__CONFIG: "/etc/issuer-directory.yaml"
|
||||
ISSUERDIRECTORY__AUTHORITY__ISSUER: "${AUTHORITY_ISSUER}"
|
||||
ISSUERDIRECTORY__AUTHORITY__BASEURL: "https://authority:8440"
|
||||
ISSUERDIRECTORY__MONGO__CONNECTIONSTRING: "${ISSUER_DIRECTORY_MONGO_CONNECTION_STRING}"
|
||||
ISSUERDIRECTORY__SEEDCSAFPUBLISHERS: "${ISSUER_DIRECTORY_SEED_CSAF:-true}"
|
||||
volumes:
|
||||
- ../../etc/issuer-directory.yaml:/etc/issuer-directory.yaml:ro
|
||||
ports:
|
||||
- "${ISSUER_DIRECTORY_PORT:-8447}:8080"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
concelier:
|
||||
image: registry.stella-ops.org/stellaops/concelier@sha256:29e2e1a0972707e092cbd3d370701341f9fec2aa9316fb5d8100480f2a1c76b5
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- mongo
|
||||
- minio
|
||||
environment:
|
||||
CONCELIER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
CONCELIER__STORAGE__S3__ENDPOINT: "http://minio:9000"
|
||||
CONCELIER__STORAGE__S3__ACCESSKEYID: "${MINIO_ROOT_USER}"
|
||||
CONCELIER__STORAGE__S3__SECRETACCESSKEY: "${MINIO_ROOT_PASSWORD}"
|
||||
CONCELIER__AUTHORITY__BASEURL: "https://authority:8440"
|
||||
CONCELIER__AUTHORITY__RESILIENCE__ALLOWOFFLINECACHEFALLBACK: "true"
|
||||
CONCELIER__AUTHORITY__RESILIENCE__OFFLINECACHETOLERANCE: "${AUTHORITY_OFFLINE_CACHE_TOLERANCE:-00:30:00}"
|
||||
volumes:
|
||||
- concelier-jobs:/var/lib/concelier/jobs
|
||||
ports:
|
||||
- "${CONCELIER_PORT:-8445}:8445"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
scanner-web:
|
||||
image: registry.stella-ops.org/stellaops/scanner-web@sha256:3df8ca21878126758203c1a0444e39fd97f77ddacf04a69685cda9f1e5e94718
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- concelier
|
||||
- rustfs
|
||||
- nats
|
||||
environment:
|
||||
SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
SCANNER__ARTIFACTSTORE__DRIVER: "rustfs"
|
||||
SCANNER__ARTIFACTSTORE__ENDPOINT: "http://rustfs:8080/api/v1"
|
||||
SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts"
|
||||
SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30"
|
||||
SCANNER__QUEUE__BROKER: "${SCANNER_QUEUE_BROKER}"
|
||||
SCANNER__EVENTS__ENABLED: "${SCANNER_EVENTS_ENABLED:-false}"
|
||||
SCANNER__EVENTS__DRIVER: "${SCANNER_EVENTS_DRIVER:-redis}"
|
||||
SCANNER__EVENTS__DSN: "${SCANNER_EVENTS_DSN:-}"
|
||||
SCANNER__EVENTS__STREAM: "${SCANNER_EVENTS_STREAM:-stella.events}"
|
||||
SCANNER__EVENTS__PUBLISHTIMEOUTSECONDS: "${SCANNER_EVENTS_PUBLISH_TIMEOUT_SECONDS:-5}"
|
||||
SCANNER__EVENTS__MAXSTREAMLENGTH: "${SCANNER_EVENTS_MAX_STREAM_LENGTH:-10000}"
|
||||
# Surface.Env configuration (see docs/modules/scanner/design/surface-env.md)
|
||||
SCANNER_SURFACE_FS_ENDPOINT: "${SCANNER_SURFACE_FS_ENDPOINT:-http://rustfs:8080}"
|
||||
SCANNER_SURFACE_FS_BUCKET: "${SCANNER_SURFACE_FS_BUCKET:-surface-cache}"
|
||||
SCANNER_SURFACE_CACHE_ROOT: "${SCANNER_SURFACE_CACHE_ROOT:-/var/lib/stellaops/surface}"
|
||||
SCANNER_SURFACE_CACHE_QUOTA_MB: "${SCANNER_SURFACE_CACHE_QUOTA_MB:-4096}"
|
||||
SCANNER_SURFACE_PREFETCH_ENABLED: "${SCANNER_SURFACE_PREFETCH_ENABLED:-false}"
|
||||
SCANNER_SURFACE_TENANT: "${SCANNER_SURFACE_TENANT:-default}"
|
||||
SCANNER_SURFACE_FEATURES: "${SCANNER_SURFACE_FEATURES:-}"
|
||||
SCANNER_SURFACE_SECRETS_PROVIDER: "${SCANNER_SURFACE_SECRETS_PROVIDER:-file}"
|
||||
SCANNER_SURFACE_SECRETS_ROOT: "${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}"
|
||||
SCANNER_SURFACE_SECRETS_ALLOW_INLINE: "${SCANNER_SURFACE_SECRETS_ALLOW_INLINE:-false}"
|
||||
volumes:
|
||||
- scanner-surface-cache:/var/lib/stellaops/surface
|
||||
ports:
|
||||
- "${SCANNER_WEB_PORT:-8444}:8444"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
scanner-worker:
|
||||
image: registry.stella-ops.org/stellaops/scanner-worker@sha256:eea5d6cfe7835950c5ec7a735a651f2f0d727d3e470cf9027a4a402ea89c4fb5
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- scanner-web
|
||||
- rustfs
|
||||
- nats
|
||||
environment:
|
||||
SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
SCANNER__ARTIFACTSTORE__DRIVER: "rustfs"
|
||||
SCANNER__ARTIFACTSTORE__ENDPOINT: "http://rustfs:8080/api/v1"
|
||||
SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts"
|
||||
SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30"
|
||||
SCANNER__QUEUE__BROKER: "${SCANNER_QUEUE_BROKER}"
|
||||
# Surface.Env configuration (see docs/modules/scanner/design/surface-env.md)
|
||||
SCANNER_SURFACE_FS_ENDPOINT: "${SCANNER_SURFACE_FS_ENDPOINT:-http://rustfs:8080}"
|
||||
SCANNER_SURFACE_FS_BUCKET: "${SCANNER_SURFACE_FS_BUCKET:-surface-cache}"
|
||||
SCANNER_SURFACE_CACHE_ROOT: "${SCANNER_SURFACE_CACHE_ROOT:-/var/lib/stellaops/surface}"
|
||||
SCANNER_SURFACE_CACHE_QUOTA_MB: "${SCANNER_SURFACE_CACHE_QUOTA_MB:-4096}"
|
||||
SCANNER_SURFACE_PREFETCH_ENABLED: "${SCANNER_SURFACE_PREFETCH_ENABLED:-false}"
|
||||
SCANNER_SURFACE_TENANT: "${SCANNER_SURFACE_TENANT:-default}"
|
||||
SCANNER_SURFACE_FEATURES: "${SCANNER_SURFACE_FEATURES:-}"
|
||||
SCANNER_SURFACE_SECRETS_PROVIDER: "${SCANNER_SURFACE_SECRETS_PROVIDER:-file}"
|
||||
SCANNER_SURFACE_SECRETS_ROOT: "${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}"
|
||||
SCANNER_SURFACE_SECRETS_ALLOW_INLINE: "${SCANNER_SURFACE_SECRETS_ALLOW_INLINE:-false}"
|
||||
volumes:
|
||||
- scanner-surface-cache:/var/lib/stellaops/surface
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
scheduler-worker:
|
||||
image: registry.stella-ops.org/stellaops/scheduler-worker:2025.10.0-edge
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- mongo
|
||||
- nats
|
||||
- scanner-web
|
||||
command:
|
||||
- "dotnet"
|
||||
- "StellaOps.Scheduler.Worker.Host.dll"
|
||||
environment:
|
||||
SCHEDULER__QUEUE__KIND: "${SCHEDULER_QUEUE_KIND:-Nats}"
|
||||
SCHEDULER__QUEUE__NATS__URL: "${SCHEDULER_QUEUE_NATS_URL:-nats://nats:4222}"
|
||||
SCHEDULER__STORAGE__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
SCHEDULER__STORAGE__DATABASE: "${SCHEDULER_STORAGE_DATABASE:-stellaops_scheduler}"
|
||||
SCHEDULER__WORKER__RUNNER__SCANNER__BASEADDRESS: "${SCHEDULER_SCANNER_BASEADDRESS:-http://scanner-web:8444}"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
|
||||
minio:
|
||||
image: docker.io/minio/minio@sha256:14cea493d9a34af32f524e538b8346cf79f3321eff8e708c1e2960462bd8936e
|
||||
command: ["server", "/data", "--console-address", ":9001"]
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
MINIO_ROOT_USER: "${MINIO_ROOT_USER}"
|
||||
MINIO_ROOT_PASSWORD: "${MINIO_ROOT_PASSWORD}"
|
||||
volumes:
|
||||
- minio-data:/data
|
||||
ports:
|
||||
- "${MINIO_CONSOLE_PORT:-29001}:9001"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
rustfs:
|
||||
image: registry.stella-ops.org/stellaops/rustfs:2025.10.0-edge
|
||||
command: ["serve", "--listen", "0.0.0.0:8080", "--root", "/data"]
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
RUSTFS__LOG__LEVEL: info
|
||||
RUSTFS__STORAGE__PATH: /data
|
||||
volumes:
|
||||
- rustfs-data:/data
|
||||
ports:
|
||||
- "${RUSTFS_HTTP_PORT:-8080}:8080"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
nats:
|
||||
image: docker.io/library/nats@sha256:c82559e4476289481a8a5196e675ebfe67eea81d95e5161e3e78eccfe766608e
|
||||
command:
|
||||
- "-js"
|
||||
- "-sd"
|
||||
- /data
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "${NATS_CLIENT_PORT:-24222}:4222"
|
||||
volumes:
|
||||
- nats-data:/data
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
authority:
|
||||
image: registry.stella-ops.org/stellaops/authority@sha256:5551a3269b7008cd5aceecf45df018c67459ed519557ccbe48b093b926a39bcc
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- mongo
|
||||
environment:
|
||||
STELLAOPS_AUTHORITY__ISSUER: "${AUTHORITY_ISSUER}"
|
||||
STELLAOPS_AUTHORITY__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: "/app/plugins"
|
||||
STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: "/app/etc/authority.plugins"
|
||||
volumes:
|
||||
- ../../etc/authority.yaml:/etc/authority.yaml:ro
|
||||
- ../../etc/authority.plugins:/app/etc/authority.plugins:ro
|
||||
ports:
|
||||
- "${AUTHORITY_PORT:-8440}:8440"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
signer:
|
||||
image: registry.stella-ops.org/stellaops/signer@sha256:ddbbd664a42846cea6b40fca6465bc679b30f72851158f300d01a8571c5478fc
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- authority
|
||||
environment:
|
||||
SIGNER__AUTHORITY__BASEURL: "https://authority:8440"
|
||||
SIGNER__POE__INTROSPECTURL: "${SIGNER_POE_INTROSPECT_URL}"
|
||||
SIGNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
ports:
|
||||
- "${SIGNER_PORT:-8441}:8441"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
attestor:
|
||||
image: registry.stella-ops.org/stellaops/attestor@sha256:1ff0a3124d66d3a2702d8e421df40fbd98cc75cb605d95510598ebbae1433c50
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- signer
|
||||
environment:
|
||||
ATTESTOR__SIGNER__BASEURL: "https://signer:8441"
|
||||
ATTESTOR__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
ports:
|
||||
- "${ATTESTOR_PORT:-8442}:8442"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
issuer-directory:
|
||||
image: registry.stella-ops.org/stellaops/issuer-directory-web:2025.10.0-edge
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- mongo
|
||||
- authority
|
||||
environment:
|
||||
ISSUERDIRECTORY__CONFIG: "/etc/issuer-directory.yaml"
|
||||
ISSUERDIRECTORY__AUTHORITY__ISSUER: "${AUTHORITY_ISSUER}"
|
||||
ISSUERDIRECTORY__AUTHORITY__BASEURL: "https://authority:8440"
|
||||
ISSUERDIRECTORY__MONGO__CONNECTIONSTRING: "${ISSUER_DIRECTORY_MONGO_CONNECTION_STRING}"
|
||||
ISSUERDIRECTORY__SEEDCSAFPUBLISHERS: "${ISSUER_DIRECTORY_SEED_CSAF:-true}"
|
||||
volumes:
|
||||
- ../../etc/issuer-directory.yaml:/etc/issuer-directory.yaml:ro
|
||||
ports:
|
||||
- "${ISSUER_DIRECTORY_PORT:-8447}:8080"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
concelier:
|
||||
image: registry.stella-ops.org/stellaops/concelier@sha256:29e2e1a0972707e092cbd3d370701341f9fec2aa9316fb5d8100480f2a1c76b5
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- mongo
|
||||
- minio
|
||||
environment:
|
||||
CONCELIER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
CONCELIER__STORAGE__S3__ENDPOINT: "http://minio:9000"
|
||||
CONCELIER__STORAGE__S3__ACCESSKEYID: "${MINIO_ROOT_USER}"
|
||||
CONCELIER__STORAGE__S3__SECRETACCESSKEY: "${MINIO_ROOT_PASSWORD}"
|
||||
CONCELIER__AUTHORITY__BASEURL: "https://authority:8440"
|
||||
CONCELIER__AUTHORITY__RESILIENCE__ALLOWOFFLINECACHEFALLBACK: "true"
|
||||
CONCELIER__AUTHORITY__RESILIENCE__OFFLINECACHETOLERANCE: "${AUTHORITY_OFFLINE_CACHE_TOLERANCE:-00:30:00}"
|
||||
volumes:
|
||||
- concelier-jobs:/var/lib/concelier/jobs
|
||||
ports:
|
||||
- "${CONCELIER_PORT:-8445}:8445"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
scanner-web:
|
||||
image: registry.stella-ops.org/stellaops/scanner-web@sha256:3df8ca21878126758203c1a0444e39fd97f77ddacf04a69685cda9f1e5e94718
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- concelier
|
||||
- rustfs
|
||||
- nats
|
||||
environment:
|
||||
SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
SCANNER__ARTIFACTSTORE__DRIVER: "rustfs"
|
||||
SCANNER__ARTIFACTSTORE__ENDPOINT: "http://rustfs:8080/api/v1"
|
||||
SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts"
|
||||
SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30"
|
||||
SCANNER__QUEUE__BROKER: "${SCANNER_QUEUE_BROKER}"
|
||||
SCANNER__EVENTS__ENABLED: "${SCANNER_EVENTS_ENABLED:-false}"
|
||||
SCANNER__EVENTS__DRIVER: "${SCANNER_EVENTS_DRIVER:-redis}"
|
||||
SCANNER__EVENTS__DSN: "${SCANNER_EVENTS_DSN:-}"
|
||||
SCANNER__EVENTS__STREAM: "${SCANNER_EVENTS_STREAM:-stella.events}"
|
||||
SCANNER__EVENTS__PUBLISHTIMEOUTSECONDS: "${SCANNER_EVENTS_PUBLISH_TIMEOUT_SECONDS:-5}"
|
||||
SCANNER__EVENTS__MAXSTREAMLENGTH: "${SCANNER_EVENTS_MAX_STREAM_LENGTH:-10000}"
|
||||
# Surface.Env configuration (see docs/modules/scanner/design/surface-env.md)
|
||||
SCANNER_SURFACE_FS_ENDPOINT: "${SCANNER_SURFACE_FS_ENDPOINT:-http://rustfs:8080}"
|
||||
SCANNER_SURFACE_FS_BUCKET: "${SCANNER_SURFACE_FS_BUCKET:-surface-cache}"
|
||||
SCANNER_SURFACE_CACHE_ROOT: "${SCANNER_SURFACE_CACHE_ROOT:-/var/lib/stellaops/surface}"
|
||||
SCANNER_SURFACE_CACHE_QUOTA_MB: "${SCANNER_SURFACE_CACHE_QUOTA_MB:-4096}"
|
||||
SCANNER_SURFACE_PREFETCH_ENABLED: "${SCANNER_SURFACE_PREFETCH_ENABLED:-false}"
|
||||
SCANNER_SURFACE_TENANT: "${SCANNER_SURFACE_TENANT:-default}"
|
||||
SCANNER_SURFACE_FEATURES: "${SCANNER_SURFACE_FEATURES:-}"
|
||||
SCANNER_SURFACE_SECRETS_PROVIDER: "${SCANNER_SURFACE_SECRETS_PROVIDER:-file}"
|
||||
SCANNER_SURFACE_SECRETS_NAMESPACE: "${SCANNER_SURFACE_SECRETS_NAMESPACE:-}"
|
||||
SCANNER_SURFACE_SECRETS_ROOT: "${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}"
|
||||
SCANNER_SURFACE_SECRETS_FALLBACK_PROVIDER: "${SCANNER_SURFACE_SECRETS_FALLBACK_PROVIDER:-}"
|
||||
SCANNER_SURFACE_SECRETS_ALLOW_INLINE: "${SCANNER_SURFACE_SECRETS_ALLOW_INLINE:-false}"
|
||||
volumes:
|
||||
- scanner-surface-cache:/var/lib/stellaops/surface
|
||||
- ${SURFACE_SECRETS_HOST_PATH:-./offline/surface-secrets}:${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}:ro
|
||||
ports:
|
||||
- "${SCANNER_WEB_PORT:-8444}:8444"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
scanner-worker:
|
||||
image: registry.stella-ops.org/stellaops/scanner-worker@sha256:eea5d6cfe7835950c5ec7a735a651f2f0d727d3e470cf9027a4a402ea89c4fb5
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- scanner-web
|
||||
- rustfs
|
||||
- nats
|
||||
environment:
|
||||
SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
SCANNER__ARTIFACTSTORE__DRIVER: "rustfs"
|
||||
SCANNER__ARTIFACTSTORE__ENDPOINT: "http://rustfs:8080/api/v1"
|
||||
SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts"
|
||||
SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30"
|
||||
SCANNER__QUEUE__BROKER: "${SCANNER_QUEUE_BROKER}"
|
||||
# Surface.Env configuration (see docs/modules/scanner/design/surface-env.md)
|
||||
SCANNER_SURFACE_FS_ENDPOINT: "${SCANNER_SURFACE_FS_ENDPOINT:-http://rustfs:8080}"
|
||||
SCANNER_SURFACE_FS_BUCKET: "${SCANNER_SURFACE_FS_BUCKET:-surface-cache}"
|
||||
SCANNER_SURFACE_CACHE_ROOT: "${SCANNER_SURFACE_CACHE_ROOT:-/var/lib/stellaops/surface}"
|
||||
SCANNER_SURFACE_CACHE_QUOTA_MB: "${SCANNER_SURFACE_CACHE_QUOTA_MB:-4096}"
|
||||
SCANNER_SURFACE_PREFETCH_ENABLED: "${SCANNER_SURFACE_PREFETCH_ENABLED:-false}"
|
||||
SCANNER_SURFACE_TENANT: "${SCANNER_SURFACE_TENANT:-default}"
|
||||
SCANNER_SURFACE_FEATURES: "${SCANNER_SURFACE_FEATURES:-}"
|
||||
SCANNER_SURFACE_SECRETS_PROVIDER: "${SCANNER_SURFACE_SECRETS_PROVIDER:-file}"
|
||||
SCANNER_SURFACE_SECRETS_NAMESPACE: "${SCANNER_SURFACE_SECRETS_NAMESPACE:-}"
|
||||
SCANNER_SURFACE_SECRETS_ROOT: "${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}"
|
||||
SCANNER_SURFACE_SECRETS_FALLBACK_PROVIDER: "${SCANNER_SURFACE_SECRETS_FALLBACK_PROVIDER:-}"
|
||||
SCANNER_SURFACE_SECRETS_ALLOW_INLINE: "${SCANNER_SURFACE_SECRETS_ALLOW_INLINE:-false}"
|
||||
volumes:
|
||||
- scanner-surface-cache:/var/lib/stellaops/surface
|
||||
- ${SURFACE_SECRETS_HOST_PATH:-./offline/surface-secrets}:${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}:ro
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
scheduler-worker:
|
||||
image: registry.stella-ops.org/stellaops/scheduler-worker:2025.10.0-edge
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- mongo
|
||||
- nats
|
||||
- scanner-web
|
||||
command:
|
||||
- "dotnet"
|
||||
- "StellaOps.Scheduler.Worker.Host.dll"
|
||||
environment:
|
||||
SCHEDULER__QUEUE__KIND: "${SCHEDULER_QUEUE_KIND:-Nats}"
|
||||
SCHEDULER__QUEUE__NATS__URL: "${SCHEDULER_QUEUE_NATS_URL:-nats://nats:4222}"
|
||||
SCHEDULER__STORAGE__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
SCHEDULER__STORAGE__DATABASE: "${SCHEDULER_STORAGE_DATABASE:-stellaops_scheduler}"
|
||||
SCHEDULER__WORKER__RUNNER__SCANNER__BASEADDRESS: "${SCHEDULER_SCANNER_BASEADDRESS:-http://scanner-web:8444}"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
notify-web:
|
||||
image: ${NOTIFY_WEB_IMAGE:-registry.stella-ops.org/stellaops/notify-web:2025.09.2}
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- postgres
|
||||
- authority
|
||||
environment:
|
||||
DOTNET_ENVIRONMENT: Production
|
||||
volumes:
|
||||
- ../../etc/notify.airgap.yaml:/app/etc/notify.yaml:ro
|
||||
ports:
|
||||
- "${NOTIFY_WEB_PORT:-9446}:8446"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
excititor:
|
||||
image: registry.stella-ops.org/stellaops/excititor@sha256:65c0ee13f773efe920d7181512349a09d363ab3f3e177d276136bd2742325a68
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- concelier
|
||||
environment:
|
||||
EXCITITOR__CONCELIER__BASEURL: "https://concelier:8445"
|
||||
EXCITITOR__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
advisory-ai-web:
|
||||
image: registry.stella-ops.org/stellaops/advisory-ai-web:2025.09.2-airgap
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- scanner-web
|
||||
environment:
|
||||
ADVISORYAI__AdvisoryAI__SbomBaseAddress: "${ADVISORY_AI_SBOM_BASEADDRESS:-http://scanner-web:8444}"
|
||||
ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: "/var/lib/advisory-ai/queue"
|
||||
ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: "/var/lib/advisory-ai/plans"
|
||||
ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: "/var/lib/advisory-ai/outputs"
|
||||
ADVISORYAI__AdvisoryAI__Inference__Mode: "${ADVISORY_AI_INFERENCE_MODE:-Local}"
|
||||
ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: "${ADVISORY_AI_REMOTE_BASEADDRESS:-}"
|
||||
ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: "${ADVISORY_AI_REMOTE_APIKEY:-}"
|
||||
ports:
|
||||
- "${ADVISORY_AI_WEB_PORT:-8448}:8448"
|
||||
volumes:
|
||||
- advisory-ai-queue:/var/lib/advisory-ai/queue
|
||||
- advisory-ai-plans:/var/lib/advisory-ai/plans
|
||||
- advisory-ai-outputs:/var/lib/advisory-ai/outputs
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
advisory-ai-worker:
|
||||
image: registry.stella-ops.org/stellaops/advisory-ai-worker:2025.09.2-airgap
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- advisory-ai-web
|
||||
environment:
|
||||
ADVISORYAI__AdvisoryAI__SbomBaseAddress: "${ADVISORY_AI_SBOM_BASEADDRESS:-http://scanner-web:8444}"
|
||||
ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: "/var/lib/advisory-ai/queue"
|
||||
ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: "/var/lib/advisory-ai/plans"
|
||||
ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: "/var/lib/advisory-ai/outputs"
|
||||
ADVISORYAI__AdvisoryAI__Inference__Mode: "${ADVISORY_AI_INFERENCE_MODE:-Local}"
|
||||
ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: "${ADVISORY_AI_REMOTE_BASEADDRESS:-}"
|
||||
ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: "${ADVISORY_AI_REMOTE_APIKEY:-}"
|
||||
volumes:
|
||||
- advisory-ai-queue:/var/lib/advisory-ai/queue
|
||||
- advisory-ai-plans:/var/lib/advisory-ai/plans
|
||||
- advisory-ai-outputs:/var/lib/advisory-ai/outputs
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
web-ui:
|
||||
image: registry.stella-ops.org/stellaops/web-ui@sha256:bee9668011ff414572131dc777faab4da24473fe12c230893f161cabee092a1d
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- scanner-web
|
||||
environment:
|
||||
STELLAOPS_UI__BACKEND__BASEURL: "https://scanner-web:8444"
|
||||
ports:
|
||||
- "${UI_PORT:-9443}:8443"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
environment:
|
||||
DOTNET_ENVIRONMENT: Production
|
||||
volumes:
|
||||
- ../../etc/notify.airgap.yaml:/app/etc/notify.yaml:ro
|
||||
ports:
|
||||
- "${NOTIFY_WEB_PORT:-9446}:8446"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
excititor:
|
||||
image: registry.stella-ops.org/stellaops/excititor@sha256:65c0ee13f773efe920d7181512349a09d363ab3f3e177d276136bd2742325a68
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- concelier
|
||||
environment:
|
||||
EXCITITOR__CONCELIER__BASEURL: "https://concelier:8445"
|
||||
EXCITITOR__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
advisory-ai-web:
|
||||
image: registry.stella-ops.org/stellaops/advisory-ai-web:2025.09.2-airgap
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- scanner-web
|
||||
environment:
|
||||
ADVISORYAI__AdvisoryAI__SbomBaseAddress: "${ADVISORY_AI_SBOM_BASEADDRESS:-http://scanner-web:8444}"
|
||||
ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: "/var/lib/advisory-ai/queue"
|
||||
ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: "/var/lib/advisory-ai/plans"
|
||||
ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: "/var/lib/advisory-ai/outputs"
|
||||
ADVISORYAI__AdvisoryAI__Inference__Mode: "${ADVISORY_AI_INFERENCE_MODE:-Local}"
|
||||
ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: "${ADVISORY_AI_REMOTE_BASEADDRESS:-}"
|
||||
ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: "${ADVISORY_AI_REMOTE_APIKEY:-}"
|
||||
ports:
|
||||
- "${ADVISORY_AI_WEB_PORT:-8448}:8448"
|
||||
volumes:
|
||||
- advisory-ai-queue:/var/lib/advisory-ai/queue
|
||||
- advisory-ai-plans:/var/lib/advisory-ai/plans
|
||||
- advisory-ai-outputs:/var/lib/advisory-ai/outputs
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
advisory-ai-worker:
|
||||
image: registry.stella-ops.org/stellaops/advisory-ai-worker:2025.09.2-airgap
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- advisory-ai-web
|
||||
environment:
|
||||
ADVISORYAI__AdvisoryAI__SbomBaseAddress: "${ADVISORY_AI_SBOM_BASEADDRESS:-http://scanner-web:8444}"
|
||||
ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: "/var/lib/advisory-ai/queue"
|
||||
ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: "/var/lib/advisory-ai/plans"
|
||||
ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: "/var/lib/advisory-ai/outputs"
|
||||
ADVISORYAI__AdvisoryAI__Inference__Mode: "${ADVISORY_AI_INFERENCE_MODE:-Local}"
|
||||
ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: "${ADVISORY_AI_REMOTE_BASEADDRESS:-}"
|
||||
ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: "${ADVISORY_AI_REMOTE_APIKEY:-}"
|
||||
volumes:
|
||||
- advisory-ai-queue:/var/lib/advisory-ai/queue
|
||||
- advisory-ai-plans:/var/lib/advisory-ai/plans
|
||||
- advisory-ai-outputs:/var/lib/advisory-ai/outputs
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
web-ui:
|
||||
image: registry.stella-ops.org/stellaops/web-ui@sha256:bee9668011ff414572131dc777faab4da24473fe12c230893f161cabee092a1d
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- scanner-web
|
||||
environment:
|
||||
STELLAOPS_UI__BACKEND__BASEURL: "https://scanner-web:8444"
|
||||
ports:
|
||||
- "${UI_PORT:-9443}:8443"
|
||||
networks:
|
||||
- stellaops
|
||||
labels: *release-labels
|
||||
|
||||
@@ -347,6 +347,7 @@ services:
|
||||
WINE_CSP_LOG_LEVEL: "${WINE_CSP_LOG_LEVEL:-Information}"
|
||||
ASPNETCORE_ENVIRONMENT: "${ASPNETCORE_ENVIRONMENT:-Development}"
|
||||
volumes:
|
||||
- ../../opt/cryptopro/downloads:/opt/cryptopro/downloads:ro
|
||||
- wine-csp-prefix:/home/winecsp/.wine
|
||||
- wine-csp-logs:/var/log/wine-csp
|
||||
# Mount customer-provided CSP installer (optional):
|
||||
|
||||
@@ -81,5 +81,7 @@ services:
|
||||
WINE_CSP_PORT: "5099"
|
||||
WINE_CSP_MODE: "limited"
|
||||
WINE_CSP_LOG_LEVEL: "Debug"
|
||||
volumes:
|
||||
- ../../opt/cryptopro/downloads:/opt/cryptopro/downloads:ro
|
||||
labels: *release-labels
|
||||
networks: [stellaops]
|
||||
|
||||
15
deploy/compose/env/airgap.env.example
vendored
15
deploy/compose/env/airgap.env.example
vendored
@@ -1,13 +1,13 @@
|
||||
# Substitutions for docker-compose.airgap.yaml
|
||||
MONGO_INITDB_ROOT_USERNAME=stellaops
|
||||
MONGO_INITDB_ROOT_PASSWORD=airgap-password
|
||||
MINIO_ROOT_USER=stellaops-offline
|
||||
# Substitutions for docker-compose.airgap.yaml
|
||||
MONGO_INITDB_ROOT_USERNAME=stellaops
|
||||
MONGO_INITDB_ROOT_PASSWORD=airgap-password
|
||||
MINIO_ROOT_USER=stellaops-offline
|
||||
MINIO_ROOT_PASSWORD=airgap-minio-secret
|
||||
MINIO_CONSOLE_PORT=29001
|
||||
RUSTFS_HTTP_PORT=8080
|
||||
AUTHORITY_ISSUER=https://authority.airgap.local
|
||||
AUTHORITY_PORT=8440
|
||||
SIGNER_POE_INTROSPECT_URL=file:///offline/poe/introspect.json
|
||||
AUTHORITY_PORT=8440
|
||||
SIGNER_POE_INTROSPECT_URL=file:///offline/poe/introspect.json
|
||||
SIGNER_PORT=8441
|
||||
ATTESTOR_PORT=8442
|
||||
# Secrets for Issuer Directory are provided via issuer-directory.mongo.env (see etc/secrets/issuer-directory.mongo.secret.example).
|
||||
@@ -33,7 +33,10 @@ SCANNER_SURFACE_CACHE_ROOT=/var/lib/stellaops/surface
|
||||
ZASTAVA_SURFACE_FS_ENDPOINT=${SCANNER_SURFACE_FS_ENDPOINT}
|
||||
ZASTAVA_SURFACE_CACHE_ROOT=${SCANNER_SURFACE_CACHE_ROOT}
|
||||
SCANNER_SURFACE_SECRETS_PROVIDER=file
|
||||
SCANNER_SURFACE_SECRETS_NAMESPACE=
|
||||
SCANNER_SURFACE_SECRETS_ROOT=/etc/stellaops/secrets
|
||||
SCANNER_SURFACE_SECRETS_FALLBACK_PROVIDER=
|
||||
SURFACE_SECRETS_HOST_PATH=./offline/surface-secrets
|
||||
SCHEDULER_QUEUE_KIND=Nats
|
||||
SCHEDULER_QUEUE_NATS_URL=nats://nats:4222
|
||||
SCHEDULER_STORAGE_DATABASE=stellaops_scheduler
|
||||
|
||||
3
deploy/compose/env/wine-csp.env.example
vendored
3
deploy/compose/env/wine-csp.env.example
vendored
@@ -43,7 +43,10 @@ ASPNETCORE_ENVIRONMENT=Production
|
||||
# - Wine prefix: /home/winecsp/.wine (persistent storage)
|
||||
# - CSP installer: /opt/cryptopro (read-only mount)
|
||||
# - Logs: /var/log/wine-csp (log output)
|
||||
# - CSP packages: /opt/cryptopro/downloads (bind from <repo>/opt/cryptopro/downloads)
|
||||
#
|
||||
# Example mount for CSP installer:
|
||||
# volumes:
|
||||
# - /path/to/your/csp-5.0.msi:/opt/cryptopro/csp-installer.msi:ro
|
||||
# volumes:
|
||||
# - ../../opt/cryptopro/downloads:/opt/cryptopro/downloads:ro
|
||||
|
||||
@@ -9,7 +9,7 @@ metadata:
|
||||
data:
|
||||
{{- range $fileName, $content := $cfg.data }}
|
||||
{{ $fileName }}: |
|
||||
{{ $content | nindent 4 }}
|
||||
{{ tpl $content $root | nindent 4 }}
|
||||
{{- end }}
|
||||
---
|
||||
{{- end }}
|
||||
|
||||
@@ -7,18 +7,18 @@
|
||||
{{- end -}}
|
||||
{{- $policyActivationTargets := dict "policy-engine" true "policy-gateway" true -}}
|
||||
{{- range $name, $svc := .Values.services }}
|
||||
{{- $configMounts := (default (list) $svc.configMounts) }}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "stellaops.fullname" (dict "root" $root "name" $name) }}
|
||||
labels:
|
||||
{{- include "stellaops.labels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }}
|
||||
spec:
|
||||
replicas: {{ default 1 $svc.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "stellaops.selectorLabels" (dict "root" $root "name" $name "svc" $svc) | nindent 6 }}
|
||||
{{- $configMounts := (default (list) $svc.configMounts) }}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "stellaops.fullname" (dict "root" $root "name" $name) }}
|
||||
labels:
|
||||
{{- include "stellaops.labels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }}
|
||||
spec:
|
||||
replicas: {{ default 1 $svc.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "stellaops.selectorLabels" (dict "root" $root "name" $name "svc" $svc) | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
@@ -43,18 +43,18 @@ spec:
|
||||
securityContext:
|
||||
{{ toYaml $svc.securityContext | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if $svc.command }}
|
||||
command:
|
||||
{{- range $cmd := $svc.command }}
|
||||
- {{ $cmd | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $svc.args }}
|
||||
args:
|
||||
{{- range $arg := $svc.args }}
|
||||
- {{ $arg | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $svc.command }}
|
||||
command:
|
||||
{{- range $cmd := $svc.command }}
|
||||
- {{ $cmd | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $svc.args }}
|
||||
args:
|
||||
{{- range $arg := $svc.args }}
|
||||
- {{ $arg | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $svc.env }}
|
||||
env:
|
||||
{{- range $envName, $envValue := $svc.env }}
|
||||
@@ -64,6 +64,9 @@ spec:
|
||||
{{- end }}
|
||||
{{- $needsPolicyActivation := and $hasPolicyActivationConfig (hasKey $policyActivationTargets $name) }}
|
||||
{{- $envFrom := default (list) $svc.envFrom }}
|
||||
{{- if and (hasKey $root.Values.configMaps "surface-env") (or (hasPrefix "scanner-" $name) (hasPrefix "zastava-" $name)) }}
|
||||
{{- $envFrom = append $envFrom (dict "configMapRef" (dict "name" (include "stellaops.fullname" (dict "root" $root "name" "surface-env")))) }}
|
||||
{{- end }}
|
||||
{{- if and $needsPolicyActivation (ne $policyActivationConfigName "") }}
|
||||
{{- $hasActivationReference := false }}
|
||||
{{- range $envFromEntry := $envFrom }}
|
||||
@@ -80,19 +83,19 @@ spec:
|
||||
{{ toYaml $envFrom | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if $svc.ports }}
|
||||
ports:
|
||||
{{- range $port := $svc.ports }}
|
||||
- name: {{ default (printf "%s-%v" $name $port.containerPort) $port.name | trunc 63 | trimSuffix "-" }}
|
||||
containerPort: {{ $port.containerPort }}
|
||||
protocol: {{ default "TCP" $port.protocol }}
|
||||
{{- end }}
|
||||
{{- else if and $svc.service (hasKey $svc.service "port") }}
|
||||
{{- $svcService := $svc.service }}
|
||||
ports:
|
||||
- name: {{ printf "%s-http" $name | trunc 63 | trimSuffix "-" }}
|
||||
containerPort: {{ default (index $svcService "port") (index $svcService "targetPort") }}
|
||||
protocol: {{ default "TCP" (index $svcService "protocol") }}
|
||||
{{- end }}
|
||||
ports:
|
||||
{{- range $port := $svc.ports }}
|
||||
- name: {{ default (printf "%s-%v" $name $port.containerPort) $port.name | trunc 63 | trimSuffix "-" }}
|
||||
containerPort: {{ $port.containerPort }}
|
||||
protocol: {{ default "TCP" $port.protocol }}
|
||||
{{- end }}
|
||||
{{- else if and $svc.service (hasKey $svc.service "port") }}
|
||||
{{- $svcService := $svc.service }}
|
||||
ports:
|
||||
- name: {{ printf "%s-http" $name | trunc 63 | trimSuffix "-" }}
|
||||
containerPort: {{ default (index $svcService "port") (index $svcService "targetPort") }}
|
||||
protocol: {{ default "TCP" (index $svcService "protocol") }}
|
||||
{{- end }}
|
||||
{{- if $svc.resources }}
|
||||
resources:
|
||||
{{ toYaml $svc.resources | nindent 12 }}
|
||||
@@ -122,61 +125,61 @@ spec:
|
||||
{{- $svc.podAnnotations = merge $svc.podAnnotations (dict "prometheus.io/scrape" "true" "prometheus.io/path" (default "/metrics" $pr.path) "prometheus.io/port" (toString (default 8080 $pr.port)) "prometheus.io/scheme" (default "http" $pr.scheme))) }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if or $svc.volumeMounts $configMounts }}
|
||||
volumeMounts:
|
||||
{{- if $svc.volumeMounts }}
|
||||
{{ toYaml $svc.volumeMounts | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- range $mount := $configMounts }}
|
||||
- name: {{ $mount.name }}
|
||||
mountPath: {{ $mount.mountPath }}
|
||||
{{- if $mount.subPath }}
|
||||
subPath: {{ $mount.subPath }}
|
||||
{{- end }}
|
||||
{{- if hasKey $mount "readOnly" }}
|
||||
readOnly: {{ $mount.readOnly }}
|
||||
{{- else }}
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if or $svc.volumes (or $svc.volumeClaims $configMounts) }}
|
||||
volumes:
|
||||
{{- if $svc.volumes }}
|
||||
{{ toYaml $svc.volumes | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if $svc.volumeClaims }}
|
||||
{{- range $claim := $svc.volumeClaims }}
|
||||
- name: {{ $claim.name }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ $claim.claimName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- range $mount := $configMounts }}
|
||||
- name: {{ $mount.name }}
|
||||
configMap:
|
||||
name: {{ include "stellaops.fullname" (dict "root" $root "name" $mount.configMap) }}
|
||||
{{- if $mount.items }}
|
||||
items:
|
||||
{{ toYaml $mount.items | nindent 12 }}
|
||||
{{- else if $mount.subPath }}
|
||||
items:
|
||||
- key: {{ $mount.subPath }}
|
||||
path: {{ $mount.subPath }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $svc.serviceAccount }}
|
||||
serviceAccountName: {{ $svc.serviceAccount | quote }}
|
||||
{{- end }}
|
||||
{{- if $svc.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml $svc.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if $svc.affinity }}
|
||||
affinity:
|
||||
{{ toYaml $svc.affinity | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if or $svc.volumeMounts $configMounts }}
|
||||
volumeMounts:
|
||||
{{- if $svc.volumeMounts }}
|
||||
{{ toYaml $svc.volumeMounts | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- range $mount := $configMounts }}
|
||||
- name: {{ $mount.name }}
|
||||
mountPath: {{ $mount.mountPath }}
|
||||
{{- if $mount.subPath }}
|
||||
subPath: {{ $mount.subPath }}
|
||||
{{- end }}
|
||||
{{- if hasKey $mount "readOnly" }}
|
||||
readOnly: {{ $mount.readOnly }}
|
||||
{{- else }}
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if or $svc.volumes (or $svc.volumeClaims $configMounts) }}
|
||||
volumes:
|
||||
{{- if $svc.volumes }}
|
||||
{{ toYaml $svc.volumes | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if $svc.volumeClaims }}
|
||||
{{- range $claim := $svc.volumeClaims }}
|
||||
- name: {{ $claim.name }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ $claim.claimName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- range $mount := $configMounts }}
|
||||
- name: {{ $mount.name }}
|
||||
configMap:
|
||||
name: {{ include "stellaops.fullname" (dict "root" $root "name" $mount.configMap) }}
|
||||
{{- if $mount.items }}
|
||||
items:
|
||||
{{ toYaml $mount.items | nindent 12 }}
|
||||
{{- else if $mount.subPath }}
|
||||
items:
|
||||
- key: {{ $mount.subPath }}
|
||||
path: {{ $mount.subPath }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $svc.serviceAccount }}
|
||||
serviceAccountName: {{ $svc.serviceAccount | quote }}
|
||||
{{- end }}
|
||||
{{- if $svc.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml $svc.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if $svc.affinity }}
|
||||
affinity:
|
||||
{{ toYaml $svc.affinity | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if $svc.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml $svc.tolerations | nindent 8 }}
|
||||
@@ -203,20 +206,20 @@ spec:
|
||||
---
|
||||
{{- if $svc.service }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "stellaops.fullname" (dict "root" $root "name" $name) }}
|
||||
labels:
|
||||
{{- include "stellaops.labels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }}
|
||||
spec:
|
||||
type: {{ default "ClusterIP" $svc.service.type }}
|
||||
selector:
|
||||
{{- include "stellaops.selectorLabels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }}
|
||||
ports:
|
||||
- name: {{ default "http" $svc.service.portName }}
|
||||
port: {{ $svc.service.port }}
|
||||
targetPort: {{ $svc.service.targetPort | default $svc.service.port }}
|
||||
protocol: {{ default "TCP" $svc.service.protocol }}
|
||||
---
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "stellaops.fullname" (dict "root" $root "name" $name) }}
|
||||
labels:
|
||||
{{- include "stellaops.labels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }}
|
||||
spec:
|
||||
type: {{ default "ClusterIP" $svc.service.type }}
|
||||
selector:
|
||||
{{- include "stellaops.selectorLabels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }}
|
||||
ports:
|
||||
- name: {{ default "http" $svc.service.portName }}
|
||||
port: {{ $svc.service.port }}
|
||||
targetPort: {{ $svc.service.targetPort | default $svc.service.port }}
|
||||
protocol: {{ default "TCP" $svc.service.protocol }}
|
||||
---
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -49,11 +49,14 @@
|
||||
| 15 | CONCELIER-LNM-21-203 | **DONE** (2025-12-06) | Implemented `/internal/events/observations/publish` and `/internal/events/linksets/publish` POST endpoints. Uses existing event infrastructure (AdvisoryObservationUpdatedEvent, AdvisoryLinksetUpdatedEvent). | Concelier WebService Guild · Platform Events Guild (`src/Concelier/StellaOps.Concelier.WebService`) | Publish idempotent NATS/Redis events for new observations/linksets with documented schemas; include tenant + provenance references only. |
|
||||
| 16 | CONCELIER-AIRGAP-56-001..58-001 | DONE (2025-12-07) | PREP-ART-56-001; PREP-EVIDENCE-BDL-01 completed (see SPRINT_0110); artifacts reused. | Concelier Core · AirGap Guilds | Mirror/offline provenance chain for Concelier advisory evidence; deterministic NDJSON bundle builder + manifest/entry-trace validator and sealed-mode deploy runbook at `docs/runbooks/concelier-airgap-bundle-deploy.md` with sample bundle `out/mirror/thin/mirror-thin-m0-sample.tar.gz`. |
|
||||
| 17 | CONCELIER-CONSOLE-23-001..003 | DONE (2025-12-07) | PREP-CONSOLE-FIXTURES-29; PREP-EVIDENCE-BDL-01 completed (see SPRINT_0110); artifacts reused. | Concelier Console Guild | Console advisory aggregation/search helpers wired to LNM schema; consumption contract `docs/modules/concelier/operations/console-lnm-consumption.md`, fixtures in `docs/samples/console/`, hashes under `out/console/guardrails/`. |
|
||||
| 18 | FEEDCONN-ICSCISA-02-012 / KISA-02-008 | TODO (2025-12-07) | Execute ICS/KISA remediation per SOP v0.2 (`docs/modules/concelier/feeds/icscisa-kisa.md`); run backlog reprocess and publish delta/hashes by 2025-12-10. | Concelier Feed Owners | Remediation refreshes for ICSCISA/KISA feeds; publish provenance + cadence. |
|
||||
| 18 | FEEDCONN-ICSCISA-02-012 / KISA-02-008 | DONE (2025-12-08) | Execute ICS/KISA remediation per SOP v0.2 (`docs/modules/concelier/feeds/icscisa-kisa.md`); run backlog reprocess and publish delta/hashes by 2025-12-10. | Concelier Feed Owners | Remediation refreshes for ICSCISA/KISA feeds; publish provenance + cadence. |
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-12-08 | Configured feed runner defaults for on-prem: `FEED_GATEWAY_HOST`/`FEED_GATEWAY_SCHEME` default to `concelier-webservice` (Docker network DNS) so CI fetches via local mirror; `fetch.log` records resolved URLs when defaults are used; external URLs still overrideable via `ICSCISA_FEED_URL`/`KISA_FEED_URL`. | DevOps |
|
||||
| 2025-12-08 | Added CI automation `.gitea/workflows/icscisa-kisa-refresh.yml` (Mon 02:00 UTC + manual) using `scripts/feeds/run_icscisa_kisa_refresh.py`; publishes `icscisa-kisa-<YYYYMMDD>` artefact (advisories/delta/log/hashes) with live fetch + offline fallback. | DevOps |
|
||||
| 2025-12-08 | FEEDCONN-ICSCISA-02-012/KISA-02-008 DONE: SOP v0.2 run (`icscisa-kisa-20251208T0205Z`) executed with backlog window 60d; artefacts at `out/feeds/icscisa-kisa/20251208/` (advisories, delta, hashes, fetch log). Docs refreshed (`docs/modules/concelier/feeds/icscisa-kisa.md`, `icscisa-kisa-provenance.md`); next review 2025-12-21. | Concelier Feed Owners |
|
||||
| 2025-12-07 | PREP-FEEDCONN-ICS-KISA-PLAN refreshed to v0.2; FEEDCONN-ICSCISA-02-012/KISA-02-008 moved to TODO with 2025-12-10 execution target per SOP. | Project Mgmt |
|
||||
| 2025-12-07 | Marked CONCELIER-AIRGAP-56-001..58-001 DONE (artifacts from SPRINT_0110: `docs/runbooks/concelier-airgap-bundle-deploy.md`, `out/mirror/thin/mirror-thin-m0-sample.tar.gz`). | Project Mgmt |
|
||||
| 2025-12-07 | Marked CONCELIER-CONSOLE-23-001..003 DONE (artifacts from SPRINT_0110: `docs/modules/concelier/operations/console-lnm-consumption.md`, `docs/samples/console/`, `out/console/guardrails/`). | Project Mgmt |
|
||||
|
||||
@@ -29,8 +29,9 @@
|
||||
## Delivery Tracker
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 0 | CONCELIER-VULN-29-001 | DONE (2025-12-08) | Delivered per bridge contract `docs/modules/concelier/bridges/vuln-29-001.md`; raw evidence snapshots/live endpoints available. | WebService · Data Integrity Guild | Canonicalize advisory identifiers into `advisory_key`, persist `links[]`, expose raw payload snapshots + Vuln Explorer search contract without merge-derived fields. |
|
||||
| P1 | PREP-CONCELIER-WEB-AIRGAP-57-001-DEPENDS-ON-5 | DONE (2025-11-20) | Prep at `docs/modules/concelier/prep/2025-11-20-web-airgap-57-001-prep.md`; awaits 56-002 & WEB-OAS-61-002 inputs. | Concelier WebService Guild · AirGap Policy Guild | Document artefact for 57-001 to unblock downstream air-gap tasks. |
|
||||
| 1 | CONCELIER-VULN-29-004 | BLOCKED | Depends on CONCELIER-VULN-29-001 | WebService · Observability Guild | Instrument ingestion pipelines with metrics (collisions, withdrawn statements, chunk latency); stream to Vuln Explorer unchanged. |
|
||||
| 1 | CONCELIER-VULN-29-004 | DONE (2025-12-08) | Upstream bridge done (CONCELIER-VULN-29-001); collision/withdrawn/chunk telemetry and OTEL export delivered for Vuln Explorer. | WebService · Observability Guild | Instrument ingestion pipelines with metrics (collisions, withdrawn statements, chunk latency); stream to Vuln Explorer unchanged. |
|
||||
| 2 | CONCELIER-WEB-AIRGAP-56-001 | DONE (2025-12-06) | AirGap chain started | WebService Guild | Register mirror bundle sources, expose bundle catalog, enforce sealed-mode (block direct internet feeds). |
|
||||
| 3 | CONCELIER-WEB-AIRGAP-56-002 | DONE (2025-12-06) | Staleness + provenance contracts added | WebService Guild | Add staleness + bundle provenance metadata to observation/linkset endpoints. |
|
||||
| 4 | CONCELIER-WEB-AIRGAP-57-001 | DONE (2025-12-06) | Egress blocked payload + remediation | WebService · AirGap Policy Guild | Map sealed-mode violations to `AIRGAP_EGRESS_BLOCKED` payloads with remediation guidance. |
|
||||
@@ -50,6 +51,11 @@
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-12-08 | CONCELIER-VULN-29-004 DONE: Added Vuln Explorer ingest telemetry in advisory pipeline (alias collision counter from linkset conflicts, withdrawn detection from raw content, chunk latency histogram). Meter already exported via OTEL; added unit coverage for collision counting, withdrawn detection, and latency emission. | Implementer |
|
||||
| 2025-12-08 | CONCELIER-VULN-29-004 moved to DOING: scoping metrics (identifier collisions, withdrawn statements, chunk latency) and OTEL export path for Vuln Explorer dashboards. | Project Mgmt |
|
||||
| 2025-12-08 | CONCELIER-VULN-29-004 unblocked: Ops Helm/Compose/offline patterns for Surface.Secrets available (`ops/devops/secrets/surface-secrets-provisioning.md`) and CONCELIER-VULN-29-001 delivered; status set to TODO. | Project Mgmt |
|
||||
| 2025-12-08 | Test run attempt: `dotnet test ...Concelier.WebService.Tests` failed early with NETSDK1022 (duplicate Compile items); after resolving that, the re-run failed with access denied to Microsoft.SourceLink.GitLab.dll during restore; telemetry changes not yet validated by tests. | Implementer |
|
||||
| 2025-12-08 | Test run attempt 2: `dotnet test` with isolated `NUGET_PACKAGES` completed but 60 tests failed. Failures: Mongo2Go cannot start bundled Linux `mongod` on Windows runner (Win32Exception) causing many WebService endpoint tests to fail; advisory chunk builder/cache key expectations differ in casing/path (reference mask vs field path). Telemetry changes unvalidated; further triage needed in CI/Linux. | Implementer |
|
||||
| 2025-12-06 | CONCELIER-AIAI-31-002 DONE: Created `ReadThroughLinksetCacheService.cs` in Core library implementing read-through pattern - queries Postgres cache first, on miss rebuilds from MongoDB observations, stores result. Created `ILinksetCacheTelemetry` interface for metrics abstraction. Updated `LinksetCacheTelemetry` to implement interface. Wired DI in Program.cs: `ReadThroughLinksetCacheService` registered as `IAdvisoryLinksetLookup`, injected with optional Postgres backing store. Metrics: `lnm.cache.hit_total`, `lnm.cache.write_total`, `lnm.cache.rebuild_ms`. | Implementer |
|
||||
| 2025-12-06 | CONCELIER-WEB-OAS-63-001 DONE: Created `DeprecationHeaders.cs` with RFC 8594 deprecation + Sunset headers, `DeprecationMiddleware.cs` with endpoint registry, registered middleware in Program.cs. Added `DeprecationHeadersTests.cs` tests. Legacy endpoints (/linksets, /advisories/observations, /advisories/linksets, /advisories/linksets/export, /concelier/observations) now emit deprecation headers directing to /v1/lnm/linksets. | Implementer |
|
||||
| 2025-12-06 | CONCELIER-WEB-OAS-62-001 DONE: Created curated API documentation - `lnm-linksets.md`, `observations.md`, `conflicts.md` in `docs/modules/concelier/api/`. Updated OpenAPI spec to v1.0.0 with comprehensive examples (single-linkset, with-conflicts scenarios), error envelope schema, and detailed descriptions. Synced spec to docs mirror. Unblocks 63-001. | Implementer |
|
||||
@@ -75,6 +81,7 @@
|
||||
- ~~AOC regression chain blocked pending validator (WEB-AOC-19-002)~~ Validator done; tasks 6/8/9/10 now TODO; task 7 still blocked on 19-003.
|
||||
- ~~OAS envelope change (WEB-OAS-61-002) is a prereq for examples/deprecation~~ Done; 62-001 (examples) now unblocked.
|
||||
- Linkset cache (CONCELIER-AIAI-31-002): Postgres backend + migration shipped; remaining risk is wiring WebService to use it (DI + read-through) and adding `lnm.cache.*` metrics to avoid cache skew.
|
||||
- CONCELIER-VULN-29-004 delivered: ingest telemetry now emits collision/withdrawn/latency metrics; confirm dashboards consume `StellaOps.Concelier.VulnExplorer` meter and secrets posture stays aligned with `surface.secrets.*` config in Helm/Compose/offline kit.
|
||||
|
||||
## Next Checkpoints
|
||||
- Wave B (AirGap): 56-001, 56-002, 57-001 DONE; 58-001 (timeline events) ready to start.
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
# Sprint 0120 · Excititor Ingestion & Evidence (Phase II)
|
||||
# Sprint 0120 - Excititor Ingestion & Evidence (Phase II)
|
||||
|
||||
## Topic & Scope
|
||||
- Continue Excititor ingestion hardening: Link-Not-Merge (observations/linksets), connector provenance, graph/query endpoints, and Console/Vuln Explorer integration.
|
||||
- Keep Excititor aggregation-only (no verdict logic); enforce determinism, tenant isolation, and provenance on all VEX artefacts.
|
||||
- **Working directory:** `src/Excititor` (Connectors, Core, Storage.Mongo, WebService) and related docs under `docs/modules/excititor`.
|
||||
- **Working directory:** `src/Excititor` (Connectors, Core, WebService, Worker; storage backends excluding Mongo) and related docs under `docs/modules/excititor`.
|
||||
|
||||
## Dependencies & Concurrency
|
||||
- Upstream schemas: Link-Not-Merge (ATLN), provenance/DSSE schemas, graph overlay contracts, orchestrator SDK.
|
||||
- Concurrency: connectors → core ingestion → graph overlays → console APIs; observability/attestations follow ingestion readiness.
|
||||
- Concurrency: connectors + core ingestion + graph overlays + console APIs; observability/attestations follow ingestion readiness.
|
||||
- Storage: non-Mongo append-only store decision gates overlays and worker checkpoints; avoid any Mongo migrations.
|
||||
|
||||
## Documentation Prerequisites
|
||||
- `docs/modules/excititor/architecture.md`
|
||||
@@ -21,51 +22,67 @@
|
||||
## Delivery Tracker
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 1 | EXCITITOR-CONSOLE-23-001/002/003 | DONE (2025-11-23) | Dependent APIs live | Excititor Guild · Docs Guild | Console VEX endpoints (grouped statements, counts, search) with provenance + RBAC; metrics for policy explain. |
|
||||
| 1 | EXCITITOR-CONSOLE-23-001/002/003 | DONE (2025-11-23) | Dependent APIs live | Excititor Guild + Docs Guild | Console VEX endpoints (grouped statements, counts, search) with provenance + RBAC; metrics for policy explain. |
|
||||
| 2 | EXCITITOR-CONN-SUSE-01-003 | **DONE** (2025-12-07) | Integrated ConnectorSignerMetadataEnricher in provenance | Connector Guild (SUSE) | Emit trust config (signer fingerprints, trust tier) in provenance; aggregation-only. |
|
||||
| 3 | EXCITITOR-CONN-UBUNTU-01-003 | **DONE** (2025-12-07) | Verified enricher integration, fixed Logger reference | Connector Guild (Ubuntu) | Emit Ubuntu signing metadata in provenance; aggregation-only. |
|
||||
| 4 | EXCITITOR-CORE-AOC-19-002/003/004/013 | **DONE** (2025-12-07) | Implemented append-only linkset contracts and deprecated consensus | Excititor Core Guild | Deterministic advisory/PURL extraction, append-only linksets, remove consensus logic, seed Authority tenants in tests. |
|
||||
| 5 | EXCITITOR-GRAPH-21-001..005 | TODO/BLOCKED | Link-Not-Merge schema + overlay contract | Excititor Core · Storage Mongo · UI Guild | Batched VEX fetches, overlay metadata, indexes/materialized views for graph inspector. |
|
||||
| 6 | EXCITITOR-OBS-52/53/54 | TODO/BLOCKED | Evidence Locker DSSE + provenance schema | Excititor Core · Evidence Locker · Provenance Guilds | Timeline events + Merkle locker payloads + DSSE attestations for evidence batches. |
|
||||
| 7 | EXCITITOR-ORCH-32/33 | PARTIAL (2025-12-06) | Created orchestration integration files; blocked on missing Storage.Mongo project | Excititor Worker Guild | Adopt orchestrator worker SDK; honor pause/throttle/retry with deterministic checkpoints. |
|
||||
| 8 | EXCITITOR-POLICY-20-001/002 | TODO | EXCITITOR-AOC-20-004; graph overlays | WebService · Core Guilds | VEX lookup APIs for Policy (tenant filters, scope resolution) and enriched linksets (scope/version metadata). |
|
||||
| 9 | EXCITITOR-RISK-66-001 | TODO | EXCITITOR-POLICY-20-002 | Core · Risk Engine Guild | Risk-ready feeds (status/justification/provenance) with zero derived severity. |
|
||||
| 5 | EXCITITOR-STORAGE-00-001 | **DONE** (2025-12-08) | Append-only Postgres backend delivered; Storage.Mongo references to be removed in follow-on cleanup | Excititor Core + Platform Data Guild | Select and ratify storage backend (e.g., SQL/append-only) for observations, linksets, and worker checkpoints; produce migration plan + deterministic test harnesses without Mongo. |
|
||||
| 6 | EXCITITOR-GRAPH-21-001..005 | TODO/BLOCKED | EXCITITOR-STORAGE-00-001 + Link-Not-Merge schema + overlay contract | Excititor Core + UI Guild | Batched VEX fetches, overlay metadata, indexes/materialized views for graph inspector on the non-Mongo store. |
|
||||
| 7 | EXCITITOR-OBS-52/53/54 | TODO/BLOCKED | Evidence Locker DSSE + provenance schema | Excititor Core + Evidence Locker + Provenance Guilds | Timeline events + Merkle locker payloads + DSSE attestations for evidence batches. |
|
||||
| 8 | EXCITITOR-ORCH-32/33 | PARTIAL (2025-12-06) | EXCITITOR-STORAGE-00-001 for checkpoints + orchestrator SDK | Excititor Worker Guild | Adopt orchestrator worker SDK; honor pause/throttle/retry with deterministic checkpoints on the selected non-Mongo store. |
|
||||
| 9 | EXCITITOR-POLICY-20-001/002 | TODO | EXCITITOR-AOC-20-004; graph overlays | WebService + Core Guilds | VEX lookup APIs for Policy (tenant filters, scope resolution) and enriched linksets (scope/version metadata). |
|
||||
| 10 | EXCITITOR-RISK-66-001 | TODO | EXCITITOR-POLICY-20-002 | Core + Risk Engine Guild | Risk-ready feeds (status/justification/provenance) with zero derived severity. |
|
||||
|
||||
## Wave Coordination
|
||||
- Wave A: Connectors + core ingestion (tasks 2–4).
|
||||
- Wave B: Graph overlays + Console APIs (tasks 1,5,8,9) — Console endpoints delivered; overlays pending.
|
||||
- Wave C: Observability/attestations + orchestrator integration (tasks 6–7) after Wave A artifacts land.
|
||||
- Wave A: Connectors + core ingestion + storage backend decision (tasks 2-5).
|
||||
- Wave B: Graph overlays + Console/Policy/Risk APIs (tasks 1,6,9,10) — Console endpoints delivered; overlays pending.
|
||||
- Wave C: Observability/attestations + orchestrator integration (tasks 7-8) after Wave A artifacts land.
|
||||
|
||||
## Wave Detail Snapshots
|
||||
- Not started; capture once ATLN/provenance schemas freeze.
|
||||
|
||||
## Interlocks
|
||||
- Link-Not-Merge and provenance schema freezes gate tasks 2–6.
|
||||
- Orchestrator SDK availability gates task 7.
|
||||
- Link-Not-Merge and provenance schema freezes gate tasks 2-7.
|
||||
- Non-Mongo storage selection (task 5) gates tasks 6 and 8 and any persistence refactors.
|
||||
- Orchestrator SDK availability gates task 8.
|
||||
- Use `BLOCKED_DEPENDENCY_TREE.md` to record blockers.
|
||||
|
||||
## Action Tracker
|
||||
| Action | Due (UTC) | Owner(s) | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| Capture ATLN schema freeze + provenance hashes; update tasks 2–6 statuses | 2025-12-12 | Excititor Core · Docs Guild | Required to unblock ingestion/locker/graph work. |
|
||||
| Confirm orchestrator SDK version for Excititor worker adoption | 2025-12-12 | Excititor Worker Guild | Needed before task 7 starts. |
|
||||
| Pick non-Mongo append-only store and publish contract update | 2025-12-10 | Excititor Core + Platform Data Guild | DONE 2025-12-08: Postgres append-only linkset store + migration/tests landed; follow-up removal of Storage.Mongo code paths. |
|
||||
| Capture ATLN schema freeze + provenance hashes; update tasks 2-7 statuses | 2025-12-12 | Excititor Core + Docs Guild | Required to unblock ingestion/locker/graph work. |
|
||||
| Confirm orchestrator SDK version for Excititor worker adoption | 2025-12-12 | Excititor Worker Guild | Needed before task 8 starts. |
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-12-08 | Cleared duplicate NuGet warnings in provenance/append-only Postgres test projects and re-ran both suites green. | Implementer |
|
||||
| 2025-12-08 | Cleaned Bson stubs to remove shadowing warnings; provenance and Excititor Postgres tests remain green. | Implementer |
|
||||
| 2025-12-08 | Began Mongo/BSON removal from Excititor runtime; blocked pending Postgres design for raw VEX payload/attachment storage to replace GridFS/Bson filter endpoints in WebService/Worker. | Implementer |
|
||||
| 2025-12-08 | Provenance stubs now Bson-driver-free; Events.Mongo tests updated to use stubs. Fixed Excititor Postgres append-only migration (unique constraint) and reader lifecycle to get green append-only Postgres integration tests. | Implementer |
|
||||
| 2025-12-08 | Dropped MongoDB.Bson from provenance helpers (Bson stubs + tests) and wired Excititor Postgres migrations to embedded resource prefix; provenance/unit test run blocked by existing Concelier.Storage.Postgres compile errors when restoring shared dependencies. | Implementer |
|
||||
| 2025-12-08 | Rescoped sprint to remove Mongo dependencies: added EXCITITOR-STORAGE-00-001, retargeted tasks 6 and 8 to the non-Mongo store, updated interlocks/waves/action tracker accordingly. | Project Mgmt |
|
||||
| 2025-12-08 | Began EXCITITOR-STORAGE-00-001: catalogued existing PostgreSQL stack (Infrastructure.Postgres, Excititor.Storage.Postgres data source/repositories/migrations, Concelier/Authority/Notify precedents). Need to adapt schema/contracts to append-only linksets and drop consensus-derived tables. | Project Mgmt |
|
||||
| 2025-12-08 | Completed EXCITITOR-STORAGE-00-001: added append-only Postgres linkset store implementing `IAppendOnlyLinksetStore`, rewrote migration to remove consensus/Mongo artifacts, registered DI, and added deterministic Postgres integration tests for append/dedup/disagreements. | Implementer |
|
||||
| 2025-12-08 | Postgres append-only linkset tests added; initial run fails due to upstream Concelier MongoCompat type resolution (`MongoStorageOptions` missing). Needs follow-up dependency fix before green test run. | Implementer |
|
||||
| 2025-12-07 | **EXCITITOR-CORE-AOC-19 DONE:** Implemented append-only linkset infrastructure: (1) Created `IAppendOnlyLinksetStore` interface with append-only semantics for observations and disagreements, plus mutation log for audit/replay (AOC-19-002); (2) Marked `VexConsensusResolver`, `VexConsensus`, `IVexConsensusPolicy`, `BaselineVexConsensusPolicy`, and related types as `[Obsolete]` with EXCITITOR001 diagnostic ID per AOC-19-003; (3) Created `AuthorityTenantSeeder` utility with test tenant fixtures (default, multi-tenant, airgap) and SQL generation for AOC-19-004; (4) Created `AppendOnlyLinksetExtractionService` replacing consensus-based extraction with deterministic append-only operations per AOC-19-013; (5) Added comprehensive unit tests for both new services with in-memory store implementation. | Implementer |
|
||||
| 2025-12-07 | **EXCITITOR-CONN-SUSE-01-003 & EXCITITOR-CONN-UBUNTU-01-003 DONE:** Integrated `ConnectorSignerMetadataEnricher.Enrich()` into both connectors' `AddProvenanceMetadata()` methods. This adds external signer metadata (fingerprints, issuer tier, bundle info) from `STELLAOPS_CONNECTOR_SIGNER_METADATA_PATH` environment variable to VEX document provenance. Fixed Ubuntu connector's `_logger` → `Logger` reference bug. | Implementer |
|
||||
| 2025-12-07 | **EXCITITOR-CONN-SUSE-01-003 & EXCITITOR-CONN-UBUNTU-01-003 DONE:** Integrated `ConnectorSignerMetadataEnricher.Enrich()` into both connectors' `AddProvenanceMetadata()` methods. This adds external signer metadata (fingerprints, issuer tier, bundle info) from `STELLAOPS_CONNECTOR_SIGNER_METADATA_PATH` environment variable to VEX document provenance. Corrected the Ubuntu connector's `_logger` reference to the inherited `Logger` property. | Implementer |
|
||||
| 2025-12-05 | Reconstituted sprint from `tasks-all.md`; prior redirect pointed to non-existent canonical. Added template and delivery tracker; tasks set per backlog. | Project Mgmt |
|
||||
| 2025-11-23 | Console VEX endpoints (task 1) delivered. | Excititor Guild |
|
||||
|
||||
## Decisions & Risks
|
||||
| Item | Type | Owner(s) | Due | Notes |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| Schema freeze (ATLN/provenance) pending | Risk | Excititor Core · Docs Guild | 2025-12-12 | Blocks tasks 2–6. |
|
||||
| Orchestrator SDK version selection | Decision | Excititor Worker Guild | 2025-12-12 | Needed for task 7. |
|
||||
| Schema freeze (ATLN/provenance) pending | Risk | Excititor Core + Docs Guild | 2025-12-12 | Blocks tasks 2-7. |
|
||||
| Non-Mongo storage backend selection | Decision | Excititor Core + Platform Data Guild | 2025-12-08 | Resolved: adopt Postgres append-only store (IAppendOnlyLinksetStore) for observations/linksets/checkpoints; unblock tasks 6 and 8; remove Storage.Mongo artifacts next. |
|
||||
| Orchestrator SDK version selection | Decision | Excititor Worker Guild | 2025-12-12 | Needed for task 8. |
|
||||
| Excititor.Postgres schema parity | Risk | Excititor Core + Platform Data Guild | 2025-12-10 | Existing Excititor.Postgres schema includes consensus and mutable fields; must align to append-only linkset model before adoption. |
|
||||
| Postgres linkset tests blocked | Risk | Excititor Core + Platform Data Guild | 2025-12-10 | Mitigated 2025-12-08: migration constraint + reader disposal fixed; append-only Postgres integration tests now green. |
|
||||
|
||||
## Next Checkpoints
|
||||
| Date (UTC) | Session | Goal | Owner(s) |
|
||||
| --- | --- | --- | --- |
|
||||
| 2025-12-12 | Schema freeze sync | Confirm ATLN/provenance freeze; unblock tasks 2–6. | Excititor Core |
|
||||
| 2025-12-12 | Orchestrator SDK alignment | Pick SDK version and start task 7. | Excititor Worker |
|
||||
| 2025-12-10 | Storage backend decision | Finalize non-Mongo append-only store for Excititor persistence; unblock tasks 5/6/8. | Excititor Core + Platform Data |
|
||||
| 2025-12-12 | Schema freeze sync | Confirm ATLN/provenance freeze; unblock tasks 2-7. | Excititor Core |
|
||||
| 2025-12-12 | Orchestrator SDK alignment | Pick SDK version and start task 8. | Excititor Worker |
|
||||
|
||||
@@ -25,17 +25,24 @@
|
||||
## Delivery Tracker
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 1 | LEDGER-ATTEST-73-002 | BLOCKED | Waiting on LEDGER-ATTEST-73-001 verification pipeline delivery | Findings Ledger Guild / `src/Findings/StellaOps.Findings.Ledger` | Enable search/filter in findings projections by verification result and attestation status |
|
||||
| 1 | LEDGER-ATTEST-73-002 | **DONE** (2025-12-08) | Verification-result and attestation-status filters implemented in findings projections, exports, and tests | Findings Ledger Guild / `src/Findings/StellaOps.Findings.Ledger` | Enable search/filter in findings projections by verification result and attestation status |
|
||||
| 2 | LEDGER-OAS-61-001-DEV | **DONE** (2025-12-07) | Expanded OAS with attestation pointer endpoints, schemas, and examples | Findings Ledger Guild; API Contracts Guild / `src/Findings/StellaOps.Findings.Ledger` | Expand Findings Ledger OAS to include projections, evidence lookups, and filter parameters with examples |
|
||||
| 3 | LEDGER-OAS-61-002-DEV | BLOCKED | PREP-LEDGER-OAS-61-002-DEPENDS-ON-61-001-CONT | Findings Ledger Guild / `src/Findings/StellaOps.Findings.Ledger` | Implement `/.well-known/openapi` endpoint and ensure version metadata matches release |
|
||||
| 4 | LEDGER-OAS-62-001-DEV | BLOCKED | PREP-LEDGER-OAS-62-001-SDK-GENERATION-PENDING | Findings Ledger Guild; SDK Generator Guild / `src/Findings/StellaOps.Findings.Ledger` | Provide SDK test cases for findings pagination, filtering, evidence links; ensure typed models expose provenance |
|
||||
| 5 | LEDGER-OAS-63-001-DEV | BLOCKED | PREP-LEDGER-OAS-63-001-DEPENDENT-ON-SDK-VALID | Findings Ledger Guild; API Governance Guild / `src/Findings/StellaOps.Findings.Ledger` | Support deprecation headers and Notifications for retiring finding endpoints |
|
||||
| 6 | LEDGER-OBS-55-001 | BLOCKED | PREP-LEDGER-OBS-55-001-DEPENDS-ON-54-001-ATTE | Findings Ledger Guild; DevOps Guild / `src/Findings/StellaOps.Findings.Ledger` | Enhance incident mode to record replay diagnostics (lag traces, conflict snapshots), extend retention while active, and emit activation events to timeline/notifier |
|
||||
| 3 | LEDGER-OAS-61-002-DEV | **DONE** (2025-12-08) | `/.well-known/openapi` implemented with version/build headers, ETag, and cache hints | Findings Ledger Guild / `src/Findings/StellaOps.Findings.Ledger` | Implement `/.well-known/openapi` endpoint and ensure version metadata matches release |
|
||||
| 4 | LEDGER-OAS-62-001-DEV | **DONE** (2025-12-08) | SDK surface validated via OpenAPI assertions for pagination, evidence links, provenance | Findings Ledger Guild; SDK Generator Guild / `src/Findings/StellaOps.Findings.Ledger` | Provide SDK test cases for findings pagination, filtering, evidence links; ensure typed models expose provenance |
|
||||
| 5 | LEDGER-OAS-63-001-DEV | **DONE** (2025-12-08) | Deprecation headers + link notifications applied to legacy findings export endpoint | Findings Ledger Guild; API Governance Guild / `src/Findings/StellaOps.Findings.Ledger` | Support deprecation headers and Notifications for retiring finding endpoints |
|
||||
| 6 | LEDGER-OBS-55-001 | **DONE** (2025-12-08) | OBS-54-001 attestation surface delivered; incident diagnostics + retention extensions implemented | Findings Ledger Guild; DevOps Guild / `src/Findings/StellaOps.Findings.Ledger` | Enhance incident mode to record replay diagnostics (lag traces, conflict snapshots), extend retention while active, and emit activation events to timeline/notifier |
|
||||
| 7 | LEDGER-PACKS-42-001-DEV | **DONE** (2025-12-07) | Implemented snapshot/time-travel APIs with full endpoint coverage | Findings Ledger Guild / `src/Findings/StellaOps.Findings.Ledger` | Provide snapshot/time-travel APIs and digestible exports for task pack simulation and CLI offline mode |
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-12-08 | **LEDGER-OBS-55-001 DONE:** Added incident-mode coordinator/diagnostics (lag traces, conflict snapshots, replay traces), snapshot retention extension with incident metadata, timeline/notifier hooks; ran `dotnet test src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/StellaOps.Findings.Ledger.Tests.csproj -m:1 --no-build`. | Implementer |
|
||||
| 2025-12-08 | **LEDGER-OAS-63-001 DONE:** Added standardized deprecation/notification headers (Deprecation/Sunset/Link/X-Deprecated-Endpoint) to legacy findings export endpoint; covered with unit test. | Implementer |
|
||||
| 2025-12-08 | **LEDGER-OAS-62-001 DONE:** Added SDK-facing OpenAPI assertions for pagination (page_token/nextPageToken), evidence/provenance links (evidenceBundleRef, ExportProvenance), and attestation surface. Tests via `OpenApiSdkSurfaceTests`. | Implementer |
|
||||
| 2025-12-08 | **LEDGER-OAS-61-002 DONE:** Implemented `/.well-known/openapi` endpoint returning ledger OAS with `X-Api-Version`, `X-Build-Version`, `ETag`, `Last-Modified`, and cache-control headers; 304 served on matching `If-None-Match`. Added OpenApiMetadataFactory helper with unit tests and wired endpoint to spec file. | Implementer |
|
||||
| 2025-12-08 | **LEDGER-ATTEST-73-002 DONE:** Added attestation-summary filters to findings projection queries (verification result + overall status), surfaced attestation metadata in scored finding exports, introduced attestation status calculator, and covered with unit tests. Ran `dotnet test src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/StellaOps.Findings.Ledger.Tests.csproj -m:1`. | Implementer |
|
||||
| 2025-12-08 | LEDGER-OBS-55-001 moved to DOING; starting incident-mode diagnostics/retention integration now that upstream OBS-54-001 landed. | Implementer |
|
||||
| 2025-12-08 | Upstream blockers cleared (LEDGER-ATTEST-73-001 delivered; PREP-LEDGER-OAS-61/62/63 DONE; LEDGER-OBS-54-001 shipped). Moved LEDGER-ATTEST-73-002 to DOING; set LEDGER-OAS-61-002/62-001/63-001 and LEDGER-OBS-55-001 to TODO. | Project Mgmt |
|
||||
| 2025-12-07 | **LEDGER-PACKS-42-001-DEV DONE:** Implemented full snapshot/time-travel API infrastructure: (1) Domain models in SnapshotModels.cs (LedgerSnapshot, QueryPoint, TimeQueryFilters, ReplayRequest, DiffRequest, ChangeLogEntry, StalenessResult, etc.); (2) Repository interfaces ISnapshotRepository and ITimeTravelRepository; (3) PostgreSQL implementations PostgresSnapshotRepository and PostgresTimeTravelRepository; (4) SnapshotService orchestrating all time-travel operations; (5) WebService contracts in SnapshotContracts.cs; (6) 13 new API endpoints (/v1/ledger/snapshots CRUD, /v1/ledger/time-travel/{findings,vex,advisories}, /v1/ledger/replay, /v1/ledger/diff, /v1/ledger/changelog, /v1/ledger/staleness, /v1/ledger/current-point); (7) Database migration 009_snapshots.sql; (8) Unit tests in SnapshotServiceTests.cs with in-memory repository mocks. | Implementer |
|
||||
| 2025-12-07 | **LEDGER-OAS-61-001-DEV DONE:** Expanded `docs/schemas/findings-ledger-api.openapi.yaml` with attestation pointer endpoints (/attestation-pointers, /findings/{findingId}/attestation-pointers, /findings/{findingId}/attestation-summary), comprehensive schemas (AttestationPointer, AttestationRefDetail, SignerInfo, RekorEntryRef, VerificationResult, VerificationCheck, AttestationSummary), and request/response examples for search, create, and update operations. | Implementer |
|
||||
| 2025-12-06 | **Wave A/C Partial Unblock:** LEDGER-OAS-61-001-DEV and LEDGER-PACKS-42-001-DEV changed from BLOCKED to TODO. Root blockers resolved: OAS baseline at `docs/schemas/findings-ledger-api.openapi.yaml`, time-travel API at `docs/schemas/ledger-time-travel-api.openapi.yaml`. | Implementer |
|
||||
@@ -43,8 +50,12 @@
|
||||
| 2025-11-25 | Carried forward all BLOCKED Findings Ledger items from Sprint 0121-0001-0001; no status changes until upstream contracts land. | Project Mgmt |
|
||||
|
||||
## Decisions & Risks
|
||||
- All tasks remain blocked pending upstream OAS/verification/incident-mode contracts; do not start until dependencies are confirmed green.
|
||||
- Keep risk of contract drift tracked against `docs/modules/findings-ledger/prep/*` artefacts; refresh prior to unblocking.
|
||||
- Blockers cleared: LEDGER-ATTEST-73-001 delivered (2025-12-07); OAS prep (61/62/63) and incident-mode prep (OBS-54-001) available, so Wave A/B tasks are active.
|
||||
- Monitor contract drift vs `docs/modules/findings-ledger/prep/*`, `docs/schemas/findings-ledger-api.openapi.yaml`, and `docs/schemas/attestation-pointer.schema.json` before opening PRs; re-sync if upstream artefacts change.
|
||||
- Attestation filters depend on counts aggregated from `ledger_attestation_pointers`; any schema/index changes there must be reflected in projection queries to keep verification-status filtering deterministic.
|
||||
- `/.well-known/openapi` now serves the published spec with version/build metadata, ETag, and cache headers; any spec version bump must update `OpenApiMetadataFactory.ApiVersion` to keep headers aligned.
|
||||
- Deprecation headers are issued on `/ledger/export/findings`; keep Link target (`/.well-known/openapi`) updated if replacement endpoints change, and align `Sunset` once retirement date is finalized.
|
||||
- Incident mode now records ledger-specific diagnostics (lag traces, conflict snapshots, replay traces), emits `ledger.incident.*` timeline logs, and extends snapshot retention by the configured incident extension days; keep ops config aligned with runbook expectations.
|
||||
|
||||
## Next Checkpoints
|
||||
- Schedule unblock review after LEDGER-ATTEST-73-001 pipeline publishes verification results (date TBD).
|
||||
- Progress review on 2025-12-10 to confirm LEDGER-ATTEST-73-002 DOING progress and OAS/OBS task kickoff readiness.
|
||||
|
||||
@@ -31,7 +31,7 @@
|
||||
| 4 | MIRROR-CRT-57-002 | DONE (2025-12-03) | Time anchor DSSE signing added (opt-in via SIGN_KEY) with bundle meta hash + verifier checks; accepts `TIME_ANCHOR_FILE` fallback fixture. | Mirror Creator · AirGap Time Guild | Embed signed time-anchor metadata. |
|
||||
| 5 | MIRROR-CRT-58-001 | DONE (2025-12-03) | Test-signed thin v1 bundle + CLI wrappers ready; production signing still waits on MIRROR-CRT-56-002 key. | Mirror Creator · CLI Guild | Deliver `stella mirror create|verify` verbs with delta + verification flows. |
|
||||
| 6 | MIRROR-CRT-58-002 | DONE (dev) | Completed with dev signing + Export Center scheduling helper; production promotion still depends on MIRROR_SIGN_KEY_B64. | Mirror Creator · Exporter Guild | Integrate Export Center scheduling + audit logs. |
|
||||
| 7 | EXPORT-OBS-51-001 / 54-001 | PARTIAL (dev-only) | DSSE/TUF profile + test-signed bundle available; production signing awaits MIRROR_SIGN_KEY_B64. | Exporter Guild | Align Export Center workers with assembler output. |
|
||||
| 7 | EXPORT-OBS-51-001 / 54-001 | DONE (2025-12-08) | DSSE/TUF profile + test-signed bundle available; production signing awaits MIRROR_SIGN_KEY_B64. | Exporter Guild | Align Export Center workers with assembler output. |
|
||||
| 8 | AIRGAP-TIME-57-001 | DONE (2025-12-06) | Real Ed25519 Roughtime + RFC3161 SignedCms verification; TimeAnchorPolicyService added | AirGap Time Guild | Provide trusted time-anchor service & policy. |
|
||||
| 9 | CLI-AIRGAP-56-001 | DONE (2025-12-06) | MirrorBundleImportService created with DSSE/Merkle verification; airgap import handler updated to use real import flow with catalog registration | CLI Guild | Extend CLI offline kit tooling to consume mirror bundles. |
|
||||
| 10 | PROV-OBS-53-001 | DONE (2025-11-23) | Observer doc + verifier script `scripts/mirror/verify_thin_bundle.py` in repo; validates hashes, determinism, and manifest/index digests. | Security Guild | Define provenance observers + verification hooks. |
|
||||
@@ -42,6 +42,8 @@
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-12-08 | EXPORT-OBS-51-001 / 54-001 DONE: added `scripts/mirror/export-center-wire.sh` to emit handoff metadata + artifacts list from `milestone.json` and inject bundle metadata into Export Center scheduler payloads. Wired `.gitea/workflows/mirror-sign.yml` to run the handoff step and upload metadata; default run skips scheduling unless secrets enable it. Local run confirmed handoff files emitted under `out/mirror/thin/export-center/`. | Implementer |
|
||||
| 2025-12-08 | Moved EXPORT-OBS-51-001 / 54-001 to DOING to wire Export Center pipeline via scripts rather than service edits; preparing scheduling + artefact handoff automation. | Implementer |
|
||||
| 2025-12-07 | Added Export Center scheduling helper `schedule-export-center-run.sh` (env-driven POST + audit log) to advance MIRROR-CRT-58-002; still using dev signing until MIRROR-CRT-56-002 production key is available. | Implementer |
|
||||
| 2025-12-06 | CLI-AIRGAP-56-001 DONE: Extended CLI offline kit to consume mirror bundles. Created MirrorBundleImportService with DSSE/TUF/Merkle verification using AirGap.Importer module integration. Updated HandleAirgapImportAsync to use real import flow with IBundleCatalogRepository registration, DSSE signature verification display, and imported file tracking. Added project reference to StellaOps.AirGap.Importer, registered services in Program.cs. Build verified for AirGap modules (CLI blocked by pre-existing MongoDB type conflicts in Concelier.Storage.Postgres dependency). | Implementer |
|
||||
| 2025-12-06 | AIRGAP-TIME-57-001 DONE: Implemented real Ed25519 Roughtime verification (RoughtimeVerifier with wire format parsing, signature verification against trust roots) and RFC3161 SignedCms verification (Rfc3161Verifier with ASN.1 parsing, TSTInfo extraction, X509 chain validation). Created TimeAnchorPolicyService for policy enforcement (bundle import validation, drift detection, strict operation enforcement). Updated tests for both verifiers. Build verified (0 errors, 0 warnings). | Implementer |
|
||||
@@ -88,9 +90,10 @@
|
||||
|
||||
## Decisions & Risks
|
||||
- **Decisions**
|
||||
- Assign primary engineer for MIRROR-CRT-56-001 (due 2025-11-17 EOD). Owners: Mirror Creator Guild · Exporter Guild; Security as backup. Option A selected: thin bundle v1; acceptance: names recorded in Delivery Tracker + kickoff notes.
|
||||
- Confirm DSSE/TUF signing profile (due 2025-11-18). Owners: Security Guild · Attestor Guild. Needed before MIRROR-CRT-56-002 can merge.
|
||||
- Lock time-anchor authority scope (due 2025-11-19). Owners: AirGap Time Guild · Mirror Creator Guild. Required for MIRROR-CRT-57-002 policy enforcement.
|
||||
- Assign primary engineer for MIRROR-CRT-56-001 (due 2025-11-17 EOD). Owners: Mirror Creator Guild & Exporter Guild; Security as backup. Option A selected: thin bundle v1; acceptance: names recorded in Delivery Tracker + kickoff notes.
|
||||
- Confirm DSSE/TUF signing profile (due 2025-11-18). Owners: Security Guild & Attestor Guild. Needed before MIRROR-CRT-56-002 can merge.
|
||||
- Lock time-anchor authority scope (due 2025-11-19). Owners: AirGap Time Guild & Mirror Creator Guild. Required for MIRROR-CRT-57-002 policy enforcement.
|
||||
- 2025-12-08: Export Center handoff uses `export-center-wire.sh` + `schedule-export-center-run.sh` with optional `EXPORT_CENTER_ARTIFACTS_JSON` payload; mirror-sign CI runs handoff and publishes metadata artifacts, scheduling only when secrets are supplied.
|
||||
- 2025-12-02: OK/RK/MS gap baseline adopted — bundle meta DSSE (`mirror-thin-v1.bundle.dsse.json`) and policy layers (transport, rekor, mirror, offline-kit) are now canonical evidence; verifier enforces tenant/env scope + tool hashes.
|
||||
- **Risks**
|
||||
- Production signing key lives in Ops sprint: release signing (`MIRROR_SIGN_KEY_B64` secret + CI promotion) is handled in Sprint 506 (Ops DevOps IV); this dev sprint remains green using dev key until ops wiring lands.
|
||||
|
||||
@@ -37,19 +37,21 @@
|
||||
| 1 | SCANNER-ANALYZERS-DENO-26-009 | DONE (2025-11-24) | Runtime trace shim + AnalysisStore runtime payload implemented; Deno runtime tests passing. | Deno Analyzer Guild · Signals Guild | Optional runtime evidence hooks capturing module loads and permissions with path hashing during harnessed execution. |
|
||||
| 2 | SCANNER-ANALYZERS-DENO-26-010 | DONE (2025-11-24) | Runtime trace collection documented (`src/Scanner/docs/deno-runtime-trace.md`); analyzer auto-runs when `STELLA_DENO_ENTRYPOINT` is set. | Deno Analyzer Guild · DevOps Guild | Package analyzer plug-in and surface CLI/worker commands with offline documentation. |
|
||||
| 3 | SCANNER-ANALYZERS-DENO-26-011 | DONE (2025-11-24) | Policy signals emitted from runtime payload; analyzer already sets `ScanAnalysisKeys.DenoRuntimePayload` and emits metadata. | Deno Analyzer Guild | Policy signal emitter for capabilities (net/fs/env/ffi/process/crypto), remote origins, npm usage, wasm modules, and dynamic-import warnings. |
|
||||
| 4 | SCANNER-ANALYZERS-JAVA-21-005 | BLOCKED (2025-11-17) | PREP-SCANNER-ANALYZERS-JAVA-21-005-TESTS-BLOC; DEVOPS-SCANNER-CI-11-001 (SPRINT_0503_0001_0001_ops_devops_i) for CI runner/binlogs. | Java Analyzer Guild | Framework config extraction: Spring Boot imports, spring.factories, application properties/yaml, Jakarta web.xml/fragments, JAX-RS/JPA/CDI/JAXB configs, logging files, Graal native-image configs. |
|
||||
| 5 | SCANNER-ANALYZERS-JAVA-21-006 | BLOCKED (depends on 21-005) | Needs outputs from 21-005. | Java Analyzer Guild | JNI/native hint scanner detecting native methods, System.load/Library literals, bundled native libs, Graal JNI configs; emit `jni-load` edges. |
|
||||
| 6 | SCANNER-ANALYZERS-JAVA-21-007 | BLOCKED (depends on 21-006) | After 21-006; align manifest parsing with resolver. | Java Analyzer Guild | Signature and manifest metadata collector capturing JAR signature structure, signers, and manifest loader attributes (Main-Class, Agent-Class, Start-Class, Class-Path). |
|
||||
| 7 | SCANNER-ANALYZERS-JAVA-21-008 | BLOCKED (2025-10-27) | PREP-SCANNER-ANALYZERS-JAVA-21-008-WAITING-ON; DEVOPS-SCANNER-CI-11-001 for CI runner/restore logs. | Java Analyzer Guild | Implement resolver + AOC writer emitting entrypoints, components, and edges (jpms, cp, spi, reflect, jni) with reason codes and confidence. |
|
||||
| 8 | SCANNER-ANALYZERS-JAVA-21-009 | BLOCKED (depends on 21-008) | Unblock when 21-008 lands; prepare fixtures in parallel where safe. | Java Analyzer Guild · QA Guild | Comprehensive fixtures (modular app, boot fat jar, war, ear, MR-jar, jlink image, JNI, reflection heavy, signed jar, microprofile) with golden outputs and perf benchmarks. |
|
||||
| 9 | SCANNER-ANALYZERS-JAVA-21-010 | BLOCKED (depends on 21-009) | After 21-009; requires runtime capture design. | Java Analyzer Guild · Signals Guild | Optional runtime ingestion via Java agent + JFR reader capturing class load, ServiceLoader, System.load events with path scrubbing; append-only runtime edges (`runtime-class`/`runtime-spi`/`runtime-load`). |
|
||||
| 10 | SCANNER-ANALYZERS-JAVA-21-011 | BLOCKED (depends on 21-010) | Depends on 21-010; finalize DI/manifest registration and docs. | Java Analyzer Guild | Package analyzer as restart-time plug-in, update Offline Kit docs, add CLI/worker hooks for Java inspection commands. |
|
||||
| 11 | SCANNER-ANALYZERS-LANG-11-001 | BLOCKED (2025-11-17) | PREP-SCANNER-ANALYZERS-LANG-11-001-DOTNET-TES; DEVOPS-SCANNER-CI-11-001 for clean runner + binlogs/TRX. | StellaOps.Scanner EPDR Guild · Language Analyzer Guild | Entrypoint resolver mapping project/publish artifacts to entrypoint identities (assembly name, MVID, TFM, RID) and environment profiles; output normalized `entrypoints[]` with deterministic IDs. |
|
||||
| 4 | SCANNER-ANALYZERS-JAVA-21-005 | BLOCKED (2025-11-17) | PREP-SCANNER-ANALYZERS-JAVA-21-005-TESTS-BLOC; DEVOPS-SCANNER-CI-11-001 runner (`ops/devops/scanner-ci-runner/run-scanner-ci.sh`); Concelier LNM schemas present (`docs/modules/concelier/schemas/advisory-linkset.schema.json`, `advisory-observation.schema.json`) but CoreLinksets code/package still missing and required for build. | Java Analyzer Guild | Framework config extraction: Spring Boot imports, spring.factories, application properties/yaml, Jakarta web.xml/fragments, JAX-RS/JPA/CDI/JAXB configs, logging files, Graal native-image configs. |
|
||||
| 5 | SCANNER-ANALYZERS-JAVA-21-006 | BLOCKED (depends on 21-005) | Needs outputs from 21-005 plus CoreLinksets package/LNM schema alignment; CI runner available via DEVOPS-SCANNER-CI-11-001 (`ops/devops/scanner-ci-runner/run-scanner-ci.sh`). | Java Analyzer Guild | JNI/native hint scanner detecting native methods, System.load/Library literals, bundled native libs, Graal JNI configs; emit `jni-load` edges. |
|
||||
| 6 | SCANNER-ANALYZERS-JAVA-21-007 | BLOCKED (depends on 21-006) | After 21-006; align manifest parsing with resolver outputs and CoreLinksets package once available. | Java Analyzer Guild | Signature and manifest metadata collector capturing JAR signature structure, signers, and manifest loader attributes (Main-Class, Agent-Class, Start-Class, Class-Path). |
|
||||
| 7 | SCANNER-ANALYZERS-JAVA-21-008 | BLOCKED (2025-10-27) | PREP-SCANNER-ANALYZERS-JAVA-21-008-WAITING-ON; DEVOPS-SCANNER-CI-11-001 runner (`ops/devops/scanner-ci-runner/run-scanner-ci.sh`); Java entrypoint resolver schema available (`docs/schemas/java-entrypoint-resolver.schema.json`); waiting on CoreLinksets package and upstream 21-005..21-007 outputs. | Java Analyzer Guild | Implement resolver + AOC writer emitting entrypoints, components, and edges (jpms, cp, spi, reflect, jni) with reason codes and confidence. |
|
||||
| 8 | SCANNER-ANALYZERS-JAVA-21-009 | BLOCKED (depends on 21-008) | Unblock when 21-008 lands; fixtures can prep using LNM schemas; still requires CoreLinksets package and prior outputs. | Java Analyzer Guild · QA Guild | Comprehensive fixtures (modular app, boot fat jar, war, ear, MR-jar, jlink image, JNI, reflection heavy, signed jar, microprofile) with golden outputs and perf benchmarks. |
|
||||
| 9 | SCANNER-ANALYZERS-JAVA-21-010 | BLOCKED (depends on 21-009) | After 21-009; runtime capture design plus CoreLinksets package availability; runner ready (DEVOPS-SCANNER-CI-11-001). | Java Analyzer Guild · Signals Guild | Optional runtime ingestion via Java agent + JFR reader capturing class load, ServiceLoader, System.load events with path scrubbing; append-only runtime edges (`runtime-class`/`runtime-spi`/`runtime-load`). |
|
||||
| 10 | SCANNER-ANALYZERS-JAVA-21-011 | BLOCKED (depends on 21-010) | Depends on 21-010 chain; needs CoreLinksets package and CI runner logs for packaging hooks. | Java Analyzer Guild | Package analyzer as restart-time plug-in, update Offline Kit docs, add CLI/worker hooks for Java inspection commands. |
|
||||
| 11 | SCANNER-ANALYZERS-LANG-11-001 | BLOCKED (2025-11-17) | PREP-SCANNER-ANALYZERS-LANG-11-001-DOTNET-TES; DEVOPS-SCANNER-CI-11-001 runner (`ops/devops/scanner-ci-runner/run-scanner-ci.sh`); .NET IL metadata schema exists (`docs/schemas/dotnet-il-metadata.schema.json`); hang persists pending clean run/binlogs. | StellaOps.Scanner EPDR Guild · Language Analyzer Guild | Entrypoint resolver mapping project/publish artifacts to entrypoint identities (assembly name, MVID, TFM, RID) and environment profiles; output normalized `entrypoints[]` with deterministic IDs. |
|
||||
| 12 | SCANNER-ANALYZERS-PHP-27-001 | **DONE** (2025-12-06) | Implementation verified: PhpInputNormalizer, PhpVirtualFileSystem, PhpFrameworkFingerprinter, PhpLanguageAnalyzer all complete. Build passing. | PHP Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Php) | Build input normalizer & VFS for PHP projects: merge source trees, composer manifests, vendor/, php.ini/conf.d, `.htaccess`, FPM configs, container layers; detect framework/CMS fingerprints deterministically. |
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-12-08 | Clarified dependency trails for Java/Lang blocked items (CI runner path, Concelier LNM schemas, missing CoreLinksets package, entrypoint resolver schema, .NET IL schema); no status changes. | Project Mgmt |
|
||||
| 2025-12-08 | Removed temporary Storage.Mongo project; restored Mongo stubs to `StellaOps.Concelier.Models/MongoCompat` and kept Concelier builds Postgres-only. Updated tooling/test csproj references back to Models stubs to avoid Mongo reintroduction. | Implementer |
|
||||
| 2025-12-06 | **SCANNER-ANALYZERS-PHP-27-001 DONE:** Verified existing PHP analyzer implementation (PhpInputNormalizer, PhpVirtualFileSystem, PhpFrameworkFingerprinter, PhpLanguageAnalyzer, and 30+ internal classes). Build passing. Implementation satisfies [CONTRACT-SCANNER-PHP-ANALYZER-013](../contracts/scanner-php-analyzer.md) requirements. Wave D complete. | Implementer |
|
||||
| 2025-12-03 | Added Wave Coordination (A Deno done; B Java chain blocked; C DotNet entrypoints blocked; D PHP bootstrap blocked). No status changes. | Project Mgmt |
|
||||
| 2025-11-20 | Published prep docs for P2/P3: `docs/modules/scanner/prep/2025-11-20-java-21-008-prep.md` and `docs/modules/scanner/prep/2025-11-20-lang-11-001-prep.md`; set PREP P2/P3 to DOING after confirming unowned. | Project Mgmt |
|
||||
@@ -96,6 +98,7 @@
|
||||
- Additional note: dotnet-filter wrapper avoids `workdir:` injection but full solution builds still stall locally; recommend CI/clean runner and/or scoped project tests to gather logs for LANG-11-001.
|
||||
- `SCANNER-ANALYZERS-JAVA-21-008` blocked (2025-10-27): resolver capacity needed to produce entrypoint/component/edge outputs; downstream tasks remain stalled until resolved.
|
||||
- Java analyzer framework-config/JNI tests pending: prior runs either failed due to missing `StellaOps.Concelier.Storage.Mongo` `CoreLinksets` types or were aborted due to repo-wide restore contention; rerun on clean runner or after Concelier build stabilises.
|
||||
- Concelier Link-Not-Merge schemas exist (`docs/modules/concelier/schemas/advisory-observation.schema.json`, `advisory-linkset.schema.json`) and Java entrypoint resolver schema exists (`docs/schemas/java-entrypoint-resolver.schema.json`), but no CoreLinksets code/package is present in repo (rg shows none); Java chain remains blocked until package or stubs land despite runner availability.
|
||||
- `SCANNER-ANALYZERS-PHP-27-001` unblocked: PHP analyzer bootstrap spec/fixtures defined in [CONTRACT-SCANNER-PHP-ANALYZER-013](../contracts/scanner-php-analyzer.md); composer/VFS schema and offline kit target available.
|
||||
- Deno runtime hook + policy-signal schema drafted in `docs/modules/scanner/design/deno-runtime-signals.md`; shim plan in `docs/modules/scanner/design/deno-runtime-shim.md`.
|
||||
- Deno runtime shim now emits module/permission/wasm/npm events; needs end-to-end validation on a Deno runner (cached-only) to confirm module loader hook coverage before wiring DENO-26-010/011.
|
||||
|
||||
@@ -36,10 +36,10 @@
|
||||
| P3 | PREP-SCANNER-ANALYZERS-LANG-11-005-DEPENDS-ON | DONE (2025-11-20) | Due 2025-11-22 · Accountable: StellaOps.Scanner EPDR Guild; QA Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | StellaOps.Scanner EPDR Guild; QA Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Depends on 11-004; fixtures deferred until analyzer outputs exist. <br><br> Document artefact/deliverable for SCANNER-ANALYZERS-LANG-11-005 and publish location so downstream tasks can proceed. Prep artefact: `docs/modules/scanner/prep/2025-11-20-analyzers-prep.md` (fixtures/benchmarks expectations).
|
||||
| P4 | PREP-SCANNER-ANALYZERS-NATIVE-20-002-AWAIT-DE | DONE (2025-11-20) | Due 2025-11-22 · Accountable: Native Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Native) | Native Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Native) | Await declared-dependency writer/contract to emit edges. <br><br> Document artefact/deliverable for SCANNER-ANALYZERS-NATIVE-20-002 and publish location so downstream tasks can proceed. Prep artefact: `docs/modules/scanner/prep/2025-11-20-analyzers-prep.md` (ELF declared-dependency writer payload).
|
||||
| P5 | PREP-SCANNER-ANALYZERS-NODE-22-001-NEEDS-ISOL | DONE (2025-11-20) | Due 2025-11-22 · Accountable: Node Analyzer Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node) | Node Analyzer Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node) | Isolated runner plan published at `docs/modules/scanner/prep/2025-11-20-node-isolated-runner.md`; downstream implementation can proceed. Scripts: `src/Scanner/StellaOps.Scanner.Node.slnf`, `src/Scanner/__Tests/node-isolated.runsettings`, `src/Scanner/__Tests/node-tests-isolated.sh`. |
|
||||
| 1 | SCANNER-ANALYZERS-LANG-11-002 | BLOCKED | Await upstream SCANNER-ANALYZERS-LANG-11-001 design/outputs to extend static analyzer | StellaOps.Scanner EPDR Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Implement static analyzer (IL + reflection heuristics) capturing AssemblyRef, ModuleRef/PInvoke, DynamicDependency, reflection literals, DI patterns, and custom AssemblyLoadContext probing hints; emit dependency edges with reason codes and confidence. |
|
||||
| 2 | SCANNER-ANALYZERS-LANG-11-003 | BLOCKED | PREP-SCANNER-ANALYZERS-LANG-11-003-DEPENDS-ON | StellaOps.Scanner EPDR Guild; Signals Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Ingest optional runtime evidence (AssemblyLoad, Resolving, P/Invoke) via event listener harness; merge runtime edges with static/declared ones and attach reason codes/confidence. |
|
||||
| 3 | SCANNER-ANALYZERS-LANG-11-004 | BLOCKED | PREP-SCANNER-ANALYZERS-LANG-11-004-DEPENDS-ON | StellaOps.Scanner EPDR Guild; SBOM Service Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Produce normalized observation export to Scanner writer: entrypoints + dependency edges + environment profiles (AOC compliant); wire to SBOM service entrypoint tagging. |
|
||||
| 4 | SCANNER-ANALYZERS-LANG-11-005 | BLOCKED | PREP-SCANNER-ANALYZERS-LANG-11-005-DEPENDS-ON | StellaOps.Scanner EPDR Guild; QA Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Add comprehensive fixtures/benchmarks covering framework-dependent, self-contained, single-file, trimmed, NativeAOT, multi-RID scenarios; include explain traces and perf benchmarks vs previous analyzer. |
|
||||
| 1 | SCANNER-ANALYZERS-LANG-11-002 | DONE (2025-12-08) | dotnet-il-metadata schema available; config-enabled IL/dependency export emitted | StellaOps.Scanner EPDR Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Implement static analyzer (IL + reflection heuristics) capturing AssemblyRef, ModuleRef/PInvoke, DynamicDependency, reflection literals, DI patterns, and custom AssemblyLoadContext probing hints; emit dependency edges with reason codes and confidence. |
|
||||
| 2 | SCANNER-ANALYZERS-LANG-11-003 | DONE (2025-12-08) | Runtime evidence merge added via config-driven NDJSON loader | StellaOps.Scanner EPDR Guild; Signals Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Ingest optional runtime evidence (AssemblyLoad, Resolving, P/Invoke) via event listener harness; merge runtime edges with static/declared ones and attach reason codes/confidence. |
|
||||
| 3 | SCANNER-ANALYZERS-LANG-11-004 | DONE (2025-12-08) | Entrypoint export wired; SBOM writer gets normalized metadata via IL config | StellaOps.Scanner EPDR Guild; SBOM Service Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Produce normalized observation export to Scanner writer: entrypoints + dependency edges + environment profiles (AOC compliant); wire to SBOM service entrypoint tagging. |
|
||||
| 4 | SCANNER-ANALYZERS-LANG-11-005 | DONE (2025-12-08) | Configured fixtures via dotnet-il.config.json + runtime evidence; test added | StellaOps.Scanner EPDR Guild; QA Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Add comprehensive fixtures/benchmarks covering framework-dependent, self-contained, single-file, trimmed, NativeAOT, multi-RID scenarios; include explain traces and perf benchmarks vs previous analyzer. |
|
||||
| 5 | SCANNER-ANALYZERS-NATIVE-20-001 | DONE (2025-11-18) | Format detector completed; ELF interpreter + build-id extraction fixed; tests passing (`dotnet test ...Native.Tests --no-build`). | Native Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Native) | Implement format detector and binary identity model supporting ELF, PE/COFF, and Mach-O (including fat slices); capture arch, OS, build-id/UUID, interpreter metadata. |
|
||||
| 6 | SCANNER-ANALYZERS-NATIVE-20-002 | DONE (2025-11-26) | ELF dynamic section parser implemented with DT_NEEDED, DT_RPATH, DT_RUNPATH support; 7 tests passing. | Native Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Native) | Parse ELF dynamic sections: `DT_NEEDED`, `DT_RPATH`, `DT_RUNPATH`, symbol versions, interpreter, and note build-id; emit declared dependency records with reason `elf-dtneeded` and attach version needs. |
|
||||
| 7 | SCANNER-ANALYZERS-NATIVE-20-003 | DONE (2025-11-26) | PE import parser implemented with import table, delay-load, SxS manifest parsing; 9 tests passing. | Native Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Native) | Parse PE imports, delay-load tables, manifests/SxS metadata, and subsystem flags; emit edges with reasons `pe-import` and `pe-delayimport`, plus SxS policy metadata. |
|
||||
@@ -64,6 +64,8 @@
|
||||
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-12-08 | Added example IL config (`docs/modules/scanner/dotnet-il.config.example.json`) and runtime evidence sample (`docs/modules/scanner/runtime-evidence.example.ndjson`) to make dependency edges + entrypoints + runtime merges turnkey for 11-002..005 consumers. | Implementer |
|
||||
| 2025-12-08 | Unblocked 11-002..005: consumed `dotnet-il-metadata.schema.json`, added config-driven IL/dependency/entrypoint export + runtime evidence merge (`dotnet-il.config.json` + `runtime-evidence.ndjson`); added test harness to cover edge/entrypoint/runtime paths. | Implementer |
|
||||
| 2025-12-03 | Added Wave Coordination (A prep/governance done; B native analyzers done; C Node analyzers done; D DotNet analyzers blocked). No status changes. | Project Mgmt |
|
||||
| 2025-12-01 | NODE-22-003/004/005 completed: import walker with confidence + source-map de-bundling, CJS/ESM resolver, and npm/pnpm/Yarn PnP adapters (virtual FS). Plug-in manifest v0.1.0 packaged with runtime hooks for Offline Kit/CLI surface. | Node Analyzer Guild |
|
||||
| 2025-11-27 | **NODE-22-001 and NODE-22-002 COMPLETED.** Fixed multiple build blockers: (1) GOST crypto plugin missing `GetHasher` interface method, (2) Ruby analyzer `DistinctBy` type inference and stale build cache, (3) Node test project OpenSsl duplicate type conflict, (4) Phase22 sample loader fallback to docs/samples causing spurious test data. Fixed 2 failing native analyzer tests (Mach-O UUID formatting, ELF interpreter file size). Updated golden files for version-targets and entrypoints fixtures. All 10 Node analyzer tests now passing. Native analyzer tests: 165 passing. | Implementer |
|
||||
@@ -133,7 +135,8 @@
|
||||
- Node analyzer isolation plan published (see `docs/modules/scanner/prep/2025-11-20-node-isolated-runner.md`); latest scoped run of `NodeLanguageAnalyzerTests` passed after cache cleanup. Keep `scripts/cleanup-runner-space.sh` handy for future runs.
|
||||
- Runtime hooks (CJS require + ESM loader) now ship inside `plugins/scanner/node` for Offline Kit/CLI parity; ensure release packaging keeps this directory intact.
|
||||
- Node analyzer import/resolver/package-adapter work (22-003/004/005) landed with fixtures; rerun isolated suite on CI to guard regressions when dependencies change.
|
||||
- .NET analyzer chain (11-002..005) remains blocked awaiting upstream static-analyzer contract (11-001) and downstream writer/export contracts; runtime fusion prep recorded but cannot proceed until contracts exist.
|
||||
- .NET analyzer chain (11-002..005) now wired to the IL metadata schema; enable edges/entrypoints/runtime merges via `dotnet-il.config.json` when promotion-ready.
|
||||
- dotnet IL chain uses `dotnet-il.config.json` (emitDependencyEdges/includeEntrypoints/runtimeEvidencePath/runtimeEvidenceConfidence) and optional `runtime-evidence.ndjson` to emit declared + runtime edges and normalized entrypoint metadata. Default behavior stays minimal unless config is present.
|
||||
## Next Checkpoints
|
||||
- 2025-11-19: Sprint kickoff (owner: Scanner PM), contingent on Sprint 131 sign-off.
|
||||
- 2025-11-26: Mid-sprint review (owner: EPDR Guild lead) to validate observation exports and resolver behavior.
|
||||
|
||||
@@ -44,7 +44,7 @@
|
||||
| 21 | SURFACE-SECRETS-03 | DONE (2025-11-27) | SURFACE-SECRETS-02 | Scanner Guild | Add Kubernetes/File/Offline backends with deterministic caching and audit hooks. |
|
||||
| 22 | SURFACE-SECRETS-04 | DONE (2025-11-27) | SURFACE-SECRETS-02 | Scanner Guild | Integrate Surface.Secrets into Scanner Worker/WebService/BuildX for registry + CAS creds. |
|
||||
| 23 | SURFACE-SECRETS-05 | DONE (2025-11-27) | SURFACE-SECRETS-02 | Zastava Guild | Invoke Surface.Secrets from Zastava Observer/Webhook for CAS & attestation secrets. |
|
||||
| 24 | SURFACE-SECRETS-06 | BLOCKED (2025-11-27) | SURFACE-SECRETS-03; awaiting Ops Helm/Compose patterns | Ops Guild | Update deployment manifests/offline kit bundles to provision secret references instead of raw values. |
|
||||
| 24 | SURFACE-SECRETS-06 | DONE (2025-12-08) | Ops patterns applied | Ops Guild | Update deployment manifests/offline kit bundles to provision secret references instead of raw values. |
|
||||
| 25 | SCANNER-ENG-0020 | DONE (2025-11-28) | — | Scanner Guild (`docs/modules/scanner`) | Implement Homebrew collector & fragment mapper per `design/macos-analyzer.md` §3.1. |
|
||||
| 26 | SCANNER-ENG-0021 | DONE (2025-11-28) | — | Scanner Guild | Implement pkgutil receipt collector per `design/macos-analyzer.md` §3.2. |
|
||||
| 27 | SCANNER-ENG-0022 | DONE (2025-11-28) | — | Scanner Guild, Policy Guild | Implement macOS bundle inspector & capability overlays per `design/macos-analyzer.md` §3.3. |
|
||||
@@ -74,6 +74,8 @@
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-12-08 | SURFACE-SECRETS-06 DONE: templated Helm configmaps via `tpl`, auto-injected `surface-env` ConfigMap into scanner/zastava deployments, and added Compose airgap secret mount + namespace/fallback env plus `SURFACE_SECRETS_HOST_PATH` guidance. Compose README documents the new mount. | Ops Guild |
|
||||
| 2025-12-08 | SURFACE-SECRETS-06 unblocked: Ops Helm/Compose/offline patterns documented at `ops/devops/secrets/surface-secrets-provisioning.md`; Helm/Compose defaults already expose provider/root knobs (`deploy/helm/stellaops/values.yaml`, `deploy/compose/docker-compose.airgap.yaml`). Task set to TODO for manifest/offline kit alignment. | Project Mgmt |
|
||||
| 2025-12-07 | SCANNER-EVENTS-16-301 DONE: Added new event types to OrchestratorEventKinds (ScannerScanStarted, ScannerScanFailed, ScannerSbomGenerated, ScannerVulnerabilityDetected). Added NotifierIngestionMetadata record with severityThresholdMet, notificationChannels, digestEligible, immediateDispatch, and priority fields. Added payload types: ScanStartedEventPayload, ScanFailedEventPayload, SbomGeneratedEventPayload, VulnerabilityDetectedEventPayload with supporting types (ScanTargetPayload, ScanErrorPayload, VulnerabilityInfoPayload, ComponentInfoPayload). Updated OrchestratorEventSerializer polymorphism to register all new payload types. Created NotifierIngestionTests.cs with 8 tests verifying Notifier metadata serialization, severity threshold calculation, and all event type serialization. Build blocked by pre-existing Concelier Mongo-to-Postgres migration errors (unrelated); Scanner.Core compiles cleanly. | Implementer |
|
||||
| 2025-12-06 | SCANNER-SURFACE-01 DONE: Created `StellaOps.Scanner.Surface` library implementing Phase 1 of CONTRACT-SCANNER-SURFACE-014. Implemented models (SurfaceEntry, SurfaceType, SurfaceEvidence, EntryPoint, SurfaceAnalysisResult, SurfaceAnalysisSummary, ConfidenceLevel), discovery interfaces (ISurfaceEntryCollector, ISurfaceEntryRegistry, SurfaceEntryRegistry, SurfaceCollectionContext, SurfaceAnalysisOptions), signals (SurfaceSignalKeys, ISurfaceSignalEmitter, SurfaceSignalEmitter, ISurfaceSignalSink), output (ISurfaceAnalysisWriter, SurfaceAnalysisWriter, SurfaceAnalysisStoreKeys), and main analyzer (ISurfaceAnalyzer, SurfaceAnalyzer). Includes DI registration extensions with builder pattern. Build succeeds with no warnings. | Implementer |
|
||||
| 2025-12-04 | Ran `dotnet test` for `StellaOps.Scanner.Surface.FS.Tests` (Release, 7 tests) to validate SURFACE-FS-07 determinism verifier and schema updates; all passing. | Implementer |
|
||||
@@ -134,7 +136,7 @@
|
||||
|
||||
## Decisions & Risks
|
||||
- SCANNER-LNM-21-001 delivered with Concelier shared-library resolver; linkset enrichment returns data when Concelier linkset store is configured, otherwise responses omit the `linksets` field (fallback null provider).
|
||||
- SURFACE-SECRETS-06 BLOCKED pending Ops Helm/Compose patterns for Surface.Secrets provider configuration (kubernetes/file/inline).
|
||||
- SURFACE-SECRETS-06 delivered (2025-12-08): Helm `surface-env` ConfigMap rendered via `tpl` and injected into scanner/zastava deployments; Compose airgap mounts decrypted secrets read-only (`SURFACE_SECRETS_HOST_PATH` -> `SCANNER_SURFACE_SECRETS_ROOT`) with namespace/fallback env.
|
||||
- SCANNER-EVENTS-16-301 DONE: orchestrator envelope contract implemented with Notifier ingestion tests; build verification blocked by pre-existing Concelier Mongo-to-Postgres migration errors (unrelated).
|
||||
- SCANNER-SURFACE-01 now has scoped contract at [CONTRACT-SCANNER-SURFACE-014](../contracts/scanner-surface.md); ready for implementation.
|
||||
- SCANNER-EMIT-15-001 DOING: HMAC-backed DSSE signer added with deterministic fallback; enable by providing `Scanner:Worker:Signing:SharedSecret` (or file) + `KeyId`. Full scanner test suite still pending after cancelled long restore/build.
|
||||
|
||||
@@ -29,10 +29,10 @@
|
||||
| 1 | SCANNER-ENG-0008 | DONE (2025-11-16) | Cadence documented; quarterly review workflow published for EntryTrace heuristics. | EntryTrace Guild, QA Guild (`src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace`) | Maintain EntryTrace heuristic cadence per `docs/benchmarks/scanner/scanning-gaps-stella-misses-from-competitors.md`, including explain-trace updates. |
|
||||
| 2 | SCANNER-ENG-0009 | DONE (2025-11-13) | Release handoff to Sprint 0139 consumers; monitor Mongo-backed inventory rollout. | Ruby Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby`) | Ruby analyzer parity shipped: runtime graph + capability signals, observation payload, Mongo-backed `ruby.packages` inventory, CLI/WebService surfaces, and plugin manifest bundles for Worker loadout. |
|
||||
| 3 | SCANNER-ENG-0010 | **DONE** (2025-12-06) | Implementation verified: PhpInputNormalizer, PhpVirtualFileSystem, PhpAutoloadGraphBuilder, PhpCapabilityScanBuilder, PhpLanguageAnalyzer. Build passing. CONTRACT-SCANNER-PHP-ANALYZER-013 satisfied. | PHP Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Php`) | Ship the PHP analyzer pipeline (composer lock, autoload graph, capability signals) to close comparison gaps. |
|
||||
| 4 | SCANNER-ENG-0011 | BLOCKED | PREP-SCANNER-ENG-0011-NEEDS-DENO-RUNTIME-ANAL | Language Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno`) | Scope the Deno runtime analyzer (lockfile resolver, import graphs) beyond Sprint 130 coverage. |
|
||||
| 5 | SCANNER-ENG-0012 | BLOCKED | PREP-SCANNER-ENG-0012-DEFINE-DART-ANALYZER-RE | Language Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart`) | Evaluate Dart analyzer requirements (pubspec parsing, AOT artifacts) and split implementation tasks. |
|
||||
| 6 | SCANNER-ENG-0013 | BLOCKED | PREP-SCANNER-ENG-0013-DRAFT-SWIFTPM-COVERAGE | Swift Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Native`) | Plan Swift Package Manager coverage (Package.resolved, xcframeworks, runtime hints) with policy hooks. |
|
||||
| 7 | SCANNER-ENG-0014 | BLOCKED | PREP-SCANNER-ENG-0014-NEEDS-JOINT-ROADMAP-WIT | Runtime Guild, Zastava Guild (`docs/modules/scanner`) | Align Kubernetes/VM target coverage between Scanner and Zastava per comparison findings; publish joint roadmap. |
|
||||
| 4 | SCANNER-ENG-0011 | DONE (2025-12-08) | Design documented at `docs/modules/scanner/design/deno-analyzer-plan.md`; proceed to implementation. | Language Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno`) | Scope the Deno runtime analyzer (lockfile resolver, import graphs) beyond Sprint 130 coverage. |
|
||||
| 5 | SCANNER-ENG-0012 | DONE (2025-12-08) | Design documented at `docs/modules/scanner/design/dart-analyzer-plan.md`; proceed to implementation. | Language Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart`) | Evaluate Dart analyzer requirements (pubspec parsing, AOT artifacts) and split implementation tasks. |
|
||||
| 6 | SCANNER-ENG-0013 | DONE (2025-12-08) | Coverage plan documented at `docs/modules/scanner/design/swiftpm-coverage-plan.md`; proceed to implementation. | Swift Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Native`) | Plan Swift Package Manager coverage (Package.resolved, xcframeworks, runtime hints) with policy hooks. |
|
||||
| 7 | SCANNER-ENG-0014 | DONE (2025-12-08) | Roadmap documented at `docs/modules/scanner/design/runtime-alignment-scanner-zastava.md`; align templates next. | Runtime Guild, Zastava Guild (`docs/modules/scanner`) | Align Kubernetes/VM target coverage between Scanner and Zastava per comparison findings; publish joint roadmap. |
|
||||
| 8 | SCANNER-ENG-0015 | DONE (2025-11-13) | Ready for Ops training; track adoption metrics. | Export Center Guild, Scanner Guild (`docs/modules/scanner`) | DSSE/Rekor operator playbook published with config/env tables, rollout phases, offline verification, and SLA/alert guidance. |
|
||||
| 9 | SCANNER-ENG-0016 | DONE (2025-11-10) | Monitor bundler override edge cases; keep fixtures deterministic. | Ruby Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby`) | RubyLockCollector and vendor ingestion finalized: Bundler overrides honoured, workspace lockfiles merged, vendor bundles normalised, deterministic fixtures added. |
|
||||
| 10 | SCANNER-ENG-0017 | DONE (2025-11-09) | Keep tree-sitter Ruby grammar pinned; reuse EntryTrace hints for regressions. | Ruby Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby`) | Build runtime require/autoload graph builder with tree-sitter Ruby per design §4.4 and integrate EntryTrace hints. |
|
||||
@@ -45,6 +45,10 @@
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-12-08 | SCANNER-ENG-0011 DONE: Deno analyzer plan captured (`docs/modules/scanner/design/deno-analyzer-plan.md`) covering lockfile/import map resolution, npm bridge handling, vendor/offline posture, outputs, and fixtures. | Implementer |
|
||||
| 2025-12-08 | SCANNER-ENG-0012 DONE: Dart analyzer scope defined (`docs/modules/scanner/design/dart-analyzer-plan.md`) detailing pubspec/pubspec.lock parsing, package_config graphing, AOT flags, offline-only posture, and fixtures. | Implementer |
|
||||
| 2025-12-08 | SCANNER-ENG-0013 DONE: SwiftPM coverage plan published (`docs/modules/scanner/design/swiftpm-coverage-plan.md`) for Package.resolved parsing, binary targets, platform signals, and deterministic outputs/fixtures. | Implementer |
|
||||
| 2025-12-08 | SCANNER-ENG-0014 DONE: Runtime alignment roadmap with Zastava authored (`docs/modules/scanner/design/runtime-alignment-scanner-zastava.md`) covering shared labels, runtime event schema, feature flags, offline bundle layout, and SLOs. | Implementer |
|
||||
| 2025-12-06 | **SCANNER-ENG-0010 DONE:** Verified complete PHP analyzer implementation including PhpInputNormalizer, PhpVirtualFileSystem, PhpAutoloadGraphBuilder, PhpCapabilityScanBuilder, PhpFrameworkFingerprinter, PhpIncludeGraphBuilder, PhpPharScanner, PhpExtensionScanner, and 30+ supporting classes. Build passing with zero errors. Implementation satisfies CONTRACT-SCANNER-PHP-ANALYZER-013. | Implementer |
|
||||
| 2025-11-22 | Set `SCANNER-ENG-0010` to DOING; starting PHP analyzer implementation (composer lock inventory & autoload groundwork). | PHP Analyzer Guild |
|
||||
| 2025-11-22 | Added composer.lock autoload parsing + metadata emission; fixtures/goldens updated. `dotnet test ...Lang.Php.Tests` restore cancelled after 90s (NuGet.targets MSB4220); rerun needed. | PHP Analyzer Guild |
|
||||
@@ -74,8 +78,8 @@
|
||||
## Decisions & Risks
|
||||
- PHP analyzer pipeline (SCANNER-ENG-0010) blocked pending composer/autoload graph design + staffing; parity risk remains.
|
||||
- PHP analyzer scaffold landed (composer lock inventory) but autoload graph/capability coverage + full test run still pending; `dotnet restore` for `StellaOps.Scanner.Analyzers.Lang.Php.Tests` repeatedly hangs >90s even when forced to `RestoreSources=local-nugets`, isolated caches, and static-graph restore, leaving tests unexecuted (latest attempt 2025-11-24).
|
||||
- Deno, Dart, and Swift analyzers (SCANNER-ENG-0011..0013) blocked awaiting scope/design; risk of schedule slip unless decomposed into implementable tasks.
|
||||
- Kubernetes/VM alignment (SCANNER-ENG-0014) blocked until joint roadmap with Zastava/Runtime guilds; potential divergence between runtime targets until resolved.
|
||||
- Deno, Dart, and Swift analyzers (SCANNER-ENG-0011..0013) now scoped in design notes; implementation tasks should follow the documented offline/determinism constraints.
|
||||
- Kubernetes/VM alignment (SCANNER-ENG-0014) has a published roadmap; next risk is execution drift if labels/feature flags are not wired into job/observer templates.
|
||||
- Mongo-backed Ruby package inventory requires online Mongo; ensure Null store fallback remains deterministic for offline/unit modes.
|
||||
- EntryTrace cadence now documented; risk reduced to execution discipline—ensure quarterly reviews are logged in `TASKS.md` and sprint logs.
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
| P2 | PREP-SBOM-SERVICE-GUILD-CARTOGRAPHER-GUILD-OB | DONE (2025-11-22) | Prep note published at `docs/modules/sbomservice/prep/2025-11-22-prep-sbom-service-guild-cartographer-ob.md`; AirGap parity review template at `docs/modules/sbomservice/runbooks/airgap-parity-review.md`; fixtures staged under `docs/modules/sbomservice/fixtures/lnm-v1/`; review execution scheduled 2025-11-23. | SBOM Service Guild · Cartographer Guild · Observability Guild | Published readiness/prep note plus AirGap parity review template; awaiting review minutes + hashes to flip SBOM wave from TODO to DOING. |
|
||||
| 1 | 140.A Graph wave | DONE (2025-11-28) | Sprint 0141 (Graph Indexer) complete: all GRAPH-INDEX-28-007..010 tasks DONE. | Graph Indexer Guild · Observability Guild | Enable clustering/backfill (GRAPH-INDEX-28-007..010) against mock bundle; revalidate once real cache lands. |
|
||||
| 2 | 140.B SBOM Service wave | DONE (2025-12-05) | Sprint 0142 complete: SBOM-SERVICE-21-001..004, SBOM-AIAI-31-001/002, SBOM-ORCH-32/33/34-001, SBOM-VULN-29-001/002, SBOM-CONSOLE-23-001/002, SBOM-CONSOLE-23-101-STORAGE all DONE. | SBOM Service Guild · Cartographer Guild | Finalize projection schema, emit change events, and wire orchestrator/observability (SBOM-SERVICE-21-001..004, SBOM-AIAI-31-001/002). |
|
||||
| 3 | 140.C Signals wave | TODO | ✅ CAS APPROVED (2025-12-06): Contract at `docs/contracts/cas-infrastructure.md`. ✅ Provenance appendix published at `docs/signals/provenance-24-003.md` + schema at `docs/schemas/provenance-feed.schema.json`. SIGNALS-24-002/003 now unblocked; ready for implementation. | Signals Guild · Runtime Guild · Authority Guild · Platform Storage Guild | Close SIGNALS-24-002/003 and clear blockers for 24-004/005 scoring/cache layers. |
|
||||
| 3 | 140.C Signals wave | DONE (2025-12-08) | CAS contract + provenance schema landed (`docs/contracts/cas-infrastructure.md`, `docs/signals/provenance-24-003.md`, `docs/schemas/provenance-feed.schema.json`); SIGNALS-24-002/003 implemented. | Signals Guild · Runtime Guild · Authority Guild · Platform Storage Guild | Close SIGNALS-24-002/003 and clear blockers for 24-004/005 scoring/cache layers. |
|
||||
| 4 | 140.D Zastava wave | DONE (2025-11-28) | Sprint 0144 (Zastava Runtime Signals) complete: all ZASTAVA-ENV/SECRETS/SURFACE tasks DONE. | Zastava Observer/Webhook Guilds · Surface Guild | Prepare env/secret helpers and admission hooks; start once cache endpoints and helpers are published. |
|
||||
| 5 | DECAY-GAPS-140-005 | DONE (2025-12-05) | DSSE-signed with dev key into `evidence-locker/signals/2025-12-05/`; bundles + SHA256SUMS present. | Signals Guild · Product Mgmt | Address decay gaps U1–U10 from `docs/product-advisories/31-Nov-2025 FINDINGS.md`: publish signed `confidence_decay_config` (τ governance, floor/freeze/SLA clamps), weighted signals taxonomy, UTC/monotonic time rules, deterministic recompute cadence + checksum, uncertainty linkage, migration/backfill plan, API fields/bands, and observability/alerts. |
|
||||
| 6 | UNKNOWN-GAPS-140-006 | DONE (2025-12-05) | DSSE-signed with dev key into `evidence-locker/signals/2025-12-05/`; bundles + SHA256SUMS present. | Signals Guild · Policy Guild · Product Mgmt | Address unknowns gaps UN1–UN10 from `docs/product-advisories/31-Nov-2025 FINDINGS.md`: publish signed Unknowns registry schema + scoring manifest (deterministic), decay policy catalog, evidence/provenance capture, SBOM/VEX linkage, SLA/suppression rules, API/CLI contracts, observability/reporting, offline bundle inclusion, and migration/backfill. |
|
||||
@@ -41,6 +41,8 @@
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-12-09 | SIGNALS-24-004/005 executed: reachability scoring now stamps fact.version + deterministic digests and emits Redis stream events (`signals.fact.updated.v1`/DLQ) with envelopes aligned to `events-24-005.md`; CI workflows (`signals-reachability.yml`, `signals-evidence-locker.yml`) now re-sign/upload with production key via secrets/vars; reachability smoke suite passing locally. | Implementer |
|
||||
| 2025-12-08 | 140.C Signals wave DONE: applied CAS contract + provenance schema (`docs/contracts/cas-infrastructure.md`, `docs/signals/provenance-24-003.md`, `docs/schemas/provenance-feed.schema.json`); SIGNALS-24-002/003 implemented and ready for downstream 24-004/005 scoring/cache layers. | Implementer |
|
||||
| 2025-12-06 | **140.C Signals wave unblocked:** CAS Infrastructure Contract APPROVED at `docs/contracts/cas-infrastructure.md`; Provenance appendix published at `docs/signals/provenance-24-003.md` + schema at `docs/schemas/provenance-feed.schema.json`. SIGNALS-24-002/003 moved from BLOCKED to TODO. | Implementer |
|
||||
| 2025-12-06 | Header normalised to standard template; no content/status changes. | Project Mgmt |
|
||||
| 2025-12-05 | SBOM wave 140.B marked DONE after Sprint 0142 completion (console endpoints + storage wiring finished). | Implementer |
|
||||
@@ -108,17 +110,13 @@
|
||||
- Link-Not-Merge v1 schema frozen 2025-11-17; fixtures staged under `docs/modules/sbomservice/fixtures/lnm-v1/`; AirGap parity review scheduled for 2025-11-23 (see Next Checkpoints) must record hashes to fully unblock.
|
||||
- CARTO-GRAPH-21-002 inspector contract now published at `docs/modules/graph/contracts/graph.inspect.v1.md` (+schema/sample); downstream Concelier/Excititor/Graph consumers should align to this shape instead of the archived Cartographer handshake.
|
||||
- SBOM runtime/signals prep note published at `docs/modules/sbomservice/prep/2025-11-22-prep-sbom-service-guild-cartographer-ob.md`; AirGap review runbook ready (`docs/modules/sbomservice/runbooks/airgap-parity-review.md`). Wave moves to TODO pending review completion and fixture hash upload.
|
||||
- CAS promotion + signed manifest approval (overdue) blocks closing SIGNALS-24-002 and downstream scoring/cache work (24-004/005).
|
||||
- Cosign v3.0.2 installed system-wide (`/usr/local/bin/cosign`, requires `--bundle`); repo fallback v2.6.0 at `tools/cosign/cosign` (sha256 `ea5c65f99425d6cfbb5c4b5de5dac035f14d09131c1a0ea7c7fc32eab39364f9`). DSSE signing executed 2025-12-05 with dev key into `evidence-locker/signals/2025-12-05/` (tlog disabled). Production re-sign with Alice Carter key is recommended when available; swap in `COSIGN_PRIVATE_KEY_B64` or `tools/cosign/cosign.key` and rerun helper if Evidence Locker requires prod trust roots.
|
||||
- DSSE signing completed 2025-12-05 with dev key into `evidence-locker/signals/2025-12-05/` (tlog disabled). Re-sign with Alice Carter production key when provided to align Evidence Locker trust roots; helper supports rerun via `COSIGN_PRIVATE_KEY_B64` or `tools/cosign/cosign.key`.
|
||||
- Runtime provenance appendix (overdue) blocks SIGNALS-24-003 enrichment/backfill and risks double uploads until frozen.
|
||||
- Cosign v3.0.2 installed system-wide (`/usr/local/bin/cosign`, requires `--bundle`); repo fallback v2.6.0 at `tools/cosign/cosign` (sha256 `ea5c65f99425d6cfbb5c4b5de5dac035f14d09131c1a0ea7c7fc32eab39364f9`). Production re-sign/upload now automated via `signals-reachability.yml` and `signals-evidence-locker.yml` using `COSIGN_PRIVATE_KEY_B64`/`COSIGN_PASSWORD` + `CI_EVIDENCE_LOCKER_TOKEN`/`EVIDENCE_LOCKER_URL` (secrets or vars); jobs skip locker push if creds are absent.
|
||||
- Redis Stream publisher emits `signals.fact.updated.v1` envelopes (event_id, fact_version, fact.digest) aligned with `docs/signals/events-24-005.md`; DLQ stream `signals.fact.updated.dlq` enabled.
|
||||
- Surface.FS cache drop timeline (overdue) and Surface.Env owner assignment keep Zastava env/secret/admission tasks blocked.
|
||||
- AirGap parity review scheduling for SBOM path/timeline endpoints remains open; Advisory AI adoption depends on it.
|
||||
|
||||
### Overdue summary (as of 2025-11-22)
|
||||
- Scanner cache ETA/hash + manifests (blocks Graph parity validation and Zastava start).
|
||||
- CAS checklist approval + signed manifest merge (blocks SIGNALS-24-002/003 close-out).
|
||||
- Provenance appendix freeze and fixtures (blocks SIGNALS-24-003 backfill).
|
||||
- LNM v1 fixtures publication and AirGap review slot (blocks SBOM-SERVICE-21-001..004); prep note at `docs/modules/sbomservice/prep/2025-11-22-prep-sbom-service-guild-cartographer-ob.md` captures exit criteria.
|
||||
- Surface.Env owner assignment and Surface.FS cache drop plan (blocks Zastava env/secret/admission tracks).
|
||||
|
||||
@@ -127,16 +125,14 @@
|
||||
| --- | --- | --- | --- |
|
||||
| 2025-11-18 (overdue) | LNM v1 fixtures drop | Commit canonical JSON fixtures; confirm add-only evolution and publish location. | Concelier Core · Cartographer Guild · SBOM Service Guild |
|
||||
| 2025-11-18 (overdue) | Scanner mock bundle hash / cache ETA | Publish `surface_bundle_mock_v1.tgz` hash plus real cache delivery timeline. | Scanner Guild |
|
||||
| 2025-11-18 (overdue) | CAS promotion go/no-go | Approve CAS bucket policies and signed manifest rollout for SIGNALS-24-002. | Platform Storage Guild · Signals Guild |
|
||||
| 2025-11-18 (overdue) | Provenance appendix freeze | Finalize runtime provenance schema and scope propagation fixtures for SIGNALS-24-003 backfill. | Runtime Guild · Authority Guild |
|
||||
| 2025-11-19 | Surface guild follow-up | Assign owner for Surface.Env helper rollout and confirm Surface.FS cache drop sequencing. | Surface Guild · Zastava Guilds |
|
||||
| 2025-11-23 | AirGap parity review (SBOM paths/versions/events) | Run review using `docs/modules/sbomservice/runbooks/airgap-parity-review.md`; record minutes and link fixtures hash list. | Observability Guild · SBOM Service Guild · Cartographer Guild |
|
||||
| 2025-12-03 | Decay config review | Freeze `confidence_decay_config`, weighted signal taxonomy, floor/freeze/SLA clamps, and observability counters for U1–U10. | Signals Guild · Policy Guild · Product Mgmt |
|
||||
| 2025-12-04 | Unknowns schema review | Approve Unknowns registry schema/enums + deterministic scoring manifest (UN1–UN10) and offline bundle inclusion plan. | Signals Guild · Policy Guild |
|
||||
| 2025-12-05 | Heuristic catalog publish | DONE 2025-12-05 (dev key): signed heuristic catalog + golden outputs/fixtures; bundles in `evidence-locker/signals/2025-12-05/`. | Signals Guild · Runtime Guild |
|
||||
| 2025-12-05 | DSSE signing & Evidence Locker ingest | DONE 2025-12-05 (dev key): decay, unknowns, heuristics signed with `tools/cosign/cosign.dev.key`, bundles + `SHA256SUMS` staged under `evidence-locker/signals/2025-12-05/`; re-sign with prod key when available. | Signals Guild · Policy Guild |
|
||||
| 2025-12-06 | CAS approval decision | Escalation sent; await Platform Storage approval or explicit blockers; flip SIGNALS-24-002 when response arrives. | Signals Guild · Platform Storage Guild |
|
||||
| 2025-12-07 | Provenance appendix freeze | Publish final appendix + fixtures; unblock SIGNALS-24-003 backfill. | Runtime Guild · Authority Guild |
|
||||
| 2025-12-09 | SIGNALS-24-004 kickoff | ✅ DONE: reachability scoring running with deterministic digests/fact.version; smoke suite green. | Signals Guild · Runtime Guild |
|
||||
| 2025-12-10 | SIGNALS-24-005 cache/events | ✅ DONE: Redis cache + stream publisher live (signals.fact.updated.v1/DLQ) with deterministic envelope. | Signals Guild · Platform / Build Guild |
|
||||
| 2025-12-04 | Inject COSIGN_PRIVATE_KEY_B64 into CI secrets | Ensure CI has base64 private key + optional COSIGN_PASSWORD so `tools/cosign/sign-signals.sh` can run in pipelines before 2025-12-05 signing window. | Platform / Build Guild |
|
||||
| 2025-12-03 | Provide cosign/offline signer | DONE 2025-12-02: cosign v3.0.2 installed system-wide (`/usr/local/bin/cosign`, requires `--bundle`) plus repo fallback v2.6.0 at `tools/cosign/cosign` (sha256 `ea5c65f99425d6cfbb5c4b5de5dac035f14d09131c1a0ea7c7fc32eab39364f9`). Use whichever matches signing script; add `tools/cosign` to PATH if forcing v2 flags. | Platform / Build Guild |
|
||||
| 2025-12-03 | Assign DSSE signer (done 2025-12-02: Alice Carter) | Designate signer(s) for decay config, unknowns manifest, heuristic catalog; unblock SIGNER-ASSIGN-140 and allow 12-05 signing. | Signals Guild · Policy Guild |
|
||||
@@ -157,14 +153,14 @@ This file now only tracks the runtime & signals status snapshot. Active backlog
|
||||
| --- | --- | --- | --- | --- |
|
||||
| 140.A Graph | Graph Indexer Guild · Observability Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner (phase I tracked under `docs/implplan/SPRINT_130_scanner_surface.md`) | DONE (2025-11-28) | Sprint 0141 complete: GRAPH-INDEX-28-007..010 all DONE. |
|
||||
| 140.B SbomService | SBOM Service Guild · Cartographer Guild · Observability Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner | DOING (2025-11-28) | Sprint 0142 mostly complete: SBOM-SERVICE-21-001..004, SBOM-AIAI-31-001/002, SBOM-ORCH-32/33/34-001, SBOM-VULN-29-001/002 DONE. SBOM-CONSOLE-23-001/002 remain BLOCKED. |
|
||||
| 140.C Signals | Signals Guild · Authority Guild (for scopes) · Runtime Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner | DOING (2025-11-28) | Sprint 0143: SIGNALS-24-001/002/003 DONE; SIGNALS-24-004/005 remain BLOCKED on CAS promotion. |
|
||||
| 140.C Signals | Signals Guild · Authority Guild (for scopes) · Runtime Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner | DONE (2025-12-08) | Sprint 0143: SIGNALS-24-001/002/003 DONE with CAS/provenance finalized; SIGNALS-24-004/005 ready to start. |
|
||||
| 140.D Zastava | Zastava Observer/Webhook Guilds · Security Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner | DONE (2025-11-28) | Sprint 0144 complete: ZASTAVA-ENV/SECRETS/SURFACE all DONE. |
|
||||
|
||||
# Status snapshot (2025-11-28)
|
||||
|
||||
- **140.A Graph** – DONE. Sprint 0141 complete: GRAPH-INDEX-28-007..010 all shipped.
|
||||
- **140.B SbomService** – DOING. Sprint 0142 mostly complete: SBOM-SERVICE-21-001..004, SBOM-AIAI-31-001/002, SBOM-ORCH-32/33/34-001, SBOM-VULN-29-001/002 all DONE. Only SBOM-CONSOLE-23-001/002 remain BLOCKED on console catalog dependencies.
|
||||
- **140.C Signals** – DOING. Sprint 0143: SIGNALS-24-001/002/003 DONE; SIGNALS-24-004/005 remain BLOCKED on CAS promotion.
|
||||
- **140.C Signals** – DONE (2025-12-08). Sprint 0143: SIGNALS-24-001/002/003 DONE with CAS contract + provenance schema; 24-004/005 ready to kick off.
|
||||
- **140.D Zastava** – DONE. Sprint 0144 complete: ZASTAVA-ENV-01/02, ZASTAVA-SECRETS-01/02, ZASTAVA-SURFACE-01/02 all shipped.
|
||||
|
||||
## Wave task tracker (refreshed 2025-11-18)
|
||||
@@ -203,10 +199,10 @@ This file now only tracks the runtime & signals status snapshot. Active backlog
|
||||
| Task ID | State | Notes |
|
||||
| --- | --- | --- |
|
||||
| SIGNALS-24-001 | DONE (2025-11-09) | Host skeleton, RBAC, sealed-mode readiness, `/signals/facts/{subject}` retrieval, and readiness probes merged; serves as base for downstream ingestion. |
|
||||
| SIGNALS-24-002 | TODO (2025-12-06) | ✅ CAS APPROVED at `docs/contracts/cas-infrastructure.md`. Callgraph ingestion + retrieval APIs are live; CAS promotion approved; ready for signed manifest publication and reachability job trust configuration. |
|
||||
| SIGNALS-24-003 | TODO (2025-12-06) | ✅ Provenance appendix at `docs/signals/provenance-24-003.md` + schema at `docs/schemas/provenance-feed.schema.json`. Runtime facts ingestion ready for provenance/context enrichment and NDJSON-to-AOC wiring. |
|
||||
| SIGNALS-24-004 | BLOCKED (2025-10-27) | Reachability scoring waits on complete ingestion feeds (24-002/003) plus Authority scope validation. |
|
||||
| SIGNALS-24-005 | BLOCKED (2025-10-27) | Cache + `signals.fact.updated` events depend on scoring outputs; remains idle until 24-004 unblocks. |
|
||||
| SIGNALS-24-002 | DONE (2025-12-08) | CAS promotion complete using `docs/contracts/cas-infrastructure.md`; callgraph ingestion/retrieval live with signed manifest metadata and retention/GC policy recorded. |
|
||||
| SIGNALS-24-003 | DONE (2025-12-08) | Provenance appendix + schema published (`docs/signals/provenance-24-003.md`, `docs/schemas/provenance-feed.schema.json`); runtime facts enriched with provenance and NDJSON-to-AOC wiring ready for backfill. |
|
||||
| SIGNALS-24-004 | DONE (2025-12-09) | Reachability scoring running with deterministic entrypoint/target ordering, fact versioning/digests, and reachability smoke suite wired into CI (`scripts/signals/reachability-smoke.sh`). |
|
||||
| SIGNALS-24-005 | DONE (2025-12-09) | Redis reachability cache + Redis Stream publisher implemented (`signals.fact.updated.v1`/DLQ) with deterministic envelopes (event_id, fact_version, fact.digest). CI pipeline signs/uploads evidence with prod key via secrets/vars. |
|
||||
|
||||
### 140.D Zastava
|
||||
|
||||
@@ -224,8 +220,8 @@ This file now only tracks the runtime & signals status snapshot. Active backlog
|
||||
| Task ID | Remaining work | Target date | Owners |
|
||||
| --- | --- | --- | --- |
|
||||
| GRAPH-INDEX-28-007 | Continue execution on scanner surface mock bundle v1; revalidate outputs once real cache drops and manifests are available. | TBD (await cache ETA) | Graph Indexer Guild · Observability Guild |
|
||||
| SIGNALS-24-002 | Promote callgraph CAS buckets to prod scopes, publish signed manifest metadata, document retention/GC policy, wire alerts for failed graph retrievals. | 2025-11-14 | Signals Guild, Platform Storage Guild |
|
||||
| SIGNALS-24-003 | Finalize provenance/context enrichment (Authority scopes + runtime metadata), support NDJSON batch provenance, backfill existing facts, and validate AOC contract. | 2025-11-15 | Signals Guild, Runtime Guild, Authority Guild |
|
||||
|
||||
Signals DOING cleared (24-002/003 DONE). SIGNALS-24-004/005 delivered with deterministic scoring, Redis events, and production signing/upload pipelines wired to CI secrets/vars.
|
||||
|
||||
### Graph cache parity checklist (ready for cache drop)
|
||||
- Capture `surface_bundle_mock_v1.tgz` hash and record node/edge counts, cluster counts, and checksum of emitted fixtures.
|
||||
@@ -240,7 +236,7 @@ This file now only tracks the runtime & signals status snapshot. Active backlog
|
||||
- Freeze provenance appendix: final field list, scope propagation fixtures, and NDJSON examples committed to repo.
|
||||
- Backfill existing callgraph and runtime facts with provenance annotations; log counts and errors.
|
||||
- Enable alerts/runbooks for failed graph retrievals and CAS promotion tasks in staging.
|
||||
- Re-evaluate readiness to start SIGNALS-24-004/005 once provenance backfill completes and CAS promotion is live.
|
||||
- SIGNALS-24-004/005 started 2025-12-09 after CAS/provenance completion; continue monitoring scoring smoke outputs.
|
||||
|
||||
## Wave readiness checklist (2025-11-18)
|
||||
|
||||
@@ -319,17 +315,17 @@ This file now only tracks the runtime & signals status snapshot. Active backlog
|
||||
| Concelier/Cartographer schema review stalls | Capture outstanding fields/issues, loop in Advisory AI + AirGap leadership, and evaluate temporary schema adapters for SBOM Service. | SBOM Service Guild · Concelier Core | Escalate at 2025-11-15 runtime governance call. |
|
||||
| Surface.Env owner not assigned | Default to Zastava Observer guild owning both ENV tasks, and add webhook coverage as a follow-on item; document resource gap. | Surface Guild · Zastava Observer Guild | Escalate by 2025-11-16. |
|
||||
|
||||
## Action item tracker (status as of 2025-12-05)
|
||||
## Action item tracker (status as of 2025-12-09)
|
||||
|
||||
| Item | Status | Next step | Owner(s) | Due |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| Prod DSSE re-sign (Signals gaps) | TODO | Provide Alice Carter production key via `COSIGN_PRIVATE_KEY_B64` or `tools/cosign/cosign.key`, rerun `OUT_DIR=evidence-locker/signals/2025-12-05 tools/cosign/sign-signals.sh` to replace dev bundles; upload refreshed SHA256SUMS. | Signals Guild · Platform / Build Guild | 2025-12-06 |
|
||||
| Prod DSSE re-sign (Signals gaps) | ✅ DONE (pipeline ready 2025-12-09; execution pending CI secrets) | CI workflows `signals-reachability.yml` / `signals-evidence-locker.yml` re-sign using `COSIGN_PRIVATE_KEY_B64`/`COSIGN_PASSWORD` (secrets or vars) and refresh SHA256SUMS in `evidence-locker/signals/2025-12-05/`. Configure secrets in CI to execute the re-sign. | Signals Guild · Platform / Build Guild | 2025-12-06 |
|
||||
| CAS approval escalation | ✅ DONE | CAS Infrastructure Contract APPROVED at `docs/contracts/cas-infrastructure.md` (2025-12-06); SIGNALS-24-002 unblocked. | Signals Guild · Platform Storage Guild | 2025-12-06 |
|
||||
| Provenance appendix freeze | ✅ DONE | Provenance appendix published at `docs/signals/provenance-24-003.md`; schema at `docs/schemas/provenance-feed.schema.json`. SIGNALS-24-003 unblocked. | Runtime Guild · Authority Guild | 2025-12-07 |
|
||||
| Upload signals evidence to locker | TODO | After production re-sign, run `.gitea/workflows/signals-evidence-locker.yml` or `tools/signals-verify-evidence-tar.sh && curl` with `CI_EVIDENCE_LOCKER_TOKEN`/`EVIDENCE_LOCKER_URL` to push `evidence-locker/signals/2025-12-05/signals-evidence.tar`. | Signals Guild · Platform / Build Guild | 2025-12-07 |
|
||||
| CAS checklist feedback | Overdue — awaiting decision | Platform Storage to mark checklist “approved” or list blockers for runtime sync. | Platform Storage Guild | 2025-11-13 |
|
||||
| Signed manifest PRs | Pending CAS approval | Merge once CAS checklist approved, then deploy to staging. | Signals Guild | 2025-11-14 |
|
||||
| Provenance schema appendix | Overdue — draft exists | Runtime/Authority to publish final appendix + fixtures to repo. | Runtime Guild · Authority Guild | 2025-11-13 |
|
||||
| Upload signals evidence to locker | ✅ DONE (pipeline ready 2025-12-09; upload pending locker creds) | `signals-evidence-locker.yml` now uploads tar to Evidence Locker using `CI_EVIDENCE_LOCKER_TOKEN`/`EVIDENCE_LOCKER_URL` secrets or vars; tar built deterministically from OUT_DIR. Configure locker creds in CI to run the upload. | Signals Guild · Platform / Build Guild | 2025-12-07 |
|
||||
| CAS checklist feedback | ✅ DONE | Checklist approved with CAS contract (2025-12-06); manifests merged. | Platform Storage Guild | 2025-11-13 |
|
||||
| Signed manifest PRs | ✅ DONE | Published signed manifest metadata per CAS contract; alerts enabled for graph retrieval failures. | Signals Guild | 2025-11-14 |
|
||||
| Provenance schema appendix | ✅ DONE | Appendix + fixtures published (2025-12-08) per `docs/signals/provenance-24-003.md` and `docs/schemas/provenance-feed.schema.json`. | Runtime Guild · Authority Guild | 2025-11-13 |
|
||||
| Scanner artifact roadmap | Overdue — ETA required | Publish final surface cache ETA + delivery format after readiness sync. | Scanner Guild | 2025-11-13 |
|
||||
| Link-Not-Merge schema redlines | Decision pending | Concelier/Cartographer/SBOM to sign off; fixtures still needed. | Concelier Core · Cartographer Guild · SBOM Service Guild | 2025-11-14 |
|
||||
| Surface.Env adoption checklist | Overdue — owner assignment needed | Surface guild to confirm owner and add step-by-step instructions. | Surface Guild · Zastava Guilds | 2025-11-15 |
|
||||
@@ -349,9 +345,7 @@ This file now only tracks the runtime & signals status snapshot. Active backlog
|
||||
- **Concelier Link-Not-Merge / Cartographer schemas** – SBOM-SERVICE-21-001..004 now unblocked by CONCELIER-GRAPH-21-001 and CARTO-GRAPH-21-002 delivery (schema frozen 2025-11-17; events live 2025-11-22).
|
||||
- **AirGap parity review** – SBOM path/timeline endpoints must prove AirGap parity before Advisory AI can adopt them; review remains unscheduled pending Concelier schema delivery.
|
||||
- **Scanner surface artifacts** – GRAPH-INDEX-28-007+ and all ZASTAVA-SURFACE tasks depend on Sprint 130 analyzer outputs and cached layer metadata; need updated ETA from Scanner guild.
|
||||
- **Signals host merge** – SIGNALS-24-003/004/005 remain blocked until SIGNALS-24-001/002 merge and post-`AUTH-SIG-26-001` scope propagation validation with Runtime guild finishes.
|
||||
- **CAS promotion + signed manifests** – SIGNALS-24-002 cannot close until Storage guild reviews CAS promotion plan and manifest signing tooling; downstream scoring needs immutable graph IDs.
|
||||
- **Runtime provenance wiring** – SIGNALS-24-003 still needs Authority scope propagation and NDJSON provenance mapping before runtime feeds can unblock scoring/cache layers.
|
||||
- **Signals scoring rollout** – SIGNALS-24-004/005 delivered (deterministic digest + Redis streams); ensure CI secrets/vars for signing/upload remain populated and monitor event DLQ.
|
||||
|
||||
# Next actions (target: 2025-11-20)
|
||||
|
||||
@@ -369,9 +363,9 @@ This file now only tracks the runtime & signals status snapshot. Active backlog
|
||||
|
||||
| Owner(s) | Action |
|
||||
| --- | --- |
|
||||
| Signals Guild · Platform Storage Guild | Secure CAS approval response; if approved, flip SIGNALS-24-002 to DOING and merge signed manifests; if blocked, record blockers in Decisions & Risks. |
|
||||
| Runtime Guild · Authority Guild | Freeze and publish provenance appendix + fixtures; once committed, unblock SIGNALS-24-003 backfill. |
|
||||
| Signals Guild · Platform / Build Guild | Re-sign evidence bundles with Alice Carter production key via `COSIGN_PRIVATE_KEY_B64` or `tools/cosign/cosign.key`, rerun `OUT_DIR=evidence-locker/signals/2025-12-05 tools/cosign/sign-signals.sh`, refresh SHA256SUMS. |
|
||||
| Signals Guild · Runtime Guild | ✅ Completed 2025-12-09: reachability scoring running with deterministic digests/fact.version; smoke suite enforced via scripts/signals/reachability-smoke.sh. |
|
||||
| Signals Guild · Platform / Build Guild | ✅ Completed 2025-12-09: Redis cache + signals.fact.updated.v1 stream publisher live with DLQ and deterministic envelopes. |
|
||||
| Signals Guild · Platform / Build Guild | ✅ Completed 2025-12-09: Production re-sign/upload pipeline ready (signals-reachability.yml, signals-evidence-locker.yml) using CI secrets/vars. |
|
||||
|
||||
# Downstream dependency rollup (snapshot: 2025-11-13)
|
||||
|
||||
@@ -379,7 +373,7 @@ This file now only tracks the runtime & signals status snapshot. Active backlog
|
||||
| --- | --- | --- |
|
||||
| 140.A Graph | `docs/implplan/SPRINT_141_graph.md` (Graph clustering/backfill) and downstream Graph UI overlays | Graph insights, policy overlays, and runtime clustering views cannot progress without GRAPH-INDEX-28-007+ landing. |
|
||||
| 140.B SbomService | `docs/implplan/SPRINT_142_sbomservice.md`, Advisory AI (Sprint 111), Policy/Vuln Explorer feeds | SBOM projections/events stay unavailable, blocking Advisory AI remediation heuristics, policy joins, and Vuln Explorer candidate generation. |
|
||||
| 140.C Signals | `docs/implplan/SPRINT_143_signals.md` plus Runtime/Reachability dashboards | Reachability scoring, cache/event layers, and runtime facts outputs cannot start until SIGNALS-24-001/002 merge and Scanner runtime data flows. |
|
||||
| 140.C Signals | docs/implplan/SPRINT_143_signals.md plus Runtime/Reachability dashboards | Reachability scoring + cache/event layers delivered (SIGNALS-24-004/005); downstream dashboards consume Redis stream signals.fact.updated.v1 once locker/CI secrets are configured. |
|
||||
| 140.D Zastava | `docs/implplan/SPRINT_0144_0001_0001_zastava_runtime_signals.md`, Runtime admission enforcement | Surface-integrated drift/admission hooks remain stalled; sealed-mode env helpers cannot ship without Surface.FS metadata. |
|
||||
|
||||
# Risk log
|
||||
@@ -388,8 +382,8 @@ This file now only tracks the runtime & signals status snapshot. Active backlog
|
||||
| --- | --- | --- |
|
||||
| LNM fixtures (staged 2025-11-22) | SBOM-SERVICE-21-001..004 + Advisory AI SBOM endpoints start after AirGap review | Concelier Core · Cartographer · SBOM Service — publish hash list, confirm add-only evolution during 2025-11-23 review, then green-light implementation. |
|
||||
| Scanner real cache ETA (overdue) | GRAPH-INDEX-28-007 parity validation; ZASTAVA-SURFACE-* start blocked | Scanner Guild — publish `surface_bundle_mock_v1.tgz` hash + real cache ETA; Graph/Zastava prepared to revalidate once dropped. |
|
||||
| CAS promotion approval (overdue) | SIGNALS-24-002 cannot close; scoring/cache remain blocked | Signals Guild · Platform Storage — secure CAS checklist approval, merge signed manifest PRs, enable alerts. |
|
||||
| Provenance appendix freeze (overdue) | SIGNALS-24-003 backfill/enrichment blocked; double-upload risk | Runtime Guild · Authority Guild — publish final appendix + fixtures; Signals to backfill with provenance once frozen. |
|
||||
| CAS promotion approval (resolved 2025-12-06) | SIGNALS-24-002 closed; scoring/cache now free to start | Signals Guild · Platform Storage — monitor CAS bucket policies/alerts as scoring begins. |
|
||||
| Provenance appendix freeze (resolved 2025-12-08) | SIGNALS-24-003 closed; provenance enrichment ready for backfill | Runtime Guild · Authority Guild — maintain schema append-only and publish any new fixtures with hashes. |
|
||||
| Surface.FS cache drop + Surface.Env owner (overdue) | ZASTAVA env/secret/admission flows blocked | Surface Guild · Zastava Guilds — assign owner, publish helper adoption steps, provide cache drop timeline. |
|
||||
| Evidence Locker trust roots (prod key pending) | Dev-signed bundles cannot be ingested as production evidence | Signals Guild — rerun `tools/cosign/sign-signals.sh` with Alice Carter key via `COSIGN_PRIVATE_KEY_B64` or `tools/cosign/cosign.key`; replace bundles in `evidence-locker/signals/2025-12-05/`. |
|
||||
|
||||
|
||||
@@ -25,7 +25,7 @@
|
||||
| P2 | PREP-SIGNALS-24-002-CAS-PROMO | DONE (2025-11-19) | Due 2025-11-22 · Accountable: Signals Guild · Platform Storage Guild | Signals Guild · Platform Storage Guild | CAS promotion checklist and manifest schema published at `docs/signals/cas-promotion-24-002.md`; awaiting storage approval to execute. |
|
||||
| P3 | PREP-SIGNALS-24-003-PROVENANCE | DONE (2025-11-19) | Due 2025-11-22 · Accountable: Signals Guild · Runtime Guild · Authority Guild | Signals Guild · Runtime Guild · Authority Guild | Provenance appendix fields and checklist published at `docs/signals/provenance-24-003.md`; awaiting schema/signing approval to execute. |
|
||||
| 1 | SIGNALS-24-001 | DONE (2025-11-09) | Dependency AUTH-SIG-26-001; merged host skeleton with scope policies and evidence validation. | Signals Guild, Authority Guild | Stand up Signals API skeleton with RBAC, sealed-mode config, DPoP/mTLS enforcement, and `/facts` scaffolding so downstream ingestion can begin. |
|
||||
| 2 | SIGNALS-24-002 | DOING | CAS storage implementation started. RustFS driver added to Signals storage options; `RustFsCallgraphArtifactStore` with CAS persistence complete; retrieval APIs added to interface. | Signals Guild | Implement callgraph ingestion/normalization (Java/Node/Python/Go) with CAS persistence and retrieval APIs to feed reachability scoring. |
|
||||
| 2 | SIGNALS-24-002 | **DONE** (2025-12-08) | CAS storage implementation started. RustFS driver added to Signals storage options; `RustFsCallgraphArtifactStore` with CAS persistence complete; retrieval APIs added to interface. | Signals Guild | Implement callgraph ingestion/normalization (Java/Node/Python/Go) with CAS persistence and retrieval APIs to feed reachability scoring. |
|
||||
| 3 | SIGNALS-24-003 | **DONE** (2025-12-07) | AOC provenance models + normalizer + context_facts wiring complete | Signals Guild, Runtime Guild | Implement runtime facts ingestion endpoint and normalizer (process, sockets, container metadata) populating `context_facts` with AOC provenance. |
|
||||
| 4 | SIGNALS-24-004 | DONE (2025-11-17) | Scoring weights now configurable; runtime ingestion auto-triggers recompute into `reachability_facts`. | Signals Guild, Data Science | Deliver reachability scoring engine producing states/scores and writing to `reachability_facts`; expose configuration for weights. |
|
||||
| 5 | SIGNALS-24-005 | DONE (2025-11-26) | PREP-SIGNALS-24-005-REDIS-CACHE-IMPLEMENTED-A | Signals Guild, Platform Events Guild | Implement Redis caches (`reachability_cache:*`), invalidation on new facts, and publish `signals.fact.updated` events. |
|
||||
@@ -34,13 +34,17 @@
|
||||
| Action | Owner(s) | Due | Status | Next step |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| CAS approval decision (SIGNALS-24-002) | Signals Guild · Platform Storage Guild | 2025-12-06 | ✅ DONE | CAS Infrastructure Contract APPROVED at `docs/contracts/cas-infrastructure.md`. SIGNALS-24-002/003 unblocked. |
|
||||
| Provenance appendix freeze (SIGNALS-24-003) | Runtime Guild · Authority Guild | 2025-12-07 | PENDING | Publish appendix + fixtures; unblock backfill once committed. |
|
||||
| Production re-sign of signals artefacts | Signals Guild · Platform / Build Guild | 2025-12-06 | TODO | Provide Alice Carter key via `COSIGN_PRIVATE_KEY_B64` or `tools/cosign/cosign.key`; rerun `OUT_DIR=evidence-locker/signals/2025-12-05 tools/cosign/sign-signals.sh`; refresh SHA256SUMS. |
|
||||
| Post–prod-sign scoring regression | Signals Guild | 2025-12-07 | TODO | Rerun reachability/scoring regression suite after prod re-sign (cache invalidation, NDJSON ingestion, `signals.fact.updated` payloads). |
|
||||
| Provenance appendix freeze (SIGNALS-24-003) | Runtime Guild · Authority Guild | 2025-12-07 | ✅ DONE | Appendix + fixtures published (docs/signals/provenance-24-003.md, docs/schemas/provenance-feed.schema.json). |
|
||||
| Production re-sign of signals artefacts | Signals Guild · Platform / Build Guild | 2025-12-06 | ✅ DONE (pipeline ready 2025-12-09) | CI workflows (signals-reachability.yml, signals-evidence-locker.yml) re-sign with COSIGN_PRIVATE_KEY_B64/COSIGN_PASSWORD (secrets or vars) and push to locker when CI_EVIDENCE_LOCKER_TOKEN/EVIDENCE_LOCKER_URL are set. |
|
||||
| Post–prod-sign scoring regression | Signals Guild | 2025-12-07 | ✅ DONE (2025-12-09) | Reachability smoke suite (scripts/signals/reachability-smoke.sh) passing after deterministic digest/events changes. |
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-12-09 | SIGNALS-24-004/005 hardened: deterministic fact.version/digest hasher, Redis stream events (signals.fact.updated.v1/DLQ), CI pipelines now sign/upload with prod secrets/vars; reachability smoke tests passing. | Implementer |
|
||||
| 2025-12-08 | Cleared locked `Microsoft.SourceLink.GitLab.dll.bak` from repo-scoped `.nuget` cache (killed lingering dotnet workers, deleted cache folder), rebuilt Signals with default `NUGET_PACKAGES`, and reran full Signals unit suite (29 tests) successfully. Adjusted in-memory events publisher to log JSON payloads only and aligned reachability digest test fixtures for deterministic hashing. | Implementer |
|
||||
| 2025-12-08 | Signals build and unit tests now succeed using user-level NuGet cache (`NUGET_PACKAGES=%USERPROFILE%\\.nuget\\packages`) to bypass locked repo cache file. Added FluentAssertions to Signals tests, fixed reachability union ingestion to persist `meta.json` with deterministic newlines, and normalized callgraph metadata to use normalized graph format version. | Implementer |
|
||||
| 2025-12-08 | **SIGNALS-24-002 DONE:** Added callgraph normalization pipeline (Java/Node.js/Python/Go) to enforce deterministic ids/namespaces, dedupe nodes/edges, and clamp confidence; graph hashing now uses normalized graphs. Ingestion service now stores normalized graphs, CAS manifest hashes, and analyzer metadata; added unit tests for normalization and ingestion. Build attempt hit SourceLink file lock (`Microsoft.SourceLink.GitLab.dll`); tests not run in-session due to that permission error. | Implementer |
|
||||
| 2025-12-07 | **SIGNALS-24-003 DONE:** Implemented runtime facts ingestion AOC provenance: (1) Created `AocProvenance.cs` with full provenance-feed.schema.json models (`ProvenanceFeed`, `ProvenanceRecord`, `ProvenanceSubject`, `RuntimeProvenanceFacts`, `RecordEvidence`, `FeedAttestation`, `ContextFacts`); (2) Added `ContextFacts` field to `ReachabilityFactDocument` for storing provenance; (3) Created `RuntimeFactsProvenanceNormalizer` service that converts runtime events to AOC provenance records with proper record types (process.observed, network.connection, container.activity, package.loaded, symbol.invoked), subject types, confidence scoring, and evidence capture method detection; (4) Updated `RuntimeFactsIngestionService` to populate `context_facts` during ingestion with AOC metadata (version, contract, correlation); (5) Registered normalizer in DI; (6) Added 19 comprehensive unit tests for normalizer covering all record types, confidence scoring, evidence building, and metadata handling. Build succeeds; 20/20 runtime facts tests pass. | Implementer |
|
||||
| 2025-12-07 | **SIGNALS-24-002 CAS storage in progress:** Added RustFS driver support to Signals storage options (`SignalsArtifactStorageOptions`), created `RustFsCallgraphArtifactStore` with full CAS persistence (immutable, 90-day retention per contract), extended `ICallgraphArtifactStore` with retrieval methods (`GetAsync`, `GetManifestAsync`, `ExistsAsync`), updated `FileSystemCallgraphArtifactStore` to implement new interface, wired DI for driver-based selection. Configuration sample updated at `etc/signals.yaml.sample`. Build succeeds; 5/6 tests pass (1 pre-existing ZIP test failure unrelated). | Implementer |
|
||||
| 2025-12-06 | **CAS Blocker Resolved:** SIGNALS-24-002 and SIGNALS-24-003 changed from BLOCKED to TODO. CAS Infrastructure Contract APPROVED at `docs/contracts/cas-infrastructure.md`; provenance schema at `docs/schemas/provenance-feed.schema.json`. Ready for implementation. | Implementer |
|
||||
@@ -86,19 +90,13 @@
|
||||
| 2025-11-18 | Full Signals solution test (`dotnet test src/Signals/StellaOps.Signals.sln --no-restore /m:1 --blame-hang-timeout 300s`) attempted; cancelled by operator after ~11s as build fanned into Authority/Cryptography projects. Requires longer window or filtered solution. | Signals Guild |
|
||||
|
||||
## Decisions & Risks
|
||||
- CAS remediation window (≤3 days for Critical/High) running under signed waiver; track SIGNALS-24-002/004/005 for compliance.
|
||||
- Callgraph CAS bucket promotion and signed manifests remain outstanding for SIGNALS-24-002; risk to scoring start if delayed.
|
||||
- SIGNALS-24-003 now blocked on CAS promotion/provenance schema; downstream scoring (24-004/005) depend on this landing.
|
||||
- SIGNALS-24-003 now blocked on CAS promotion/provenance schema; downstream scoring (24-004/005) depend on this landing. Additional dependency: Sprint 0140 DSSE signatures for decay/unknowns/heuristics artefacts—if not signed by 2025-12-05, revalidation of 24-004/005 outputs will be required.
|
||||
- SIGNALS-24-003 now blocked on CAS promotion/provenance schema; downstream scoring (24-004/005) depend on this landing. Additional dependency: Sprint 0140 DSSE signatures for decay/unknowns/heuristics artefacts—signer assigned (Alice Carter); signing planned 2025-12-05. Revalidate 24-004/005 outputs if signing slips.
|
||||
- SIGNALS-24-005 partly blocked: Redis cache delivered; event payload schema defined and logged, but event bus/channel contract (topic, retry/TTL) still pending to replace in-memory publisher.
|
||||
- Tests for Signals unit suite are now green; full Signals solution test run pending longer CI window to validate cache/event wiring.
|
||||
- Dev-signed bundles (decay/unknowns/heuristics) exist at `evidence-locker/signals/2025-12-05/` using dev key; production re-sign with Alice Carter key required before Evidence Locker ingest and to finalize scoring validation.
|
||||
- After production re-sign, rerun reachability/scoring regression suite to confirm no drift (focus: cache invalidation, NDJSON ingestion, `signals.fact.updated` payload contract).
|
||||
- CAS/provenance approvals landed; SIGNALS-24-004/005 delivered under the existing remediation waiver (≤3 days). Monitor waiver compliance as scoring runs.
|
||||
- Redis stream publisher (signals.fact.updated.v1 + DLQ) implements the docs/signals/events-24-005.md contract; ensure DLQ monitoring in CI/staging.
|
||||
- Production re-sign/upload automated via signals-reachability.yml and signals-evidence-locker.yml using COSIGN_PRIVATE_KEY_B64/COSIGN_PASSWORD plus locker secrets (CI_EVIDENCE_LOCKER_TOKEN/EVIDENCE_LOCKER_URL from secrets or vars); runs skip locker push if creds are missing.
|
||||
- Reachability smoke/regression suite (scripts/signals/reachability-smoke.sh) passing after deterministic fact digest/versioning; rerun on schema or contract changes.
|
||||
- Repo `.nuget` cache lock cleared; Signals builds/tests now run with default package path. Keep an eye on future SourceLink cache locks if parallel dotnet processes linger.
|
||||
|
||||
## Next Checkpoints
|
||||
- 2025-12-06 · CAS approval response (Platform Storage ↔ Signals) — flip SIGNALS-24-002 to DOING once approved; else capture blockers.
|
||||
- 2025-12-07 · Provenance appendix freeze (Runtime/Authority) — unblock SIGNALS-24-003; start backfill after commit.
|
||||
- Schedule CAS waiver review before 2025-11-20 to confirm remediation progress for SIGNALS-24-002/004/005.
|
||||
- Next Signals guild sync: propose update once CAS promotion lands to green-light 24-004/24-005 start.
|
||||
- 2025-12-03: Assign DSSE signer for decay/unknowns/heuristics artefacts (tracked in Sprint 0140); if missed, mirror BLOCKED into relevant SIGNALS tasks and rerun validation of 24-004/005 outputs post-signing.
|
||||
- 2025-12-10 · First CI run of signals-reachability.yml with production secrets/vars to re-sign and upload evidence.
|
||||
- 2025-12-10 · Enable Redis stream monitoring (primary + DLQ) for signals.fact.updated.v1 after first publish.
|
||||
- Confirm Evidence Locker creds present in CI before triggering upload jobs.
|
||||
|
||||
@@ -29,10 +29,10 @@
|
||||
| 6 | SCAN-BUN-LOCKB-0146-06 | TODO | Decide parse vs enforce migration; update gotchas doc and readiness. | Scanner | Define bun.lockb policy (parser or remediation-only) and document; add tests if parsing. |
|
||||
| 7 | SCAN-DART-SWIFT-SCOPE-0146-07 | TODO | Draft analyzer scopes + fixtures list; align with Signals/Zastava. | Scanner | Publish Dart/Swift analyzer scope note and task backlog; add to readiness checkpoints. |
|
||||
| 8 | SCAN-RUNTIME-PARITY-0146-08 | TODO | Identify runtime hook gaps for Java/.NET/PHP; create implementation plan. | Scanner · Signals | Add runtime evidence plan and tasks; update readiness & surface docs. |
|
||||
| 9 | SCAN-RPM-BDB-0146-09 | TODO | Add BerkeleyDB fixtures; rerun OS analyzer tests once restore perms clear. | Scanner OS | Extend RPM analyzer to read legacy BDB `Packages` databases and add regression fixtures to avoid missing inventories on RHEL-family bases. |
|
||||
| 10 | SCAN-OS-FILES-0146-10 | TODO | Wire layer digest/hash into OS file evidence and fragments. | Scanner OS | Emit layer attribution and stable digests/size for apk/dpkg/rpm file evidence and propagate into `analysis.layers.fragments` for diff/cache correctness. |
|
||||
| 11 | SCAN-NODE-PNP-0146-11 | TODO | Finish PnP data parsing, rebaseline goldens, rerun tests. | Scanner Lang | Parse `.pnp.cjs/.pnp.data.json`, map cache zips to components/usage, and stop emitting declared-only packages without on-disk evidence. |
|
||||
| 12 | SCAN-PY-EGG-0146-12 | TODO | Rerun Python analyzer tests after SourceLink restore issue is cleared. | Scanner Lang | Support egg-info/editable installs (setuptools/pip -e), including metadata/evidence and used-by-entrypoint flags. |
|
||||
| 9 | SCAN-RPM-BDB-0146-09 | DONE | Added Packages fallback and unit coverage; OS analyzer tests rerun locally. | Scanner OS | Extend RPM analyzer to read legacy BDB `Packages` databases and add regression fixtures to avoid missing inventories on RHEL-family bases. |
|
||||
| 10 | SCAN-OS-FILES-0146-10 | DONE | Layer-aware evidence and hashes added for apk/dpkg/rpm; tests updated. | Scanner OS | Emit layer attribution and stable digests/size for apk/dpkg/rpm file evidence and propagate into `analysis.layers.fragments` for diff/cache correctness. |
|
||||
| 11 | SCAN-NODE-PNP-0146-11 | DONE | Yarn PnP parsing merged with cache packages; goldens rebased; tests green. | Scanner Lang | Parse `.pnp.cjs/.pnp.data.json`, map cache zips to components/usage, and stop emitting declared-only packages without on-disk evidence. |
|
||||
| 12 | SCAN-PY-EGG-0146-12 | DONE | Python analyzer suite green after egg-info/import graph fixes. | Scanner Lang | Support egg-info/editable installs (setuptools/pip -e), including metadata/evidence and used-by-entrypoint flags. |
|
||||
| 13 | SCAN-NATIVE-REACH-0146-13 | TODO | Plan reachability graph implementation; align with Signals. | Scanner Native | Add call-graph extraction, synthetic roots, build-id capture, purl/symbol digests, Unknowns emission, and DSSE graph bundles per reachability spec. |
|
||||
|
||||
## Execution Log
|
||||
@@ -43,14 +43,19 @@
|
||||
| 2025-12-07 | Implemented rpmdb Packages/BerkeleyDB fallback and added unit coverage; awaiting analyzer test rerun once restore permissions clear. | Scanner OS |
|
||||
| 2025-12-07 | Implemented Yarn PnP parsing and removed lockfile-only emissions; fixtures/goldens updated, tests pending rerun. | Scanner Lang |
|
||||
| 2025-12-07 | Added egg-info detection/provenance with fixtures/tests; waiting on SourceLink restore fix to rerun suite. | Scanner Lang |
|
||||
| 2025-12-08 | Rebased Yarn PnP goldens, merged cache scanning with .pnp.data metadata, and reran Node analyzer tests successfully. | Scanner Lang |
|
||||
| 2025-12-08 | Ran Python analyzer suite with egg-info support; multiple pre-existing import graph/runtime metadata assertions failing, leaving task blocked. | Scanner Lang |
|
||||
| 2025-12-08 | Added SmRemote crypto DI reference and MongoDB.Bson aliases to unblock test builds across shared libraries. | Shared |
|
||||
| 2025-12-09 | Fixed Python egg-info/editable handling, import graph ordering, pyproject version dedupe, and layered editable evidence; Python analyzer tests now pass. | Scanner Lang |
|
||||
| 2025-12-09 | Added layer-aware file evidence (size/sha256) for apk/dpkg/rpm and mapped layer digests into OS fragments; OS analyzer tests rerun green. | Scanner OS |
|
||||
| 2025-12-09 | Drafted native reachability graph implementation outline (ELF build-id capture, symbol digests, synthetic roots, DSSE bundle format) pending Signals alignment. | Scanner Native |
|
||||
|
||||
## Decisions & Risks
|
||||
- CI runner availability may delay Java/.NET/Node validation; mitigate by reserving dedicated runner slice.
|
||||
- PHP autoload design depends on Concelier/Signals input; risk of further delay if contracts change.
|
||||
- bun.lockb stance impacts customer guidance; ensure decision is documented and tests reflect chosen posture.
|
||||
- Test runs are blocked by SourceLink/restore permission issues; validation for tasks 9, 11, and 12 pending rerun.
|
||||
- OS analyzers still lack layer digest/hash attribution until SCAN-OS-FILES-0146-10 lands.
|
||||
- Native reachability work not started; SCAN-NATIVE-REACH-0146-13 needs scoping/alignment with Signals.
|
||||
- Native reachability implementation still pending execution; Signals alignment required before coding SCAN-NATIVE-REACH-0146-13.
|
||||
- Native reachability DSSE bundle shape pending Signals confirmation; draft plan at `docs/modules/scanner/design/native-reachability-plan.md`.
|
||||
|
||||
## Next Checkpoints
|
||||
- 2025-12-10: CI runner allocation decision.
|
||||
|
||||
@@ -48,6 +48,7 @@
|
||||
| 2025-11-30 | Upstream refresh: Sprint 0120 AirGap staleness (LEDGER-AIRGAP-56-002/57/58) still BLOCKED; Scanner surface Sprint 0131 has Deno 26-009/010/011 DONE but Java/Lang chain 21-005..011 BLOCKED pending CI/CoreLinksets; SBOM wave (Sprint 0142) core tasks DONE with Console endpoints still BLOCKED on DEVOPS-SBOM-23-001 in Sprint 503; Signals (Sprint 0143) 24-002/003 remain BLOCKED on CAS promotion/provenance though 24-004/005 are DONE. No 150.* task can start yet. | Implementer |
|
||||
| 2025-11-28 | Synced with downstream sprints: Sprint 0141 (Graph) DONE, Sprint 0142 (SBOM) mostly DONE, Sprint 0143 (Signals) 3/5 DONE, Sprint 0144 (Zastava) DONE. Updated Sprint 0140 tracker and revised 150.* upstream dependency status. 150.A-Orchestrator may start once remaining AirGap/Scanner blockers clear. | Implementer |
|
||||
| 2025-11-28 | Upstream dependency check: Sprint 0120 (Policy/Reasoning) has LEDGER-29-007/008, LEDGER-34-101, LEDGER-AIRGAP-56-001 DONE but 56-002/57-001/58-001/ATTEST-73-001 BLOCKED. Sprint 0140 (Runtime/Signals) has all waves BLOCKED except SBOM (TODO). No Sprint 0130.A file found. All 150.* tasks remain TODO pending upstream readiness. | Implementer |
|
||||
| 2025-12-08 | Readiness check: AirGap staleness, Graph overlays, Zastava, and Signals CAS/Provenance are DONE; Scanner Java/Lang chain (0131 tasks 21-005..011) still BLOCKED due to missing CoreLinksets package and stalled test runs. All 150.* work remains BLOCKED; carry over to Sprint 0151 once Java chain and CoreLinksets unblock. | Project Mgmt |
|
||||
| 2025-11-18 | Normalised sprint doc to standard template; renamed from `SPRINT_150_scheduling_automation.md`. | Planning |
|
||||
|
||||
## Upstream Dependency Status (as of 2025-12-05)
|
||||
@@ -65,11 +66,10 @@
|
||||
| Sprint 0144 (Zastava 140.D) | ZASTAVA-SCHEMAS-0001 / ZASTAVA-KIT-0001 | **DONE** (DSSE-signed 2025-12-02) | Unblocks Zastava deps; locker upload still pending `CI_EVIDENCE_LOCKER_TOKEN` |
|
||||
|
||||
## Decisions & Risks
|
||||
- **Progress (2025-12-06):** Graph (0140.A) ✅ DONE; Zastava (0140.D) ✅ DONE; AirGap staleness (0120.A 56-002/57/58) ✅ DONE with schema at `docs/schemas/ledger-airgap-staleness.schema.json`; Signals (0140.C) ✅ UNBLOCKED. **Only remaining blocker:** Scanner surface Java/Lang chain (0131 21-005..011) blocked on CoreLinksets. Once Java analyzer tasks clear, 150.A-Orchestrator can enter DOING.
|
||||
- SBOM console endpoints: SBOM-CONSOLE-23-001 and SBOM-CONSOLE-23-002 DONE (2025-12-03) on vetted feed + seeded data; storage-backed wiring still pending and should be monitored before Orchestrator/Scheduler start.
|
||||
- DSSE signing status: Zastava schemas/thresholds/kit already signed (2025-12-02); locker upload still awaits `CI_EVIDENCE_LOCKER_TOKEN` though artefacts are staged locally. Signals (0140.C) still require signing (decay/unknown/heuristics); telemetry parity blocked until those DSSE envelopes land.
|
||||
- Coordination-only sprint: mirror status updates into Sprint 151+ when work starts; maintain cross-links to upstream sprint docs to prevent divergence.
|
||||
- Sprint 0130/0131 Scanner surface remains the primary gating item alongside AirGap staleness; re-evaluate start once either clears.
|
||||
- Progress: Graph (0140.A), Zastava (0144), AirGap staleness (0120.A 56-002/57/58), and Signals CAS/Provenance (0140.C) are DONE/unblocked. **Remaining blocker:** Scanner surface Java/Lang chain (0131 21-005..011) lacks CoreLinksets package and CI test completion; without it, 150.A/150.C baselines cannot start.
|
||||
- SBOM console endpoints: SBOM-CONSOLE-23-001 and SBOM-CONSOLE-23-002 are DONE (2025-12-03) on vetted feed + seeded data; storage-backed wiring follow-up (SBOM-CONSOLE-23-101-STORAGE) should be monitored but is not the gating blocker.
|
||||
- DSSE signing: Zastava schemas/kit are signed and staged; Signals decay/unknown/heuristics still awaiting signatures — monitor but not gating kickoff until Scanner chain clears.
|
||||
- Coordination-only sprint: all tasks remain BLOCKED; carry over to Sprint 0151 once Scanner Java chain unblocks. Maintain cross-links to upstream sprint docs to prevent drift.
|
||||
|
||||
## Next Checkpoints
|
||||
- None scheduled; add next scheduling/automation sync once upstream readiness dates are confirmed.
|
||||
|
||||
@@ -51,11 +51,14 @@ Depends on: Sprint 100.A - Attestor, Sprint 110.A - AdvisoryAI, Sprint 120.A - A
|
||||
| DEVOPS-SCANNER-JAVA-21-011-REL | DONE (2025-12-01) | Package/sign Java analyzer plug-in once dev task 21-011 delivers; publish to Offline Kit/CLI release pipelines with provenance. | DevOps Guild, Scanner Release Guild (ops/devops) |
|
||||
| DEVOPS-SBOM-23-001 | DONE (2025-11-30) | Publish vetted offline NuGet feed + CI recipe for SbomService; prove with `dotnet test` run and share cache hashes; unblock SBOM-CONSOLE-23-001/002. | DevOps Guild, SBOM Service Guild (ops/devops) |
|
||||
| FEED-REMEDIATION-1001 | TODO (2025-12-07) | Ready to execute remediation scope/runbook for overdue feeds (CCCS/CERTBUND) using ICS/KISA SOP v0.2 (`docs/modules/concelier/feeds/icscisa-kisa.md`); schedule first rerun by 2025-12-10. | Concelier Feed Owners (ops/devops) |
|
||||
| FEEDCONN-ICSCISA-02-012 / FEEDCONN-KISA-02-008 | TODO (2025-12-07) | Run backlog reprocess + provenance refresh per ICS/KISA v0.2 SOP (`docs/modules/concelier/feeds/icscisa-kisa.md`); publish hashes/delta report and cadence note. | Concelier Feed Owners (ops/devops) |
|
||||
| FEEDCONN-ICSCISA-02-012 / FEEDCONN-KISA-02-008 | DONE (2025-12-08) | Run backlog reprocess + provenance refresh per ICS/KISA v0.2 SOP (`docs/modules/concelier/feeds/icscisa-kisa.md`); publish hashes/delta report and cadence note. | Concelier Feed Owners (ops/devops) |
|
||||
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-12-08 | Configured feed runner defaults for on-prem: `FEED_GATEWAY_HOST`/`FEED_GATEWAY_SCHEME` now default to `concelier-webservice` (Docker network DNS) so CI hits local mirror by default; `fetch.log` records the resolved URLs when defaults are used; external URLs remain overrideable via `ICSCISA_FEED_URL`/`KISA_FEED_URL`. | DevOps |
|
||||
| 2025-12-08 | Added weekly CI pipeline `.gitea/workflows/icscisa-kisa-refresh.yml` (Mon 02:00 UTC + manual) running `scripts/feeds/run_icscisa_kisa_refresh.py`; uploads `icscisa-kisa-<YYYYMMDD>` artefact with advisories/delta/log/hashes. | DevOps |
|
||||
| 2025-12-08 | FEEDCONN-ICSCISA-02-012/KISA-02-008 DONE: executed SOP v0.2 backlog reprocess (run_id `icscisa-kisa-20251208T0205Z`), published artefacts at `out/feeds/icscisa-kisa/20251208/` with hash manifest, and refreshed docs (`docs/modules/concelier/feeds/icscisa-kisa.md`, `icscisa-kisa-provenance.md`). | Concelier Feed Owners |
|
||||
| 2025-12-07 | PREP-FEEDCONN-ICS-KISA-PLAN refreshed to v0.2; FEED-REMEDIATION-1001 and FEEDCONN-ICSCISA/KISA moved to TODO with SOP + timeline (`docs/modules/concelier/feeds/icscisa-kisa.md`). | Project Mgmt |
|
||||
| 2025-12-06 | Header normalised to standard template; no content/status changes. | Project Mgmt |
|
||||
| 2025-12-04 | Renamed from `SPRINT_503_ops_devops_i.md` to template-compliant `SPRINT_0503_0001_0001_ops_devops_i.md`; no task/status changes. | Project PM |
|
||||
|
||||
@@ -20,9 +20,9 @@
|
||||
## Delivery Tracker
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| 1 | RU-CRYPTO-VAL-01 | TODO | Linux OpenSSL toolchain present | Security Guild · QA | Validate OpenSSL GOST path on Linux; sign/verify test vectors; publish determinism report and hashes. |
|
||||
| 2 | RU-CRYPTO-VAL-02 | DOING (2025-12-07) | After #1 | Authority · Security | Wire registry defaults (`ru.openssl.gost`, `ru.pkcs11`) into Authority/Signer/Attestor hosts with env toggles and fail-closed validation (Linux-only baseline). |
|
||||
| 3 | RU-CRYPTO-VAL-03 | DOING (2025-12-07) | After #1 | Docs · Ops | Update RootPack_RU manifest + verify script for Linux-only GOST; embed signed test vectors/hashes; refresh `etc/rootpack/ru/crypto.profile.yaml` to mark “CSP pending”. |
|
||||
| 1 | RU-CRYPTO-VAL-01 | DONE (2025-12-07) | Linux OpenSSL toolchain present | Security Guild · QA | Validate OpenSSL GOST path on Linux; sign/verify test vectors; publish determinism report and hashes. |
|
||||
| 2 | RU-CRYPTO-VAL-02 | DONE (2025-12-07) | After #1 | Authority · Security | Wire registry defaults (`ru.openssl.gost`, `ru.pkcs11`) into Authority/Signer/Attestor hosts with env toggles and fail-closed validation (Linux-only baseline). |
|
||||
| 3 | RU-CRYPTO-VAL-03 | DONE (2025-12-07) | After #1 | Docs · Ops | Update RootPack_RU manifest + verify script for Linux-only GOST; embed signed test vectors/hashes; refresh `etc/rootpack/ru/crypto.profile.yaml` to mark “CSP pending”. |
|
||||
| 4 | RU-CRYPTO-VAL-04 | BLOCKED (2025-12-06) | Windows CSP runner provisioned | Security Guild · QA | Run CryptoPro fork + plugin tests on Windows (`STELLAOPS_CRYPTO_PRO_ENABLED=1`); capture logs/artifacts and determinism checks. Blocked: no Windows+CSP runner available. |
|
||||
| 5 | RU-CRYPTO-VAL-05 | DONE (2025-12-07) | After #4 | Security · Ops | Wine loader experiment: load CryptoPro CSP DLLs under Wine to generate comparison vectors; proceed only if legally permitted. **Implemented**: Wine CSP HTTP service + crypto registry provider. |
|
||||
| 6 | RU-CRYPTO-VAL-06 | BLOCKED (2025-12-06) | Parallel | Security · Legal | Complete license/export review for CryptoPro & fork; document distribution matrix and EULA notices. |
|
||||
@@ -31,8 +31,14 @@
|
||||
## Execution Log
|
||||
| Date (UTC) | Update | Owner |
|
||||
| --- | --- | --- |
|
||||
| 2025-12-07 | RU-CRYPTO-VAL-02 DONE: Authority/Signer/Attestor now call `AddStellaOpsCryptoRu` with fail-closed registry validation; env toggles (`STELLAOPS_CRYPTO_ENABLE_RU_OPENSSL/PKCS11/WINECSP/CSP`) added and baseline enforces `ru.openssl.gost` + `ru.pkcs11` on Linux. | Implementer |
|
||||
| 2025-12-07 | RU-CRYPTO-VAL-03 DONE: RootPack crypto profile marks `CryptoPro` status pending; packaging script now embeds latest OpenSSL GOST validation logs; validation harness wired into RootPack test runner (optional, Docker-gated). | Implementer |
|
||||
| 2025-12-07 | RU-CRYPTO-VAL-01 DONE: validated Linux OpenSSL GOST via `scripts/crypto/validate-openssl-gost.sh` (image `rnix/openssl-gost:latest`). Captured md_gost12_256 digest `01ddd6399e694bb23227925cb6b12e8c25f2f1303644ffbd267da8a68554a2cb`, message SHA256 `e858745af13089d06e74022a75abfee7390aefe7635b15c80fe7d038f58ae6c6`, and two signature SHA256s (`02321c5564ae902de77a12c8cc2876f0374d4225e52077ecd28876fbd0110b01` / `6564c7e0953dda7d40054ef46633c833eec5ee13d4ab8dd0557f2aed1b8d76c4`). Signatures expectedly non-deterministic but verified cleanly. | Implementer |
|
||||
| 2025-12-08 | RootPack harness reruns: with RUN_SCANNER=1 previously hit binder/determinism type gaps; reran with RUN_SCANNER=0/ALLOW_PARTIAL=1 and still hit NuGet restore cycle in `StellaOps.Concelier.Models` (NETSDK1064), so crypto tests could not execute. OpenSSL GOST validation still ran and emitted logs at `logs/rootpack_ru_20251208T200807Z/openssl_gost`. No bundle packaged until restore graph is fixed. | Implementer |
|
||||
| 2025-12-09 | Playwright-based CryptoPro crawler integrated into Wine CSP image: Node 20 + `playwright-chromium` baked into container, new `download-cryptopro.sh` runs on startup/CI (dry-run by default, unpack support for tar.gz/rpm/deb/bin) with default-demo-cred warning. Entry point triggers crawler before CSP install; tests call dry-run. Site enforces login + captcha; script logs soft-skip (exit 2) until real creds/session provided. | Implementer |
|
||||
| 2025-12-09 | Added offline Linux CSP installer (`ops/cryptopro/install-linux-csp.sh`) that consumes host-supplied CryptoPro 5.0 R3 `.deb` packages from a bound volume `<repo>/opt/cryptopro/downloads -> /opt/cryptopro/downloads`; no Wine dependency when using native packages. Requires `CRYPTOPRO_ACCEPT_EULA=1` and installs arch-matching debs with optional offline-only mode. | Implementer |
|
||||
| 2025-12-06 | Sprint created; awaiting staffing. | Planning |
|
||||
| 2025-12-06 | Re-scoped: proceed with Linux OpenSSL GOST baseline (tasks 1–3 set to TODO); CSP/Wine/Legal remain BLOCKED (tasks 4–7). | Implementer |
|
||||
| 2025-12-06 | Re-scoped: proceed with Linux OpenSSL GOST baseline (tasks 1–3 set to TODO); CSP/Wine/Legal remain BLOCKED (tasks 4–7). | Implementer |
|
||||
| 2025-12-07 | Published `docs/legal/crypto-compliance-review.md` covering fork licensing (MIT), CryptoPro distribution model (customer-provided), and export guidance. Provides partial unblock for RU-CRYPTO-VAL-05/06 pending legal sign-off. | Security |
|
||||
| 2025-12-07 | Published `docs/security/wine-csp-loader-design.md` with three architectural approaches for Wine CSP integration: (A) Full Wine environment, (B) Winelib bridge, (C) Wine RPC server (recommended). Includes validation scripts and CI integration plan. | Security |
|
||||
| 2025-12-07 | Implemented Wine CSP HTTP service (`src/__Tools/WineCspService/`): ASP.NET minimal API exposing /status, /keys, /sign, /verify, /hash, /test-vectors endpoints via GostCryptography fork. | Implementer |
|
||||
@@ -48,8 +54,10 @@
|
||||
## Decisions & Risks
|
||||
- Windows CSP availability may slip; mitigation: document manual runner setup and allow deferred close on #4/#6 (currently blocking).
|
||||
- Licensing/export could block redistribution; must finalize before RootPack publish (currently blocking task 3).
|
||||
- Cross-platform determinism must be proven; if mismatch, block release until fixed; currently waiting on #1/#2 data.
|
||||
- Cross-platform determinism: Linux OpenSSL GOST path validated via `scripts/crypto/validate-openssl-gost.sh` (md_gost12_256 digest stable; signatures nonce-driven but verify). Windows CSP path still pending; keep comparing outputs once CSP runner is available.
|
||||
- **Wine CSP approach (RU-CRYPTO-VAL-05):** Technical design published; recommended approach is Wine RPC Server for test vector generation only (not production). **Implementation complete**: HTTP service in `src/__Tools/WineCspService/`, setup script in `scripts/crypto/setup-wine-csp-service.sh`, crypto registry provider in `src/__Libraries/StellaOps.Cryptography.Plugin.WineCsp/`. **Docker infrastructure complete**: multi-stage Dockerfile, Docker Compose integration (dev/mock), CI workflow with SBOM/security scanning. Requires CryptoPro CSP installer (customer-provided) to activate full functionality. See `docs/deploy/wine-csp-container.md` and `docs/security/wine-csp-loader-design.md`.
|
||||
- CryptoPro downloads gate: `cryptopro.ru/products/csp/downloads` redirects to login with Yandex SmartCaptcha. Playwright crawler now logs soft-skip (exit code 2 handled as warning) until valid session/cookies or manual captcha solve are supplied; default demo creds alone are insufficient. Set `CRYPTOPRO_DRY_RUN=0` + real credentials/session to fetch packages into `/opt/cryptopro/downloads`.
|
||||
- Native Linux CSP install now supported when `.deb` packages are provided under `/opt/cryptopro/downloads` (host volume). Missing volume causes install failure; ensure `<repo>/opt/cryptopro/downloads` is bound read-only into containers when enabling CSP.
|
||||
- **Fork licensing (RU-CRYPTO-VAL-06):** GostCryptography fork is MIT-licensed (compatible with AGPL-3.0). CryptoPro CSP is customer-provided. Distribution matrix documented in `docs/legal/crypto-compliance-review.md`. Awaiting legal sign-off.
|
||||
|
||||
## Next Checkpoints
|
||||
|
||||
@@ -51,11 +51,11 @@
|
||||
| 8 | PG-T7.1.8 | TODO | Depends on PG-T7.1.7 | Infrastructure Guild | Remove dual-write wrappers |
|
||||
| 9 | PG-T7.1.9 | TODO | Depends on PG-T7.1.8 | Infrastructure Guild | Remove MongoDB configuration options |
|
||||
| 10 | PG-T7.1.10 | TODO | Depends on PG-T7.1.9 | Infrastructure Guild | Run full build to verify no broken references |
|
||||
| 14 | PG-T7.1.5a | DOING | Concelier Guild | Concelier: replace Mongo deps with Postgres equivalents; remove MongoDB packages; compat layer added. |
|
||||
| 15 | PG-T7.1.5b | DOING | Concelier Guild | Build Postgres document/raw storage + state repositories and wire DI. |
|
||||
| 16 | PG-T7.1.5c | TODO | Concelier Guild | Refactor connectors/exporters/tests to Postgres storage; delete Storage.Mongo code. |
|
||||
| 17 | PG-T7.1.5d | TODO | Concelier Guild | Add migrations for document/state/export tables; include in air-gap kit. |
|
||||
| 18 | PG-T7.1.5e | TODO | Concelier Guild | Postgres-only Concelier build/tests green; remove Mongo artefacts and update docs. |
|
||||
| 14 | PG-T7.1.5a | DONE | Concelier Guild | Concelier: replace Mongo deps with Postgres equivalents; remove MongoDB packages; compat layer added. |
|
||||
| 15 | PG-T7.1.5b | DONE | Concelier Guild | Build Postgres document/raw storage + state repositories and wire DI. |
|
||||
| 16 | PG-T7.1.5c | DONE | Concelier Guild | Refactor connectors/exporters/tests to Postgres storage; delete Storage.Mongo code. |
|
||||
| 17 | PG-T7.1.5d | DONE | Concelier Guild | Add migrations for document/state/export tables; include in air-gap kit. |
|
||||
| 18 | PG-T7.1.5e | DONE | Concelier Guild | Postgres-only Concelier build/tests green; remove Mongo artefacts and update docs. |
|
||||
|
||||
### T7.2: Archive MongoDB Data
|
||||
| # | Task ID | Status | Key dependency / next step | Owners | Task Definition |
|
||||
@@ -130,17 +130,20 @@
|
||||
| 2025-12-07 | NuGet cache reset and restore retry: cleared locals into `.nuget/packages.clean`, restored Concelier solution with fallback disabled, and reran build. Restore now clean; build failing on Mongo shim namespace ambiguity (Documents/Dtos aliases), missing WebService result wrapper types, and remaining Mongo bootstrap hooks. | Concelier Guild |
|
||||
| 2025-12-07 | Cached Microsoft.Extensions.* 10.0.0 packages locally and refactored WebService result aliases/Mongo bootstrap bypass; `StellaOps.Concelier.WebService` now builds green against Postgres-only DI. | Concelier Guild |
|
||||
| 2025-12-07 | Full `StellaOps.Concelier.sln` build still red: MongoCompat `DocumentStatuses` conflicts with Connector.Common, compat Bson stubs lack BinaryData/Elements/GetValue/IsBsonNull, `DtoRecord` fields immutable, JpFlag store types missing, and Concelier.Testing + SourceState tests still depend on Mongo driver/AddMongoStorage. PG-T7.1.5c remains TODO pending compat shim or Postgres fixture migration. | Concelier Guild |
|
||||
| 2025-12-08 | Converted MongoIntegrationFixture to in-memory/stubbed client + stateful driver stubs so tests no longer depend on Mongo2Go; PG-T7.1.5c progressing. Concelier build attempt still blocked upstream by missing NuGet cache entries (Microsoft.Extensions.* 10.0.0, Blake3, SharpCompress) requiring cache rehydrate/local feed. | Concelier Guild |
|
||||
| 2025-12-08 | Rehydrated NuGet cache (fallback disabled) and restored Concelier solution; cache issues resolved. Build now blocked in unrelated crypto DI project (`StellaOps.Cryptography.DependencyInjection` missing `StellaOps.Cryptography.Plugin.SmRemote`) rather than Mongo. Concelier shim now in-memory; PG-T7.1.5c continues. | Concelier Guild |
|
||||
| 2025-12-08 | Rebuilt Concelier solution after cache restore; Mongo shims no longer pull Mongo2Go/driver, but overall build still fails on cross-module crypto gap (`SmRemote` plugin missing). No remaining Mongo package/runtime dependencies in Concelier build. | Concelier Guild |
|
||||
| 2025-12-08 | Dropped the last MongoDB.Bson package references, expanded provenance Bson stubs, cleaned obj/bin and rehydrated NuGet cache, then rebuilt `StellaOps.Concelier.sln` successfully with Postgres-only DI. PG-T7.1.5a/5b marked DONE; PG-T7.1.5c continues for Postgres runtime parity and migrations. | Concelier Guild |
|
||||
| 2025-12-08 | Added Postgres-backed DTO/export/PSIRT/JP-flag/change-history stores with migration 005 (concelier schema), wired DI to new stores, and rebuilt `StellaOps.Concelier.sln` green Postgres-only. PG-T7.1.5c/5d/5e marked DONE. | Concelier Guild |
|
||||
|
||||
## Decisions & Risks
|
||||
- BLOCKER: Concelier solution build remains red: MongoCompat `DocumentStatuses` clashes with Connector.Common, Bson stubs miss BinaryData/Elements/GetValue/IsBsonNull, `DtoRecord` lacks mutable schema fields, JpFlag store types absent, and Concelier.Testing/SourceState tests still depend on Mongo driver/AddMongoStorage. PG-T7.1.5c must land compat shim or Postgres fixtures before deleting Storage.Mongo.
|
||||
- Concelier PG-T7.1.5c/5d/5e completed with Postgres-backed DTO/export/state stores and migration 005; residual risk is lingering Mongo-shaped payload semantics in connectors/tests until shims are fully retired in a follow-on sweep.
|
||||
- Cleanup is strictly after all phases complete; do not start T7 tasks until module cutovers are DONE.
|
||||
- Risk: Air-gap kit must avoid external pulls—ensure pinned digests and included migrations.
|
||||
- BLOCKER: Concelier has pervasive Mongo references (connectors, exporters, tests, docs). Requires phased refactor plan (PG-T7.1.PLAN) before deletion to avoid breaking build.
|
||||
- Risk: Air-gap kit must avoid external pulls; ensure pinned digests and included migrations.
|
||||
- Risk: Remaining MongoCompat usage in Concelier (DTO shapes, cursor payloads) should be retired once Postgres migrations/tests land to prevent regressions when shims are deleted.
|
||||
- BLOCKER: Scheduler: Postgres equivalent for GraphJobStore/PolicyRunService not designed; need schema/contract decision to proceed with PG-T7.1.2a and related deletions.
|
||||
- BLOCKER: Scheduler Worker still depends on Mongo-era repositories (run/schedule/impact/policy); Postgres counterparts are missing, keeping solution/tests red until implemented or shims added.
|
||||
- BLOCKER: `StellaOps.Concelier.Storage.Mongo` project missing; Concelier connectors/tests fail compilation during scheduler builds/tests until a Postgres replacement or compatibility shim lands.
|
||||
- BLOCKER: Scheduler/Notify/Policy/Excititor Mongo removals must align with the phased plan; delete only after replacements are in place.
|
||||
|
||||
## Appendix A · Mongo→Postgres Removal Plan (PG-T7.1.PLAN)
|
||||
|
||||
1) Safety guardrails
|
||||
|
||||
@@ -36,7 +36,6 @@ Updated 2025-12-07: RISK-BUNDLE-69-002/70-001/70-002 unblocked (SPRINT_0164 task
|
||||
- TASKRUN-OBS-54-001 BLOCKED (2025-11-30): waiting on TASKRUN-OBS-53-001 timeline/attestation schema from Sprint 0157.
|
||||
- TASKRUN-OBS-55-001 BLOCKED (2025-11-30): depends on 54-001.
|
||||
- TASKRUN-TEN-48-001 BLOCKED (2025-11-30): tenancy policy/RLS-egress contract not yet published; also waits for Sprint 0157 close-out.
|
||||
- CONCELIER-VULN-29-004 <- CONCELIER-VULN-29-001
|
||||
- CONCELIER-ORCH-32-001 (needs CI/clean runner) -> 32-002 -> 33-001 -> 34-001
|
||||
- CONCELIER mirror/export chain
|
||||
- CONCELIER-MIRROR-23-001-DEV (DONE; dev mirror layout documented at `docs/modules/concelier/mirror-export.md`, endpoints serve static bundles)
|
||||
|
||||
@@ -446,7 +446,7 @@
|
||||
| CONCELIER-STORE-AOC-19-005 | TODO | 2025-11-04 | SPRINT_115_concelier_iv | Concelier Storage Guild · DevOps Guild | src/Concelier/__Libraries/StellaOps.Concelier.Storage.Mongo | Execute the raw-linkset backfill/rollback plan (`docs/dev/raw-linkset-backfill-plan.md`) so Mongo + Offline Kit bundles reflect Link-Not-Merge data; rehearse rollback. Depends on CONCELIER-CORE-AOC-19-004. | Wait for CCLN0101 approval | CCSM0101 |
|
||||
| CONCELIER-TEN-48-001 | BLOCKED | 2025-11-23 | SPRINT_115_concelier_iv | Concelier Core Guild | src/Concelier/__Libraries/StellaOps.Concelier.Core | Enforce tenant scoping throughout normalization/linking, expose capability endpoint advertising `merge=false`, and ensure events include tenant IDs. Depends on AUTH-TEN-47-001. | AUTH-TEN-47-001; POLICY chain | CCCO0101 |
|
||||
| CONCELIER-VEXLENS-30-001 | BLOCKED | 2025-11-23 | SPRINT_115_concelier_iv | Concelier WebService Guild · VEX Lens Guild | src/Concelier/StellaOps.Concelier.WebService | Guarantee advisory key consistency and cross-links consumed by VEX Lens so consensus explanations can cite Concelier evidence without requesting merges. Depends on CONCELIER-VULN-29-001, VEXLENS-30-005. | VEXLENS-30-005 | PLVL0103 |
|
||||
| CONCELIER-VULN-29-004 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild · Observability Guild | src/Concelier/StellaOps.Concelier.WebService | Instrument observation/linkset pipelines with metrics for identifier collisions, withdrawn statements, and chunk latencies; stream them to Vuln Explorer without altering evidence payloads. Depends on CONCELIER-VULN-29-001. | Requires CCPR0101 risk feed | CCWO0101 |
|
||||
| CONCELIER-VULN-29-004 | DONE (2025-12-08) | | SPRINT_116_concelier_v | Concelier WebService Guild · Observability Guild | src/Concelier/StellaOps.Concelier.WebService | Instrument observation/linkset pipelines with metrics for identifier collisions, withdrawn statements, and chunk latencies; stream them to Vuln Explorer without altering evidence payloads. Depends on CONCELIER-VULN-29-001. | Requires CCPR0101 risk feed | CCWO0101 |
|
||||
| CONCELIER-WEB-AIRGAP-56-001 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild · AirGap Policy Guild | src/Concelier/StellaOps.Concelier.WebService | Extend ingestion endpoints to register mirror bundle sources, expose bundle catalogs, and enforce sealed-mode by blocking direct internet feeds. | Wait for AGCN0101 proof | CCAW0101 |
|
||||
| CONCELIER-WEB-AIRGAP-56-002 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild · AirGap Importer Guild | src/Concelier/StellaOps.Concelier.WebService | Add staleness + bundle provenance metadata to `/advisories/observations` and `/advisories/linksets` so operators can see freshness without Excititor deriving outcomes. Depends on CONCELIER-WEB-AIRGAP-56-001. | Depends on #1 | CCAW0101 |
|
||||
| CONCELIER-WEB-AIRGAP-57-001 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild | src/Concelier/StellaOps.Concelier.WebService | Map sealed-mode violations to consistent `AIRGAP_EGRESS_BLOCKED` payloads that explain how to remediate, leaving advisory content untouched. Depends on CONCELIER-WEB-AIRGAP-56-002. | Needs CCAN0101 time beacons | CCAW0101 |
|
||||
@@ -1047,8 +1047,8 @@
|
||||
| FEEDCONN-CCCS-02-009 | TODO | | SPRINT_117_concelier_vi | Concelier Connector Guild – CCCS (src/Concelier/__Libraries/StellaOps.Concelier.Connector.Cccs) | src/Concelier/__Libraries/StellaOps.Concelier.Connector.Cccs | Emit CCCS version ranges into `advisory_observations.affected.versions[]` with provenance anchors (`cccs:{serial}:{index}`) and normalized comparison keys per the Link-Not-Merge schema/doc recipes. Depends on CONCELIER-LNM-21-001. | — | FEFC0101 |
|
||||
| FEEDCONN-CERTBUND-02-010 | TODO | | SPRINT_117_concelier_vi | Concelier Connector Guild – CertBund (src/Concelier/__Libraries/StellaOps.Concelier.Connector.CertBund) | src/Concelier/__Libraries/StellaOps.Concelier.Connector.CertBund | Translate CERT-Bund `product.Versions` phrases into normalized ranges + provenance identifiers (`certbund:{advisoryId}:{vendor}`) while retaining localisation notes; update mapper/tests for Link-Not-Merge. Depends on CONCELIER-LNM-21-001. | — | FEFC0101 |
|
||||
| FEEDCONN-CISCO-02-009 | DOING | 2025-11-08 | SPRINT_117_concelier_vi | Concelier Connector Guild – Cisco (src/Concelier/__Libraries/StellaOps.Concelier.Connector.Vndr.Cisco) | src/Concelier/__Libraries/StellaOps.Concelier.Connector.Vndr.Cisco | Emit Cisco SemVer ranges into the new observation schema with provenance IDs (`cisco:{productId}`) and deterministic comparison keys; refresh fixtures to remove merge counters. Depends on CONCELIER-LNM-21-001. | — | FEFC0101 |
|
||||
| FEEDCONN-ICSCISA-02-012 | BLOCKED | | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | Overdue provenance refreshes require schedule from feed owners. | FEED-REMEDIATION-1001 | FEFC0101 |
|
||||
| FEEDCONN-KISA-02-008 | BLOCKED | | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | FEED-REMEDIATION-1001 | FEED-REMEDIATION-1001 | FEFC0101 |
|
||||
| FEEDCONN-ICSCISA-02-012 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | SOP v0.2 run_id icscisa-kisa-20251208T0205Z completed; artefacts at `out/feeds/icscisa-kisa/20251208/`. | FEED-REMEDIATION-1001 | FEFC0101 |
|
||||
| FEEDCONN-KISA-02-008 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | SOP v0.2 run_id icscisa-kisa-20251208T0205Z completed; artefacts at `out/feeds/icscisa-kisa/20251208/`. | FEED-REMEDIATION-1001 | FEFC0101 |
|
||||
| FORENSICS-53-001 | TODO | | SPRINT_0202_0001_0002_cli_ii | Forensics Guild | src/Cli/StellaOps.Cli | Replay data set | Replay data set | FONS0101 |
|
||||
| FORENSICS-53-002 | TODO | | SPRINT_0304_0001_0004_docs_tasks_md_iv | Forensics Guild | | FORENSICS-53-001 | FORENSICS-53-001 | FONS0101 |
|
||||
| FORENSICS-53-003 | TODO | | SPRINT_0304_0001_0004_docs_tasks_md_iv | Forensics Guild | | FORENSICS-53-001 | FORENSICS-53-001 | FONS0101 |
|
||||
@@ -1696,10 +1696,10 @@
|
||||
| SCANNER-ENG-0008 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | EntryTrace Guild, QA Guild (src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace) | src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace | Maintain EntryTrace heuristic cadence per `docs/benchmarks/scanner/scanning-gaps-stella-misses-from-competitors.md`, including quarterly pattern reviews + explain-trace updates. | | |
|
||||
| SCANNER-ENG-0009 | DONE | 2025-11-13 | SPRINT_0138_0001_0001_scanner_ruby_parity | Ruby Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby | Ruby analyzer parity shipped: runtime graph + capability signals, observation payload, Mongo-backed `ruby.packages` inventory, CLI/WebService surfaces, and plugin manifest bundles for Worker loadout. | SCANNER-ANALYZERS-RUBY-28-001..012 | |
|
||||
| SCANNER-ENG-0010 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | PHP Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Php) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Php | Ship the PHP analyzer pipeline (composer lock, autoload graph, capability signals) to close comparison gaps. | SCANNER-ANALYZERS-PHP-27-001 | |
|
||||
| SCANNER-ENG-0011 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno | Scope the Deno runtime analyzer (lockfile resolver, import graphs) based on competitor techniques to extend beyond Sprint 130 coverage. | | |
|
||||
| SCANNER-ENG-0012 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart | Evaluate Dart analyzer requirements (pubspec parsing, AOT artifacts) and split implementation tasks. | | |
|
||||
| SCANNER-ENG-0013 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Swift Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift | Plan Swift Package Manager coverage (Package.resolved, xcframeworks, runtime hints) with policy hooks. | | |
|
||||
| SCANNER-ENG-0014 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Runtime Guild, Zastava Guild (docs/modules/scanner) | docs/modules/scanner | Align Kubernetes/VM target coverage between Scanner and Zastava per comparison findings; publish joint roadmap. | | |
|
||||
| SCANNER-ENG-0011 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno | Scope the Deno runtime analyzer (lockfile resolver, import graphs) based on competitor techniques to extend beyond Sprint 130 coverage. | docs/modules/scanner/design/deno-analyzer-plan.md | |
|
||||
| SCANNER-ENG-0012 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart | Evaluate Dart analyzer requirements (pubspec parsing, AOT artifacts) and split implementation tasks. | docs/modules/scanner/design/dart-analyzer-plan.md | |
|
||||
| SCANNER-ENG-0013 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Swift Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift | Plan Swift Package Manager coverage (Package.resolved, xcframeworks, runtime hints) with policy hooks. | docs/modules/scanner/design/swiftpm-coverage-plan.md | |
|
||||
| SCANNER-ENG-0014 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Runtime Guild, Zastava Guild (docs/modules/scanner) | docs/modules/scanner | Align Kubernetes/VM target coverage between Scanner and Zastava per comparison findings; publish joint roadmap. | docs/modules/scanner/design/runtime-alignment-scanner-zastava.md | |
|
||||
| SCANNER-ENG-0015 | DONE | 2025-11-13 | SPRINT_0138_0001_0001_scanner_ruby_parity | Export Center Guild, Scanner Guild (docs/modules/scanner) | docs/modules/scanner | DSSE/Rekor operator playbook published (`docs/modules/scanner/operations/dsse-rekor-operator-guide.md`) with config/env tables, rollout phases, runbook snippets, offline verification steps, and SLA/alert guidance. | | |
|
||||
| SCANNER-ENG-0016 | DONE | 2025-11-10 | SPRINT_0138_0001_0001_scanner_ruby_parity | Ruby Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby | RubyLockCollector and vendor ingestion finalized: Bundler config overrides honoured, workspace lockfiles merged, vendor bundles normalised, and deterministic fixtures added. | SCANNER-ENG-0009 | |
|
||||
| SCANNER-ENG-0017 | DONE | 2025-11-09 | SPRINT_0138_0001_0001_scanner_ruby_parity | Ruby Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby | Build the runtime require/autoload graph builder with tree-sitter Ruby per design §4.4 and integrate EntryTrace hints. | SCANNER-ENG-0016 | |
|
||||
@@ -1876,7 +1876,7 @@
|
||||
| SURFACE-SECRETS-03 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Add Kubernetes/File/Offline backends with deterministic caching and audit hooks. | SURFACE-SECRETS-02 | SCSS0101 |
|
||||
| SURFACE-SECRETS-04 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Integrate Surface.Secrets into Scanner Worker/WebService/BuildX for registry + CAS creds. | SURFACE-SECRETS-02 | |
|
||||
| SURFACE-SECRETS-05 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Zastava Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Invoke Surface.Secrets from Zastava Observer/Webhook for CAS & attestation secrets. | SURFACE-SECRETS-02 | |
|
||||
| SURFACE-SECRETS-06 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Ops Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Update deployment manifests/offline kit bundles to provision secret references instead of raw values. | SURFACE-SECRETS-03 | |
|
||||
| SURFACE-SECRETS-06 | DONE (2025-12-08) | | SPRINT_0136_0001_0001_scanner_surface | Ops Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Update deployment manifests/offline kit bundles to provision secret references instead of raw values. | SURFACE-SECRETS-03 | |
|
||||
| SURFACE-VAL-01 | DOING | 2025-11-01 | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild, Security Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation | Define the Surface validation framework (`surface-validation.md`) covering env/cache/secret checks and extension hooks. | SURFACE-FS-01; SURFACE-ENV-01 | SCSS0102 |
|
||||
| SURFACE-VAL-02 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation | Implement base validation library with check registry and default validators for env/cached manifests/secret refs. | SURFACE-VAL-01; SURFACE-ENV-02; SURFACE-FS-02 | SCSS0102 |
|
||||
| SURFACE-VAL-03 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild, Analyzer Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation | Integrate validation pipeline into Scanner analyzers so checks run before processing. | SURFACE-VAL-02 | SCSS0102 |
|
||||
@@ -2660,7 +2660,7 @@
|
||||
| CONCELIER-STORE-AOC-19-005 | TODO | 2025-11-04 | SPRINT_115_concelier_iv | Concelier Storage Guild · DevOps Guild | src/Concelier/__Libraries/StellaOps.Concelier.Storage.Mongo | Execute the raw-linkset backfill/rollback plan (`docs/dev/raw-linkset-backfill-plan.md`) so Mongo + Offline Kit bundles reflect Link-Not-Merge data; rehearse rollback. Depends on CONCELIER-CORE-AOC-19-004. | Wait for CCLN0101 approval | CCSM0101 |
|
||||
| CONCELIER-TEN-48-001 | BLOCKED | 2025-11-23 | SPRINT_115_concelier_iv | Concelier Core Guild | src/Concelier/__Libraries/StellaOps.Concelier.Core | Enforce tenant scoping throughout normalization/linking, expose capability endpoint advertising `merge=false`, and ensure events include tenant IDs. Depends on AUTH-TEN-47-001. | AUTH-TEN-47-001; POLICY chain | CCCO0101 |
|
||||
| CONCELIER-VEXLENS-30-001 | BLOCKED | 2025-11-23 | SPRINT_115_concelier_iv | Concelier WebService Guild · VEX Lens Guild | src/Concelier/StellaOps.Concelier.WebService | Guarantee advisory key consistency and cross-links consumed by VEX Lens so consensus explanations can cite Concelier evidence without requesting merges. Depends on CONCELIER-VULN-29-001, VEXLENS-30-005. | VEXLENS-30-005 | PLVL0103 |
|
||||
| CONCELIER-VULN-29-004 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild · Observability Guild | src/Concelier/StellaOps.Concelier.WebService | Instrument observation/linkset pipelines with metrics for identifier collisions, withdrawn statements, and chunk latencies; stream them to Vuln Explorer without altering evidence payloads. Depends on CONCELIER-VULN-29-001. | Requires CCPR0101 risk feed | CCWO0101 |
|
||||
| CONCELIER-VULN-29-004 | DONE (2025-12-08) | | SPRINT_116_concelier_v | Concelier WebService Guild · Observability Guild | src/Concelier/StellaOps.Concelier.WebService | Instrument observation/linkset pipelines with metrics for identifier collisions, withdrawn statements, and chunk latencies; stream them to Vuln Explorer without altering evidence payloads. Depends on CONCELIER-VULN-29-001. | Requires CCPR0101 risk feed | CCWO0101 |
|
||||
| CONCELIER-WEB-AIRGAP-56-001 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild · AirGap Policy Guild | src/Concelier/StellaOps.Concelier.WebService | Extend ingestion endpoints to register mirror bundle sources, expose bundle catalogs, and enforce sealed-mode by blocking direct internet feeds. | Wait for AGCN0101 proof | CCAW0101 |
|
||||
| CONCELIER-WEB-AIRGAP-56-002 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild · AirGap Importer Guild | src/Concelier/StellaOps.Concelier.WebService | Add staleness + bundle provenance metadata to `/advisories/observations` and `/advisories/linksets` so operators can see freshness without Excititor deriving outcomes. Depends on CONCELIER-WEB-AIRGAP-56-001. | Depends on #1 | CCAW0101 |
|
||||
| CONCELIER-WEB-AIRGAP-57-001 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild | src/Concelier/StellaOps.Concelier.WebService | Map sealed-mode violations to consistent `AIRGAP_EGRESS_BLOCKED` payloads that explain how to remediate, leaving advisory content untouched. Depends on CONCELIER-WEB-AIRGAP-56-002. | Needs CCAN0101 time beacons | CCAW0101 |
|
||||
@@ -3270,8 +3270,8 @@
|
||||
| FEEDCONN-CCCS-02-009 | TODO | | SPRINT_117_concelier_vi | Concelier Connector Guild – CCCS (src/Concelier/__Libraries/StellaOps.Concelier.Connector.Cccs) | src/Concelier/__Libraries/StellaOps.Concelier.Connector.Cccs | Emit CCCS version ranges into `advisory_observations.affected.versions[]` with provenance anchors (`cccs:{serial}:{index}`) and normalized comparison keys per the Link-Not-Merge schema/doc recipes. Depends on CONCELIER-LNM-21-001. | — | FEFC0101 |
|
||||
| FEEDCONN-CERTBUND-02-010 | TODO | | SPRINT_117_concelier_vi | Concelier Connector Guild – CertBund (src/Concelier/__Libraries/StellaOps.Concelier.Connector.CertBund) | src/Concelier/__Libraries/StellaOps.Concelier.Connector.CertBund | Translate CERT-Bund `product.Versions` phrases into normalized ranges + provenance identifiers (`certbund:{advisoryId}:{vendor}`) while retaining localisation notes; update mapper/tests for Link-Not-Merge. Depends on CONCELIER-LNM-21-001. | — | FEFC0101 |
|
||||
| FEEDCONN-CISCO-02-009 | DOING | 2025-11-08 | SPRINT_117_concelier_vi | Concelier Connector Guild – Cisco (src/Concelier/__Libraries/StellaOps.Concelier.Connector.Vndr.Cisco) | src/Concelier/__Libraries/StellaOps.Concelier.Connector.Vndr.Cisco | Emit Cisco SemVer ranges into the new observation schema with provenance IDs (`cisco:{productId}`) and deterministic comparison keys; refresh fixtures to remove merge counters. Depends on CONCELIER-LNM-21-001. | — | FEFC0101 |
|
||||
| FEEDCONN-ICSCISA-02-012 | BLOCKED | | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | Overdue provenance refreshes require schedule from feed owners. | FEED-REMEDIATION-1001 | FEFC0101 |
|
||||
| FEEDCONN-KISA-02-008 | BLOCKED | | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | FEED-REMEDIATION-1001 | FEED-REMEDIATION-1001 | FEFC0101 |
|
||||
| FEEDCONN-ICSCISA-02-012 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | SOP v0.2 run_id icscisa-kisa-20251208T0205Z completed; artefacts at `out/feeds/icscisa-kisa/20251208/`. | FEED-REMEDIATION-1001 | FEFC0101 |
|
||||
| FEEDCONN-KISA-02-008 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | SOP v0.2 run_id icscisa-kisa-20251208T0205Z completed; artefacts at `out/feeds/icscisa-kisa/20251208/`. | FEED-REMEDIATION-1001 | FEFC0101 |
|
||||
| FORENSICS-53-001 | TODO | | SPRINT_0202_0001_0002_cli_ii | Forensics Guild | src/Cli/StellaOps.Cli | Replay data set | Replay data set | FONS0101 |
|
||||
| FORENSICS-53-002 | TODO | | SPRINT_0304_0001_0004_docs_tasks_md_iv | Forensics Guild | | FORENSICS-53-001 | FORENSICS-53-001 | FONS0101 |
|
||||
| FORENSICS-53-003 | TODO | | SPRINT_0304_0001_0004_docs_tasks_md_iv | Forensics Guild | | FORENSICS-53-001 | FORENSICS-53-001 | FONS0101 |
|
||||
@@ -3896,10 +3896,10 @@
|
||||
| SCANNER-ENG-0008 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | EntryTrace Guild, QA Guild (src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace) | src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace | Maintain EntryTrace heuristic cadence per `docs/benchmarks/scanner/scanning-gaps-stella-misses-from-competitors.md`, including quarterly pattern reviews + explain-trace updates. | | |
|
||||
| SCANNER-ENG-0009 | DONE | 2025-11-13 | SPRINT_0138_0001_0001_scanner_ruby_parity | Ruby Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby | Ruby analyzer parity shipped: runtime graph + capability signals, observation payload, Mongo-backed `ruby.packages` inventory, CLI/WebService surfaces, and plugin manifest bundles for Worker loadout. | SCANNER-ANALYZERS-RUBY-28-001..012 | |
|
||||
| SCANNER-ENG-0010 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | PHP Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Php) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Php | Ship the PHP analyzer pipeline (composer lock, autoload graph, capability signals) to close comparison gaps. | SCANNER-ANALYZERS-PHP-27-001 | |
|
||||
| SCANNER-ENG-0011 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno | Scope the Deno runtime analyzer (lockfile resolver, import graphs) based on competitor techniques to extend beyond Sprint 130 coverage. | | |
|
||||
| SCANNER-ENG-0012 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart | Evaluate Dart analyzer requirements (pubspec parsing, AOT artifacts) and split implementation tasks. | | |
|
||||
| SCANNER-ENG-0013 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Swift Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift | Plan Swift Package Manager coverage (Package.resolved, xcframeworks, runtime hints) with policy hooks. | | |
|
||||
| SCANNER-ENG-0014 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Runtime Guild, Zastava Guild (docs/modules/scanner) | docs/modules/scanner | Align Kubernetes/VM target coverage between Scanner and Zastava per comparison findings; publish joint roadmap. | | |
|
||||
| SCANNER-ENG-0011 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno | Scope the Deno runtime analyzer (lockfile resolver, import graphs) based on competitor techniques to extend beyond Sprint 130 coverage. | docs/modules/scanner/design/deno-analyzer-plan.md | |
|
||||
| SCANNER-ENG-0012 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart | Evaluate Dart analyzer requirements (pubspec parsing, AOT artifacts) and split implementation tasks. | docs/modules/scanner/design/dart-analyzer-plan.md | |
|
||||
| SCANNER-ENG-0013 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Swift Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift | Plan Swift Package Manager coverage (Package.resolved, xcframeworks, runtime hints) with policy hooks. | docs/modules/scanner/design/swiftpm-coverage-plan.md | |
|
||||
| SCANNER-ENG-0014 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Runtime Guild, Zastava Guild (docs/modules/scanner) | docs/modules/scanner | Align Kubernetes/VM target coverage between Scanner and Zastava per comparison findings; publish joint roadmap. | docs/modules/scanner/design/runtime-alignment-scanner-zastava.md | |
|
||||
| SCANNER-ENG-0015 | DONE | 2025-11-13 | SPRINT_0138_0001_0001_scanner_ruby_parity | Export Center Guild, Scanner Guild (docs/modules/scanner) | docs/modules/scanner | DSSE/Rekor operator playbook published (`docs/modules/scanner/operations/dsse-rekor-operator-guide.md`) with config/env tables, rollout phases, runbook snippets, offline verification steps, and SLA/alert guidance. | | |
|
||||
| SCANNER-ENG-0016 | DONE | 2025-11-10 | SPRINT_0138_0001_0001_scanner_ruby_parity | Ruby Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby | RubyLockCollector and vendor ingestion finalized: Bundler config overrides honoured, workspace lockfiles merged, vendor bundles normalised, and deterministic fixtures added. | SCANNER-ENG-0009 | |
|
||||
| SCANNER-ENG-0017 | DONE | 2025-11-09 | SPRINT_0138_0001_0001_scanner_ruby_parity | Ruby Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby | Build the runtime require/autoload graph builder with tree-sitter Ruby per design §4.4 and integrate EntryTrace hints. | SCANNER-ENG-0016 | |
|
||||
@@ -4076,7 +4076,7 @@
|
||||
| SURFACE-SECRETS-03 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Add Kubernetes/File/Offline backends with deterministic caching and audit hooks. | SURFACE-SECRETS-02 | SCSS0101 |
|
||||
| SURFACE-SECRETS-04 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Integrate Surface.Secrets into Scanner Worker/WebService/BuildX for registry + CAS creds. | SURFACE-SECRETS-02 | |
|
||||
| SURFACE-SECRETS-05 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Zastava Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Invoke Surface.Secrets from Zastava Observer/Webhook for CAS & attestation secrets. | SURFACE-SECRETS-02 | |
|
||||
| SURFACE-SECRETS-06 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Ops Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Update deployment manifests/offline kit bundles to provision secret references instead of raw values. | SURFACE-SECRETS-03 | |
|
||||
| SURFACE-SECRETS-06 | DONE (2025-12-08) | | SPRINT_0136_0001_0001_scanner_surface | Ops Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Update deployment manifests/offline kit bundles to provision secret references instead of raw values. | SURFACE-SECRETS-03 | |
|
||||
| SURFACE-VAL-01 | DOING | 2025-11-01 | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild, Security Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation | Define the Surface validation framework (`surface-validation.md`) covering env/cache/secret checks and extension hooks. | SURFACE-FS-01; SURFACE-ENV-01 | SCSS0102 |
|
||||
| SURFACE-VAL-02 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation | Implement base validation library with check registry and default validators for env/cached manifests/secret refs. | SURFACE-VAL-01; SURFACE-ENV-02; SURFACE-FS-02 | SCSS0102 |
|
||||
| SURFACE-VAL-03 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild, Analyzer Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation | Integrate validation pipeline into Scanner analyzers so checks run before processing. | SURFACE-VAL-02 | SCSS0102 |
|
||||
|
||||
@@ -1,7 +1,22 @@
|
||||
# ICSCISA / KISA Feed Provenance Notes (2025-11-19)
|
||||
# ICSCISA / KISA Feed Provenance Notes (2025-12-08)
|
||||
|
||||
- Expected signing: not provided by sources; set `signature=null` and `skip_reason="unsigned"`.
|
||||
- Hashing: sha256 of raw advisory payload before normalization.
|
||||
- Expected signing: not provided by sources; record `signature` as `{ status: "missing", reason: "unsigned_source" }`.
|
||||
- Hashing: sha256 of raw advisory payload before normalization (stored as `payload_sha256` per advisory) and sha256 of run artefacts (`hashes.sha256`).
|
||||
- Transport: HTTPS; mirror to internal cache; record `fetched_at` UTC and `source_url`.
|
||||
- Verification: compare hash vs previous run; emit delta report.
|
||||
- Staleness guard: alert if `fetched_at` is more than 14 days old.
|
||||
|
||||
## Run 2025-12-08 (run_id=icscisa-kisa-20251208T0205Z)
|
||||
- Artefacts: `out/feeds/icscisa-kisa/20251208/advisories.ndjson`, `delta.json`, `fetch.log`, `hashes.sha256`.
|
||||
- Hashes:
|
||||
- `0844c46c42461b8eeaf643c01d4cb74ef20d4eec8c984ad5e20c49d65dc57deb advisories.ndjson`
|
||||
- `1273beb246754382d2e013fdc98b11b06965fb97fe9a63735b51cc949746418f delta.json`
|
||||
- `8fedaa9fb2b146a1ef500b0d2e4c1592ddbc770a8f15b7d03723f8034fc12a75 fetch.log`
|
||||
- Delta summary: added ICS CISA advisories `ICSA-25-123-01`, `ICSMA-25-045-01`; added KISA advisories `KISA-2025-5859`, `KISA-2025-5860`; no updates or removals; backlog window 60 days; retries 0 for both sources.
|
||||
- Signature posture: both sources unsigned; all records marked `signature.missing` with reason `unsigned_source`.
|
||||
- Next actions: maintain weekly cadence; staleness review on 2025-12-21 with refreshed hash manifest and retry histogram.
|
||||
|
||||
## CI automation
|
||||
- Scheduled workflow `.gitea/workflows/icscisa-kisa-refresh.yml` runs Mondays 02:00 UTC (manual dispatch enabled) and executes `scripts/feeds/run_icscisa_kisa_refresh.py` with live fetch + offline fallback.
|
||||
- Configure feed endpoints via `ICSCISA_FEED_URL` / `KISA_FEED_URL`; set `LIVE_FETCH=false` or `OFFLINE_SNAPSHOT=true` to force offline-only mode when running in sealed CI. Host override for on-prem mirrors is available via `FEED_GATEWAY_HOST` / `FEED_GATEWAY_SCHEME` (default `concelier-webservice` on the Docker network).
|
||||
- Fetch log traces: `fetch.log` captures gateway (`FEED_GATEWAY_*`), effective ICS/KISA URLs, live/offline flags, and statuses so operators can verify when defaults are used vs explicit endpoints.
|
||||
|
||||
@@ -32,8 +32,8 @@ Define a minimal, actionable plan to refresh overdue ICSCISA and KISA connectors
|
||||
- Set to 2025-12-21 (two-week check from v0.2) and capture SIG verification status + open deltas.
|
||||
|
||||
## Actions & timeline (v0.2 refresh)
|
||||
- T0 (2025-12-08): adopt SOP + field map; create delta report template; preflight cache paths.
|
||||
- T0+2d (2025-12-10): run backlog reprocess, publish artefacts + hashes for both feeds; capture unsigned counts and retry reasons.
|
||||
- T0 (2025-12-08): adopt SOP + field map; create delta report template; preflight cache paths. **Done** via run `icscisa-kisa-20251208T0205Z` (see run summary below).
|
||||
- T0+2d (2025-12-10): run backlog reprocess, publish artefacts + hashes for both feeds; capture unsigned counts and retry reasons. **Done** in the 2025-12-08 execution (backlog window 60 days).
|
||||
- T0+14d (2025-12-21): review staleness, adjust cadence if needed; reset review date and owners.
|
||||
|
||||
## Artefact locations
|
||||
@@ -46,3 +46,18 @@ Define a minimal, actionable plan to refresh overdue ICSCISA and KISA connectors
|
||||
- Source downtime -> mirror last good snapshot; retry daily for 3 days.
|
||||
- Missing signatures -> record `signature` as `{ status: "missing", reason: "unsigned_source" }` in the provenance note; do not infer validity.
|
||||
- Schema drift -> treat as new fields, store raw, add to field map after review (no drop).
|
||||
|
||||
## Run summary (2025-12-08 · run_id=icscisa-kisa-20251208T0205Z)
|
||||
- Backlog window: 60 days; cadence: weekly; start/end: 2025-12-08T02:05:00Z / 2025-12-08T02:09:30Z.
|
||||
- Outputs: `out/feeds/icscisa-kisa/20251208/advisories.ndjson`, `delta.json`, `fetch.log`, `hashes.sha256`.
|
||||
- Delta: ICS CISA added `ICSA-25-123-01`, `ICSMA-25-045-01`; KISA added `KISA-2025-5859`, `KISA-2025-5860`; no updates or removals.
|
||||
- Hash manifest: `hashes.sha256` records advisories/delta/log digests (see provenance note).
|
||||
- Signatures: none provided by sources; recorded as missing with reason `unsigned_source` (tracked in provenance note).
|
||||
- Next review: 2025-12-21 (staleness guard <14 days remains satisfied after this run).
|
||||
|
||||
## CI automation
|
||||
- Workflow: `.gitea/workflows/icscisa-kisa-refresh.yml` (cron: Mondays 02:00 UTC; also manual dispatch) running `scripts/feeds/run_icscisa_kisa_refresh.py`.
|
||||
- Outputs: uploads `icscisa-kisa-<YYYYMMDD>` artifact with `advisories.ndjson`, `delta.json`, `fetch.log`, `hashes.sha256`.
|
||||
- Live vs offline: defaults to live RSS fetch with offline-safe fallback; set `LIVE_FETCH=false` or `OFFLINE_SNAPSHOT=true` in dispatch inputs/environment to force offline samples. Optional feed URLs/secrets: `ICSCISA_FEED_URL`, `KISA_FEED_URL`.
|
||||
- On-prem feed host: feeds are configurable via `FEED_GATEWAY_HOST`/`FEED_GATEWAY_SCHEME`. Default resolves to `http://concelier-webservice` (Docker network DNS) so on-prem deployments hit the local mirror/web service instead of the public internet.
|
||||
- Fetch log traces defaults: `fetch.log` records the resolved gateway (`FEED_GATEWAY_*`) and the effective URLs used for ICS CISA and KISA. If env vars are absent, the log shows the Docker-network default so operators can confirm on-prem wiring without inspecting workflow inputs.
|
||||
|
||||
@@ -124,6 +124,62 @@ Excititor workers now hydrate signature metadata with issuer trust data retrieve
|
||||
|
||||
`GET /v1/vex/statements/{advisory_key}` produces sorted JSON responses containing raw statement metadata (`issuer`, `content_hash`, `signature`), normalised tuples, and provenance pointers. Advisory AI consumes this endpoint to build retrieval contexts with explicit citations.
|
||||
|
||||
### 1.5 Postgres raw store (replaces Mongo/GridFS)
|
||||
|
||||
> Mongo/BSON/GridFS are being removed. This is the canonical design for the Postgres-backed raw store that powers `/vex/raw` and ingestion.
|
||||
|
||||
Schema: `vex`
|
||||
|
||||
- **`vex_raw_documents`** (append-only)
|
||||
- `digest TEXT PRIMARY KEY` — `sha256:{hex}` of canonical UTF-8 JSON bytes.
|
||||
- `tenant TEXT NOT NULL`
|
||||
- `provider_id TEXT NOT NULL`
|
||||
- `format TEXT NOT NULL CHECK (format IN ('openvex','csaf','cyclonedx','custom'))`
|
||||
- `source_uri TEXT NOT NULL`, `etag TEXT NULL`
|
||||
- `retrieved_at TIMESTAMPTZ NOT NULL`, `recorded_at TIMESTAMPTZ NOT NULL DEFAULT NOW()`
|
||||
- `supersedes_digest TEXT NULL REFERENCES vex_raw_documents(digest)`
|
||||
- `content_json JSONB NOT NULL` — canonicalised payload (truncated when blobbed)
|
||||
- `content_size_bytes INT NOT NULL`
|
||||
- `metadata_json JSONB NOT NULL` — statement_id, issuer, spec_version, content_type, connector version, hashes, quarantine flags
|
||||
- `provenance_json JSONB NOT NULL` — DSSE/chain/rekor/trust info
|
||||
- `inline_payload BOOLEAN NOT NULL DEFAULT TRUE`
|
||||
- UNIQUE (`tenant`, `provider_id`, `source_uri`, `etag`)
|
||||
- Indexes: `(tenant, retrieved_at DESC)`, `(tenant, provider_id, retrieved_at DESC)`, `(tenant, supersedes_digest)`, GIN on `metadata_json`, GIN on `provenance_json`.
|
||||
|
||||
- **`vex_raw_blobs`** (large payloads)
|
||||
- `digest TEXT PRIMARY KEY REFERENCES vex_raw_documents(digest) ON DELETE CASCADE`
|
||||
- `payload BYTEA NOT NULL` (canonical JSON bytes; no compression to preserve determinism)
|
||||
- `payload_hash TEXT NOT NULL` (hash of stored bytes)
|
||||
|
||||
- **`vex_raw_attachments`** (optional future)
|
||||
- `digest TEXT REFERENCES vex_raw_documents(digest) ON DELETE CASCADE`
|
||||
- `name TEXT NOT NULL`, `media_type TEXT NOT NULL`
|
||||
- `payload BYTEA NOT NULL`, `payload_hash TEXT NOT NULL`
|
||||
- PRIMARY KEY (`digest`, `name`)
|
||||
|
||||
- **Observations/linksets** — use the append-only Postgres linkset schema already defined for `IAppendOnlyLinksetStore` (tables `vex_linksets`, `vex_linkset_observations`, `vex_linkset_disagreements`, `vex_linkset_mutations`) with indexes on `(tenant, vulnerability_id, product_key)` and `updated_at`.
|
||||
|
||||
**Canonicalisation & hashing**
|
||||
|
||||
1. Parse upstream JSON; sort keys; normalize newlines; encode UTF-8 without BOM. Preserve array order.
|
||||
2. Compute `digest = "sha256:{hex}"` over canonical bytes.
|
||||
3. If `size <= inline_threshold_bytes` (default 256 KiB) set `inline_payload=true` and store in `content_json`; otherwise store bytes in `vex_raw_blobs` and set `inline_payload=false`.
|
||||
4. Persist `content_size_bytes` (pre-canonical length) and `payload_hash` for integrity.
|
||||
|
||||
**API mapping (replaces Mongo/BSON)**
|
||||
List/query `/vex/raw` via `SELECT ... FROM vex.vex_raw_documents WHERE tenant=@t ORDER BY retrieved_at DESC, digest LIMIT @n OFFSET @offset`; cursor uses `(retrieved_at, digest)`. `GET /vex/raw/{digest}` loads the row and optional blob; `GET /vex/raw/{digest}/provenance` projects `provenance_json` + `metadata_json`. Filters (`providerId`, `format`, `since`, `until`, `supersedes`, `hasAttachments`) map to indexed predicates; JSON subfields use `metadata_json ->> 'field'`.
|
||||
|
||||
**Write semantics**
|
||||
|
||||
- `IVexRawStore` Postgres implementation enforces append-only inserts; duplicate `digest` => no-op; duplicate (`tenant`, `provider_id`, `source_uri`, `etag`) with new digest inserts a new row and sets `supersedes_digest`.
|
||||
- `IVexRawWriteGuard` runs before insert; tenant is mandatory on every query and write.
|
||||
|
||||
**Rollout**
|
||||
|
||||
1. Add migration under `src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Migrations` creating the tables/indexes above.
|
||||
2. Implement `PostgresVexRawStore` and switch WebService/Worker DI to `AddExcititorPostgresStorage`; remove `VexMongoStorageOptions`, `IMongoDatabase`, and GridFS paths.
|
||||
3. Update `/vex/raw` endpoints/tests to the Postgres store; delete Mongo fixtures once parity is green. Mark Mongo storage paths as deprecated and remove them in the next release.
|
||||
|
||||
---
|
||||
|
||||
## 2) Inputs, outputs & canonical domain
|
||||
|
||||
@@ -56,6 +56,7 @@
|
||||
- **Correlation:** Each API request includes `requestId` + `traceId` logged with events. Projector logs capture `replayId` and `rebuildReason`.
|
||||
- **Timeline events:** `ledger.event.appended` and `ledger.projection.updated` are emitted as structured logs carrying `tenant`, `chainId`, `sequence`, `eventId`, `policyVersion`, `traceId`, and placeholder `evidence_ref` fields for downstream timeline consumers.
|
||||
- **Secrets:** Ensure `event_body` is never logged; log only metadata/hashes.
|
||||
- **Incident mode:** When incident mode is active, emit `ledger.incident.mode`, `ledger.incident.lag_trace`, `ledger.incident.conflict_snapshot`, and `ledger.incident.replay_trace` logs (with activation id, retention extension days, lag seconds, conflict reason). Snapshot TTLs inherit an incident retention extension and are annotated with `incident.*` metadata.
|
||||
|
||||
## 4. Alerts
|
||||
|
||||
|
||||
41
docs/modules/scanner/design/dart-analyzer-plan.md
Normal file
41
docs/modules/scanner/design/dart-analyzer-plan.md
Normal file
@@ -0,0 +1,41 @@
|
||||
# Dart Analyzer Scope · SCANNER-ENG-0012 (2025-12-08)
|
||||
|
||||
## Goals
|
||||
- Define Dart analyzer for pubspec/pub cache parity with other language analyzers.
|
||||
- Keep offline-first (no `pub get`), deterministic inventories/graphs, and policy-ready signals.
|
||||
|
||||
## Inputs
|
||||
- `pubspec.yaml` + `pubspec.lock` (dependencies, sources, sdk constraints).
|
||||
- `.dart_tool/package_config.json` (resolved packages, language version, root URIs).
|
||||
- AOT artifacts: `*.aot`, `*.snapshot`, `build/` outputs (record presence only).
|
||||
- Optional Flutter plugins: `ios/`/`android/` platform manifests (metadata only).
|
||||
|
||||
## Pipeline (deterministic, offline)
|
||||
1) **Normalize pubspec/pubspec.lock**:
|
||||
- Parse lock entries; map sources: `hosted`, `sdk:flutter`, `git`, `path`.
|
||||
- Emit PURLs (`pkg:pub/<name>@<version>`) with `source` metadata (`hosted.url`, `git.sha`, `path`).
|
||||
- Enforce sorted components by name.
|
||||
2) **Package config**:
|
||||
- Read `.dart_tool/package_config.json`; map package `rootUri`/`packageUri` to build module graph roots.
|
||||
- Capture `languageVersion` and `generated` timestamp (drop or normalize to `0001-01-01Z` for determinism).
|
||||
3) **Graph builder**:
|
||||
- Build dependency edges from `pubspec.lock` -> `package_config` packages; include `sdk:flutter` nodes when present.
|
||||
- Record `sourceType` (hosted/git/path/sdk) for provenance.
|
||||
4) **Signals**:
|
||||
- `dart.sdk` requirement from `environment.sdk`; `flutter` channel/version when present.
|
||||
- AOT snapshot presence flags (`aot=true`, `snapshot=true`); no binary parsing.
|
||||
5) **Outputs**:
|
||||
- Inventory: list of PURLs + source metadata + checksum if provided in lock (hosted `sha256`).
|
||||
- Graph: edges `(package -> dependency)` sorted.
|
||||
- Signals: `dart.sdkConstraint`, `flutter.sdk`, `flutter.plugins` (names only), `buildArtifacts` flags.
|
||||
|
||||
## Tests & fixtures
|
||||
- Fixtures under `src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Dart.Tests/Fixtures/`:
|
||||
- Hosted-only lockfile, git dependency, path dependency, Flutter project with plugins.
|
||||
- Determinism tests: stable ordering, normalized timestamps, no network.
|
||||
- Signal tests: sdk constraint extraction, AOT/snapshot flagging.
|
||||
|
||||
## Deliverables
|
||||
- Design captured here; wire into implementation plan + sprint log.
|
||||
- Analyzer to live under `StellaOps.Scanner.Analyzers.Lang.Dart` with tests mirroring fixtures.
|
||||
- Offline posture: never invoke `dart pub`; rely solely on provided lock/config; error clearly when missing lock.
|
||||
44
docs/modules/scanner/design/deno-analyzer-plan.md
Normal file
44
docs/modules/scanner/design/deno-analyzer-plan.md
Normal file
@@ -0,0 +1,44 @@
|
||||
# Deno Analyzer Scope · SCANNER-ENG-0011 (2025-12-08)
|
||||
|
||||
## Goals
|
||||
- Deliver offline-safe Deno analyzer (lockfile/import graph/runtime signals) that matches Ruby/PHP parity bar.
|
||||
- Provide deterministic SBOM/inventory outputs and capability signals consumable by Policy/Surface.
|
||||
|
||||
## Inputs
|
||||
- `deno.json` / `deno.jsonc` (tasks, import map refs, npm bridging).
|
||||
- `deno.lock` v2/v3 (modules, npm section, integrity hashes).
|
||||
- Optional `import_map.json`; vendor/cache roots (`$DENO_DIR`, `vendor/`).
|
||||
- CLI flags via Surface.Env: `deno.disable_npm`, `deno.vendor`, `deno.lock_path`, `deno.import_map`.
|
||||
|
||||
## Pipeline (deterministic, offline)
|
||||
1) **Normalize config**: parse `deno.json`/jsonc; resolve `importMap` path; default to repo root import map if present. Sort keys.
|
||||
2) **Lock resolver**: read `deno.lock`; emit components:
|
||||
- `npm:` entries → PURL (`pkg:npm/<name>@<version>`) + integrity from `integrity`.
|
||||
- `specifiers` → source→target map for transitive graph.
|
||||
- `modules` (remote URLs) → canonical URL + content hash when present; mark `fetchSource: cache`.
|
||||
3) **Import map & vendor**:
|
||||
- Apply `imports`/`scopes` to rewrite edges before graph emission.
|
||||
- If `vendor/` exists, prefer vendored paths; emit `provenance: vendor`.
|
||||
4) **Graph builder**:
|
||||
- Build module graph from `specifiers` + import map rewrites; emit edges `(from -> to, kind: import|dynamic|npm)`.
|
||||
- Recognise `npm:` specifiers; map to npm package node.
|
||||
- Stable ordering: sort by `from, to`.
|
||||
5) **Runtime/capability signals**:
|
||||
- Detect permissions from `tasks` (`--allow-*` flags) and `deno.json` `unstable`/`no-check`.
|
||||
- Capture `nodeModulesDir` toggle to flag npm bridge.
|
||||
6) **Outputs**:
|
||||
- Inventory: npm components + remote module list (`digest`, `source`, `origin`).
|
||||
- Graph: edges with provenance (`lockfile`, `import_map`, `vendor`).
|
||||
- Signals: `deno.permissions[]`, `deno.node_compat`, `deno.unstable`.
|
||||
|
||||
## Tests & fixtures
|
||||
- Add fixtures under `src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Deno.Tests/Fixtures/`:
|
||||
- lockfile v2 + import map,
|
||||
- lockfile v3 with npm section,
|
||||
- vendorized project (`vendor/` present).
|
||||
- Determinism assertions: sorted edges, stable hash of inventory, no network calls (enforce via stubbed fetcher).
|
||||
|
||||
## Deliverables
|
||||
- Analyzer implementation + tests in `StellaOps.Scanner.Analyzers.Lang.Deno`.
|
||||
- Doc cross-link to `docs/modules/scanner/implementation_plan.md` and sprint log.
|
||||
- Offline posture: default `LIVE_FETCH=false` equivalent; rely solely on lock/import map/vendor.
|
||||
42
docs/modules/scanner/design/native-reachability-plan.md
Normal file
42
docs/modules/scanner/design/native-reachability-plan.md
Normal file
@@ -0,0 +1,42 @@
|
||||
# Native Reachability Graph Plan (Scanner · Signals Alignment)
|
||||
|
||||
## Goals
|
||||
- Extract native reachability graphs from ELF binaries across layers (stripped and unstripped), emitting:
|
||||
- Build IDs (`.note.gnu.build-id`) and code IDs per file.
|
||||
- Symbol digests (purl+symbol) and edges (callgraph) with deterministic ordering.
|
||||
- Synthetic roots for `_init`, `.init_array`, `.preinit_array`, entry points.
|
||||
- DSSE graph bundle per layer for Signals ingestion.
|
||||
- Offline-friendly, deterministic outputs (stable ordering, UTF-8, UTC).
|
||||
|
||||
## Inputs
|
||||
- Layered filesystem with ELF binaries and shared objects.
|
||||
- Layer metadata: digests from `scanner.rootfs.layers` and `scanner.layer.archives` (when provided).
|
||||
- Optional runtime proc snapshot for reconciliation (if available via Signals pipeline).
|
||||
|
||||
## Approach
|
||||
- **Discovery**: Walk layer directories; identify ELF binaries (`e_ident`, machine, class). Record per-layer path.
|
||||
- **Identifiers**: Capture build-id (hash of `.note.gnu.build-id`), fallback to SHA-256 of `.text` when absent; store code-id (PE/ELF-friendly string).
|
||||
- **Symbols**: Parse `.symtab`/`.dynsym`; compute stable symbol digests (e.g., SHA-256 over symbol bytes + name); include size/address for ordering.
|
||||
- **Edges**: Build callgraph from relocation/import tables and (when available) `.eh_frame`/`.plt` linkage; emit Unknown edges when target unresolved.
|
||||
- **Synthetic Roots**: Insert edges from synthetic root nodes (per binary) to `_start`, `_init`, `.init_array` entries.
|
||||
- **Layer Bundles**: Emit DSSE bundle per layer with edges, symbols, identifiers, and provenance (layer digest, path, sha256).
|
||||
- **Determinism**: Sort by layer digest, path, symbol name; normalize paths to POSIX separators; timestamps fixed to generation time in UTC ISO-8601.
|
||||
|
||||
## Deliverables
|
||||
- Library: `StellaOps.Scanner.Analyzers.Native` (new) with ELF reader and graph builder.
|
||||
- Tests: fixtures under `src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Native.Tests` using stripped/unstripped ELF samples (no network).
|
||||
- DSSE bundle schema: shared constants/types reused by Signals ingestion.
|
||||
- Sprint doc links: referenced from `SPRINT_0146_0001_0001_scanner_analyzer_gap_close.md`.
|
||||
|
||||
## Task Backlog (initial)
|
||||
1) Skeleton project `StellaOps.Scanner.Analyzers.Native` + plugin registration for scanner worker.
|
||||
2) ELF reader: header detection, build-id extraction, code-id calculation, section loader with deterministic sorting.
|
||||
3) Symbol digests: compute `sha256(name + addr + size + binding)`; emit per-symbol evidence and purl+symbol IDs.
|
||||
4) Callgraph builder: edges from PLT/relocs/imports; Unknown targets captured; synthetic roots for init arrays.
|
||||
5) Layer attribution: carry layer digest/source through evidence; emit DSSE bundle per layer with signatures stubbed for now.
|
||||
6) Tests/fixtures: stripped+unstripped ELF, shared objects, missing build-id, init array edges; golden JSON/NDJSON bundles.
|
||||
7) Signals alignment: finalize DSSE graph schema and bundle naming; hook into reachability ingestion contract.
|
||||
|
||||
## Open Questions
|
||||
- Final DSSE payload shape (Signals team) — currently assumed `graph.bundle` with edges, symbols, metadata.
|
||||
- Whether to include debug line info for coverage (could add an optional module later).
|
||||
@@ -0,0 +1,40 @@
|
||||
# Runtime Alignment (Scanner ↔ Zastava) · SCANNER-ENG-0014 (2025-12-08)
|
||||
|
||||
## Objective
|
||||
Align Kubernetes/VM target coverage between Scanner and Zastava so runtime signals, job orchestration, and evidence exports stay consistent across clusters and on-prem installs.
|
||||
|
||||
## Scope
|
||||
- Scanner: Worker runtime capture (EntryTrace), Surface.Env/FS detectors, analyzer job manifests, and policy predicates that rely on runtime/container metadata.
|
||||
- Zastava: runtime observation feeds (system call/ebpf), workload labeling, and admission hooks.
|
||||
|
||||
## Alignment Plan
|
||||
1) **Workload identity contract**
|
||||
- Standardize labels/annotations for scan jobs and Zastava monitors:
|
||||
- `stellaops.workload/id`, `tenant`, `project`, `component`, `channel`.
|
||||
- Container image digest required; tag optional.
|
||||
- Shared manifest snippet lives in `deploy/helm/stellaops` overlays; reuse in job templates.
|
||||
2) **Runtime evidence channels**
|
||||
- Scanner EntryTrace publishes `runtime.events` with fields: `workloadId`, `namespace`, `node`, `edgeType` (syscall/net/fs), `timestamp` (UTC, ISO-8601), `code_id` (when available).
|
||||
- Zastava observers mirror the same schema on `zastava.runtime.events`; controller stitches by `workloadId` and `imageDigest`.
|
||||
- Determinism: sort edge batches by `(workloadId, timestamp, edgeType)`.
|
||||
3) **Kubernetes defaults**
|
||||
- Namespace allowlist `scanner-runtime`/`zastava-runtime`; service accounts share RBAC for `pods/exec`, `pods/log`, `nodes/proxy` (read-only).
|
||||
- Feature flags: `scanner.runtime.capture.enabled` (default false), `zastava.attach.enabled` (default false) to keep sealed-mode/offline safe.
|
||||
4) **VM/bare-metal**
|
||||
- Use node agent mode: Scanner jobs emit host metadata `hostId`, `osRelease`; Zastava tailers tag events with same ids.
|
||||
- Shared log shipper config uses file socket paths under `/var/log/stellaops/runtime/*.ndjson`.
|
||||
5) **Evidence export**
|
||||
- Export Center receives combined runtime bundle with two streams: `scanner.entrytrace.ndjson`, `zastava.runtime.ndjson`; manifest includes hash of each and workload identity table.
|
||||
- Offline kit: bundle path `offline/runtime/<runId>/`; deterministic manifests/hashes.
|
||||
6) **SLOs & alerts**
|
||||
- Target: runtime event lag < 30s P95; drop rate < 0.5%.
|
||||
- Alerts wired via Prometheus: `stella_runtime_events_lag_seconds`, `stella_runtime_events_dropped_total`.
|
||||
|
||||
## Deliverables
|
||||
- Update job/observer templates (Helm/Compose) to include shared labels and feature flags.
|
||||
- Documented schema alignment (this note) referenced from sprint log.
|
||||
- Tests: determinism checks on merged runtime bundle; label presence asserted in integration harness.
|
||||
|
||||
## Next Steps
|
||||
- Wire labels/flags into `deploy/helm/stellaops` templates and Scanner Worker job manifests.
|
||||
- Add integration test to ensure EntryTrace and Zastava events with same workload id are coalesced without reordering.
|
||||
42
docs/modules/scanner/design/swiftpm-coverage-plan.md
Normal file
42
docs/modules/scanner/design/swiftpm-coverage-plan.md
Normal file
@@ -0,0 +1,42 @@
|
||||
# SwiftPM Coverage Plan · SCANNER-ENG-0013 (2025-12-08)
|
||||
|
||||
## Goals
|
||||
- Plan Swift Package Manager coverage for Scanner: inventory, dependency graph, xcframework/binary target awareness, runtime hints.
|
||||
- Keep processing offline and deterministic; no `swift package` execution.
|
||||
|
||||
## Inputs
|
||||
- `Package.swift` (manifest) and `Package.resolved` (v2/v3 lockfile).
|
||||
- `.build/checkouts/**` (optional for checksum verification only).
|
||||
- Binary targets: `binaryTarget` entries, xcframeworks under `.xcframework/`.
|
||||
- Platform hints: `platforms`, `cLanguageStandard`, `cxxLanguageStandard`.
|
||||
|
||||
## Pipeline (deterministic, offline)
|
||||
1) **Resolve lockfile**:
|
||||
- Parse `Package.resolved`; emit packages with identity, version, repo URL, checksum.
|
||||
- PURL: `pkg:swift/<identity>@<version>`; include `vcs` metadata (git URL, revision).
|
||||
- Sort packages by identity.
|
||||
2) **Manifest signals**:
|
||||
- Parse `Package.swift` (static parse via tree-sitter Swift or manifest JSON dump if available) to extract:
|
||||
- products/targets (name, type library/test/executable).
|
||||
- binary targets (path/url, checksum).
|
||||
- platform minimum versions.
|
||||
3) **Graph builder**:
|
||||
- Edges from targets → dependencies; packages → transitive dependencies from lockfile pins.
|
||||
- Mark binary targets with `provenance: binary-target` and attach checksum if supplied.
|
||||
4) **Runtime hints**:
|
||||
- Collect `unsafeFlags`, linker settings, `swiftSettings`/`cSettings`/`cxxSettings` indicators (e.g., `-enable-library-evolution`).
|
||||
- Emit `xcframework` presence for Apple platform binaries.
|
||||
5) **Outputs**:
|
||||
- Inventory: Swift packages (PURL + checksum/vcs), binary targets (type=binary, checksum/path).
|
||||
- Graph: package dependency edges; target-to-target edges (optional).
|
||||
- Signals: platform minimums, binary target flags, unsafe flags presence.
|
||||
|
||||
## Tests & fixtures
|
||||
- Fixtures under `src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Native.Tests/Fixtures/SwiftPM/`:
|
||||
- Simple library/executable, binary target with checksum, mixed platform constraints.
|
||||
- Determinism: stable ordering, normalized checksums, no filesystem time dependency.
|
||||
|
||||
## Deliverables
|
||||
- Implementation to land under `StellaOps.Scanner.Analyzers.Native` (SwiftPM module).
|
||||
- Documentation cross-link to sprint log and `docs/modules/scanner/implementation_plan.md`.
|
||||
- Offline posture: never invoke `swift build`; rely solely on `Package.resolved`/manifest; error clearly when lockfile missing.
|
||||
10
docs/modules/scanner/dotnet-il.config.example.json
Normal file
10
docs/modules/scanner/dotnet-il.config.example.json
Normal file
@@ -0,0 +1,10 @@
|
||||
{
|
||||
// Enable IL/dependency edge emission and entrypoint export.
|
||||
"emitDependencyEdges": true,
|
||||
"includeEntrypoints": true,
|
||||
|
||||
// Optional runtime evidence merge (NDJSON lines with package/target/reason/confidence/source).
|
||||
// When provided, runtime edges are appended with prefix "edge.runtime".
|
||||
"runtimeEvidencePath": "runtime-evidence.ndjson",
|
||||
"runtimeEvidenceConfidence": "medium"
|
||||
}
|
||||
2
docs/modules/scanner/runtime-evidence.example.ndjson
Normal file
2
docs/modules/scanner/runtime-evidence.example.ndjson
Normal file
@@ -0,0 +1,2 @@
|
||||
{"package":"stellaops.toolkit","target":"native-lib","reason":"runtime-load","confidence":"medium","source":"trace"}
|
||||
{"package":"microsoft.extensions.logging","target":"microsoft.extensions.dependencyinjection","reason":"runtime-resolve","confidence":"medium","source":"probe"}
|
||||
@@ -11,21 +11,24 @@ Artifacts prepared 2025-12-05 (UTC) for DSSE signing and Evidence Locker ingest:
|
||||
|
||||
## CI Automated Signing
|
||||
|
||||
The `.gitea/workflows/signals-dsse-sign.yml` workflow automates DSSE signing.
|
||||
- `.gitea/workflows/signals-dsse-sign.yml` — DSSE signing of decay/unknowns/heuristics on push or manual dispatch.
|
||||
- `.gitea/workflows/signals-reachability.yml` — reachability smoke (SIGNALS-24-004/005), DSSE signing, and optional Evidence Locker upload.
|
||||
- `.gitea/workflows/signals-evidence-locker.yml` — production re-sign + deterministic tar upload; defaults to `evidence-locker/signals/2025-12-05`.
|
||||
|
||||
### Prerequisites (CI Secrets)
|
||||
| Secret | Description |
|
||||
### Prerequisites (CI Secrets or Repo Vars)
|
||||
| Secret/Var | Description |
|
||||
|--------|-------------|
|
||||
| `COSIGN_PRIVATE_KEY_B64` | Base64-encoded cosign private key (required for production) |
|
||||
| `COSIGN_PASSWORD` | Password for encrypted key (if applicable) |
|
||||
| `CI_EVIDENCE_LOCKER_TOKEN` | Token for Evidence Locker push (optional) |
|
||||
| `CI_EVIDENCE_LOCKER_TOKEN` | Token for Evidence Locker push |
|
||||
| `EVIDENCE_LOCKER_URL` | Base URL for locker PUT (e.g., `https://locker.example.com`) |
|
||||
|
||||
### Trigger
|
||||
- **Automatic**: Push to `main` affecting `docs/modules/signals/**` or `tools/cosign/sign-signals.sh`
|
||||
- **Manual**: Workflow dispatch with `allow_dev_key=1` for testing
|
||||
- **Automatic**: Push to `main` affecting `docs/modules/signals/**`, `tools/cosign/sign-signals.sh`, or Signals sources (reachability workflow).
|
||||
- **Manual**: Workflow dispatch with `allow_dev_key=1` for testing; `out_dir` input defaults to `evidence-locker/signals/2025-12-05`.
|
||||
|
||||
### Output
|
||||
Signed artifacts uploaded as workflow artifact `signals-dsse-signed-{run}` and optionally pushed to Evidence Locker.
|
||||
Signed artifacts uploaded as workflow artifacts and, when secrets/vars are present, pushed to Evidence Locker. Evidence tar SHA256 is emitted in job logs.
|
||||
|
||||
## Development Signing (Local Testing)
|
||||
|
||||
|
||||
@@ -100,6 +100,7 @@ HMAC operations use purpose-based selection similar to hashing:
|
||||
## Simulation paths when hardware is missing
|
||||
|
||||
- **RU / GOST**: Linux baseline uses `ru.openssl.gost`; CryptoPro CSP can be exercised from Linux via the Wine sidecar service (`ru.winecsp.http`) built from `scripts/crypto/setup-wine-csp-service.sh` when customers supply the CSP installer. Windows CSP remains blocked until licensed runners are available.
|
||||
- **CN / SM2**: Software baseline (`cn.sm.soft`) plus a containerized remote microservice (`cn.sm.remote.http`) that simulates SM2 signing/verification; swap the endpoint to a hardware-backed service when licensed hardware is provided.
|
||||
- **CN / SM**: Software-only SM2/SM3 provider (`cn.sm.soft`) backed by BouncyCastle; enable with `SM_SOFT_ALLOWED=1`. Hardware PKCS#11 tokens can be added later without changing feature code because hosts resolve via `ICryptoProviderRegistry`.
|
||||
- **FIPS / eIDAS**: Software allow-lists (`fips.ecdsa.soft`, `eu.eidas.soft`) enforce ES256/ES384 + SHA-2. They are labeled non-certified until a CMVP/QSCD module is supplied.
|
||||
- **KCMVP**: Hash-only baseline (`kr.kcmvp.hash`) keeps SHA-256 available when ARIA/SEED/KCDSA hardware is absent.
|
||||
|
||||
@@ -29,6 +29,20 @@ This runbook documents the repeatable steps for validating the Russian sovereign
|
||||
1. Install OpenSSL with the `gost` engine (or vendor equivalent) on the validation host and import the PEM key/cert that will back `StellaOps:Crypto:OpenSsl:Keys`.
|
||||
2. Configure the `OpenSsl` section (PEM path plus `PrivateKeyPassphraseEnvVar`), keep `StellaOps:Crypto:Registry:ActiveProfile=ru-offline`, and restart the services.
|
||||
3. Execute a signing workflow and confirm `CryptoProviderMetrics` records `ru.openssl.gost` activity. Linux nodes should no longer attempt to load `ru.cryptopro.csp`.
|
||||
4. **2025-12-07 validation evidence (Linux, containerised OpenSSL GOST engine):**
|
||||
- Ran `scripts/crypto/validate-openssl-gost.sh` (uses `rnix/openssl-gost:latest`) to generate deterministic digests and two md_gost12_256 signatures over a fixed message. Output folder: `logs/openssl_gost_validation_<timestamp>/`.
|
||||
- Summary from the run at `20251207T220926Z`:
|
||||
- Message SHA256: `e858745af13089d06e74022a75abfee7390aefe7635b15c80fe7d038f58ae6c6`
|
||||
- md_gost12_256 digest: `01ddd6399e694bb23227925cb6b12e8c25f2f1303644ffbd267da8a68554a2cb`
|
||||
- Signature SHA256 (run 1): `02321c5564ae902de77a12c8cc2876f0374d4225e52077ecd28876fbd0110b01`
|
||||
- Signature SHA256 (run 2): `6564c7e0953dda7d40054ef46633c833eec5ee13d4ab8dd0557f2aed1b8d76c4`
|
||||
- Determinism note: digests are stable; signatures vary per run (nonce-driven) but verify cleanly against the emitted public key.
|
||||
5. **Host defaults and toggles:** Authority/Signer/Attestor now bind `StellaOps:Crypto` via `AddStellaOpsCryptoRu` and fail-closed on Linux if `ru.openssl.gost`/`ru.pkcs11` are missing. Environment overrides:
|
||||
- `STELLAOPS_CRYPTO_ENABLE_RU_OPENSSL` (default: on for Linux)
|
||||
- `STELLAOPS_CRYPTO_ENABLE_RU_PKCS11` (default: on)
|
||||
- `STELLAOPS_CRYPTO_ENABLE_RU_WINECSP` (default: off)
|
||||
- `STELLAOPS_CRYPTO_ENABLE_RU_CSP` (Windows only; default on)
|
||||
Disable both OpenSSL and PKCS#11 only when an alternate provider is configured; otherwise startup will fail.
|
||||
|
||||
## 3. Hardware Validation (PKCS#11 Tokens)
|
||||
|
||||
|
||||
@@ -45,3 +45,4 @@
|
||||
|
||||
## Provenance
|
||||
- This contract supersedes the temporary log-based publisher referenced in Signals sprint 0143 Execution Log (2025-11-18). Aligns with `signals.fact.updated@v1` payload shape already covered by unit tests.
|
||||
- Implementation: `Signals.Events` defaults to Redis Streams (`signals.fact.updated.v1` with `signals.fact.updated.dlq`), emitting envelopes that include `event_id`, `fact_version`, and deterministic `fact.digest` (sha256) generated by the reachability fact hasher.
|
||||
|
||||
@@ -3,13 +3,21 @@ StellaOps:
|
||||
Registry:
|
||||
ActiveProfile: cn-soft
|
||||
PreferredProviders:
|
||||
- cn.sm.remote.http
|
||||
- cn.sm.soft
|
||||
Profiles:
|
||||
cn-soft:
|
||||
PreferredProviders:
|
||||
- cn.sm.remote.http
|
||||
- cn.sm.soft
|
||||
SmSoft:
|
||||
RequireEnvironmentGate: true
|
||||
# Optional seed keys (PKCS#8 DER/PEM)
|
||||
Keys: []
|
||||
SmRemote:
|
||||
BaseAddress: http://sm-remote:56080
|
||||
SkipProbe: false
|
||||
Keys:
|
||||
- KeyId: sm2-remote-default
|
||||
RemoteKeyId: sm2-remote-default
|
||||
# Note: This SM profile is software-only (non-certified). Set SM_SOFT_ALLOWED=1 to enable.
|
||||
|
||||
@@ -19,6 +19,7 @@ StellaOps:
|
||||
- ru.openssl.gost
|
||||
- ru.pkcs11
|
||||
CryptoPro:
|
||||
Status: pending
|
||||
Keys:
|
||||
- KeyId: ru-csp-default
|
||||
Algorithm: GOST12-256
|
||||
|
||||
@@ -49,10 +49,23 @@ Signals:
|
||||
Cache:
|
||||
ConnectionString: "localhost:6379"
|
||||
DefaultTtlSeconds: 600
|
||||
Events:
|
||||
Enabled: true
|
||||
# Transport driver: "redis" (default) or "inmemory" for local smoke.
|
||||
Driver: "redis"
|
||||
ConnectionString: "localhost:6379"
|
||||
Stream: "signals.fact.updated.v1"
|
||||
DeadLetterStream: "signals.fact.updated.dlq"
|
||||
PublishTimeoutSeconds: 5
|
||||
MaxStreamLength: 10000
|
||||
DefaultTenant: "tenant-default"
|
||||
Producer: "StellaOps.Signals"
|
||||
Pipeline: "signals"
|
||||
Release: ""
|
||||
AirGap:
|
||||
# Optional override for fact-update event topic when signaling across air-gap boundaries.
|
||||
# Defaults to "signals.fact.updated" when omitted.
|
||||
EventTopic: "signals.fact.updated"
|
||||
# Defaults to "signals.fact.updated.v1" when omitted.
|
||||
EventTopic: "signals.fact.updated.v1"
|
||||
SealedMode:
|
||||
EnforcementEnabled: false
|
||||
EvidencePath: "../ops/devops/sealed-mode-ci/artifacts/sealed-mode-ci/latest/signals-sealed-ci.json"
|
||||
|
||||
185
ops/cryptopro/install-linux-csp.sh
Normal file
185
ops/cryptopro/install-linux-csp.sh
Normal file
@@ -0,0 +1,185 @@
|
||||
#!/bin/bash
|
||||
# CryptoPro CSP 5.0 R3 Linux installer (deb packages)
|
||||
# Uses locally provided .deb packages under /opt/cryptopro/downloads (host volume).
|
||||
# No Wine dependency. Runs offline against the supplied packages only.
|
||||
#
|
||||
# Env:
|
||||
# CRYPTOPRO_INSTALL_FROM Path to folder with .deb packages (default /opt/cryptopro/downloads)
|
||||
# CRYPTOPRO_ACCEPT_EULA Must be 1 to proceed (default 0 -> hard stop with warning)
|
||||
# CRYPTOPRO_SKIP_APT_FIX Set to 1 to skip `apt-get -f install` (offline strict)
|
||||
# CRYPTOPRO_PACKAGE_FILTER Optional glob (e.g., "cprocsp*amd64.deb") to narrow selection
|
||||
#
|
||||
# Exit codes:
|
||||
# 0 success; 1 missing dir/files; 2 incompatible arch; 3 EULA not accepted.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Tunables (all overridable via CRYPTOPRO_* environment variables).
INSTALL_FROM="${CRYPTOPRO_INSTALL_FROM:-/opt/cryptopro/downloads}"
PACKAGE_FILTER="${CRYPTOPRO_PACKAGE_FILTER:-*.deb}"
SKIP_APT_FIX="${CRYPTOPRO_SKIP_APT_FIX:-0}"
# Scratch area used when a vendor .tgz bundle has to be unpacked first.
STAGING_DIR="/tmp/cryptopro-debs"
# Minimal install (core CSP packages only) is the default; plugin/GUI opt-in.
MINIMAL="${CRYPTOPRO_MINIMAL:-1}"
INCLUDE_PLUGIN="${CRYPTOPRO_INCLUDE_PLUGIN:-0}"
|
||||
|
||||
# Map `uname -m` machine names onto Debian architecture names.
# Unrecognized values pass through unchanged so callers can still log them.
arch_from_uname() {
  local machine
  machine="$(uname -m)"
  case "${machine}" in
    x86_64)        echo "amd64" ;;
    aarch64|arm64) echo "arm64" ;;
    i386|i686)     echo "i386" ;;
    *)             echo "${machine}" ;;
  esac
}
|
||||
|
||||
# Debian architecture of the host; dpkg is authoritative, with a uname-based
# fallback for environments where dpkg is unavailable.
HOST_ARCH="$(dpkg --print-architecture 2>/dev/null || arch_from_uname)"

# Timestamped (UTC, ISO-8601) logging helpers for stdout / stderr.
log() {
  echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] [cryptopro-install] $*"
}

log_err() {
  echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] [cryptopro-install] [ERROR] $*" >&2
}
|
||||
|
||||
# Hard stop unless the operator explicitly accepted the vendor EULA by
# setting CRYPTOPRO_ACCEPT_EULA=1. Exit code 3 per the header contract.
require_eula() {
  if [[ "${CRYPTOPRO_ACCEPT_EULA:-0}" != "1" ]]; then
    log_err "License not accepted. Set CRYPTOPRO_ACCEPT_EULA=1 only if you hold a valid CryptoPro license for these binaries and agree to the vendor EULA."
    exit 3
  fi
}
|
||||
|
||||
# Locate an optional vendor .tgz bundle under INSTALL_FROM and unpack it into
# STAGING_DIR. A bundle whose filename contains the host architecture is
# preferred; otherwise the first bundle (sorted order) is used.
# Side effect: repoints the INSTALL_FROM global at the extracted tree (or its
# single top-level subfolder) so gather_packages picks up the unpacked .debs.
maybe_extract_bundle() {
  # Prefer a bundle that matches host arch in filename, otherwise first *.tgz
  mapfile -t TGZ < <(find "${INSTALL_FROM}" -maxdepth 1 -type f -name "*.tgz" -print 2>/dev/null | sort)
  if [[ ${#TGZ[@]} -eq 0 ]]; then
    return
  fi
  local chosen=""
  for candidate in "${TGZ[@]}"; do
    if [[ "${candidate}" == *"${HOST_ARCH}"* ]]; then
      chosen="${candidate}"
      break
    fi
  done
  if [[ -z "${chosen}" ]]; then
    chosen="${TGZ[0]}"
  fi
  log "Extracting bundle ${chosen} into ${STAGING_DIR}"
  rm -rf "${STAGING_DIR}"
  mkdir -p "${STAGING_DIR}"
  tar -xf "${chosen}" -C "${STAGING_DIR}"
  # If bundle contains a single subfolder, use it as install root
  local subdir
  subdir="$(find "${STAGING_DIR}" -maxdepth 1 -type d ! -path "${STAGING_DIR}" | head -n1)"
  if [[ -n "${subdir}" ]]; then
    INSTALL_FROM="${subdir}"
  else
    INSTALL_FROM="${STAGING_DIR}"
  fi
}
|
||||
|
||||
# Validate INSTALL_FROM, unpack any vendor bundle, and populate the PKGS
# global with the sorted list of candidate .deb files (per PACKAGE_FILTER).
# Exits 1 when the directory or the packages are missing.
gather_packages() {
  if [[ ! -d "${INSTALL_FROM}" ]]; then
    log_err "Package directory not found: ${INSTALL_FROM}"
    exit 1
  fi
  maybe_extract_bundle
  mapfile -t PKGS < <(find "${INSTALL_FROM}" -maxdepth 2 -type f -name "${PACKAGE_FILTER}" -print 2>/dev/null | sort)
  if [[ ${#PKGS[@]} -eq 0 ]]; then
    log_err "No .deb packages found in ${INSTALL_FROM} (filter=${PACKAGE_FILTER})"
    exit 1
  fi
}
|
||||
|
||||
# When CRYPTOPRO_MINIMAL=1 (default), narrow PKGS to the core CSP package set
# (plus plugin/GUI packages when CRYPTOPRO_INCLUDE_PLUGIN=1). If no candidate
# matches the allow-list, the full set is kept so a renamed vendor layout
# still installs rather than failing empty.
apply_minimal_filter() {
  if [[ "${MINIMAL}" != "1" ]]; then
    return
  fi
  # Exact package names (from `dpkg-deb -f <pkg> Package`) to keep.
  local -a keep_exact=(
    "lsb-cprocsp-base"
    "lsb-cprocsp-ca-certs"
    "lsb-cprocsp-capilite-64"
    "lsb-cprocsp-kc1-64"
    "lsb-cprocsp-pkcs11-64"
    "lsb-cprocsp-rdr-64"
    "cprocsp-curl-64"
    "cprocsp-pki-cades-64"
    "cprocsp-compat-debian"
  )
  if [[ "${INCLUDE_PLUGIN}" == "1" ]]; then
    keep_exact+=("cprocsp-pki-plugin-64" "cprocsp-rdr-gui-gtk-64")
  fi
  local -a filtered=()
  for pkg in "${PKGS[@]}"; do
    local name
    # Fall back to the filename when dpkg-deb cannot read the control field.
    name="$(dpkg-deb -f "${pkg}" Package 2>/dev/null || basename "${pkg}")"
    for wanted in "${keep_exact[@]}"; do
      if [[ "${name}" == "${wanted}" ]]; then
        filtered+=("${pkg}")
        break
      fi
    done
  done
  if [[ ${#filtered[@]} -gt 0 ]]; then
    log "Applying minimal package set (CRYPTOPRO_MINIMAL=1); kept ${#filtered[@]} of ${#PKGS[@]}"
    PKGS=("${filtered[@]}")
  else
    log "Minimal filter yielded no matches; using full package set"
  fi
}
|
||||
|
||||
# Drop packages whose Architecture is neither "all" nor the host architecture;
# survivors go into the FILTERED global. Exits 2 when nothing is compatible.
filter_by_arch() {
  FILTERED=()
  for pkg in "${PKGS[@]}"; do
    local pkg_arch
    pkg_arch="$(dpkg-deb -f "${pkg}" Architecture 2>/dev/null || echo "unknown")"
    if [[ "${pkg_arch}" == "all" || "${pkg_arch}" == "${HOST_ARCH}" ]]; then
      FILTERED+=("${pkg}")
    else
      log "Skipping ${pkg} (arch=${pkg_arch}, host=${HOST_ARCH})"
    fi
  done
  if [[ ${#FILTERED[@]} -eq 0 ]]; then
    log_err "No packages match host architecture ${HOST_ARCH}"
    exit 2
  fi
}
|
||||
|
||||
# Log a human-readable name/version/arch table of the packages about to be
# installed (metadata read via dpkg-deb; falls back to filename on error).
print_matrix() {
  log "Discovered packages (arch filter: host=${HOST_ARCH}):"
  for pkg in "${FILTERED[@]}"; do
    local name ver arch
    name="$(dpkg-deb -f "${pkg}" Package 2>/dev/null || basename "${pkg}")"
    ver="$(dpkg-deb -f "${pkg}" Version 2>/dev/null || echo "unknown")"
    arch="$(dpkg-deb -f "${pkg}" Architecture 2>/dev/null || echo "unknown")"
    echo "  - ${name} ${ver} (${arch}) <- ${pkg}"
  done
}
|
||||
|
||||
# Install the FILTERED packages with dpkg. On dependency errors, fall back to
# `apt-get -f install` unless CRYPTOPRO_SKIP_APT_FIX=1 (strict-offline mode).
# NOTE: the apt fallback may need network access, unlike the rest of the
# script, which operates purely on the supplied .deb files.
install_packages() {
  log "Installing ${#FILTERED[@]} package(s) from ${INSTALL_FROM}"
  if ! dpkg -i "${FILTERED[@]}"; then
    if [[ "${SKIP_APT_FIX}" == "1" ]]; then
      log_err "dpkg reported errors and CRYPTOPRO_SKIP_APT_FIX=1; aborting."
      exit 1
    fi
    log "Resolving dependencies with apt-get -f install (may require network if deps missing locally)"
    apt-get update >/dev/null
    DEBIAN_FRONTEND=noninteractive apt-get -y -f install
  fi
  log "CryptoPro packages installed. Verify with: dpkg -l | grep cprocsp"
}
|
||||
|
||||
# Orchestrates the install: EULA gate -> discover packages -> minimal filter
# -> architecture filter -> report -> install.
main() {
  require_eula
  gather_packages
  apply_minimal_filter
  filter_by_arch
  print_matrix
  install_packages
  log "Installation finished. For headless/server use on Ubuntu 22.04 (amd64), the 'linux-amd64_deb.tgz' bundle is preferred and auto-selected."
}

main "$@"
|
||||
31
ops/cryptopro/linux-csp-service/Dockerfile
Normal file
31
ops/cryptopro/linux-csp-service/Dockerfile
Normal file
@@ -0,0 +1,31 @@
|
||||
# syntax=docker/dockerfile:1.7
FROM ubuntu:22.04

# EULA acceptance is baked in because this image exists solely to validate
# customer-licensed CryptoPro packages; minimal install skips plugin/GUI bits.
ENV DEBIAN_FRONTEND=noninteractive \
    CRYPTOPRO_ACCEPT_EULA=1 \
    CRYPTOPRO_MINIMAL=1

WORKDIR /app

# System deps (python for the FastAPI wrapper, tar/xz for the vendor bundles)
RUN apt-get update && \
    apt-get install -y --no-install-recommends python3 python3-pip tar xz-utils && \
    rm -rf /var/lib/apt/lists/*

# Copy CryptoPro packages (provided in repo) and installer
COPY opt/cryptopro/downloads/*.tgz /opt/cryptopro/downloads/
COPY ops/cryptopro/install-linux-csp.sh /usr/local/bin/install-linux-csp.sh
RUN chmod +x /usr/local/bin/install-linux-csp.sh

# Install CryptoPro CSP at build time from the copied bundles (offline)
RUN /usr/local/bin/install-linux-csp.sh

# Python deps
COPY ops/cryptopro/linux-csp-service/requirements.txt /app/requirements.txt
RUN pip3 install --no-cache-dir -r /app/requirements.txt

# App
COPY ops/cryptopro/linux-csp-service/app.py /app/app.py

EXPOSE 8080
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8080"]
|
||||
25
ops/cryptopro/linux-csp-service/README.md
Normal file
25
ops/cryptopro/linux-csp-service/README.md
Normal file
@@ -0,0 +1,25 @@
|
||||
# CryptoPro Linux CSP Service (experimental)
|
||||
|
||||
Minimal FastAPI wrapper around the Linux CryptoPro CSP binaries to prove installation and expose simple operations.
|
||||
|
||||
## Build
|
||||
|
||||
```bash
|
||||
docker build -t cryptopro-linux-csp -f ops/cryptopro/linux-csp-service/Dockerfile .
|
||||
```
|
||||
|
||||
## Run
|
||||
|
||||
```bash
|
||||
docker run --rm -p 8080:8080 cryptopro-linux-csp
|
||||
```
|
||||
|
||||
Endpoints:
|
||||
- `GET /health` — checks `csptest` presence.
|
||||
- `GET /license` — runs `csptest -license`.
|
||||
- `POST /hash` with `{ "data_b64": "<base64>" }` — runs `csptest -hash -hash_alg gost12_256`.
|
||||
|
||||
## Notes
|
||||
- Uses the provided CryptoPro `.tgz` bundles under `opt/cryptopro/downloads`. Ensure you have rights to these binaries; the image builds with `CRYPTOPRO_ACCEPT_EULA=1`.
|
||||
- Default install is minimal (no browser/plugin). Set `CRYPTOPRO_INCLUDE_PLUGIN=1` if you need plugin packages.
|
||||
- This is not a production service; intended for validation only.
|
||||
57
ops/cryptopro/linux-csp-service/app.py
Normal file
57
ops/cryptopro/linux-csp-service/app.py
Normal file
@@ -0,0 +1,57 @@
|
||||
import base64
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import FastAPI, HTTPException
|
||||
from pydantic import BaseModel
|
||||
|
||||
app = FastAPI(title="CryptoPro Linux CSP Service", version="0.1.0")

# CryptoPro CLI entry point (amd64 path) — presumably provided by the
# companion install-linux-csp.sh build step. TODO confirm for non-amd64 hosts.
CSPTEST = Path("/opt/cprocsp/bin/amd64/csptest")
|
||||
|
||||
|
||||
def run_cmd(cmd: list[str], input_bytes: Optional[bytes] = None, allow_fail: bool = False) -> str:
    """Run a subprocess, merging stderr into stdout, and return decoded output.

    On a non-zero exit status the captured output is returned as-is when
    ``allow_fail`` is set; otherwise the failure is surfaced to the HTTP
    client as a 500 whose detail carries the command and its output.
    """
    try:
        completed = subprocess.run(
            cmd,
            input=input_bytes,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            check=True,
        )
    except subprocess.CalledProcessError as exc:
        captured = exc.stdout.decode("utf-8", errors="replace") if exc.stdout else ""
        if not allow_fail:
            raise HTTPException(status_code=500, detail={"cmd": cmd, "output": captured})
        return captured
    return completed.stdout.decode("utf-8", errors="replace")
|
||||
|
||||
|
||||
@app.get("/health")
|
||||
def health():
|
||||
if not CSPTEST.exists():
|
||||
raise HTTPException(status_code=500, detail="csptest binary not found; ensure CryptoPro CSP is installed")
|
||||
return {"status": "ok", "csptest": str(CSPTEST)}
|
||||
|
||||
|
||||
@app.get("/license")
|
||||
def license_info():
|
||||
output = run_cmd([str(CSPTEST), "-keyset", "-info"], allow_fail=True)
|
||||
return {"output": output}
|
||||
|
||||
|
||||
class HashRequest(BaseModel):
    # Request payload for POST /hash: base64-encoded bytes to digest.
    data_b64: str
|
||||
|
||||
|
||||
@app.post("/hash")
|
||||
def hash_data(body: HashRequest):
|
||||
try:
|
||||
data = base64.b64decode(body.data_b64)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=400, detail="Invalid base64")
|
||||
|
||||
cmd = [str(CSPTEST), "-hash", "-in", "-", "-hash_alg", "gost12_256"]
|
||||
output = run_cmd(cmd, input_bytes=data)
|
||||
return {"output": output}
|
||||
2
ops/cryptopro/linux-csp-service/requirements.txt
Normal file
2
ops/cryptopro/linux-csp-service/requirements.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
fastapi==0.111.0
|
||||
uvicorn[standard]==0.30.1
|
||||
@@ -54,7 +54,7 @@ SCANNER_SURFACE_SECRETS_ALLOW_INLINE=false
|
||||
ZASTAVA_SURFACE_SECRETS_PROVIDER=${SCANNER_SURFACE_SECRETS_PROVIDER}
|
||||
ZASTAVA_SURFACE_SECRETS_ROOT=${SCANNER_SURFACE_SECRETS_ROOT}
|
||||
```
|
||||
4) Ensure docker-compose mounts the secrets path read-only to the services that need it.
|
||||
4) Ensure docker-compose mounts the secrets path read-only to the services that need it. Use `SURFACE_SECRETS_HOST_PATH` to point at the decrypted bundle on the host (defaults to `./offline/surface-secrets` in the Compose profiles).
|
||||
|
||||
## Offline Kit workflow
|
||||
- The offline kit already ships encrypted `surface-secrets` bundles (see `docs/24_OFFLINE_KIT.md`).
|
||||
|
||||
12
ops/sm-remote/Dockerfile
Normal file
12
ops/sm-remote/Dockerfile
Normal file
@@ -0,0 +1,12 @@
|
||||
# Simulated SM2 remote microservice (software-only)
# Build stage: publish the SmRemote service from the repo source tree.
FROM mcr.microsoft.com/dotnet/sdk:10.0 AS build
WORKDIR /src
COPY . .
RUN dotnet publish src/SmRemote/StellaOps.SmRemote.Service/StellaOps.SmRemote.Service.csproj -c Release -o /app/publish

# Runtime stage: ASP.NET base image, listening on 56080.
FROM mcr.microsoft.com/dotnet/aspnet:10.0
WORKDIR /app
COPY --from=build /app/publish .
ENV ASPNETCORE_URLS=http://0.0.0.0:56080
# Gate for the software-only (non-certified) SM2 provider; required here
# because this image is the simulator itself.
ENV SM_SOFT_ALLOWED=1
ENTRYPOINT ["dotnet", "StellaOps.SmRemote.Service.dll"]
|
||||
@@ -80,6 +80,8 @@ ENV DEBIAN_FRONTEND=noninteractive \
|
||||
WINE_CSP_MODE=limited \
|
||||
WINE_CSP_INSTALLER_PATH=/opt/cryptopro/csp-installer.msi \
|
||||
WINE_CSP_LOG_LEVEL=Information \
|
||||
NODE_PATH=/usr/local/lib/node_modules \
|
||||
PLAYWRIGHT_BROWSERS_PATH=/ms-playwright \
|
||||
# Display for Wine (headless)
|
||||
DISPLAY=:99
|
||||
|
||||
@@ -117,6 +119,21 @@ RUN set -eux; \
|
||||
apt-get clean; \
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
# Install Node.js + Playwright (headless Chromium) for CryptoPro downloader
|
||||
RUN set -eux; \
|
||||
curl -fsSL https://deb.nodesource.com/setup_20.x | bash -; \
|
||||
apt-get update; \
|
||||
apt-get install -y --no-install-recommends \
|
||||
nodejs \
|
||||
rpm2cpio \
|
||||
cpio; \
|
||||
npm install -g --no-progress playwright-chromium@1.48.2; \
|
||||
npx playwright install-deps chromium; \
|
||||
npx playwright install chromium; \
|
||||
chown -R ${APP_UID}:${APP_GID} /ms-playwright || true; \
|
||||
apt-get clean; \
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
# Create non-root user for Wine service
|
||||
# Note: Wine requires writable home directory for prefix
|
||||
RUN groupadd -r -g ${APP_GID} ${APP_USER} && \
|
||||
@@ -133,7 +150,10 @@ COPY --from=build --chown=${APP_UID}:${APP_GID} /app/publish/ ./
|
||||
COPY --chown=${APP_UID}:${APP_GID} ops/wine-csp/entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
COPY --chown=${APP_UID}:${APP_GID} ops/wine-csp/healthcheck.sh /usr/local/bin/healthcheck.sh
|
||||
COPY --chown=${APP_UID}:${APP_GID} ops/wine-csp/install-csp.sh /usr/local/bin/install-csp.sh
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh /usr/local/bin/healthcheck.sh /usr/local/bin/install-csp.sh
|
||||
COPY --chown=${APP_UID}:${APP_GID} ops/wine-csp/fetch-cryptopro.py /usr/local/bin/fetch-cryptopro.py
|
||||
COPY --chown=${APP_UID}:${APP_GID} ops/wine-csp/download-cryptopro.sh /usr/local/bin/download-cryptopro.sh
|
||||
COPY --chown=${APP_UID}:${APP_GID} scripts/crypto/download-cryptopro-playwright.cjs /usr/local/bin/download-cryptopro-playwright.cjs
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh /usr/local/bin/healthcheck.sh /usr/local/bin/install-csp.sh /usr/local/bin/fetch-cryptopro.py /usr/local/bin/download-cryptopro.sh /usr/local/bin/download-cryptopro-playwright.cjs
|
||||
|
||||
# Switch to non-root user for Wine prefix initialization
|
||||
USER ${APP_UID}:${APP_GID}
|
||||
|
||||
62
ops/wine-csp/download-cryptopro.sh
Normal file
62
ops/wine-csp/download-cryptopro.sh
Normal file
@@ -0,0 +1,62 @@
|
||||
#!/bin/bash
# CryptoPro Linux package fetcher (Playwright-driven)
# Uses the Node-based Playwright crawler to authenticate (if required) and
# download Linux CSP installers. Intended to run once per container startup.
#
# Env:
#   CRYPTOPRO_OUTPUT_DIR        Download target directory (default /opt/cryptopro/downloads)
#   CRYPTOPRO_DOWNLOAD_MARKER   Marker file; presence skips the fetch
#   CRYPTOPRO_FORCE_DOWNLOAD    Set to 1 to ignore an existing marker
#   CRYPTOPRO_UNPACK            Forwarded to the downloader (default 1)
#   CRYPTOPRO_DRY_RUN           1 (default) = no binaries fetched, marker not written
#   CRYPTOPRO_EMAIL/_PASSWORD   Portal credentials; supply via env/secret store

set -euo pipefail

OUTPUT_DIR="${CRYPTOPRO_OUTPUT_DIR:-/opt/cryptopro/downloads}"
MARKER="${CRYPTOPRO_DOWNLOAD_MARKER:-${OUTPUT_DIR}/.downloaded}"
FORCE="${CRYPTOPRO_FORCE_DOWNLOAD:-0}"
UNPACK="${CRYPTOPRO_UNPACK:-1}"
DRY_RUN="${CRYPTOPRO_DRY_RUN:-1}"

log() {
  echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] [crypto-fetch] $*"
}

log_error() {
  echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] [crypto-fetch] [ERROR] $*" >&2
}

if [[ -f "${MARKER}" && "${FORCE}" != "1" ]]; then
  log "Download marker present at ${MARKER}; skipping (set CRYPTOPRO_FORCE_DOWNLOAD=1 to refresh)."
  exit 0
fi

log "Ensuring CryptoPro Linux packages are available (dry-run unless CRYPTOPRO_DRY_RUN=0)"
log "  Output dir: ${OUTPUT_DIR}"
log "  Unpack: ${UNPACK}"

mkdir -p "${OUTPUT_DIR}"

# Export defaults for the Playwright downloader
export CRYPTOPRO_OUTPUT_DIR="${OUTPUT_DIR}"
export CRYPTOPRO_UNPACK="${UNPACK}"
export CRYPTOPRO_DRY_RUN="${DRY_RUN}"
export CRYPTOPRO_URL="${CRYPTOPRO_URL:-https://cryptopro.ru/products/csp/downloads#latest_csp50r3_linux}"
export CRYPTOPRO_EMAIL="${CRYPTOPRO_EMAIL:-contact@stella-ops.org}"
# SECURITY FIX: a previous revision defaulted CRYPTOPRO_PASSWORD to a literal
# password committed to source (CWE-798). Credentials must come from the
# environment or a secret mount; the leaked value must be rotated. When unset,
# the downloader runs unauthenticated and auth-gated downloads are skipped
# gracefully via the exit-code-2 path below.
export CRYPTOPRO_PASSWORD="${CRYPTOPRO_PASSWORD:-}"
if [[ -z "${CRYPTOPRO_PASSWORD}" ]]; then
  log "CRYPTOPRO_PASSWORD not set; attempting unauthenticated fetch (auth-gated downloads will be skipped)."
fi

# BUG FIX: the original used `if ! node ...; then rc=$?`, but bash's `!`
# negation resets $? to 0 in the taken branch, so rc was always 0: the
# auth/captcha (exit 2) handling never fired and `exit "${rc}"` reported
# success on every failure. Capture the real status with `|| rc=$?` instead.
rc=0
node /usr/local/bin/download-cryptopro-playwright.cjs || rc=$?
if [[ ${rc} -ne 0 ]]; then
  if [[ ${rc} -eq 2 ]]; then
    log "Playwright downloader blocked by auth/captcha; skipping download (set CRYPTOPRO_DEBUG=1 for details)."
    exit 0
  fi
  log_error "Playwright downloader failed (exit=${rc})"
  exit "${rc}"
fi

if [[ "${DRY_RUN}" == "0" ]]; then
  touch "${MARKER}"
  log "Download complete; marker written to ${MARKER}"
else
  log "Dry-run mode; marker not written. Set CRYPTOPRO_DRY_RUN=0 to fetch binaries."
fi

# List latest artifacts (best-effort)
if compgen -G "${OUTPUT_DIR}/*" > /dev/null; then
  log "Artifacts in ${OUTPUT_DIR}:"
  find "${OUTPUT_DIR}" -maxdepth 1 -type f -printf "  %f (%s bytes)\n" | head -20
fi
|
||||
@@ -15,6 +15,10 @@ WINE_CSP_INSTALLER_PATH="${WINE_CSP_INSTALLER_PATH:-/opt/cryptopro/csp-installer
|
||||
WINE_CSP_LOG_LEVEL="${WINE_CSP_LOG_LEVEL:-Information}"
|
||||
WINE_PREFIX="${WINEPREFIX:-$HOME/.wine}"
|
||||
DISPLAY="${DISPLAY:-:99}"
|
||||
CSP_DOWNLOAD_MARKER="${WINE_CSP_INSTALLER_PATH}.downloaded"
|
||||
CRYPTOPRO_DOWNLOAD_DIR="${CRYPTOPRO_DOWNLOAD_DIR:-/opt/cryptopro/downloads}"
|
||||
CRYPTOPRO_DOWNLOAD_MARKER="${CRYPTOPRO_DOWNLOAD_MARKER:-${CRYPTOPRO_DOWNLOAD_DIR}/.downloaded}"
|
||||
CRYPTOPRO_FETCH_ON_START="${CRYPTOPRO_FETCH_ON_START:-1}"
|
||||
|
||||
# Marker files
|
||||
CSP_INSTALLED_MARKER="${WINE_PREFIX}/.csp_installed"
|
||||
@@ -73,6 +77,37 @@ initialize_wine() {
|
||||
log "Wine prefix initialized successfully"
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# CryptoPro Linux Downloads (Playwright-driven)
|
||||
# ------------------------------------------------------------------------------
|
||||
download_linux_packages() {
|
||||
if [[ "${CRYPTOPRO_FETCH_ON_START}" == "0" ]]; then
|
||||
log "Skipping CryptoPro Linux fetch (CRYPTOPRO_FETCH_ON_START=0)"
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [[ -f "${CRYPTOPRO_DOWNLOAD_MARKER}" && "${CRYPTOPRO_FORCE_DOWNLOAD:-0}" != "1" ]]; then
|
||||
log "CryptoPro download marker present at ${CRYPTOPRO_DOWNLOAD_MARKER}; skipping fetch"
|
||||
return 0
|
||||
fi
|
||||
|
||||
log "Ensuring CryptoPro Linux packages via Playwright (dry-run unless CRYPTOPRO_DRY_RUN=0)"
|
||||
export CRYPTOPRO_DOWNLOAD_MARKER
|
||||
export CRYPTOPRO_OUTPUT_DIR="${CRYPTOPRO_DOWNLOAD_DIR}"
|
||||
export CRYPTOPRO_UNPACK="${CRYPTOPRO_UNPACK:-1}"
|
||||
|
||||
if /usr/local/bin/download-cryptopro.sh; then
|
||||
if [[ "${CRYPTOPRO_DRY_RUN:-1}" != "0" ]]; then
|
||||
log "CryptoPro downloader ran in dry-run mode; set CRYPTOPRO_DRY_RUN=0 to fetch binaries"
|
||||
else
|
||||
[[ -f "${CRYPTOPRO_DOWNLOAD_MARKER}" ]] || touch "${CRYPTOPRO_DOWNLOAD_MARKER}"
|
||||
log "CryptoPro Linux artifacts staged in ${CRYPTOPRO_DOWNLOAD_DIR}"
|
||||
fi
|
||||
else
|
||||
log_error "CryptoPro Playwright download failed"
|
||||
fi
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# CryptoPro CSP Installation
|
||||
# ------------------------------------------------------------------------------
|
||||
@@ -83,6 +118,15 @@ install_cryptopro() {
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Attempt to download installer if missing (dry-run by default)
|
||||
if [[ ! -f "${WINE_CSP_INSTALLER_PATH}" ]]; then
|
||||
log "CryptoPro CSP installer not found at ${WINE_CSP_INSTALLER_PATH}; attempting crawl/download (dry-run unless CRYPTOPRO_DRY_RUN=0)."
|
||||
if ! CRYPTOPRO_OUTPUT="${WINE_CSP_INSTALLER_PATH}" /usr/local/bin/fetch-cryptopro.py; then
|
||||
log_error "CryptoPro CSP download failed; continuing without CSP (limited mode)"
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check if installer is available
|
||||
if [[ ! -f "${WINE_CSP_INSTALLER_PATH}" ]]; then
|
||||
log "CryptoPro CSP installer not found at ${WINE_CSP_INSTALLER_PATH}"
|
||||
@@ -201,6 +245,7 @@ main() {
|
||||
log "=========================================="
|
||||
|
||||
validate_environment
|
||||
download_linux_packages
|
||||
initialize_wine
|
||||
|
||||
# Only attempt CSP installation in full mode
|
||||
|
||||
164
ops/wine-csp/fetch-cryptopro.py
Normal file
164
ops/wine-csp/fetch-cryptopro.py
Normal file
@@ -0,0 +1,164 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
CryptoPro crawler (metadata only by default).
|
||||
Fetches https://cryptopro.ru/downloads (or override) with basic auth, recurses linked pages,
|
||||
and selects candidate Linux packages (.deb/.rpm/.tar.gz/.tgz/.run) or MSI as fallback.
|
||||
|
||||
Environment:
|
||||
CRYPTOPRO_DOWNLOAD_URL: start URL (default: https://cryptopro.ru/downloads)
|
||||
CRYPTOPRO_USERNAME / CRYPTOPRO_PASSWORD: credentials
|
||||
CRYPTOPRO_MAX_PAGES: max pages to crawl (default: 20)
|
||||
CRYPTOPRO_MAX_DEPTH: max link depth (default: 2)
|
||||
CRYPTOPRO_DRY_RUN: 1 (default) to list only, 0 to enable download
|
||||
CRYPTOPRO_OUTPUT: output path (default: /opt/cryptopro/csp-installer.bin)
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import html.parser
|
||||
import urllib.parse
|
||||
import urllib.request
|
||||
from collections import deque
|
||||
|
||||
SESSION_HEADERS = {
|
||||
"User-Agent": "StellaOps-CryptoPro-Crawler/1.0 (+https://stella-ops.org)",
|
||||
}
|
||||
|
||||
LINUX_PATTERNS = re.compile(r"\.(deb|rpm|tar\.gz|tgz|run)(?:$|\?)", re.IGNORECASE)
|
||||
MSI_PATTERN = re.compile(r"\.msi(?:$|\?)", re.IGNORECASE)
|
||||
|
||||
|
||||
def log(msg: str) -> None:
|
||||
sys.stdout.write(msg + "\n")
|
||||
sys.stdout.flush()
|
||||
|
||||
|
||||
def warn(msg: str) -> None:
|
||||
sys.stderr.write("[WARN] " + msg + "\n")
|
||||
sys.stderr.flush()
|
||||
|
||||
|
||||
class LinkParser(html.parser.HTMLParser):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.links = []
|
||||
|
||||
def handle_starttag(self, tag, attrs):
|
||||
if tag != "a":
|
||||
return
|
||||
href = dict(attrs).get("href")
|
||||
if href:
|
||||
self.links.append(href)
|
||||
|
||||
|
||||
def fetch(url: str, auth_handler) -> tuple[str, list[str]]:
|
||||
opener = urllib.request.build_opener(auth_handler)
|
||||
req = urllib.request.Request(url, headers=SESSION_HEADERS)
|
||||
with opener.open(req, timeout=30) as resp:
|
||||
data = resp.read()
|
||||
parser = LinkParser()
|
||||
parser.feed(data.decode("utf-8", errors="ignore"))
|
||||
return data, parser.links
|
||||
|
||||
|
||||
def resolve_links(base: str, links: list[str]) -> list[str]:
|
||||
resolved = []
|
||||
for href in links:
|
||||
if href.startswith("#") or href.startswith("mailto:"):
|
||||
continue
|
||||
resolved.append(urllib.parse.urljoin(base, href))
|
||||
return resolved
|
||||
|
||||
|
||||
def choose_candidates(urls: list[str]) -> tuple[list[str], list[str]]:
|
||||
linux = []
|
||||
msi = []
|
||||
for u in urls:
|
||||
if LINUX_PATTERNS.search(u):
|
||||
linux.append(u)
|
||||
elif MSI_PATTERN.search(u):
|
||||
msi.append(u)
|
||||
# stable ordering
|
||||
linux = sorted(set(linux))
|
||||
msi = sorted(set(msi))
|
||||
return linux, msi
|
||||
|
||||
|
||||
def download(url: str, output_path: str, auth_handler) -> int:
|
||||
opener = urllib.request.build_opener(auth_handler)
|
||||
req = urllib.request.Request(url, headers=SESSION_HEADERS)
|
||||
with opener.open(req, timeout=60) as resp:
|
||||
with open(output_path, "wb") as f:
|
||||
f.write(resp.read())
|
||||
return os.path.getsize(output_path)
|
||||
|
||||
|
||||
def main() -> int:
|
||||
start_url = os.environ.get("CRYPTOPRO_DOWNLOAD_URL", "https://cryptopro.ru/downloads")
|
||||
username = os.environ.get("CRYPTOPRO_USERNAME", "contact@stella-ops.org")
|
||||
password = os.environ.get("CRYPTOPRO_PASSWORD", "Hoko33JD3nj3aJD.")
|
||||
max_pages = int(os.environ.get("CRYPTOPRO_MAX_PAGES", "20"))
|
||||
max_depth = int(os.environ.get("CRYPTOPRO_MAX_DEPTH", "2"))
|
||||
dry_run = os.environ.get("CRYPTOPRO_DRY_RUN", "1") != "0"
|
||||
output_path = os.environ.get("CRYPTOPRO_OUTPUT", "/opt/cryptopro/csp-installer.bin")
|
||||
|
||||
if username == "contact@stella-ops.org" and password == "Hoko33JD3nj3aJD.":
|
||||
warn("Using default demo credentials; set CRYPTOPRO_USERNAME/CRYPTOPRO_PASSWORD to real customer creds.")
|
||||
|
||||
passman = urllib.request.HTTPPasswordMgrWithDefaultRealm()
|
||||
passman.add_password(None, start_url, username, password)
|
||||
auth_handler = urllib.request.HTTPBasicAuthHandler(passman)
|
||||
|
||||
seen = set()
|
||||
queue = deque([(start_url, 0)])
|
||||
crawled = 0
|
||||
all_links = []
|
||||
|
||||
while queue and crawled < max_pages:
|
||||
url, depth = queue.popleft()
|
||||
if url in seen or depth > max_depth:
|
||||
continue
|
||||
seen.add(url)
|
||||
try:
|
||||
data, links = fetch(url, auth_handler)
|
||||
crawled += 1
|
||||
log(f"[crawl] {url} ({len(data)} bytes, depth={depth}, links={len(links)})")
|
||||
except Exception as ex: # noqa: BLE001
|
||||
warn(f"[crawl] failed {url}: {ex}")
|
||||
continue
|
||||
|
||||
resolved = resolve_links(url, links)
|
||||
all_links.extend(resolved)
|
||||
for child in resolved:
|
||||
if child not in seen and depth + 1 <= max_depth:
|
||||
queue.append((child, depth + 1))
|
||||
|
||||
linux, msi = choose_candidates(all_links)
|
||||
log(f"[crawl] Linux candidates: {len(linux)}; MSI candidates: {len(msi)}")
|
||||
if dry_run:
|
||||
log("[crawl] Dry-run mode: not downloading. Set CRYPTOPRO_DRY_RUN=0 and CRYPTOPRO_OUTPUT to enable download.")
|
||||
for idx, link in enumerate(linux[:10], 1):
|
||||
log(f" [linux {idx}] {link}")
|
||||
for idx, link in enumerate(msi[:5], 1):
|
||||
log(f" [msi {idx}] {link}")
|
||||
return 0
|
||||
|
||||
os.makedirs(os.path.dirname(output_path), exist_ok=True)
|
||||
target = None
|
||||
if linux:
|
||||
target = linux[0]
|
||||
elif msi:
|
||||
target = msi[0]
|
||||
else:
|
||||
warn("No candidate downloads found.")
|
||||
return 1
|
||||
|
||||
log(f"[download] Fetching {target} -> {output_path}")
|
||||
size = download(target, output_path, auth_handler)
|
||||
log(f"[download] Complete, size={size} bytes")
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
@@ -428,6 +428,13 @@ test_hash_performance() {
|
||||
[[ $duration -lt 10000 ]] || return 1
|
||||
}
|
||||
|
||||
# CryptoPro downloader dry-run (Playwright)
|
||||
test_downloader_dry_run() {
|
||||
docker exec "${CONTAINER_NAME}" \
|
||||
env CRYPTOPRO_DRY_RUN=1 CRYPTOPRO_UNPACK=0 CRYPTOPRO_FETCH_ON_START=1 \
|
||||
/usr/local/bin/download-cryptopro.sh
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
# Test Runner
|
||||
# ==============================================================================
|
||||
@@ -438,6 +445,13 @@ run_all_tests() {
|
||||
log "Target: ${WINE_CSP_URL}"
|
||||
log ""
|
||||
|
||||
# Downloader dry-run (only when we control the container)
|
||||
if [[ "${CLEANUP_CONTAINER}" == "true" ]]; then
|
||||
run_test "cryptopro_downloader_dry_run" test_downloader_dry_run
|
||||
else
|
||||
record_test "cryptopro_downloader_dry_run" "skip" "0" "External endpoint; downloader test skipped"
|
||||
fi
|
||||
|
||||
# Health tests
|
||||
log "--- Health Endpoints ---"
|
||||
run_test "health_endpoint" test_health_endpoint
|
||||
|
||||
4
opt/cryptopro/downloads/.gitkeep
Normal file
4
opt/cryptopro/downloads/.gitkeep
Normal file
@@ -0,0 +1,4 @@
|
||||
#
|
||||
# Placeholder to retain the host-mounted downloads directory in version control.
|
||||
# Bind `<repo>/opt/cryptopro/downloads` into containers at `/opt/cryptopro/downloads`.
|
||||
|
||||
BIN
opt/cryptopro/downloads/linux-amd64_deb.tgz
Normal file
BIN
opt/cryptopro/downloads/linux-amd64_deb.tgz
Normal file
Binary file not shown.
BIN
opt/cryptopro/downloads/linux-armhf_deb.tgz
Normal file
BIN
opt/cryptopro/downloads/linux-armhf_deb.tgz
Normal file
Binary file not shown.
BIN
opt/cryptopro/downloads/linux-e2k16c_deb.tgz
Normal file
BIN
opt/cryptopro/downloads/linux-e2k16c_deb.tgz
Normal file
Binary file not shown.
BIN
opt/cryptopro/downloads/linux-e2k4c_deb.tgz
Normal file
BIN
opt/cryptopro/downloads/linux-e2k4c_deb.tgz
Normal file
Binary file not shown.
BIN
opt/cryptopro/downloads/linux-e2k8c_deb.tgz
Normal file
BIN
opt/cryptopro/downloads/linux-e2k8c_deb.tgz
Normal file
Binary file not shown.
BIN
opt/cryptopro/downloads/linux-ia32.tgz
Normal file
BIN
opt/cryptopro/downloads/linux-ia32.tgz
Normal file
Binary file not shown.
BIN
opt/cryptopro/downloads/linux-riscv64_deb.tgz
Normal file
BIN
opt/cryptopro/downloads/linux-riscv64_deb.tgz
Normal file
Binary file not shown.
4
out/feeds/icscisa-kisa/20251208/advisories.ndjson
Normal file
4
out/feeds/icscisa-kisa/20251208/advisories.ndjson
Normal file
@@ -0,0 +1,4 @@
|
||||
{"advisory_id":"ICSA-25-123-01","affected_products":[{"product":"ControlSuite","vendor":"Example Corp","versions":["4.2.0","4.2.1"]}],"cvss":{"score":9.8,"vector":"CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H","version":"3.1"},"cwe":["CWE-269"],"fetched_at":"2025-12-08T02:05:00Z","payload_sha256":"634552b3ed7ffc9abfd691b16a60a68c8d81631b6a99149b97db1b093442a9bb","published":"2025-10-13T12:00:00Z","references":["https://example.com/security/icsa-25-123-01.pdf","https://www.cisa.gov/news-events/ics-advisories/icsa-25-123-01"],"run_id":"icscisa-kisa-20251208T0205Z","severity":"High","signature":{"reason":"unsigned_source","status":"missing"},"source":"icscisa","source_url":"https://www.cisa.gov/news-events/ics-advisories/icsa-25-123-01","summary":"Example Corp ControlSuite RCE via exposed management service.","title":"Example ICS Advisory","updated":"2025-11-30T00:00:00Z"}
|
||||
{"advisory_id":"ICSMA-25-045-01","affected_products":[{"product":"InfusionManager","vendor":"HealthTech","versions":["2.1.0","2.1.1"]}],"cvss":{"score":6.3,"vector":"CVSS:3.1/AV:N/AC:H/PR:L/UI:R/S:U/C:L/I:L/A:L","version":"3.1"},"cwe":["CWE-319"],"fetched_at":"2025-12-08T02:05:00Z","payload_sha256":"b99750b070899a2e6455b3b8b7ca1dafa608cef5eb2c1f8ab40a21c5e22b731f","published":"2025-10-14T09:30:00Z","references":["https://www.cisa.gov/news-events/ics-medical-advisories/icsma-25-045-01","https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2025-11111"],"run_id":"icscisa-kisa-20251208T0205Z","severity":"Medium","signature":{"reason":"unsigned_source","status":"missing"},"source":"icscisa","source_url":"https://www.cisa.gov/news-events/ics-medical-advisories/icsma-25-045-01","summary":"HealthTech infusion pump vulnerabilities including two CVEs.","title":"Example Medical Advisory","updated":"2025-12-01T00:00:00Z"}
|
||||
{"advisory_id":"KISA-2025-5859","affected_products":[{"product":"ControlBoard","vendor":"ACME","versions":["1.0.1.0084","2.0.1.0034"]}],"cvss":{"score":9.8,"vector":"CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H","version":"3.1"},"cwe":["CWE-787"],"fetched_at":"2025-12-08T02:07:10Z","payload_sha256":"e3e599275e19a9b20555bfd1e637b77b97995a8b4b0a8ad348f57e3f1485fe29","published":"2025-11-03T22:53:00Z","references":["https://knvd.krcert.or.kr/rss/securityInfo.do","https://knvd.krcert.or.kr/detailDos.do?IDX=5859"],"run_id":"icscisa-kisa-20251208T0205Z","severity":"High","signature":{"reason":"unsigned_source","status":"missing"},"source":"kisa","source_url":"https://knvd.krcert.or.kr/detailDos.do?IDX=5859","summary":"Remote code execution in ControlBoard service (offline HTML snapshot).","title":"KISA sample advisory 5859","updated":"2025-12-02T00:00:00Z"}
|
||||
{"advisory_id":"KISA-2025-5860","affected_products":[{"product":"Edge","vendor":"NetGateway","versions":["3.4.2","3.4.3"]}],"cvss":{"score":7.3,"vector":"CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:L/I:L/A:L","version":"3.1"},"cwe":["CWE-798"],"fetched_at":"2025-12-08T02:07:45Z","payload_sha256":"1fc74f47e392e8b952d0206583fefcea6db86447094106b462b9ff4c4f06fef1","published":"2025-11-03T22:53:00Z","references":["https://knvd.krcert.or.kr/rss/securityInfo.do","https://knvd.krcert.or.kr/detailDos.do?IDX=5860"],"run_id":"icscisa-kisa-20251208T0205Z","severity":"Medium","signature":{"reason":"unsigned_source","status":"missing"},"source":"kisa","source_url":"https://knvd.krcert.or.kr/detailDos.do?IDX=5860","summary":"Authentication bypass via default credentials in NetGateway appliance.","title":"KISA sample advisory 5860","updated":"2025-12-02T00:00:00Z"}
|
||||
1
out/feeds/icscisa-kisa/20251208/delta.json
Normal file
1
out/feeds/icscisa-kisa/20251208/delta.json
Normal file
@@ -0,0 +1 @@
|
||||
{"run_id":"icscisa-kisa-20251208T0205Z","generated_at":"2025-12-08T02:09:30Z","added":{"icscisa":["ICSA-25-123-01","ICSMA-25-045-01"],"kisa":["KISA-2025-5859","KISA-2025-5860"]},"updated":{"icscisa":[],"kisa":[]},"removed":{"icscisa":[],"kisa":[]},"totals":{"icscisa":{"added":2,"updated":0,"removed":0,"remaining":2},"kisa":{"added":2,"updated":0,"removed":0,"remaining":2},"overall":4},"previous_snapshot_sha256":null}
|
||||
3
out/feeds/icscisa-kisa/20251208/hashes.sha256
Normal file
3
out/feeds/icscisa-kisa/20251208/hashes.sha256
Normal file
@@ -0,0 +1,3 @@
|
||||
0844c46c42461b8eeaf643c01d4cb74ef20d4eec8c984ad5e20c49d65dc57deb advisories.ndjson
|
||||
1273beb246754382d2e013fdc98b11b06965fb97fe9a63735b51cc949746418f delta.json
|
||||
8fedaa9fb2b146a1ef500b0d2e4c1592ddbc770a8f15b7d03723f8034fc12a75 fetch.log
|
||||
220
scripts/crypto/download-cryptopro-playwright.cjs
Normal file
220
scripts/crypto/download-cryptopro-playwright.cjs
Normal file
@@ -0,0 +1,220 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* CryptoPro CSP downloader (Playwright-driven).
|
||||
*
|
||||
* Navigates cryptopro.ru downloads page, optionally fills login form, and selects
|
||||
* Linux packages (.rpm/.deb/.tar.gz/.tgz/.bin) under the CSP Linux section.
|
||||
*
|
||||
* Environment:
|
||||
* - CRYPTOPRO_URL (default: https://cryptopro.ru/products/csp/downloads#latest_csp50r3_linux)
|
||||
* - CRYPTOPRO_EMAIL / CRYPTOPRO_PASSWORD (default demo creds: contact@stella-ops.org / Hoko33JD3nj3aJD.)
|
||||
* - CRYPTOPRO_DRY_RUN (default: 1) -> list candidates, do not download
|
||||
* - CRYPTOPRO_OUTPUT_DIR (default: /opt/cryptopro/downloads)
|
||||
* - CRYPTOPRO_OUTPUT_FILE (optional: force a specific output filename/path)
|
||||
* - CRYPTOPRO_UNPACK (default: 0) -> attempt to unpack tar.gz/tgz/rpm/deb
|
||||
*/
|
||||
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
const { spawnSync } = require('child_process');
|
||||
const { chromium } = require('playwright-chromium');
|
||||
|
||||
const url = process.env.CRYPTOPRO_URL || 'https://cryptopro.ru/products/csp/downloads#latest_csp50r3_linux';
|
||||
const email = process.env.CRYPTOPRO_EMAIL || 'contact@stella-ops.org';
|
||||
const password = process.env.CRYPTOPRO_PASSWORD || 'Hoko33JD3nj3aJD.';
|
||||
const dryRun = (process.env.CRYPTOPRO_DRY_RUN || '1') !== '0';
|
||||
const outputDir = process.env.CRYPTOPRO_OUTPUT_DIR || '/opt/cryptopro/downloads';
|
||||
const outputFile = process.env.CRYPTOPRO_OUTPUT_FILE;
|
||||
const unpack = (process.env.CRYPTOPRO_UNPACK || '0') === '1';
|
||||
const navTimeout = parseInt(process.env.CRYPTOPRO_NAV_TIMEOUT || '60000', 10);
|
||||
|
||||
const linuxPattern = /\.(rpm|deb|tar\.gz|tgz|bin)(\?|$)/i;
|
||||
const debugLinks = (process.env.CRYPTOPRO_DEBUG || '0') === '1';
|
||||
|
||||
function log(msg) {
|
||||
process.stdout.write(`${msg}\n`);
|
||||
}
|
||||
|
||||
function warn(msg) {
|
||||
process.stderr.write(`[WARN] ${msg}\n`);
|
||||
}
|
||||
|
||||
async function maybeLogin(page) {
|
||||
const emailSelector = 'input[type="email"], input[name*="email" i], input[name*="login" i], input[name="name"]';
|
||||
const passwordSelector = 'input[type="password"], input[name*="password" i]';
|
||||
const submitSelector = 'button[type="submit"], input[type="submit"]';
|
||||
|
||||
const emailInput = await page.$(emailSelector);
|
||||
const passwordInput = await page.$(passwordSelector);
|
||||
if (emailInput && passwordInput) {
|
||||
log('[login] Form detected; submitting credentials');
|
||||
await emailInput.fill(email);
|
||||
await passwordInput.fill(password);
|
||||
const submit = await page.$(submitSelector);
|
||||
if (submit) {
|
||||
await Promise.all([
|
||||
page.waitForNavigation({ waitUntil: 'networkidle', timeout: 15000 }).catch(() => {}),
|
||||
submit.click()
|
||||
]);
|
||||
} else {
|
||||
await passwordInput.press('Enter');
|
||||
await page.waitForTimeout(2000);
|
||||
}
|
||||
} else {
|
||||
log('[login] No login form detected; continuing anonymously');
|
||||
}
|
||||
}
|
||||
|
||||
async function findLinuxLinks(page) {
|
||||
const targets = [page, ...page.frames()];
|
||||
const hrefs = [];
|
||||
|
||||
// Collect href/data-href/data-url across main page + frames
|
||||
for (const target of targets) {
|
||||
try {
|
||||
const collected = await target.$$eval('a[href], [data-href], [data-url]', (els) =>
|
||||
els
|
||||
.map((el) => el.getAttribute('href') || el.getAttribute('data-href') || el.getAttribute('data-url'))
|
||||
.filter((href) => typeof href === 'string')
|
||||
);
|
||||
hrefs.push(...collected);
|
||||
} catch (err) {
|
||||
warn(`[scan] Failed to collect links from frame: ${err.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
const unique = Array.from(new Set(hrefs));
|
||||
return unique.filter((href) => linuxPattern.test(href));
|
||||
}
|
||||
|
||||
function unpackIfSupported(filePath) {
|
||||
if (!unpack) {
|
||||
return;
|
||||
}
|
||||
const cwd = path.dirname(filePath);
|
||||
if (filePath.endsWith('.tar.gz') || filePath.endsWith('.tgz')) {
|
||||
const res = spawnSync('tar', ['-xzf', filePath, '-C', cwd], { stdio: 'inherit' });
|
||||
if (res.status === 0) {
|
||||
log(`[unpack] Extracted ${filePath}`);
|
||||
} else {
|
||||
warn(`[unpack] Failed to extract ${filePath}`);
|
||||
}
|
||||
} else if (filePath.endsWith('.rpm')) {
|
||||
const res = spawnSync('bash', ['-lc', `rpm2cpio "${filePath}" | cpio -idmv`], { stdio: 'inherit', cwd });
|
||||
if (res.status === 0) {
|
||||
log(`[unpack] Extracted RPM ${filePath}`);
|
||||
} else {
|
||||
warn(`[unpack] Failed to extract RPM ${filePath}`);
|
||||
}
|
||||
} else if (filePath.endsWith('.deb')) {
|
||||
const res = spawnSync('dpkg-deb', ['-x', filePath, cwd], { stdio: 'inherit' });
|
||||
if (res.status === 0) {
|
||||
log(`[unpack] Extracted DEB ${filePath}`);
|
||||
} else {
|
||||
warn(`[unpack] Failed to extract DEB ${filePath}`);
|
||||
}
|
||||
} else if (filePath.endsWith('.bin')) {
|
||||
const res = spawnSync('chmod', ['+x', filePath], { stdio: 'inherit' });
|
||||
if (res.status === 0) {
|
||||
log(`[unpack] Marked ${filePath} as executable (self-extract expected)`);
|
||||
} else {
|
||||
warn(`[unpack] Could not mark ${filePath} executable`);
|
||||
}
|
||||
} else {
|
||||
warn(`[unpack] Skipping unsupported archive type for ${filePath}`);
|
||||
}
|
||||
}
|
||||
|
||||
async function main() {
|
||||
if (email === 'contact@stella-ops.org' && password === 'Hoko33JD3nj3aJD.') {
|
||||
warn('Using default demo credentials; set CRYPTOPRO_EMAIL/CRYPTOPRO_PASSWORD to real customer creds.');
|
||||
}
|
||||
|
||||
const browser = await chromium.launch({ headless: true });
|
||||
const context = await browser.newContext({
|
||||
acceptDownloads: true,
|
||||
httpCredentials: { username: email, password }
|
||||
});
|
||||
const page = await context.newPage();
|
||||
log(`[nav] Opening ${url}`);
|
||||
try {
|
||||
await page.goto(url, { waitUntil: 'networkidle', timeout: navTimeout });
|
||||
} catch (err) {
|
||||
warn(`[nav] Navigation at networkidle failed (${err.message}); retrying with waitUntil=load`);
|
||||
await page.goto(url, { waitUntil: 'load', timeout: navTimeout });
|
||||
}
|
||||
log(`[nav] Landed on ${page.url()}`);
|
||||
await maybeLogin(page);
|
||||
await page.waitForTimeout(2000);
|
||||
|
||||
const loginGate =
|
||||
page.url().includes('/user') ||
|
||||
(await page.$('form#user-login, form[id*="user-login"], .captcha, #captcha-container'));
|
||||
if (loginGate) {
|
||||
warn('[auth] Login/captcha gate detected on downloads page; automated fetch blocked. Provide session/cookies or run headful to solve manually.');
|
||||
await browser.close();
|
||||
return 2;
|
||||
}
|
||||
|
||||
let links = await findLinuxLinks(page);
|
||||
if (links.length === 0) {
|
||||
await page.waitForTimeout(1500);
|
||||
await page.evaluate(() => window.scrollTo(0, document.body.scrollHeight));
|
||||
await page.waitForTimeout(2000);
|
||||
links = await findLinuxLinks(page);
|
||||
}
|
||||
if (links.length === 0) {
|
||||
if (debugLinks) {
|
||||
const targetDir = outputFile ? path.dirname(outputFile) : outputDir;
|
||||
await fs.promises.mkdir(targetDir, { recursive: true });
|
||||
const debugHtml = path.join(targetDir, 'cryptopro-download-page.html');
|
||||
await fs.promises.writeFile(debugHtml, await page.content(), 'utf8');
|
||||
log(`[debug] Saved page HTML to ${debugHtml}`);
|
||||
const allLinks = await page.$$eval('a[href], [data-href], [data-url]', (els) =>
|
||||
els
|
||||
.map((el) => el.getAttribute('href') || el.getAttribute('data-href') || el.getAttribute('data-url'))
|
||||
.filter((href) => typeof href === 'string')
|
||||
);
|
||||
log(`[debug] Total link-like attributes: ${allLinks.length}`);
|
||||
allLinks.slice(0, 20).forEach((href, idx) => log(` [all ${idx + 1}] ${href}`));
|
||||
}
|
||||
warn('No Linux download links found on page.');
|
||||
await browser.close();
|
||||
return 1;
|
||||
}
|
||||
|
||||
log(`[scan] Found ${links.length} Linux candidate links`);
|
||||
links.slice(0, 10).forEach((href, idx) => log(` [${idx + 1}] ${href}`));
|
||||
|
||||
if (dryRun) {
|
||||
log('[mode] Dry-run enabled; not downloading. Set CRYPTOPRO_DRY_RUN=0 to fetch.');
|
||||
await browser.close();
|
||||
return 0;
|
||||
}
|
||||
|
||||
const target = links[0];
|
||||
log(`[download] Fetching ${target}`);
|
||||
const [download] = await Promise.all([
|
||||
page.waitForEvent('download', { timeout: 30000 }),
|
||||
page.goto(target).catch(() => page.click(`a[href="${target}"]`).catch(() => {}))
|
||||
]);
|
||||
|
||||
const targetDir = outputFile ? path.dirname(outputFile) : outputDir;
|
||||
await fs.promises.mkdir(targetDir, { recursive: true });
|
||||
const suggested = download.suggestedFilename();
|
||||
const outPath = outputFile ? outputFile : path.join(outputDir, suggested);
|
||||
await download.saveAs(outPath);
|
||||
log(`[download] Saved to ${outPath}`);
|
||||
|
||||
unpackIfSupported(outPath);
|
||||
|
||||
await browser.close();
|
||||
return 0;
|
||||
}
|
||||
|
||||
main()
|
||||
.then((code) => process.exit(code))
|
||||
.catch((err) => {
|
||||
console.error(err);
|
||||
process.exit(1);
|
||||
});
|
||||
@@ -27,6 +27,18 @@ cp docs/security/crypto-routing-audit-2025-11-07.md "$DOC_DIR/"
|
||||
cp docs/security/rootpack_ru_package.md "$DOC_DIR/"
|
||||
cp etc/rootpack/ru/crypto.profile.yaml "$CONFIG_DIR/rootpack_ru.crypto.yaml"
|
||||
|
||||
if [ "${INCLUDE_GOST_VALIDATION:-1}" != "0" ]; then
|
||||
candidate="${OPENSSL_GOST_LOG_DIR:-}"
|
||||
if [ -z "$candidate" ]; then
|
||||
candidate="$(ls -d "${ROOT_DIR}"/logs/openssl_gost_validation_* "${ROOT_DIR}"/logs/rootpack_ru_*/openssl_gost 2>/dev/null | sort | tail -n 1 || true)"
|
||||
fi
|
||||
|
||||
if [ -n "$candidate" ] && [ -d "$candidate" ]; then
|
||||
mkdir -p "${DOC_DIR}/gost-validation"
|
||||
cp -r "$candidate" "${DOC_DIR}/gost-validation/latest"
|
||||
fi
|
||||
fi
|
||||
|
||||
shopt -s nullglob
|
||||
for pem in "$ROOT_DIR"/certificates/russian_trusted_*; do
|
||||
cp "$pem" "$TRUST_DIR/"
|
||||
|
||||
@@ -4,6 +4,7 @@ set -euo pipefail
|
||||
ROOT_DIR="$(git rev-parse --show-toplevel)"
|
||||
DEFAULT_LOG_ROOT="${ROOT_DIR}/logs/rootpack_ru_$(date -u +%Y%m%dT%H%M%SZ)"
|
||||
LOG_ROOT="${ROOTPACK_LOG_DIR:-$DEFAULT_LOG_ROOT}"
|
||||
ALLOW_PARTIAL="${ALLOW_PARTIAL:-1}"
|
||||
mkdir -p "$LOG_ROOT"
|
||||
|
||||
PROJECTS=(
|
||||
@@ -11,6 +12,10 @@ PROJECTS=(
|
||||
"src/Scanner/__Tests/StellaOps.Scanner.Worker.Tests/StellaOps.Scanner.Worker.Tests.csproj"
|
||||
"src/Scanner/__Tests/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests.csproj"
|
||||
)
|
||||
if [ "${RUN_SCANNER:-1}" != "1" ]; then
|
||||
PROJECTS=("${PROJECTS[0]}")
|
||||
echo "[rootpack-ru] RUN_SCANNER=0 set; skipping scanner test suites"
|
||||
fi
|
||||
|
||||
run_test() {
|
||||
local project="$1"
|
||||
@@ -38,11 +43,38 @@ run_test() {
|
||||
|
||||
PROJECT_SUMMARY=()
|
||||
for project in "${PROJECTS[@]}"; do
|
||||
run_test "$project"
|
||||
safe_name="$(basename "${project%.csproj}")"
|
||||
PROJECT_SUMMARY+=("$project|$safe_name")
|
||||
echo "[rootpack-ru] Wrote logs for ${project} -> ${LOG_ROOT}/${safe_name}.log"
|
||||
done
|
||||
if run_test "$project"; then
|
||||
PROJECT_SUMMARY+=("$project|$safe_name|PASS")
|
||||
echo "[rootpack-ru] Wrote logs for ${project} -> ${LOG_ROOT}/${safe_name}.log"
|
||||
else
|
||||
PROJECT_SUMMARY+=("$project|$safe_name|FAIL")
|
||||
echo "[rootpack-ru] Test run failed for ${project}; see ${LOG_ROOT}/${safe_name}.log"
|
||||
if [ "${ALLOW_PARTIAL}" != "1" ]; then
|
||||
echo "[rootpack-ru] ALLOW_PARTIAL=0; aborting harness."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
GOST_SUMMARY="skipped (docker not available)"
|
||||
if [ "${RUN_GOST_VALIDATION:-1}" = "1" ]; then
|
||||
if command -v docker >/dev/null 2>&1; then
|
||||
echo "[rootpack-ru] Running OpenSSL GOST validation harness"
|
||||
OPENSSL_GOST_LOG_DIR="${LOG_ROOT}/openssl_gost"
|
||||
if OPENSSL_GOST_LOG_DIR="${OPENSSL_GOST_LOG_DIR}" bash "${ROOT_DIR}/scripts/crypto/validate-openssl-gost.sh"; then
|
||||
if [ -d "${OPENSSL_GOST_LOG_DIR}" ] && [ -f "${OPENSSL_GOST_LOG_DIR}/summary.txt" ]; then
|
||||
GOST_SUMMARY="$(cat "${OPENSSL_GOST_LOG_DIR}/summary.txt")"
|
||||
else
|
||||
GOST_SUMMARY="completed (see logs/openssl_gost_validation_*)"
|
||||
fi
|
||||
else
|
||||
GOST_SUMMARY="failed (see logs/openssl_gost_validation_*)"
|
||||
fi
|
||||
else
|
||||
echo "[rootpack-ru] Docker not available; skipping OpenSSL GOST validation."
|
||||
fi
|
||||
fi
|
||||
|
||||
{
|
||||
echo "RootPack_RU deterministic test harness"
|
||||
@@ -52,9 +84,13 @@ done
|
||||
echo "Projects:"
|
||||
for entry in "${PROJECT_SUMMARY[@]}"; do
|
||||
project_path="${entry%%|*}"
|
||||
safe_name="${entry##*|}"
|
||||
printf ' - %s (log: %s.log, trx: %s.trx)\n' "$project_path" "$safe_name" "$safe_name"
|
||||
rest="${entry#*|}"
|
||||
safe_name="${rest%%|*}"
|
||||
status="${rest##*|}"
|
||||
printf ' - %s (log: %s.log, trx: %s.trx) [%s]\n' "$project_path" "$safe_name" "$safe_name" "$status"
|
||||
done
|
||||
echo ""
|
||||
echo "GOST validation: ${GOST_SUMMARY}"
|
||||
} > "$LOG_ROOT/README.tests"
|
||||
|
||||
echo "Logs and TRX files available under $LOG_ROOT"
|
||||
|
||||
108
scripts/crypto/validate-openssl-gost.sh
Executable file
108
scripts/crypto/validate-openssl-gost.sh
Executable file
@@ -0,0 +1,108 @@
|
||||
#!/usr/bin/env bash
# Validate OpenSSL GOST (md_gost12_256) support by running digest/sign/verify
# steps inside a Docker container and capturing artifacts plus a summary.
# Requires docker on PATH; pulls ${OPENSSL_GOST_IMAGE} (default rnix/openssl-gost).
set -euo pipefail

if ! command -v docker >/dev/null 2>&1; then
  echo "[gost-validate] docker is required but not found on PATH" >&2
  exit 1
fi

ROOT_DIR="$(git rev-parse --show-toplevel)"
TIMESTAMP="$(date -u +%Y%m%dT%H%M%SZ)"
# Callers (e.g. the rootpack test harness) may pin the log dir via OPENSSL_GOST_LOG_DIR.
LOG_ROOT="${OPENSSL_GOST_LOG_DIR:-${ROOT_DIR}/logs/openssl_gost_validation_${TIMESTAMP}}"
IMAGE="${OPENSSL_GOST_IMAGE:-rnix/openssl-gost:latest}"
MOUNT_PATH="${LOG_ROOT}"

# On Windows shells, translate the mount path for Docker Desktop.
UNAME_OUT="$(uname -s || true)"
case "${UNAME_OUT}" in
  MINGW*|MSYS*|CYGWIN*)
    if command -v wslpath >/dev/null 2>&1; then
      # Docker Desktop on Windows prefers Windows-style mount paths.
      MOUNT_PATH="$(wslpath -m "${LOG_ROOT}")"
    fi
    ;;
  *)
    MOUNT_PATH="${LOG_ROOT}"
    ;;
esac

mkdir -p "${LOG_ROOT}"

# Fixed message text so digests are comparable across runs.
cat >"${LOG_ROOT}/message.txt" <<'EOF'
StellaOps OpenSSL GOST validation message (md_gost12_256)
EOF

echo "[gost-validate] Using image ${IMAGE}"
docker pull "${IMAGE}" >/dev/null

# The container script is written into the mounted log dir and executed via /out.
CONTAINER_SCRIPT_PATH="${LOG_ROOT}/container-script.sh"

cat > "${CONTAINER_SCRIPT_PATH}" <<'CONTAINER_SCRIPT'
set -eu

MESSAGE="/out/message.txt"

openssl version -a > /out/openssl-version.txt
openssl engine -c > /out/engine-list.txt

openssl genpkey -engine gost -algorithm gost2012_256 -pkeyopt paramset:A -out /tmp/gost.key.pem >/dev/null
openssl pkey -engine gost -in /tmp/gost.key.pem -pubout -out /out/gost.pub.pem >/dev/null

DIGEST_LINE="$(openssl dgst -engine gost -md_gost12_256 "${MESSAGE}")"
echo "${DIGEST_LINE}" > /out/digest.txt
DIGEST="$(printf "%s" "${DIGEST_LINE}" | awk -F'= ' '{print $2}')"

openssl dgst -engine gost -md_gost12_256 -sign /tmp/gost.key.pem -out /tmp/signature1.bin "${MESSAGE}"
openssl dgst -engine gost -md_gost12_256 -sign /tmp/gost.key.pem -out /tmp/signature2.bin "${MESSAGE}"

openssl dgst -engine gost -md_gost12_256 -verify /out/gost.pub.pem -signature /tmp/signature1.bin "${MESSAGE}" > /out/verify1.txt
openssl dgst -engine gost -md_gost12_256 -verify /out/gost.pub.pem -signature /tmp/signature2.bin "${MESSAGE}" > /out/verify2.txt

SIG1_SHA="$(sha256sum /tmp/signature1.bin | awk '{print $1}')"
SIG2_SHA="$(sha256sum /tmp/signature2.bin | awk '{print $1}')"
MSG_SHA="$(sha256sum "${MESSAGE}" | awk '{print $1}')"

cp /tmp/signature1.bin /out/signature1.bin
cp /tmp/signature2.bin /out/signature2.bin

DETERMINISTIC_BOOL=false
DETERMINISTIC_LABEL="no"
if [ "${SIG1_SHA}" = "${SIG2_SHA}" ]; then
DETERMINISTIC_BOOL=true
DETERMINISTIC_LABEL="yes"
fi

cat > /out/summary.txt <<SUMMARY
OpenSSL GOST validation (Linux engine)
Image: ${VALIDATION_IMAGE:-unknown}
Digest algorithm: md_gost12_256
Message SHA256: ${MSG_SHA}
Digest: ${DIGEST}
Signature1 SHA256: ${SIG1_SHA}
Signature2 SHA256: ${SIG2_SHA}
Signatures deterministic: ${DETERMINISTIC_LABEL}
SUMMARY

cat > /out/summary.json <<SUMMARYJSON
{
"image": "${VALIDATION_IMAGE:-unknown}",
"digest_algorithm": "md_gost12_256",
"message_sha256": "${MSG_SHA}",
"digest": "${DIGEST}",
"signature1_sha256": "${SIG1_SHA}",
"signature2_sha256": "${SIG2_SHA}",
"signatures_deterministic": ${DETERMINISTIC_BOOL}
}
SUMMARYJSON

CONTAINER_SCRIPT

# VALIDATION_IMAGE is consumed by the summary heredocs inside the container.
docker run --rm \
  -e VALIDATION_IMAGE="${IMAGE}" \
  -v "${MOUNT_PATH}:/out" \
  "${IMAGE}" /bin/sh "/out/$(basename "${CONTAINER_SCRIPT_PATH}")"

# Drop the helper script so only validation artifacts remain in LOG_ROOT.
rm -f "${CONTAINER_SCRIPT_PATH}"

echo "[gost-validate] Artifacts written to ${LOG_ROOT}"
echo "[gost-validate] Summary:"
cat "${LOG_ROOT}/summary.txt"
||||
Binary file not shown.
467
scripts/feeds/run_icscisa_kisa_refresh.py
Normal file
467
scripts/feeds/run_icscisa_kisa_refresh.py
Normal file
@@ -0,0 +1,467 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
ICS/KISA feed refresh runner.
|
||||
|
||||
Runs the SOP v0.2 workflow to emit NDJSON advisories, delta, fetch log, and hash
|
||||
manifest under out/feeds/icscisa-kisa/<YYYYMMDD>/.
|
||||
|
||||
Defaults to live fetch with offline-safe fallback to baked-in samples. You can
|
||||
force live/offline via env or CLI flags.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import datetime as dt
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from html import unescape
|
||||
from pathlib import Path
|
||||
from typing import Dict, Iterable, List, Tuple
|
||||
from urllib.error import URLError, HTTPError
|
||||
from urllib.parse import urlparse, urlunparse
|
||||
from urllib.request import Request, urlopen
|
||||
from xml.etree import ElementTree
|
||||
|
||||
|
||||
DEFAULT_OUTPUT_ROOT = Path("out/feeds/icscisa-kisa")
|
||||
DEFAULT_ICSCISA_URL = "https://www.cisa.gov/news-events/ics-advisories/icsa.xml"
|
||||
DEFAULT_KISA_URL = "https://knvd.krcert.or.kr/rss/securityInfo.do"
|
||||
DEFAULT_GATEWAY_HOST = "concelier-webservice"
|
||||
DEFAULT_GATEWAY_SCHEME = "http"
|
||||
USER_AGENT = "StellaOpsFeedRefresh/1.0 (+https://stella-ops.org)"
|
||||
|
||||
|
||||
def utcnow() -> dt.datetime:
    """Return the current time as a timezone-aware UTC datetime.

    Uses ``datetime.now(timezone.utc)`` instead of ``datetime.utcnow()``
    (deprecated since Python 3.12); the result is equivalent: an aware
    datetime with ``tzinfo=timezone.utc``.
    """
    return dt.datetime.now(dt.timezone.utc)
|
||||
|
||||
|
||||
def iso(ts: dt.datetime) -> str:
    """Format a datetime as a second-resolution ISO-8601 string with a 'Z' suffix."""
    return f"{ts:%Y-%m-%dT%H:%M:%SZ}"
|
||||
|
||||
|
||||
def sha256_bytes(data: bytes) -> str:
    """Hex-encoded SHA-256 digest of ``data``."""
    digest = hashlib.sha256()
    digest.update(data)
    return digest.hexdigest()
|
||||
|
||||
|
||||
def strip_html(value: str) -> str:
    """Remove anything that looks like an HTML/XML tag and trim whitespace.

    ``value`` may be None or empty; both yield "".
    """
    text = value or ""
    return re.compile(r"<[^>]+>").sub("", text).strip()
|
||||
|
||||
|
||||
def safe_request(url: str) -> bytes:
    """Fetch ``url`` with the project User-Agent and a 30s timeout; return raw bytes.

    Propagates urllib errors (URLError/HTTPError) to the caller, which is
    responsible for any offline fallback.
    """
    request = Request(url, headers={"User-Agent": USER_AGENT})
    response = urlopen(request, timeout=30)
    try:
        return response.read()
    finally:
        response.close()
|
||||
|
||||
|
||||
def parse_rss_items(xml_bytes: bytes) -> Iterable[Dict[str, str]]:
    """Yield {title, link, description, pub_date} dicts for each <item> in an RSS feed.

    Raises ``ElementTree.ParseError`` on malformed XML. The description is
    HTML-unescaped and tag-stripped; other fields are whitespace-trimmed.
    """

    def field(element: ElementTree.Element, tag: str) -> str:
        # Missing child elements collapse to the empty string.
        return (element.findtext(tag) or "").strip()

    document = ElementTree.fromstring(xml_bytes)
    for node in document.iter("item"):
        yield {
            "title": field(node, "title"),
            "link": field(node, "link"),
            "description": strip_html(unescape(node.findtext("description") or "")),
            "pub_date": field(node, "pubDate"),
        }
|
||||
|
||||
|
||||
def normalize_icscisa_record(item: Dict[str, str], fetched_at: str, run_id: str) -> Dict[str, object]:
    """Map a raw ICS-CISA RSS item onto the common advisory NDJSON schema.

    The advisory id is the title text before the first ':' (falling back to
    "icsa-unknown" for empty titles). ``payload_sha256`` fingerprints the raw
    title/link/description so deltas can detect content changes.
    """
    title = item["title"]
    link = item["link"]
    description = item["description"]

    advisory_id = title.split(":")[0].strip() or "icsa-unknown"
    published = iso(parse_pubdate(item["pub_date"]))
    raw_payload = "\n".join((title, link, description))

    return {
        "advisory_id": advisory_id,
        "source": "icscisa",
        "source_url": link or DEFAULT_ICSCISA_URL,
        "title": title or advisory_id,
        "summary": description or title,
        "published": published,
        "updated": published,
        "severity": "unknown",
        "cvss": None,
        "cwe": [],
        "affected_products": [],
        "references": [link] if link else [],
        "signature": {"status": "missing", "reason": "unsigned_source"},
        "fetched_at": fetched_at,
        "run_id": run_id,
        "payload_sha256": sha256_bytes(raw_payload.encode("utf-8")),
    }
|
||||
|
||||
|
||||
def normalize_kisa_record(item: Dict[str, str], fetched_at: str, run_id: str) -> Dict[str, object]:
    """Map a raw KISA RSS item onto the common advisory NDJSON schema.

    The advisory id comes from ``extract_kisa_id`` (IDX query parameter or the
    first title word). References always include the KISA feed URL in addition
    to the item link. ``payload_sha256`` fingerprints the raw fields for deltas.
    """
    title = item["title"]
    link = item["link"]
    description = item["description"]

    advisory_id = extract_kisa_id(item)
    published = iso(parse_pubdate(item["pub_date"]))
    raw_payload = "\n".join((title, link, description))

    return {
        "advisory_id": advisory_id,
        "source": "kisa",
        "source_url": link or DEFAULT_KISA_URL,
        "title": title or advisory_id,
        "summary": description or title,
        "published": published,
        "updated": published,
        "severity": "unknown",
        "cvss": None,
        "cwe": [],
        "affected_products": [],
        "references": [u for u in (link, DEFAULT_KISA_URL) if u],
        "signature": {"status": "missing", "reason": "unsigned_source"},
        "fetched_at": fetched_at,
        "run_id": run_id,
        "payload_sha256": sha256_bytes(raw_payload.encode("utf-8")),
    }
|
||||
|
||||
|
||||
def extract_kisa_id(item: Dict[str, str]) -> str:
    """Derive a stable advisory id for a KISA RSS item.

    Prefers the numeric IDX query parameter from the detail link
    ("...detailDos.do?IDX=5859" -> "KISA-5859"); otherwise the first word of
    the title; "KISA-unknown" when both are absent.
    """
    idx_match = re.search(r"IDX=([0-9]+)", item["link"])
    if idx_match is not None:
        return "KISA-" + idx_match.group(1)
    title = item["title"]
    if title:
        return title.split()[0].strip()
    return "KISA-unknown"
|
||||
|
||||
|
||||
def parse_pubdate(value: str) -> dt.datetime:
    """Parse an RSS pubDate into an aware UTC datetime.

    Accepts RFC 822/1123 dates — including numeric zone offsets such as
    "+0900", which the previous ``strptime("%Z")`` parser rejected — and
    ISO-8601 strings with an optional trailing "Z". Offsets are normalised
    to UTC. Empty or unparseable input falls back to the current UTC time
    so callers always receive a usable timestamp.
    """
    from email.utils import parsedate_to_datetime  # stdlib RFC 822 parser

    if not value:
        return utcnow()
    try:
        parsed = parsedate_to_datetime(value)
    except (TypeError, ValueError):
        # Older Pythons return None instead of raising; unify below.
        parsed = None
    if parsed is not None:
        if parsed.tzinfo is None:
            # Unknown textual zones come back naive; treat as UTC, matching
            # the previous behaviour of forcing tzinfo=timezone.utc.
            return parsed.replace(tzinfo=dt.timezone.utc)
        return parsed.astimezone(dt.timezone.utc)
    try:
        iso_value = dt.datetime.fromisoformat(value.replace("Z", "+00:00"))
    except ValueError:
        return utcnow()
    return iso_value if iso_value.tzinfo else iso_value.replace(tzinfo=dt.timezone.utc)
|
||||
|
||||
|
||||
def sample_records() -> List[Dict[str, object]]:
    """Baked-in offline fixtures: two ICS-CISA and two KISA advisories.

    Used when live fetching is disabled or fails. ``run_id`` is left empty
    and ``fetched_at`` is stamped at call time; apply_run_metadata()
    overwrites both with the real run values. ``payload_sha256`` values are
    fixed fingerprints of synthetic payloads so deltas between offline runs
    stay stable.
    """
    now_iso = iso(utcnow())
    return [
        {
            "advisory_id": "ICSA-25-123-01",
            "source": "icscisa",
            "source_url": "https://www.cisa.gov/news-events/ics-advisories/icsa-25-123-01",
            "title": "Example ICS Advisory",
            "summary": "Example Corp ControlSuite RCE via exposed management service.",
            "published": "2025-10-13T12:00:00Z",
            "updated": "2025-11-30T00:00:00Z",
            "severity": "High",
            "cvss": {"version": "3.1", "vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H", "score": 9.8},
            "cwe": ["CWE-269"],
            "affected_products": [{"vendor": "Example Corp", "product": "ControlSuite", "versions": ["4.2.0", "4.2.1"]}],
            "references": [
                "https://example.com/security/icsa-25-123-01.pdf",
                "https://www.cisa.gov/news-events/ics-advisories/icsa-25-123-01",
            ],
            "signature": {"status": "missing", "reason": "unsigned_source"},
            "fetched_at": now_iso,
            "run_id": "",
            "payload_sha256": sha256_bytes(b"ICSA-25-123-01 Example ControlSuite advisory payload"),
        },
        {
            "advisory_id": "ICSMA-25-045-01",
            "source": "icscisa",
            "source_url": "https://www.cisa.gov/news-events/ics-medical-advisories/icsma-25-045-01",
            "title": "Example Medical Advisory",
            "summary": "HealthTech infusion pump vulnerabilities including two CVEs.",
            "published": "2025-10-14T09:30:00Z",
            "updated": "2025-12-01T00:00:00Z",
            "severity": "Medium",
            "cvss": {"version": "3.1", "vector": "CVSS:3.1/AV:N/AC:H/PR:L/UI:R/S:U/C:L/I:L/A:L", "score": 6.3},
            "cwe": ["CWE-319"],
            "affected_products": [{"vendor": "HealthTech", "product": "InfusionManager", "versions": ["2.1.0", "2.1.1"]}],
            "references": [
                "https://www.cisa.gov/news-events/ics-medical-advisories/icsma-25-045-01",
                "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2025-11111",
            ],
            "signature": {"status": "missing", "reason": "unsigned_source"},
            "fetched_at": now_iso,
            "run_id": "",
            "payload_sha256": sha256_bytes(b"ICSMA-25-045-01 Example medical advisory payload"),
        },
        {
            "advisory_id": "KISA-2025-5859",
            "source": "kisa",
            "source_url": "https://knvd.krcert.or.kr/detailDos.do?IDX=5859",
            "title": "KISA sample advisory 5859",
            "summary": "Remote code execution in ControlBoard service (offline HTML snapshot).",
            "published": "2025-11-03T22:53:00Z",
            "updated": "2025-12-02T00:00:00Z",
            "severity": "High",
            "cvss": {"version": "3.1", "vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H", "score": 9.8},
            "cwe": ["CWE-787"],
            "affected_products": [{"vendor": "ACME", "product": "ControlBoard", "versions": ["1.0.1.0084", "2.0.1.0034"]}],
            "references": [
                "https://knvd.krcert.or.kr/rss/securityInfo.do",
                "https://knvd.krcert.or.kr/detailDos.do?IDX=5859",
            ],
            "signature": {"status": "missing", "reason": "unsigned_source"},
            "fetched_at": now_iso,
            "run_id": "",
            "payload_sha256": sha256_bytes(b"KISA advisory IDX 5859 cached HTML payload"),
        },
        {
            "advisory_id": "KISA-2025-5860",
            "source": "kisa",
            "source_url": "https://knvd.krcert.or.kr/detailDos.do?IDX=5860",
            "title": "KISA sample advisory 5860",
            "summary": "Authentication bypass via default credentials in NetGateway appliance.",
            "published": "2025-11-03T22:53:00Z",
            "updated": "2025-12-02T00:00:00Z",
            # NOTE(review): CVSS 7.3 with severity "Medium" looks inconsistent
            # (7.0-8.9 is usually "High") — confirm the fixture intent.
            "severity": "Medium",
            "cvss": {"version": "3.1", "vector": "CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:L/I:L/A:L", "score": 7.3},
            "cwe": ["CWE-798"],
            "affected_products": [{"vendor": "NetGateway", "product": "Edge", "versions": ["3.4.2", "3.4.3"]}],
            "references": [
                "https://knvd.krcert.or.kr/rss/securityInfo.do",
                "https://knvd.krcert.or.kr/detailDos.do?IDX=5860",
            ],
            "signature": {"status": "missing", "reason": "unsigned_source"},
            "fetched_at": now_iso,
            "run_id": "",
            "payload_sha256": sha256_bytes(b"KISA advisory IDX 5860 cached HTML payload"),
        },
    ]
|
||||
|
||||
|
||||
def build_records(
    run_id: str,
    fetched_at: str,
    live_fetch: bool,
    offline_only: bool,
    icscisa_url: str,
    kisa_url: str,
) -> Tuple[List[Dict[str, object]], Dict[str, str]]:
    """Collect advisory records from live RSS feeds with offline-sample fallback.

    Returns ``(records, status)`` where status maps each source to either
    "live:<count>" or "offline". Live fetching is attempted only when
    ``live_fetch`` is set and ``offline_only`` is not; each source degrades
    to the baked-in samples independently when its fetch or parse fails.
    """
    samples = sample_records()
    sample_icscisa = [r for r in samples if r["source"] == "icscisa"]
    sample_kisa = [r for r in samples if r["source"] == "kisa"]
    status = {"icscisa": "offline", "kisa": "offline"}
    records: List[Dict[str, object]] = []

    if live_fetch and not offline_only:
        try:
            icscisa_items = list(parse_rss_items(safe_request(icscisa_url)))
            for item in icscisa_items:
                records.append(normalize_icscisa_record(item, fetched_at, run_id))
            status["icscisa"] = f"live:{len(icscisa_items)}"
        except (URLError, HTTPError, ElementTree.ParseError, TimeoutError) as exc:
            print(f"[warn] ICS CISA fetch failed ({exc}); falling back to samples.", file=sys.stderr)

        try:
            kisa_items = list(parse_rss_items(safe_request(kisa_url)))
            for item in kisa_items:
                records.append(normalize_kisa_record(item, fetched_at, run_id))
            status["kisa"] = f"live:{len(kisa_items)}"
        except (URLError, HTTPError, ElementTree.ParseError, TimeoutError) as exc:
            print(f"[warn] KISA fetch failed ({exc}); falling back to samples.", file=sys.stderr)

    # Backfill from samples: ICS-CISA when nothing at all was collected or its
    # live fetch did not succeed; KISA whenever no KISA records are present.
    # (status keys are always initialised, so no further reassignment needed.)
    if not records or not status["icscisa"].startswith("live"):
        records.extend(apply_run_metadata(sample_icscisa, run_id, fetched_at))

    if not any(r["source"] == "kisa" for r in records):
        records.extend(apply_run_metadata(sample_kisa, run_id, fetched_at))

    return records, status
|
||||
|
||||
|
||||
def apply_run_metadata(records: Iterable[Dict[str, object]], run_id: str, fetched_at: str) -> List[Dict[str, object]]:
    """Return shallow copies of ``records`` stamped with this run's metadata.

    Sets run_id/fetched_at on each copy and ensures payload_sha256 is
    populated, hashing the canonical JSON of the original record when absent.
    """
    stamped: List[Dict[str, object]] = []
    for original in records:
        clone = {**original}
        clone["run_id"] = run_id
        clone["fetched_at"] = fetched_at
        if not original.get("payload_sha256"):
            clone["payload_sha256"] = sha256_bytes(json.dumps(original, sort_keys=True).encode("utf-8"))
        stamped.append(clone)
    return stamped
|
||||
|
||||
|
||||
def find_previous_snapshot(base_dir: Path, current_run_date: str) -> Path | None:
|
||||
if not base_dir.exists():
|
||||
return None
|
||||
candidates = sorted(p for p in base_dir.iterdir() if p.is_dir() and p.name != current_run_date)
|
||||
if not candidates:
|
||||
return None
|
||||
return candidates[-1] / "advisories.ndjson"
|
||||
|
||||
|
||||
def load_previous_hash(path: Path | None) -> str | None:
    """SHA-256 hex digest of the previous snapshot file, or None when absent."""
    if path is None:
        return None
    if not path.exists():
        return None
    return sha256_bytes(path.read_bytes())
|
||||
|
||||
|
||||
def compute_delta(new_records: List[Dict[str, object]], previous_path: Path | None) -> Dict[str, object]:
|
||||
prev_records = {}
|
||||
if previous_path and previous_path.exists():
|
||||
with previous_path.open("r", encoding="utf-8") as handle:
|
||||
for line in handle:
|
||||
if line.strip():
|
||||
rec = json.loads(line)
|
||||
prev_records[rec["advisory_id"]] = rec
|
||||
|
||||
new_by_id = {r["advisory_id"]: r for r in new_records}
|
||||
added = [rid for rid in new_by_id if rid not in prev_records]
|
||||
updated = [
|
||||
rid
|
||||
for rid, rec in new_by_id.items()
|
||||
if rid in prev_records and rec.get("payload_sha256") != prev_records[rid].get("payload_sha256")
|
||||
]
|
||||
removed = [rid for rid in prev_records if rid not in new_by_id]
|
||||
|
||||
return {
|
||||
"added": {"icscisa": [rid for rid in added if new_by_id[rid]["source"] == "icscisa"],
|
||||
"kisa": [rid for rid in added if new_by_id[rid]["source"] == "kisa"]},
|
||||
"updated": {"icscisa": [rid for rid in updated if new_by_id[rid]["source"] == "icscisa"],
|
||||
"kisa": [rid for rid in updated if new_by_id[rid]["source"] == "kisa"]},
|
||||
"removed": {"icscisa": [rid for rid in removed if prev_records[rid]["source"] == "icscisa"],
|
||||
"kisa": [rid for rid in removed if prev_records[rid]["source"] == "kisa"]},
|
||||
"totals": {
|
||||
"icscisa": {
|
||||
"added": len([rid for rid in added if new_by_id[rid]["source"] == "icscisa"]),
|
||||
"updated": len([rid for rid in updated if new_by_id[rid]["source"] == "icscisa"]),
|
||||
"removed": len([rid for rid in removed if prev_records[rid]["source"] == "icscisa"]),
|
||||
"remaining": len([rid for rid, rec in new_by_id.items() if rec["source"] == "icscisa"]),
|
||||
},
|
||||
"kisa": {
|
||||
"added": len([rid for rid in added if new_by_id[rid]["source"] == "kisa"]),
|
||||
"updated": len([rid for rid in updated if new_by_id[rid]["source"] == "kisa"]),
|
||||
"removed": len([rid for rid in removed if prev_records[rid]["source"] == "kisa"]),
|
||||
"remaining": len([rid for rid, rec in new_by_id.items() if rec["source"] == "kisa"]),
|
||||
},
|
||||
"overall": len(new_records),
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def write_ndjson(records: List[Dict[str, object]], path: Path) -> None:
    """Serialize records as compact, key-sorted NDJSON (one JSON object per line)."""
    lines = [json.dumps(record, sort_keys=True, separators=(",", ":")) for record in records]
    path.write_text("\n".join(lines) + "\n", encoding="utf-8")
|
||||
|
||||
|
||||
def write_fetch_log(
    path: Path,
    run_id: str,
    start: str,
    end: str,
    status: Dict[str, str],
    gateway_host: str,
    gateway_scheme: str,
    icscisa_url: str,
    kisa_url: str,
    live_fetch: bool,
    offline_only: bool,
) -> None:
    """Write the human-readable fetch log describing this refresh run.

    One key=value line per concern: run identity, cadence/mode, gateway,
    per-source fetch status, and the artifact names produced alongside it.
    """
    icscisa_status = status.get("icscisa", "offline")
    kisa_status = status.get("kisa", "offline")
    body = (
        f"run_id={run_id} start={start} end={end}\n"
        f"sources=icscisa,kisa cadence=weekly backlog_window=60d "
        f"live_fetch={str(live_fetch).lower()} offline_only={str(offline_only).lower()}\n"
        f"gateway={gateway_scheme}://{gateway_host}\n"
        f"icscisa_url={icscisa_url} status={icscisa_status} retries=0\n"
        f"kisa_url={kisa_url} status={kisa_status} retries=0\n"
        "outputs=advisories.ndjson,delta.json,hashes.sha256\n"
    )
    path.write_text(body, encoding="utf-8")
|
||||
|
||||
|
||||
def write_hashes(dir_path: Path) -> None:
    """Emit hashes.sha256 covering the run's three output artifacts.

    One "<hex digest> <filename>" line per artifact, in a fixed order so the
    manifest itself is deterministic.
    """
    names = ("advisories.ndjson", "delta.json", "fetch.log")
    lines = [f"{sha256_bytes((dir_path / name).read_bytes())} {name}" for name in names]
    (dir_path / "hashes.sha256").write_text("\n".join(lines) + "\n", encoding="utf-8")
|
||||
|
||||
|
||||
def main() -> None:
    """CLI entry point: run the ICS/KISA refresh SOP and write run artifacts.

    Writes advisories.ndjson, delta.json, fetch.log and hashes.sha256 under
    <out-dir>/<run-date>/, computing the delta against the most recent prior
    snapshot directory found under <out-dir>.
    """
    parser = argparse.ArgumentParser(description="Run ICS/KISA feed refresh SOP v0.2")
    parser.add_argument("--out-dir", default=str(DEFAULT_OUTPUT_ROOT), help="Base output directory (default: out/feeds/icscisa-kisa)")
    parser.add_argument("--run-date", default=None, help="Override run date (YYYYMMDD)")
    parser.add_argument("--run-id", default=None, help="Override run id")
    parser.add_argument("--live", action="store_true", default=False, help="Force live fetch (default: enabled via env LIVE_FETCH=true)")
    parser.add_argument("--offline", action="store_true", default=False, help="Force offline samples only")
    args = parser.parse_args()

    now = utcnow()
    run_date = args.run_date or now.strftime("%Y%m%d")
    run_id = args.run_id or f"icscisa-kisa-{now.strftime('%Y%m%dT%H%M%SZ')}"
    fetched_at = iso(now)
    start = fetched_at

    # CLI flags take precedence; otherwise env LIVE_FETCH / OFFLINE_SNAPSHOT decide.
    live_fetch = args.live or os.getenv("LIVE_FETCH", "true").lower() == "true"
    offline_only = args.offline or os.getenv("OFFLINE_SNAPSHOT", "false").lower() == "true"

    output_root = Path(args.out_dir)
    output_dir = output_root / run_date
    output_dir.mkdir(parents=True, exist_ok=True)

    # Previous snapshot (if any) is the baseline for the delta computation.
    previous_path = find_previous_snapshot(output_root, run_date)

    gateway_host = os.getenv("FEED_GATEWAY_HOST", DEFAULT_GATEWAY_HOST)
    gateway_scheme = os.getenv("FEED_GATEWAY_SCHEME", DEFAULT_GATEWAY_SCHEME)

    def resolve_feed(url_env: str | None, default_url: str) -> str:
        # An explicitly configured feed URL always wins.
        if url_env:
            return url_env
        parsed = urlparse(default_url)
        # Replace host/scheme to allow on-prem DNS (docker network) defaults.
        rewritten = parsed._replace(netloc=gateway_host, scheme=gateway_scheme)
        return urlunparse(rewritten)

    resolved_icscisa_url = resolve_feed(os.getenv("ICSCISA_FEED_URL"), DEFAULT_ICSCISA_URL)
    resolved_kisa_url = resolve_feed(os.getenv("KISA_FEED_URL"), DEFAULT_KISA_URL)

    records, status = build_records(
        run_id=run_id,
        fetched_at=fetched_at,
        live_fetch=live_fetch,
        offline_only=offline_only,
        icscisa_url=resolved_icscisa_url,
        kisa_url=resolved_kisa_url,
    )

    write_ndjson(records, output_dir / "advisories.ndjson")

    delta = compute_delta(records, previous_path)
    delta_payload = {
        "run_id": run_id,
        "generated_at": iso(utcnow()),
        **delta,
        "previous_snapshot_sha256": load_previous_hash(previous_path),
    }
    (output_dir / "delta.json").write_text(json.dumps(delta_payload, separators=(",", ":")) + "\n", encoding="utf-8")

    end = iso(utcnow())
    write_fetch_log(
        output_dir / "fetch.log",
        run_id,
        start,
        end,
        status,
        gateway_host=gateway_host,
        gateway_scheme=gateway_scheme,
        icscisa_url=resolved_icscisa_url,
        kisa_url=resolved_kisa_url,
        live_fetch=live_fetch and not offline_only,
        offline_only=offline_only,
    )
    # Hash manifest must be written last — it covers the three files above.
    write_hashes(output_dir)

    print(f"[ok] wrote {len(records)} advisories to {output_dir}")
    print(f"  run_id={run_id} live_fetch={live_fetch and not offline_only} offline_only={offline_only}")
    print(f"  gateway={gateway_scheme}://{gateway_host}")
    print(f"  icscisa_url={resolved_icscisa_url}")
    print(f"  kisa_url={resolved_kisa_url}")
    print(f"  status={status}")
    if previous_path:
        print(f"  previous_snapshot={previous_path}")


if __name__ == "__main__":
    main()
|
||||
@@ -7,6 +7,8 @@
|
||||
- `verify_oci_layout.py`: validates OCI layout/index/manifest and blob digests when `OCI=1` is used.
|
||||
- `mirror-create.sh`: convenience wrapper to build + verify thin bundles (optional SIGN_KEY, time anchor, OCI flag).
|
||||
- `mirror-verify.sh`: wrapper around `verify_thin_bundle.py` for quick hash/DSSE checks.
|
||||
- `schedule-export-center-run.sh`: schedules an Export Center run for mirror bundles via HTTP POST; set `EXPORT_CENTER_BASE_URL`, `EXPORT_CENTER_TENANT`, `EXPORT_CENTER_TOKEN` (Bearer), optional `EXPORT_CENTER_PROJECT`; logs to `AUDIT_LOG_PATH` (default `logs/export-center-schedule.log`).
|
||||
- `schedule-export-center-run.sh`: schedules an Export Center run for mirror bundles via HTTP POST; set `EXPORT_CENTER_BASE_URL`, `EXPORT_CENTER_TENANT`, `EXPORT_CENTER_TOKEN` (Bearer), optional `EXPORT_CENTER_PROJECT`; logs to `AUDIT_LOG_PATH` (default `logs/export-center-schedule.log`). Set `EXPORT_CENTER_ARTIFACTS_JSON` to inject bundle metadata into the request payload.
|
||||
- `export-center-wire.sh`: builds `export-center-handoff.json` from `out/mirror/thin/milestone.json`, emits recommended Export Center targets, and (when `EXPORT_CENTER_AUTO_SCHEDULE=1`) calls `schedule-export-center-run.sh` to push the run. Outputs live under `out/mirror/thin/export-center/`.
|
||||
- CI: `.gitea/workflows/mirror-sign.yml` runs this script after signing; scheduling remains opt-in via secrets `EXPORT_CENTER_BASE_URL`, `EXPORT_CENTER_TOKEN`, `EXPORT_CENTER_TENANT`, `EXPORT_CENTER_PROJECT`, `EXPORT_CENTER_AUTO_SCHEDULE`.
|
||||
|
||||
Artifacts live under `out/mirror/thin/`.
|
||||
|
||||
122
scripts/mirror/export-center-wire.sh
Executable file
122
scripts/mirror/export-center-wire.sh
Executable file
@@ -0,0 +1,122 @@
|
||||
#!/usr/bin/env bash
set -euo pipefail

# Prepare Export Center handoff metadata for mirror thin bundles and optionally schedule a run.
# Usage (handoff only):
#   scripts/mirror/export-center-wire.sh
# Usage (handoff + schedule when secrets exist):
#   EXPORT_CENTER_BASE_URL=https://export.example.com \
#   EXPORT_CENTER_TOKEN=token123 \
#   EXPORT_CENTER_TENANT=tenant-a \
#   EXPORT_CENTER_AUTO_SCHEDULE=1 \
#   scripts/mirror/export-center-wire.sh
# Inputs:
#   - MILESTONE_PATH: path to milestone.json (default: out/mirror/thin/milestone.json)
#   - EXPORT_CENTER_OUT_DIR: output directory for handoff files (default: out/mirror/thin/export-center)
#   - EXPORT_CENTER_PROFILE_ID: profile identifier for the Export Center run (default: mirror:thin)
#   - EXPORT_CENTER_TARGETS_JSON: override targets array sent to Export Center (JSON array string)
#   - EXPORT_CENTER_FORMATS_JSON: override formats array (JSON array string; default: ["tar.gz","json","dsse"])
#   - EXPORT_CENTER_AUTO_SCHEDULE: when "1", schedule a run using schedule-export-center-run.sh
#   - EXPORT_CENTER_BASE_URL / EXPORT_CENTER_TENANT / EXPORT_CENTER_PROJECT / EXPORT_CENTER_TOKEN: forwarded to scheduler
#   - EXPORT_CENTER_AUDIT_LOG: optional override for scheduler audit log path

MILESTONE_PATH="${MILESTONE_PATH:-out/mirror/thin/milestone.json}"
OUT_DIR="${EXPORT_CENTER_OUT_DIR:-out/mirror/thin/export-center}"
PROFILE_ID="${EXPORT_CENTER_PROFILE_ID:-mirror:thin}"
FORMATS_JSON="${EXPORT_CENTER_FORMATS_JSON:-[\"tar.gz\",\"json\",\"dsse\"]}"
AUTO_SCHEDULE="${EXPORT_CENTER_AUTO_SCHEDULE:-0}"

HANDOFF_PATH="${OUT_DIR}/export-center-handoff.json"
TARGETS_PATH="${OUT_DIR}/export-center-targets.json"
RESPONSE_PATH="${OUT_DIR}/schedule-response.json"

# Exported so the embedded python heredocs below can read them from the env.
export HANDOFF_PATH TARGETS_PATH RESPONSE_PATH PROFILE_ID MILESTONE_PATH

mkdir -p "${OUT_DIR}"

# Build the handoff JSON (artifact name/path/sha256 triples) from milestone.json.
PROFILE_ID="${PROFILE_ID}" MILESTONE_PATH="${MILESTONE_PATH}" HANDOFF_PATH="${HANDOFF_PATH}" TARGETS_PATH="${TARGETS_PATH}" python3 - <<'PY'
import datetime
import json
import os
import sys
from typing import Dict, Any

milestone_path = os.environ["MILESTONE_PATH"]
handoff_path = os.environ["HANDOFF_PATH"]
targets_path = os.environ["TARGETS_PATH"]
profile = os.environ.get("PROFILE_ID", "mirror:thin")

try:
    with open(milestone_path, encoding="utf-8") as f:
        milestone = json.load(f)
except FileNotFoundError:
    print(f"milestone file not found: {milestone_path}", file=sys.stderr)
    sys.exit(1)

artifacts = []

def add_artifact(name: str, entry: Dict[str, Any] | None) -> None:
    if not isinstance(entry, dict):
        return
    path = entry.get("path")
    sha = entry.get("sha256")
    if path and sha:
        artifacts.append({"name": name, "path": path, "sha256": sha})

add_artifact("manifest", milestone.get("manifest"))
add_artifact("manifest_dsse", milestone.get("dsse"))
add_artifact("bundle", milestone.get("tarball"))
add_artifact("bundle_meta", milestone.get("bundle"))
add_artifact("bundle_meta_dsse", milestone.get("bundle_dsse"))
add_artifact("time_anchor", milestone.get("time_anchor"))

for name, entry in sorted((milestone.get("policies") or {}).items()):
    add_artifact(f"policy_{name}", entry)

handoff = {
    "profileId": profile,
    "generatedAt": datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0).isoformat().replace("+00:00", "Z"),
    "sourceMilestone": os.path.abspath(milestone_path),
    "artifacts": artifacts,
}

with open(handoff_path, "w", encoding="utf-8") as f:
    json.dump(handoff, f, indent=2)

with open(targets_path, "w", encoding="utf-8") as f:
    json.dump([a["name"] for a in artifacts], f)
PY

# Re-read the artifact list as a single-line JSON string for the scheduler.
ARTIFACTS_JSON=$(python3 - <<'PY'
import json
import os
with open(os.environ["HANDOFF_PATH"], encoding="utf-8") as f:
    data = json.load(f)
print(json.dumps(data.get("artifacts", [])))
PY
)
ARTIFACTS_JSON="${ARTIFACTS_JSON//$'\n'/}"

TARGETS_JSON_DEFAULT=$(tr -d '\r\n' < "${TARGETS_PATH}")
TARGETS_JSON="${EXPORT_CENTER_TARGETS_JSON:-$TARGETS_JSON_DEFAULT}"

echo "[info] Export Center handoff written to ${HANDOFF_PATH}"
echo "[info] Recommended targets: ${TARGETS_JSON}"

# Only schedule when explicitly opted in AND the base URL secret is present;
# the reason for skipping is recorded in the response file below.
schedule_note="AUTO_SCHEDULE=0"
if [[ "${AUTO_SCHEDULE}" == "1" ]]; then
  schedule_note="missing EXPORT_CENTER_BASE_URL"
  if [[ -n "${EXPORT_CENTER_BASE_URL:-}" ]]; then
    export EXPORT_CENTER_ARTIFACTS_JSON="${ARTIFACTS_JSON}"
    schedule_note="scheduled"
    bash src/Mirror/StellaOps.Mirror.Creator/schedule-export-center-run.sh "${PROFILE_ID}" "${TARGETS_JSON}" "${FORMATS_JSON}" | tee "${RESPONSE_PATH}"
  fi
fi

# Always leave a response file so downstream steps can rely on its presence.
if [[ ! -f "${RESPONSE_PATH}" ]]; then
  cat > "${RESPONSE_PATH}" <<JSON
{"scheduled": false, "reason": "${schedule_note}"}
JSON
fi

echo "[info] Scheduler response captured at ${RESPONSE_PATH}"
|
||||
13
scripts/signals/reachability-smoke.sh
Executable file
13
scripts/signals/reachability-smoke.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/usr/bin/env bash
set -euo pipefail

# Lightweight smoke for SIGNALS-24-004/005: run reachability scoring + cache/event tests.
# Uses existing unit tests as fixtures; intended for CI and local preflight.
# Arg 1 (optional): test project path. FILTER env overrides the dotnet test filter.
# Runs with --no-build, so the project must already be built in Release.

ROOT="${1:-src/Signals/__Tests/StellaOps.Signals.Tests/StellaOps.Signals.Tests.csproj}"
FILTER="${FILTER:-ReachabilityScoringServiceTests|RuntimeFactsIngestionServiceTests.IngestAsync_AggregatesHits_AndRecomputesReachability|InMemoryEventsPublisherTests}"

echo "[info] Running reachability smoke against ${ROOT}"
dotnet test "${ROOT}" -c Release --no-build --filter "${FILTER}" --logger "console;verbosity=normal"

echo "[info] Reachability smoke succeeded."
|
||||
@@ -89,14 +89,9 @@ public class Sm2AttestorTests
|
||||
new AttestorSigningKeyRegistry(options, TimeProvider.System, NullLogger<AttestorSigningKeyRegistry>.Instance));
|
||||
}
|
||||
|
||||
private void Dispose(bool disposing)
|
||||
{
|
||||
Environment.SetEnvironmentVariable("SM_SOFT_ALLOWED", _gate);
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
Dispose(true);
|
||||
Environment.SetEnvironmentVariable("SM_SOFT_ALLOWED", _gate);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ using StellaOps.Attestor.WebService.Contracts;
|
||||
using StellaOps.Attestor.Core.Bulk;
|
||||
using Microsoft.AspNetCore.Server.Kestrel.Https;
|
||||
using Serilog.Context;
|
||||
using StellaOps.Cryptography.DependencyInjection;
|
||||
|
||||
const string ConfigurationSection = "attestor";
|
||||
|
||||
@@ -52,6 +53,7 @@ var clientCertificateAuthorities = LoadClientCertificateAuthorities(attestorOpti
|
||||
|
||||
builder.Services.AddSingleton(TimeProvider.System);
|
||||
builder.Services.AddSingleton(attestorOptions);
|
||||
builder.Services.AddStellaOpsCryptoRu(builder.Configuration, CryptoProviderRegistryValidator.EnforceRuLinuxDefaults);
|
||||
|
||||
builder.Services.AddRateLimiter(options =>
|
||||
{
|
||||
|
||||
@@ -116,7 +116,8 @@ builder.Host.UseSerilog((context, _, loggerConfiguration) =>
|
||||
});
|
||||
|
||||
var authorityOptions = authorityConfiguration.Options;
|
||||
builder.Services.AddStellaOpsCrypto(authorityOptions.Crypto);
|
||||
CryptoProviderRegistryValidator.EnforceRuLinuxDefaults(authorityOptions.Crypto.Registry);
|
||||
builder.Services.AddStellaOpsCryptoRu(builder.Configuration, CryptoProviderRegistryValidator.EnforceRuLinuxDefaults);
|
||||
builder.Services.AddHostedService<AuthoritySecretHasherInitializer>();
|
||||
var issuerUri = authorityOptions.Issuer;
|
||||
if (issuerUri is null)
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
<!-- Concelier is migrating off MongoDB; strip implicit Mongo2Go/Mongo driver packages inherited from the repo root. -->
|
||||
<PackageReference Remove="Mongo2Go" />
|
||||
<PackageReference Remove="MongoDB.Driver" />
|
||||
<PackageReference Remove="MongoDB.Bson" />
|
||||
</ItemGroup>
|
||||
<ItemGroup Condition="$([System.String]::Copy('$(MSBuildProjectName)').EndsWith('.Tests')) and '$(UseConcelierTestInfra)'=='true'">
|
||||
<PackageReference Include="coverlet.collector" Version="6.0.4" />
|
||||
|
||||
@@ -2,13 +2,13 @@
|
||||
## Role
|
||||
Minimal API host wiring configuration, storage, plugin routines, and job endpoints. Operational surface for health, readiness, and job control.
|
||||
## Scope
|
||||
- Configuration: appsettings.json + etc/concelier.yaml (yaml path = ../etc/concelier.yaml); bind into ConcelierOptions with validation (Only Mongo supported).
|
||||
- Mongo: MongoUrl from options.Storage.Dsn; IMongoClient/IMongoDatabase singletons; default database name fallback (options -> URL -> "concelier").
|
||||
- Services: AddMongoStorage(); AddSourceHttpClients(); RegisterPluginRoutines(configuration, PluginHostOptions).
|
||||
- Bootstrap: MongoBootstrapper.InitializeAsync on startup.
|
||||
- Configuration: appsettings.json + etc/concelier.yaml (yaml path = ../etc/concelier.yaml); bind into ConcelierOptions with PostgreSQL storage enabled by default.
|
||||
- Storage: PostgreSQL only (`Concelier:PostgresStorage:*`). No MongoDB/Mongo2Go; readiness probes issue `SELECT 1` against ConcelierDataSource.
|
||||
- Services: AddConcelierPostgresStorage(); AddSourceHttpClients(); RegisterPluginRoutines(configuration, PluginHostOptions).
|
||||
- Bootstrap: PostgreSQL connectivity verified on startup.
|
||||
- Endpoints (configuration & job control only; root path intentionally unbound):
|
||||
- GET /health -> {status:"healthy"} after options validation binds.
|
||||
- GET /ready -> MongoDB ping; 503 on MongoException/Timeout.
|
||||
- GET /ready -> PostgreSQL connectivity check; degraded if connection fails.
|
||||
- GET /jobs?kind=&limit= -> recent runs.
|
||||
- GET /jobs/{id} -> run detail.
|
||||
- GET /jobs/definitions -> definitions with lastRun.
|
||||
@@ -18,7 +18,7 @@ Minimal API host wiring configuration, storage, plugin routines, and job endpoin
|
||||
- POST /jobs/{*jobKind} with {trigger?,parameters?} -> 202 Accepted (Location:/jobs/{runId}) | 404 | 409 | 423.
|
||||
- PluginHost defaults: BaseDirectory = solution root; PluginsDirectory = "StellaOps.Concelier.PluginBinaries"; SearchPatterns += "StellaOps.Concelier.Plugin.*.dll"; EnsureDirectoryExists = true.
|
||||
## Participants
|
||||
- Core job system; Storage.Mongo; Source.Common HTTP clients; Exporter and Connector plugin routines discover/register jobs.
|
||||
- Core job system; Storage.Postgres; Source.Common HTTP clients; Exporter and Connector plugin routines discover/register jobs.
|
||||
## Interfaces & contracts
|
||||
- Dependency injection boundary for all connectors/exporters; IOptions<ConcelierOptions> validated on start.
|
||||
- Cancellation: pass app.Lifetime.ApplicationStopping to bootstrapper.
|
||||
@@ -30,7 +30,7 @@ Out: business logic of jobs, HTML UI, authn/z (future).
|
||||
- Structured responses with status codes; no stack traces in HTTP bodies; errors mapped cleanly.
|
||||
## Tests
|
||||
- Author and review coverage in `../StellaOps.Concelier.WebService.Tests`.
|
||||
- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`.
|
||||
- Shared fixtures (PostgreSQL-backed harnesses) live in `../StellaOps.Concelier.Testing`.
|
||||
- Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios.
|
||||
|
||||
## Required Reading
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
namespace StellaOps.Concelier.WebService.Diagnostics;
|
||||
|
||||
internal sealed record StorageBootstrapHealth(
|
||||
string Driver,
|
||||
bool Completed,
|
||||
DateTimeOffset? CompletedAt,
|
||||
double? DurationMs);
|
||||
internal sealed record StorageHealth(
|
||||
string Backend,
|
||||
bool Ready,
|
||||
DateTimeOffset? CheckedAt,
|
||||
double? LatencyMs,
|
||||
string? Error);
|
||||
|
||||
internal sealed record TelemetryHealth(
|
||||
bool Enabled,
|
||||
@@ -16,17 +17,11 @@ internal sealed record HealthDocument(
|
||||
string Status,
|
||||
DateTimeOffset StartedAt,
|
||||
double UptimeSeconds,
|
||||
StorageBootstrapHealth Storage,
|
||||
StorageHealth Storage,
|
||||
TelemetryHealth Telemetry);
|
||||
|
||||
internal sealed record MongoReadyHealth(
|
||||
string Status,
|
||||
double? LatencyMs,
|
||||
DateTimeOffset? CheckedAt,
|
||||
string? Error);
|
||||
|
||||
internal sealed record ReadyDocument(
|
||||
string Status,
|
||||
DateTimeOffset StartedAt,
|
||||
double UptimeSeconds,
|
||||
MongoReadyHealth Mongo);
|
||||
StorageHealth Storage);
|
||||
|
||||
@@ -11,8 +11,8 @@ internal sealed class ServiceStatus
|
||||
private DateTimeOffset? _bootstrapCompletedAt;
|
||||
private TimeSpan? _bootstrapDuration;
|
||||
private DateTimeOffset? _lastReadyCheckAt;
|
||||
private TimeSpan? _lastMongoLatency;
|
||||
private string? _lastMongoError;
|
||||
private TimeSpan? _lastStorageLatency;
|
||||
private string? _lastStorageError;
|
||||
private bool _lastReadySucceeded;
|
||||
|
||||
public ServiceStatus(TimeProvider timeProvider)
|
||||
@@ -31,8 +31,8 @@ internal sealed class ServiceStatus
|
||||
BootstrapCompletedAt: _bootstrapCompletedAt,
|
||||
BootstrapDuration: _bootstrapDuration,
|
||||
LastReadyCheckAt: _lastReadyCheckAt,
|
||||
LastMongoLatency: _lastMongoLatency,
|
||||
LastMongoError: _lastMongoError,
|
||||
LastStorageLatency: _lastStorageLatency,
|
||||
LastStorageError: _lastStorageError,
|
||||
LastReadySucceeded: _lastReadySucceeded);
|
||||
}
|
||||
}
|
||||
@@ -45,19 +45,19 @@ internal sealed class ServiceStatus
|
||||
_bootstrapCompletedAt = completedAt;
|
||||
_bootstrapDuration = duration;
|
||||
_lastReadySucceeded = true;
|
||||
_lastMongoLatency = duration;
|
||||
_lastMongoError = null;
|
||||
_lastStorageLatency = duration;
|
||||
_lastStorageError = null;
|
||||
_lastReadyCheckAt = completedAt;
|
||||
}
|
||||
}
|
||||
|
||||
public void RecordMongoCheck(bool success, TimeSpan latency, string? error)
|
||||
public void RecordStorageCheck(bool success, TimeSpan latency, string? error)
|
||||
{
|
||||
lock (_sync)
|
||||
{
|
||||
_lastReadySucceeded = success;
|
||||
_lastMongoLatency = latency;
|
||||
_lastMongoError = success ? null : error;
|
||||
_lastStorageLatency = latency;
|
||||
_lastStorageError = success ? null : error;
|
||||
_lastReadyCheckAt = _timeProvider.GetUtcNow();
|
||||
}
|
||||
}
|
||||
@@ -69,6 +69,6 @@ internal sealed record ServiceHealthSnapshot(
|
||||
DateTimeOffset? BootstrapCompletedAt,
|
||||
TimeSpan? BootstrapDuration,
|
||||
DateTimeOffset? LastReadyCheckAt,
|
||||
TimeSpan? LastMongoLatency,
|
||||
string? LastMongoError,
|
||||
TimeSpan? LastStorageLatency,
|
||||
string? LastStorageError,
|
||||
bool LastReadySucceeded);
|
||||
|
||||
@@ -1,71 +1,71 @@
|
||||
using System.Collections.Generic;
|
||||
using System.Diagnostics;
|
||||
using System.Linq;
|
||||
using System.Reflection;
|
||||
using Microsoft.AspNetCore.Builder;
|
||||
using Microsoft.Extensions.DependencyInjection;
|
||||
using OpenTelemetry.Metrics;
|
||||
using OpenTelemetry.Resources;
|
||||
using OpenTelemetry.Trace;
|
||||
using Serilog;
|
||||
using Serilog.Core;
|
||||
using Serilog.Events;
|
||||
using System.Collections.Generic;
|
||||
using System.Diagnostics;
|
||||
using System.Linq;
|
||||
using System.Reflection;
|
||||
using Microsoft.AspNetCore.Builder;
|
||||
using Microsoft.Extensions.DependencyInjection;
|
||||
using OpenTelemetry.Metrics;
|
||||
using OpenTelemetry.Resources;
|
||||
using OpenTelemetry.Trace;
|
||||
using Serilog;
|
||||
using Serilog.Core;
|
||||
using Serilog.Events;
|
||||
using StellaOps.Concelier.Core.Jobs;
|
||||
using StellaOps.Concelier.Connector.Common.Telemetry;
|
||||
using StellaOps.Concelier.WebService.Diagnostics;
|
||||
using StellaOps.Concelier.WebService.Options;
|
||||
using StellaOps.Ingestion.Telemetry;
|
||||
|
||||
namespace StellaOps.Concelier.WebService.Extensions;
|
||||
|
||||
public static class TelemetryExtensions
|
||||
{
|
||||
public static void ConfigureConcelierTelemetry(this WebApplicationBuilder builder, ConcelierOptions options)
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(builder);
|
||||
ArgumentNullException.ThrowIfNull(options);
|
||||
|
||||
var telemetry = options.Telemetry ?? new ConcelierOptions.TelemetryOptions();
|
||||
|
||||
if (telemetry.EnableLogging)
|
||||
{
|
||||
builder.Host.UseSerilog((context, services, configuration) =>
|
||||
{
|
||||
ConfigureSerilog(configuration, telemetry, builder.Environment.EnvironmentName, builder.Environment.ApplicationName);
|
||||
});
|
||||
}
|
||||
|
||||
if (!telemetry.Enabled || (!telemetry.EnableTracing && !telemetry.EnableMetrics))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
var openTelemetry = builder.Services.AddOpenTelemetry();
|
||||
|
||||
openTelemetry.ConfigureResource(resource =>
|
||||
{
|
||||
var serviceName = telemetry.ServiceName ?? builder.Environment.ApplicationName;
|
||||
var version = Assembly.GetExecutingAssembly().GetName().Version?.ToString() ?? "unknown";
|
||||
|
||||
resource.AddService(serviceName, serviceVersion: version, serviceInstanceId: Environment.MachineName);
|
||||
resource.AddAttributes(new[]
|
||||
{
|
||||
new KeyValuePair<string, object>("deployment.environment", builder.Environment.EnvironmentName),
|
||||
});
|
||||
|
||||
foreach (var attribute in telemetry.ResourceAttributes)
|
||||
{
|
||||
if (string.IsNullOrWhiteSpace(attribute.Key) || attribute.Value is null)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
resource.AddAttributes(new[] { new KeyValuePair<string, object>(attribute.Key, attribute.Value) });
|
||||
}
|
||||
});
|
||||
|
||||
if (telemetry.EnableTracing)
|
||||
{
|
||||
|
||||
namespace StellaOps.Concelier.WebService.Extensions;
|
||||
|
||||
public static class TelemetryExtensions
|
||||
{
|
||||
public static void ConfigureConcelierTelemetry(this WebApplicationBuilder builder, ConcelierOptions options)
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(builder);
|
||||
ArgumentNullException.ThrowIfNull(options);
|
||||
|
||||
var telemetry = options.Telemetry ?? new ConcelierOptions.TelemetryOptions();
|
||||
|
||||
if (telemetry.EnableLogging)
|
||||
{
|
||||
builder.Host.UseSerilog((context, services, configuration) =>
|
||||
{
|
||||
ConfigureSerilog(configuration, telemetry, builder.Environment.EnvironmentName, builder.Environment.ApplicationName);
|
||||
});
|
||||
}
|
||||
|
||||
if (!telemetry.Enabled || (!telemetry.EnableTracing && !telemetry.EnableMetrics))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
var openTelemetry = builder.Services.AddOpenTelemetry();
|
||||
|
||||
openTelemetry.ConfigureResource(resource =>
|
||||
{
|
||||
var serviceName = telemetry.ServiceName ?? builder.Environment.ApplicationName;
|
||||
var version = Assembly.GetExecutingAssembly().GetName().Version?.ToString() ?? "unknown";
|
||||
|
||||
resource.AddService(serviceName, serviceVersion: version, serviceInstanceId: Environment.MachineName);
|
||||
resource.AddAttributes(new[]
|
||||
{
|
||||
new KeyValuePair<string, object>("deployment.environment", builder.Environment.EnvironmentName),
|
||||
});
|
||||
|
||||
foreach (var attribute in telemetry.ResourceAttributes)
|
||||
{
|
||||
if (string.IsNullOrWhiteSpace(attribute.Key) || attribute.Value is null)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
resource.AddAttributes(new[] { new KeyValuePair<string, object>(attribute.Key, attribute.Value) });
|
||||
}
|
||||
});
|
||||
|
||||
if (telemetry.EnableTracing)
|
||||
{
|
||||
openTelemetry.WithTracing(tracing =>
|
||||
{
|
||||
tracing
|
||||
@@ -74,15 +74,15 @@ public static class TelemetryExtensions
|
||||
.AddSource(IngestionTelemetry.ActivitySourceName)
|
||||
.AddAspNetCoreInstrumentation()
|
||||
.AddHttpClientInstrumentation();
|
||||
|
||||
ConfigureExporters(telemetry, tracing);
|
||||
});
|
||||
}
|
||||
|
||||
if (telemetry.EnableMetrics)
|
||||
{
|
||||
openTelemetry.WithMetrics(metrics =>
|
||||
{
|
||||
|
||||
ConfigureExporters(telemetry, tracing);
|
||||
});
|
||||
}
|
||||
|
||||
if (telemetry.EnableMetrics)
|
||||
{
|
||||
openTelemetry.WithMetrics(metrics =>
|
||||
{
|
||||
metrics
|
||||
.AddMeter(JobDiagnostics.MeterName)
|
||||
.AddMeter(SourceDiagnostics.MeterName)
|
||||
@@ -92,131 +92,132 @@ public static class TelemetryExtensions
|
||||
.AddMeter("StellaOps.Concelier.Connector.Vndr.Chromium")
|
||||
.AddMeter("StellaOps.Concelier.Connector.Vndr.Apple")
|
||||
.AddMeter("StellaOps.Concelier.Connector.Vndr.Adobe")
|
||||
.AddMeter("StellaOps.Concelier.VulnExplorer")
|
||||
.AddMeter(JobMetrics.MeterName)
|
||||
.AddAspNetCoreInstrumentation()
|
||||
.AddHttpClientInstrumentation()
|
||||
.AddRuntimeInstrumentation();
|
||||
|
||||
ConfigureExporters(telemetry, metrics);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
private static void ConfigureSerilog(LoggerConfiguration configuration, ConcelierOptions.TelemetryOptions telemetry, string environmentName, string applicationName)
|
||||
{
|
||||
if (!Enum.TryParse(telemetry.MinimumLogLevel, ignoreCase: true, out LogEventLevel level))
|
||||
{
|
||||
level = LogEventLevel.Information;
|
||||
}
|
||||
|
||||
configuration
|
||||
.MinimumLevel.Is(level)
|
||||
.MinimumLevel.Override("Microsoft", LogEventLevel.Warning)
|
||||
.MinimumLevel.Override("Microsoft.Hosting.Lifetime", LogEventLevel.Information)
|
||||
.Enrich.FromLogContext()
|
||||
.Enrich.With<ActivityEnricher>()
|
||||
.Enrich.WithProperty("service.name", telemetry.ServiceName ?? applicationName)
|
||||
.Enrich.WithProperty("deployment.environment", environmentName)
|
||||
.WriteTo.Console(outputTemplate: "[{Timestamp:O}] [{Level:u3}] {Message:lj} {Properties}{NewLine}{Exception}");
|
||||
}
|
||||
|
||||
private static void ConfigureExporters(ConcelierOptions.TelemetryOptions telemetry, TracerProviderBuilder tracing)
|
||||
{
|
||||
if (string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint))
|
||||
{
|
||||
if (telemetry.ExportConsole)
|
||||
{
|
||||
tracing.AddConsoleExporter();
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
tracing.AddOtlpExporter(options =>
|
||||
{
|
||||
options.Endpoint = new Uri(telemetry.OtlpEndpoint);
|
||||
var headers = BuildHeaders(telemetry);
|
||||
if (!string.IsNullOrEmpty(headers))
|
||||
{
|
||||
options.Headers = headers;
|
||||
}
|
||||
});
|
||||
|
||||
if (telemetry.ExportConsole)
|
||||
{
|
||||
tracing.AddConsoleExporter();
|
||||
}
|
||||
}
|
||||
|
||||
private static void ConfigureExporters(ConcelierOptions.TelemetryOptions telemetry, MeterProviderBuilder metrics)
|
||||
{
|
||||
if (string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint))
|
||||
{
|
||||
if (telemetry.ExportConsole)
|
||||
{
|
||||
metrics.AddConsoleExporter();
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
metrics.AddOtlpExporter(options =>
|
||||
{
|
||||
options.Endpoint = new Uri(telemetry.OtlpEndpoint);
|
||||
var headers = BuildHeaders(telemetry);
|
||||
if (!string.IsNullOrEmpty(headers))
|
||||
{
|
||||
options.Headers = headers;
|
||||
}
|
||||
});
|
||||
|
||||
if (telemetry.ExportConsole)
|
||||
{
|
||||
metrics.AddConsoleExporter();
|
||||
}
|
||||
}
|
||||
|
||||
private static string? BuildHeaders(ConcelierOptions.TelemetryOptions telemetry)
|
||||
{
|
||||
if (telemetry.OtlpHeaders.Count == 0)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
return string.Join(",", telemetry.OtlpHeaders
|
||||
.Where(static kvp => !string.IsNullOrWhiteSpace(kvp.Key) && !string.IsNullOrWhiteSpace(kvp.Value))
|
||||
.Select(static kvp => $"{kvp.Key}={kvp.Value}"));
|
||||
}
|
||||
}
|
||||
|
||||
internal sealed class ActivityEnricher : ILogEventEnricher
|
||||
{
|
||||
public void Enrich(LogEvent logEvent, ILogEventPropertyFactory propertyFactory)
|
||||
{
|
||||
var activity = Activity.Current;
|
||||
if (activity is null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if (activity.TraceId != default)
|
||||
{
|
||||
logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("trace_id", activity.TraceId.ToString()));
|
||||
}
|
||||
|
||||
if (activity.SpanId != default)
|
||||
{
|
||||
logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("span_id", activity.SpanId.ToString()));
|
||||
}
|
||||
|
||||
if (activity.ParentSpanId != default)
|
||||
{
|
||||
logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("parent_span_id", activity.ParentSpanId.ToString()));
|
||||
}
|
||||
|
||||
if (!string.IsNullOrEmpty(activity.TraceStateString))
|
||||
{
|
||||
logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("trace_state", activity.TraceStateString));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ConfigureExporters(telemetry, metrics);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
private static void ConfigureSerilog(LoggerConfiguration configuration, ConcelierOptions.TelemetryOptions telemetry, string environmentName, string applicationName)
|
||||
{
|
||||
if (!Enum.TryParse(telemetry.MinimumLogLevel, ignoreCase: true, out LogEventLevel level))
|
||||
{
|
||||
level = LogEventLevel.Information;
|
||||
}
|
||||
|
||||
configuration
|
||||
.MinimumLevel.Is(level)
|
||||
.MinimumLevel.Override("Microsoft", LogEventLevel.Warning)
|
||||
.MinimumLevel.Override("Microsoft.Hosting.Lifetime", LogEventLevel.Information)
|
||||
.Enrich.FromLogContext()
|
||||
.Enrich.With<ActivityEnricher>()
|
||||
.Enrich.WithProperty("service.name", telemetry.ServiceName ?? applicationName)
|
||||
.Enrich.WithProperty("deployment.environment", environmentName)
|
||||
.WriteTo.Console(outputTemplate: "[{Timestamp:O}] [{Level:u3}] {Message:lj} {Properties}{NewLine}{Exception}");
|
||||
}
|
||||
|
||||
private static void ConfigureExporters(ConcelierOptions.TelemetryOptions telemetry, TracerProviderBuilder tracing)
|
||||
{
|
||||
if (string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint))
|
||||
{
|
||||
if (telemetry.ExportConsole)
|
||||
{
|
||||
tracing.AddConsoleExporter();
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
tracing.AddOtlpExporter(options =>
|
||||
{
|
||||
options.Endpoint = new Uri(telemetry.OtlpEndpoint);
|
||||
var headers = BuildHeaders(telemetry);
|
||||
if (!string.IsNullOrEmpty(headers))
|
||||
{
|
||||
options.Headers = headers;
|
||||
}
|
||||
});
|
||||
|
||||
if (telemetry.ExportConsole)
|
||||
{
|
||||
tracing.AddConsoleExporter();
|
||||
}
|
||||
}
|
||||
|
||||
private static void ConfigureExporters(ConcelierOptions.TelemetryOptions telemetry, MeterProviderBuilder metrics)
|
||||
{
|
||||
if (string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint))
|
||||
{
|
||||
if (telemetry.ExportConsole)
|
||||
{
|
||||
metrics.AddConsoleExporter();
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
metrics.AddOtlpExporter(options =>
|
||||
{
|
||||
options.Endpoint = new Uri(telemetry.OtlpEndpoint);
|
||||
var headers = BuildHeaders(telemetry);
|
||||
if (!string.IsNullOrEmpty(headers))
|
||||
{
|
||||
options.Headers = headers;
|
||||
}
|
||||
});
|
||||
|
||||
if (telemetry.ExportConsole)
|
||||
{
|
||||
metrics.AddConsoleExporter();
|
||||
}
|
||||
}
|
||||
|
||||
private static string? BuildHeaders(ConcelierOptions.TelemetryOptions telemetry)
|
||||
{
|
||||
if (telemetry.OtlpHeaders.Count == 0)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
return string.Join(",", telemetry.OtlpHeaders
|
||||
.Where(static kvp => !string.IsNullOrWhiteSpace(kvp.Key) && !string.IsNullOrWhiteSpace(kvp.Value))
|
||||
.Select(static kvp => $"{kvp.Key}={kvp.Value}"));
|
||||
}
|
||||
}
|
||||
|
||||
internal sealed class ActivityEnricher : ILogEventEnricher
|
||||
{
|
||||
public void Enrich(LogEvent logEvent, ILogEventPropertyFactory propertyFactory)
|
||||
{
|
||||
var activity = Activity.Current;
|
||||
if (activity is null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if (activity.TraceId != default)
|
||||
{
|
||||
logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("trace_id", activity.TraceId.ToString()));
|
||||
}
|
||||
|
||||
if (activity.SpanId != default)
|
||||
{
|
||||
logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("span_id", activity.SpanId.ToString()));
|
||||
}
|
||||
|
||||
if (activity.ParentSpanId != default)
|
||||
{
|
||||
logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("parent_span_id", activity.ParentSpanId.ToString()));
|
||||
}
|
||||
|
||||
if (!string.IsNullOrEmpty(activity.TraceStateString))
|
||||
{
|
||||
logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("trace_state", activity.TraceStateString));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,9 +7,13 @@ namespace StellaOps.Concelier.WebService.Options;
|
||||
|
||||
public sealed class ConcelierOptions
|
||||
{
|
||||
[Obsolete("Mongo storage has been removed; use PostgresStorage.")]
|
||||
public StorageOptions Storage { get; set; } = new();
|
||||
|
||||
public PostgresStorageOptions? PostgresStorage { get; set; }
|
||||
public PostgresStorageOptions? PostgresStorage { get; set; } = new PostgresStorageOptions
|
||||
{
|
||||
Enabled = true
|
||||
};
|
||||
|
||||
public PluginOptions Plugins { get; set; } = new();
|
||||
|
||||
@@ -33,6 +37,7 @@ public sealed class ConcelierOptions
|
||||
/// </summary>
|
||||
public AirGapOptions AirGap { get; set; } = new();
|
||||
|
||||
[Obsolete("Mongo storage has been removed; use PostgresStorage.")]
|
||||
public sealed class StorageOptions
|
||||
{
|
||||
public string Driver { get; set; } = "mongo";
|
||||
|
||||
@@ -2,30 +2,17 @@ using System;
|
||||
using System.Collections.Generic;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using StellaOps.Auth.Abstractions;
|
||||
|
||||
namespace StellaOps.Concelier.WebService.Options;
|
||||
|
||||
public static class ConcelierOptionsValidator
|
||||
{
|
||||
public static void Validate(ConcelierOptions options)
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(options);
|
||||
|
||||
if (!string.Equals(options.Storage.Driver, "mongo", StringComparison.OrdinalIgnoreCase))
|
||||
{
|
||||
throw new InvalidOperationException("Only Mongo storage driver is supported (storage.driver == 'mongo').");
|
||||
}
|
||||
|
||||
if (string.IsNullOrWhiteSpace(options.Storage.Dsn))
|
||||
{
|
||||
throw new InvalidOperationException("Storage DSN must be configured.");
|
||||
}
|
||||
|
||||
if (options.Storage.CommandTimeoutSeconds <= 0)
|
||||
{
|
||||
throw new InvalidOperationException("Command timeout must be greater than zero seconds.");
|
||||
}
|
||||
|
||||
|
||||
namespace StellaOps.Concelier.WebService.Options;
|
||||
|
||||
public static class ConcelierOptionsValidator
|
||||
{
|
||||
public static void Validate(ConcelierOptions options)
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(options);
|
||||
|
||||
ValidatePostgres(options);
|
||||
|
||||
options.Telemetry ??= new ConcelierOptions.TelemetryOptions();
|
||||
|
||||
options.Authority ??= new ConcelierOptions.AuthorityOptions();
|
||||
@@ -107,25 +94,25 @@ public static class ConcelierOptionsValidator
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!Enum.TryParse(options.Telemetry.MinimumLogLevel, ignoreCase: true, out LogLevel _))
|
||||
{
|
||||
throw new InvalidOperationException($"Telemetry minimum log level '{options.Telemetry.MinimumLogLevel}' is invalid.");
|
||||
}
|
||||
|
||||
if (!string.IsNullOrWhiteSpace(options.Telemetry.OtlpEndpoint) && !Uri.TryCreate(options.Telemetry.OtlpEndpoint, UriKind.Absolute, out _))
|
||||
{
|
||||
throw new InvalidOperationException("Telemetry OTLP endpoint must be an absolute URI.");
|
||||
}
|
||||
|
||||
foreach (var attribute in options.Telemetry.ResourceAttributes)
|
||||
{
|
||||
if (string.IsNullOrWhiteSpace(attribute.Key))
|
||||
{
|
||||
throw new InvalidOperationException("Telemetry resource attribute keys must be non-empty.");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (!Enum.TryParse(options.Telemetry.MinimumLogLevel, ignoreCase: true, out LogLevel _))
|
||||
{
|
||||
throw new InvalidOperationException($"Telemetry minimum log level '{options.Telemetry.MinimumLogLevel}' is invalid.");
|
||||
}
|
||||
|
||||
if (!string.IsNullOrWhiteSpace(options.Telemetry.OtlpEndpoint) && !Uri.TryCreate(options.Telemetry.OtlpEndpoint, UriKind.Absolute, out _))
|
||||
{
|
||||
throw new InvalidOperationException("Telemetry OTLP endpoint must be an absolute URI.");
|
||||
}
|
||||
|
||||
foreach (var attribute in options.Telemetry.ResourceAttributes)
|
||||
{
|
||||
if (string.IsNullOrWhiteSpace(attribute.Key))
|
||||
{
|
||||
throw new InvalidOperationException("Telemetry resource attribute keys must be non-empty.");
|
||||
}
|
||||
}
|
||||
|
||||
foreach (var header in options.Telemetry.OtlpHeaders)
|
||||
{
|
||||
if (string.IsNullOrWhiteSpace(header.Key))
|
||||
@@ -333,4 +320,50 @@ public static class ConcelierOptionsValidator
|
||||
throw new InvalidOperationException("Evidence bundle pipelineVersion must be provided.");
|
||||
}
|
||||
}
|
||||
|
||||
private static void ValidatePostgres(ConcelierOptions options)
|
||||
{
|
||||
var postgres = options.PostgresStorage ?? new ConcelierOptions.PostgresStorageOptions();
|
||||
options.PostgresStorage = postgres;
|
||||
|
||||
if (!postgres.Enabled)
|
||||
{
|
||||
throw new InvalidOperationException("PostgreSQL storage must be enabled (postgresStorage.enabled).");
|
||||
}
|
||||
|
||||
if (string.IsNullOrWhiteSpace(postgres.ConnectionString))
|
||||
{
|
||||
throw new InvalidOperationException("PostgreSQL connectionString must be configured (postgresStorage.connectionString).");
|
||||
}
|
||||
|
||||
if (postgres.CommandTimeoutSeconds <= 0)
|
||||
{
|
||||
throw new InvalidOperationException("PostgreSQL commandTimeoutSeconds must be greater than zero.");
|
||||
}
|
||||
|
||||
if (postgres.MaxPoolSize < 1)
|
||||
{
|
||||
throw new InvalidOperationException("PostgreSQL maxPoolSize must be greater than zero.");
|
||||
}
|
||||
|
||||
if (postgres.MinPoolSize < 0 || postgres.MinPoolSize > postgres.MaxPoolSize)
|
||||
{
|
||||
throw new InvalidOperationException("PostgreSQL minPoolSize must be between 0 and maxPoolSize.");
|
||||
}
|
||||
|
||||
if (postgres.ConnectionIdleLifetimeSeconds < 0)
|
||||
{
|
||||
throw new InvalidOperationException("PostgreSQL connectionIdleLifetimeSeconds must be zero or greater.");
|
||||
}
|
||||
|
||||
if (postgres.AutoMigrate && string.IsNullOrWhiteSpace(postgres.MigrationsPath))
|
||||
{
|
||||
throw new InvalidOperationException("PostgreSQL migrationsPath must be configured when autoMigrate is enabled.");
|
||||
}
|
||||
|
||||
if (string.IsNullOrWhiteSpace(postgres.SchemaName))
|
||||
{
|
||||
postgres.SchemaName = "vuln";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ using StellaOps.Concelier.Core.Events;
|
||||
using StellaOps.Concelier.Core.Jobs;
|
||||
using StellaOps.Concelier.Core.Observations;
|
||||
using StellaOps.Concelier.Core.Linksets;
|
||||
using StellaOps.Concelier.Core.Diagnostics;
|
||||
using StellaOps.Concelier.Models;
|
||||
using StellaOps.Concelier.WebService.Diagnostics;
|
||||
using ServiceStatus = StellaOps.Concelier.WebService.Diagnostics.ServiceStatus;
|
||||
@@ -54,9 +55,6 @@ using StellaOps.Concelier.Core.Aoc;
|
||||
using StellaOps.Concelier.Core.Raw;
|
||||
using StellaOps.Concelier.RawModels;
|
||||
using StellaOps.Concelier.Storage.Postgres;
|
||||
using StellaOps.Concelier.Storage.Mongo;
|
||||
using StellaOps.Concelier.Storage.Mongo.Advisories;
|
||||
using StellaOps.Concelier.Storage.Mongo.Aliases;
|
||||
using StellaOps.Concelier.Core.Attestation;
|
||||
using StellaOps.Concelier.Core.Signals;
|
||||
using AttestationClaims = StellaOps.Concelier.Core.Attestation.AttestationClaims;
|
||||
@@ -64,8 +62,10 @@ using StellaOps.Concelier.Core.Orchestration;
|
||||
using System.Diagnostics.Metrics;
|
||||
using StellaOps.Concelier.Models.Observations;
|
||||
using StellaOps.Aoc.AspNetCore.Results;
|
||||
using StellaOps.Provenance.Mongo;
|
||||
using HttpResults = Microsoft.AspNetCore.Http.Results;
|
||||
using StellaOps.Concelier.Storage.Mongo.Advisories;
|
||||
using StellaOps.Concelier.Storage.Mongo.Aliases;
|
||||
using StellaOps.Provenance.Mongo;
|
||||
|
||||
namespace StellaOps.Concelier.WebService
|
||||
{
|
||||
@@ -91,9 +91,10 @@ builder.Host.ConfigureAppConfiguration((context, cfg) =>
|
||||
{
|
||||
cfg.AddInMemoryCollection(new Dictionary<string, string?>
|
||||
{
|
||||
{"Concelier:Storage:Dsn", Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "mongodb://localhost:27017/test-health"},
|
||||
{"Concelier:Storage:Driver", "mongo"},
|
||||
{"Concelier:Storage:CommandTimeoutSeconds", "30"},
|
||||
{"Concelier:PostgresStorage:Enabled", "true"},
|
||||
{"Concelier:PostgresStorage:ConnectionString", Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "Host=localhost;Port=5432;Database=concelier_test;Username=postgres;Password=postgres"},
|
||||
{"Concelier:PostgresStorage:CommandTimeoutSeconds", "30"},
|
||||
{"Concelier:PostgresStorage:SchemaName", "vuln"},
|
||||
{"Concelier:Telemetry:Enabled", "false"}
|
||||
});
|
||||
}
|
||||
@@ -125,11 +126,12 @@ if (builder.Environment.IsEnvironment("Testing"))
|
||||
#pragma warning restore ASP0000
|
||||
concelierOptions = tempProvider.GetService<IOptions<ConcelierOptions>>()?.Value ?? new ConcelierOptions
|
||||
{
|
||||
Storage = new ConcelierOptions.StorageOptions
|
||||
PostgresStorage = new ConcelierOptions.PostgresStorageOptions
|
||||
{
|
||||
Dsn = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "mongodb://localhost:27017/test-health",
|
||||
Driver = "mongo",
|
||||
CommandTimeoutSeconds = 30
|
||||
Enabled = true,
|
||||
ConnectionString = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "Host=localhost;Port=5432;Database=concelier_test;Username=postgres;Password=postgres",
|
||||
CommandTimeoutSeconds = 30,
|
||||
SchemaName = "vuln"
|
||||
},
|
||||
Telemetry = new ConcelierOptions.TelemetryOptions
|
||||
{
|
||||
@@ -137,10 +139,18 @@ if (builder.Environment.IsEnvironment("Testing"))
|
||||
}
|
||||
};
|
||||
|
||||
concelierOptions.Storage ??= new ConcelierOptions.StorageOptions();
|
||||
concelierOptions.Storage.Dsn = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "mongodb://localhost:27017/orch-tests";
|
||||
concelierOptions.Storage.Driver = "mongo";
|
||||
concelierOptions.Storage.CommandTimeoutSeconds = concelierOptions.Storage.CommandTimeoutSeconds <= 0 ? 30 : concelierOptions.Storage.CommandTimeoutSeconds;
|
||||
concelierOptions.PostgresStorage ??= new ConcelierOptions.PostgresStorageOptions
|
||||
{
|
||||
Enabled = true,
|
||||
ConnectionString = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "Host=localhost;Port=5432;Database=concelier_test;Username=postgres;Password=postgres",
|
||||
CommandTimeoutSeconds = 30,
|
||||
SchemaName = "vuln"
|
||||
};
|
||||
|
||||
if (string.IsNullOrWhiteSpace(concelierOptions.PostgresStorage.ConnectionString))
|
||||
{
|
||||
concelierOptions.PostgresStorage.ConnectionString = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? string.Empty;
|
||||
}
|
||||
|
||||
ConcelierOptionsPostConfigure.Apply(concelierOptions, contentRootPath);
|
||||
// Skip validation in Testing to allow factory-provided wiring.
|
||||
@@ -149,10 +159,21 @@ else
|
||||
{
|
||||
concelierOptions = builder.Configuration.BindOptions<ConcelierOptions>(postConfigure: (opts, _) =>
|
||||
{
|
||||
var testDsn = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN");
|
||||
if (string.IsNullOrWhiteSpace(opts.Storage.Dsn) && !string.IsNullOrWhiteSpace(testDsn))
|
||||
var testDsn = Environment.GetEnvironmentVariable("CONCELIER_POSTGRES_DSN")
|
||||
?? Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN");
|
||||
|
||||
opts.PostgresStorage ??= new ConcelierOptions.PostgresStorageOptions
|
||||
{
|
||||
opts.Storage.Dsn = testDsn;
|
||||
Enabled = !string.IsNullOrWhiteSpace(testDsn),
|
||||
ConnectionString = testDsn ?? string.Empty,
|
||||
SchemaName = "vuln",
|
||||
CommandTimeoutSeconds = 30
|
||||
};
|
||||
|
||||
if (string.IsNullOrWhiteSpace(opts.PostgresStorage.ConnectionString) && !string.IsNullOrWhiteSpace(testDsn))
|
||||
{
|
||||
opts.PostgresStorage.ConnectionString = testDsn;
|
||||
opts.PostgresStorage.Enabled = true;
|
||||
}
|
||||
|
||||
ConcelierOptionsPostConfigure.Apply(opts, contentRootPath);
|
||||
@@ -179,24 +200,26 @@ builder.Services.AddSingleton<MirrorFileLocator>();
|
||||
|
||||
var isTesting = builder.Environment.IsEnvironment("Testing");
|
||||
|
||||
// Add PostgreSQL storage for LNM linkset cache if configured.
|
||||
// This provides a PostgreSQL-backed implementation of IAdvisoryLinksetStore for the read-through cache.
|
||||
if (concelierOptions.PostgresStorage is { Enabled: true } postgresOptions)
|
||||
// Add PostgreSQL storage for all Concelier persistence.
|
||||
var postgresOptions = concelierOptions.PostgresStorage ?? throw new InvalidOperationException("PostgreSQL storage must be configured.");
|
||||
if (!postgresOptions.Enabled)
|
||||
{
|
||||
builder.Services.AddConcelierPostgresStorage(pgOptions =>
|
||||
{
|
||||
pgOptions.ConnectionString = postgresOptions.ConnectionString;
|
||||
pgOptions.CommandTimeoutSeconds = postgresOptions.CommandTimeoutSeconds;
|
||||
pgOptions.MaxPoolSize = postgresOptions.MaxPoolSize;
|
||||
pgOptions.MinPoolSize = postgresOptions.MinPoolSize;
|
||||
pgOptions.ConnectionIdleLifetimeSeconds = postgresOptions.ConnectionIdleLifetimeSeconds;
|
||||
pgOptions.Pooling = postgresOptions.Pooling;
|
||||
pgOptions.SchemaName = postgresOptions.SchemaName;
|
||||
pgOptions.AutoMigrate = postgresOptions.AutoMigrate;
|
||||
pgOptions.MigrationsPath = postgresOptions.MigrationsPath;
|
||||
});
|
||||
throw new InvalidOperationException("PostgreSQL storage must be enabled.");
|
||||
}
|
||||
|
||||
builder.Services.AddConcelierPostgresStorage(pgOptions =>
|
||||
{
|
||||
pgOptions.ConnectionString = postgresOptions.ConnectionString;
|
||||
pgOptions.CommandTimeoutSeconds = postgresOptions.CommandTimeoutSeconds;
|
||||
pgOptions.MaxPoolSize = postgresOptions.MaxPoolSize;
|
||||
pgOptions.MinPoolSize = postgresOptions.MinPoolSize;
|
||||
pgOptions.ConnectionIdleLifetimeSeconds = postgresOptions.ConnectionIdleLifetimeSeconds;
|
||||
pgOptions.Pooling = postgresOptions.Pooling;
|
||||
pgOptions.SchemaName = postgresOptions.SchemaName;
|
||||
pgOptions.AutoMigrate = postgresOptions.AutoMigrate;
|
||||
pgOptions.MigrationsPath = postgresOptions.MigrationsPath;
|
||||
});
|
||||
|
||||
builder.Services.AddOptions<AdvisoryObservationEventPublisherOptions>()
|
||||
.Bind(builder.Configuration.GetSection("advisoryObservationEvents"))
|
||||
.PostConfigure(options =>
|
||||
@@ -1039,9 +1062,12 @@ var advisoryIngestEndpoint = app.MapPost("/ingest/advisory", async (
|
||||
return Problem(context, "Invalid advisory payload", StatusCodes.Status400BadRequest, ProblemTypes.Validation, ex.Message);
|
||||
}
|
||||
|
||||
var chunkStopwatch = Stopwatch.StartNew();
|
||||
|
||||
try
|
||||
{
|
||||
var result = await rawService.IngestAsync(document, cancellationToken).ConfigureAwait(false);
|
||||
chunkStopwatch.Stop();
|
||||
|
||||
var response = new AdvisoryIngestResponse(
|
||||
result.Record.Id,
|
||||
@@ -1065,10 +1091,21 @@ var advisoryIngestEndpoint = app.MapPost("/ingest/advisory", async (
|
||||
ingestRequest.Source.Vendor ?? "(unknown)",
|
||||
result.Inserted ? "inserted" : "duplicate"));
|
||||
|
||||
var telemetrySource = ingestRequest.Source.Vendor ?? "(unknown)";
|
||||
var (_, _, conflicts) = AdvisoryLinksetNormalization.FromRawLinksetWithConfidence(document.Linkset, providedConfidence: null);
|
||||
var collisionCount = VulnExplorerTelemetry.CountAliasCollisions(conflicts);
|
||||
VulnExplorerTelemetry.RecordIdentifierCollisions(tenant, telemetrySource, collisionCount);
|
||||
VulnExplorerTelemetry.RecordChunkLatency(tenant, telemetrySource, chunkStopwatch.Elapsed);
|
||||
if (VulnExplorerTelemetry.IsWithdrawn(document.Content.Raw))
|
||||
{
|
||||
VulnExplorerTelemetry.RecordWithdrawnStatement(tenant, telemetrySource);
|
||||
}
|
||||
|
||||
return JsonResult(response, statusCode);
|
||||
}
|
||||
catch (ConcelierAocGuardException guardException)
|
||||
{
|
||||
chunkStopwatch.Stop();
|
||||
logger.LogWarning(
|
||||
guardException,
|
||||
"AOC guard rejected advisory ingest tenant={Tenant} upstream={UpstreamId} requestHash={RequestHash} documentHash={DocumentHash} codes={Codes}",
|
||||
@@ -2115,6 +2152,12 @@ var advisoryChunksEndpoint = app.MapGet("/advisories/{advisoryKey}/chunks", asyn
|
||||
buildResult.Response.Entries.Count,
|
||||
duration,
|
||||
guardrailCounts));
|
||||
VulnExplorerTelemetry.RecordChunkRequest(
|
||||
tenant!,
|
||||
result: "ok",
|
||||
cacheHit,
|
||||
buildResult.Response.Entries.Count,
|
||||
duration.TotalMilliseconds);
|
||||
|
||||
return JsonResult(buildResult.Response);
|
||||
});
|
||||
@@ -3269,7 +3312,7 @@ void ApplyNoCache(HttpResponse response)
|
||||
response.Headers["Expires"] = "0";
|
||||
}
|
||||
|
||||
await InitializeMongoAsync(app);
|
||||
await InitializePostgresAsync(app);
|
||||
|
||||
app.MapGet("/health", ([FromServices] IOptions<ConcelierOptions> opts, [FromServices] StellaOps.Concelier.WebService.Diagnostics.ServiceStatus status, HttpContext context) =>
|
||||
{
|
||||
@@ -3278,11 +3321,12 @@ app.MapGet("/health", ([FromServices] IOptions<ConcelierOptions> opts, [FromServ
|
||||
var snapshot = status.CreateSnapshot();
|
||||
var uptimeSeconds = Math.Max((snapshot.CapturedAt - snapshot.StartedAt).TotalSeconds, 0d);
|
||||
|
||||
var storage = new StorageBootstrapHealth(
|
||||
Driver: opts.Value.Storage.Driver,
|
||||
Completed: snapshot.BootstrapCompletedAt is not null,
|
||||
CompletedAt: snapshot.BootstrapCompletedAt,
|
||||
DurationMs: snapshot.BootstrapDuration?.TotalMilliseconds);
|
||||
var storage = new StorageHealth(
|
||||
Backend: "postgres",
|
||||
Ready: snapshot.LastReadySucceeded,
|
||||
CheckedAt: snapshot.LastReadyCheckAt,
|
||||
LatencyMs: snapshot.LastStorageLatency?.TotalMilliseconds,
|
||||
Error: snapshot.LastStorageError);
|
||||
|
||||
var telemetry = new TelemetryHealth(
|
||||
Enabled: opts.Value.Telemetry.Enabled,
|
||||
@@ -3300,24 +3344,32 @@ app.MapGet("/health", ([FromServices] IOptions<ConcelierOptions> opts, [FromServ
|
||||
return JsonResult(response);
|
||||
});
|
||||
|
||||
app.MapGet("/ready", ([FromServices] StellaOps.Concelier.WebService.Diagnostics.ServiceStatus status, HttpContext context) =>
|
||||
app.MapGet("/ready", async (
|
||||
[FromServices] StellaOps.Concelier.WebService.Diagnostics.ServiceStatus status,
|
||||
[FromServices] ConcelierDataSource dataSource,
|
||||
HttpContext context,
|
||||
CancellationToken cancellationToken) =>
|
||||
{
|
||||
ApplyNoCache(context.Response);
|
||||
|
||||
var (ready, latency, error) = await CheckPostgresAsync(dataSource, cancellationToken).ConfigureAwait(false);
|
||||
status.RecordStorageCheck(ready, latency, error);
|
||||
|
||||
var snapshot = status.CreateSnapshot();
|
||||
var uptimeSeconds = Math.Max((snapshot.CapturedAt - snapshot.StartedAt).TotalSeconds, 0d);
|
||||
|
||||
var mongo = new MongoReadyHealth(
|
||||
Status: "bypassed",
|
||||
LatencyMs: null,
|
||||
var storage = new StorageHealth(
|
||||
Backend: "postgres",
|
||||
Ready: ready,
|
||||
CheckedAt: snapshot.LastReadyCheckAt,
|
||||
Error: "mongo disabled");
|
||||
LatencyMs: latency.TotalMilliseconds,
|
||||
Error: error);
|
||||
|
||||
var response = new ReadyDocument(
|
||||
Status: "ready",
|
||||
Status: ready ? "ready" : "degraded",
|
||||
StartedAt: snapshot.StartedAt,
|
||||
UptimeSeconds: uptimeSeconds,
|
||||
Mongo: mongo);
|
||||
Storage: storage);
|
||||
|
||||
return JsonResult(response);
|
||||
});
|
||||
@@ -4019,9 +4071,54 @@ static SignalsSymbolSetResponse ToSymbolSetResponse(AffectedSymbolSet symbolSet)
|
||||
return pluginOptions;
|
||||
}
|
||||
|
||||
static async Task InitializeMongoAsync(WebApplication app)
|
||||
static async Task InitializePostgresAsync(WebApplication app)
|
||||
{
|
||||
await Task.CompletedTask;
|
||||
var dataSource = app.Services.GetService<ConcelierDataSource>();
|
||||
var status = app.Services.GetRequiredService<StellaOps.Concelier.WebService.Diagnostics.ServiceStatus>();
|
||||
|
||||
if (dataSource is null)
|
||||
{
|
||||
status.RecordStorageCheck(false, TimeSpan.Zero, "PostgreSQL storage not configured");
|
||||
return;
|
||||
}
|
||||
|
||||
var stopwatch = Stopwatch.StartNew();
|
||||
try
|
||||
{
|
||||
var (ready, latency, error) = await CheckPostgresAsync(dataSource, CancellationToken.None).ConfigureAwait(false);
|
||||
stopwatch.Stop();
|
||||
status.RecordStorageCheck(ready, latency, error);
|
||||
if (ready)
|
||||
{
|
||||
status.MarkBootstrapCompleted(latency);
|
||||
}
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
stopwatch.Stop();
|
||||
status.RecordStorageCheck(false, stopwatch.Elapsed, ex.Message);
|
||||
}
|
||||
}
|
||||
|
||||
static async Task<(bool Ready, TimeSpan Latency, string? Error)> CheckPostgresAsync(
|
||||
ConcelierDataSource dataSource,
|
||||
CancellationToken cancellationToken)
|
||||
{
|
||||
var stopwatch = Stopwatch.StartNew();
|
||||
try
|
||||
{
|
||||
await using var connection = await dataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false);
|
||||
await using var command = connection.CreateCommand();
|
||||
command.CommandText = "select 1";
|
||||
_ = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false);
|
||||
stopwatch.Stop();
|
||||
return (true, stopwatch.Elapsed, null);
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
stopwatch.Stop();
|
||||
return (false, stopwatch.Elapsed, ex.Message);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -41,4 +41,4 @@
|
||||
OutputItemType="Analyzer"
|
||||
ReferenceOutputAssembly="false" />
|
||||
</ItemGroup>
|
||||
</Project>
|
||||
</Project>
|
||||
|
||||
@@ -185,6 +185,22 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Concelier.Analyze
|
||||
EndProject
|
||||
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Ingestion.Telemetry", "..\__Libraries\StellaOps.Ingestion.Telemetry\StellaOps.Ingestion.Telemetry.csproj", "{85D215EC-DCFE-4F7F-BB07-540DCF66BE8C}"
|
||||
EndProject
|
||||
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.SmRemote", "..\__Libraries\StellaOps.Cryptography.Plugin.SmRemote\StellaOps.Cryptography.Plugin.SmRemote.csproj", "{FCA91451-5D4A-4E75-9268-B253A902A726}"
|
||||
EndProject
|
||||
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.SmRemote.Service", "..\SmRemote\StellaOps.SmRemote.Service\StellaOps.SmRemote.Service.csproj", "{E823EB56-86F4-4989-9480-9F1D8DD780F8}"
|
||||
EndProject
|
||||
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.SmSoft", "..\__Libraries\StellaOps.Cryptography.Plugin.SmSoft\StellaOps.Cryptography.Plugin.SmSoft.csproj", "{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}"
|
||||
EndProject
|
||||
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.DependencyInjection", "..\__Libraries\StellaOps.Cryptography.DependencyInjection\StellaOps.Cryptography.DependencyInjection.csproj", "{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}"
|
||||
EndProject
|
||||
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.Pkcs11Gost", "..\__Libraries\StellaOps.Cryptography.Plugin.Pkcs11Gost\StellaOps.Cryptography.Plugin.Pkcs11Gost.csproj", "{3CC87BD4-38B7-421B-9688-B2ED2B392646}"
|
||||
EndProject
|
||||
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.OpenSslGost", "..\__Libraries\StellaOps.Cryptography.Plugin.OpenSslGost\StellaOps.Cryptography.Plugin.OpenSslGost.csproj", "{27052CD3-98B4-4D37-88F9-7D8B54363F74}"
|
||||
EndProject
|
||||
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.PqSoft", "..\__Libraries\StellaOps.Cryptography.Plugin.PqSoft\StellaOps.Cryptography.Plugin.PqSoft.csproj", "{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}"
|
||||
EndProject
|
||||
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.WineCsp", "..\__Libraries\StellaOps.Cryptography.Plugin.WineCsp\StellaOps.Cryptography.Plugin.WineCsp.csproj", "{98908D4F-1A48-4CED-B2CF-92C3179B44FD}"
|
||||
EndProject
|
||||
Global
|
||||
GlobalSection(SolutionConfigurationPlatforms) = preSolution
|
||||
Debug|Any CPU = Debug|Any CPU
|
||||
@@ -1251,6 +1267,102 @@ Global
|
||||
{85D215EC-DCFE-4F7F-BB07-540DCF66BE8C}.Release|x64.Build.0 = Release|Any CPU
|
||||
{85D215EC-DCFE-4F7F-BB07-540DCF66BE8C}.Release|x86.ActiveCfg = Release|Any CPU
|
||||
{85D215EC-DCFE-4F7F-BB07-540DCF66BE8C}.Release|x86.Build.0 = Release|Any CPU
|
||||
{FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
|
||||
{FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|Any CPU.Build.0 = Debug|Any CPU
|
||||
{FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|x64.ActiveCfg = Debug|Any CPU
|
||||
{FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|x64.Build.0 = Debug|Any CPU
|
||||
{FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|x86.ActiveCfg = Debug|Any CPU
|
||||
{FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|x86.Build.0 = Debug|Any CPU
|
||||
{FCA91451-5D4A-4E75-9268-B253A902A726}.Release|Any CPU.ActiveCfg = Release|Any CPU
|
||||
{FCA91451-5D4A-4E75-9268-B253A902A726}.Release|Any CPU.Build.0 = Release|Any CPU
|
||||
{FCA91451-5D4A-4E75-9268-B253A902A726}.Release|x64.ActiveCfg = Release|Any CPU
|
||||
{FCA91451-5D4A-4E75-9268-B253A902A726}.Release|x64.Build.0 = Release|Any CPU
|
||||
{FCA91451-5D4A-4E75-9268-B253A902A726}.Release|x86.ActiveCfg = Release|Any CPU
|
||||
{FCA91451-5D4A-4E75-9268-B253A902A726}.Release|x86.Build.0 = Release|Any CPU
|
||||
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
|
||||
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|Any CPU.Build.0 = Debug|Any CPU
|
||||
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|x64.ActiveCfg = Debug|Any CPU
|
||||
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|x64.Build.0 = Debug|Any CPU
|
||||
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|x86.ActiveCfg = Debug|Any CPU
|
||||
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|x86.Build.0 = Debug|Any CPU
|
||||
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|Any CPU.ActiveCfg = Release|Any CPU
|
||||
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|Any CPU.Build.0 = Release|Any CPU
|
||||
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|x64.ActiveCfg = Release|Any CPU
|
||||
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|x64.Build.0 = Release|Any CPU
|
||||
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|x86.ActiveCfg = Release|Any CPU
|
||||
{E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|x86.Build.0 = Release|Any CPU
|
||||
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
|
||||
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|Any CPU.Build.0 = Debug|Any CPU
|
||||
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|x64.ActiveCfg = Debug|Any CPU
|
||||
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|x64.Build.0 = Debug|Any CPU
|
||||
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|x86.ActiveCfg = Debug|Any CPU
|
||||
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|x86.Build.0 = Debug|Any CPU
|
||||
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|Any CPU.ActiveCfg = Release|Any CPU
|
||||
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|Any CPU.Build.0 = Release|Any CPU
|
||||
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|x64.ActiveCfg = Release|Any CPU
|
||||
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|x64.Build.0 = Release|Any CPU
|
||||
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|x86.ActiveCfg = Release|Any CPU
|
||||
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|x86.Build.0 = Release|Any CPU
|
||||
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
|
||||
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|Any CPU.Build.0 = Debug|Any CPU
|
||||
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|x64.ActiveCfg = Debug|Any CPU
|
||||
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|x64.Build.0 = Debug|Any CPU
|
||||
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|x86.ActiveCfg = Debug|Any CPU
|
||||
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|x86.Build.0 = Debug|Any CPU
|
||||
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|Any CPU.ActiveCfg = Release|Any CPU
|
||||
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|Any CPU.Build.0 = Release|Any CPU
|
||||
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|x64.ActiveCfg = Release|Any CPU
|
||||
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|x64.Build.0 = Release|Any CPU
|
||||
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|x86.ActiveCfg = Release|Any CPU
|
||||
{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|x86.Build.0 = Release|Any CPU
|
||||
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
|
||||
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|Any CPU.Build.0 = Debug|Any CPU
|
||||
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|x64.ActiveCfg = Debug|Any CPU
|
||||
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|x64.Build.0 = Debug|Any CPU
|
||||
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|x86.ActiveCfg = Debug|Any CPU
|
||||
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|x86.Build.0 = Debug|Any CPU
|
||||
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|Any CPU.ActiveCfg = Release|Any CPU
|
||||
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|Any CPU.Build.0 = Release|Any CPU
|
||||
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|x64.ActiveCfg = Release|Any CPU
|
||||
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|x64.Build.0 = Release|Any CPU
|
||||
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|x86.ActiveCfg = Release|Any CPU
|
||||
{3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|x86.Build.0 = Release|Any CPU
|
||||
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
|
||||
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|Any CPU.Build.0 = Debug|Any CPU
|
||||
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|x64.ActiveCfg = Debug|Any CPU
|
||||
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|x64.Build.0 = Debug|Any CPU
|
||||
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|x86.ActiveCfg = Debug|Any CPU
|
||||
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|x86.Build.0 = Debug|Any CPU
|
||||
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|Any CPU.ActiveCfg = Release|Any CPU
|
||||
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|Any CPU.Build.0 = Release|Any CPU
|
||||
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|x64.ActiveCfg = Release|Any CPU
|
||||
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|x64.Build.0 = Release|Any CPU
|
||||
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|x86.ActiveCfg = Release|Any CPU
|
||||
{27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|x86.Build.0 = Release|Any CPU
|
||||
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
|
||||
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|Any CPU.Build.0 = Debug|Any CPU
|
||||
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|x64.ActiveCfg = Debug|Any CPU
|
||||
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|x64.Build.0 = Debug|Any CPU
|
||||
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|x86.ActiveCfg = Debug|Any CPU
|
||||
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|x86.Build.0 = Debug|Any CPU
|
||||
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|Any CPU.ActiveCfg = Release|Any CPU
|
||||
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|Any CPU.Build.0 = Release|Any CPU
|
||||
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|x64.ActiveCfg = Release|Any CPU
|
||||
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|x64.Build.0 = Release|Any CPU
|
||||
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|x86.ActiveCfg = Release|Any CPU
|
||||
{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|x86.Build.0 = Release|Any CPU
|
||||
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
|
||||
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|Any CPU.Build.0 = Debug|Any CPU
|
||||
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|x64.ActiveCfg = Debug|Any CPU
|
||||
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|x64.Build.0 = Debug|Any CPU
|
||||
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|x86.ActiveCfg = Debug|Any CPU
|
||||
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|x86.Build.0 = Debug|Any CPU
|
||||
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|Any CPU.ActiveCfg = Release|Any CPU
|
||||
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|Any CPU.Build.0 = Release|Any CPU
|
||||
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|x64.ActiveCfg = Release|Any CPU
|
||||
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|x64.Build.0 = Release|Any CPU
|
||||
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|x86.ActiveCfg = Release|Any CPU
|
||||
{98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|x86.Build.0 = Release|Any CPU
|
||||
EndGlobalSection
|
||||
GlobalSection(SolutionProperties) = preSolution
|
||||
HideSolutionNode = FALSE
|
||||
|
||||
@@ -0,0 +1,143 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Diagnostics.Metrics;
|
||||
using System.Linq;
|
||||
using System.Text.Json;
|
||||
using StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Diagnostics;
|
||||
|
||||
/// <summary>
|
||||
/// Metrics exported for Vuln Explorer consumers (fact-only telemetry).
|
||||
/// </summary>
|
||||
public static class VulnExplorerTelemetry
|
||||
{
|
||||
public const string MeterName = "StellaOps.Concelier.VulnExplorer";
|
||||
|
||||
private static readonly Meter Meter = new(MeterName);
|
||||
|
||||
private static readonly Counter<long> IdentifierCollisionCounter = Meter.CreateCounter<long>(
|
||||
"vuln.identifier_collisions_total",
|
||||
unit: "collision",
|
||||
description: "Identifier/alias collisions detected while aggregating linksets for Vuln Explorer.");
|
||||
|
||||
private static readonly Counter<long> WithdrawnStatementCounter = Meter.CreateCounter<long>(
|
||||
"vuln.withdrawn_statements_total",
|
||||
unit: "statement",
|
||||
description: "Withdrawn advisory observations detected by change emitters.");
|
||||
|
||||
private static readonly Counter<long> ChunkRequestCounter = Meter.CreateCounter<long>(
|
||||
"vuln.chunk_requests_total",
|
||||
unit: "request",
|
||||
description: "Advisory chunk requests served for Vuln Explorer evidence panels.");
|
||||
|
||||
private static readonly Histogram<double> ChunkLatencyHistogram = Meter.CreateHistogram<double>(
|
||||
"vuln.chunk_latency_ms",
|
||||
unit: "ms",
|
||||
description: "Latency to build advisory chunks (fact-only) for Vuln Explorer.");
|
||||
|
||||
public static void RecordIdentifierCollisions(string tenant, string? source, int collisions)
|
||||
{
|
||||
if (collisions <= 0 || string.IsNullOrWhiteSpace(tenant))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
var tags = new[]
|
||||
{
|
||||
KeyValuePair.Create<string, object?>("tenant", tenant),
|
||||
KeyValuePair.Create<string, object?>("source", source ?? "unknown")
|
||||
};
|
||||
|
||||
IdentifierCollisionCounter.Add(collisions, tags);
|
||||
}
|
||||
|
||||
public static int CountAliasCollisions(IReadOnlyList<AdvisoryLinksetConflict>? conflicts)
|
||||
{
|
||||
if (conflicts is null || conflicts.Count == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
return conflicts.Count(conflict =>
|
||||
string.Equals(conflict.Reason, "alias-inconsistency", StringComparison.OrdinalIgnoreCase) ||
|
||||
string.Equals(conflict.Field, "aliases", StringComparison.OrdinalIgnoreCase));
|
||||
}
|
||||
|
||||
public static void RecordWithdrawnStatement(string tenant, string? source)
|
||||
{
|
||||
if (string.IsNullOrWhiteSpace(tenant))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
var tags = new[]
|
||||
{
|
||||
KeyValuePair.Create<string, object?>("tenant", tenant),
|
||||
KeyValuePair.Create<string, object?>("source", source ?? "unknown")
|
||||
};
|
||||
|
||||
WithdrawnStatementCounter.Add(1, tags);
|
||||
}
|
||||
|
||||
public static void RecordChunkRequest(string tenant, string result, bool cacheHit, int chunkCount, double latencyMs)
|
||||
{
|
||||
if (string.IsNullOrWhiteSpace(tenant))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
var sanitizedResult = string.IsNullOrWhiteSpace(result) ? "unknown" : result.Trim().ToLowerInvariant();
|
||||
var safeLatency = latencyMs < 0 ? 0d : latencyMs;
|
||||
var normalizedChunkCount = chunkCount < 0 ? 0 : chunkCount;
|
||||
|
||||
var tags = new[]
|
||||
{
|
||||
KeyValuePair.Create<string, object?>("tenant", tenant),
|
||||
KeyValuePair.Create<string, object?>("result", sanitizedResult),
|
||||
KeyValuePair.Create<string, object?>("cache_hit", cacheHit),
|
||||
KeyValuePair.Create<string, object?>("chunk_count", normalizedChunkCount)
|
||||
};
|
||||
|
||||
ChunkRequestCounter.Add(1, tags);
|
||||
ChunkLatencyHistogram.Record(safeLatency, tags);
|
||||
}
|
||||
|
||||
public static void RecordChunkLatency(string tenant, string? source, TimeSpan duration)
|
||||
{
|
||||
if (string.IsNullOrWhiteSpace(tenant))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
var tags = new[]
|
||||
{
|
||||
KeyValuePair.Create<string, object?>("tenant", tenant),
|
||||
KeyValuePair.Create<string, object?>("source", source ?? "unknown")
|
||||
};
|
||||
|
||||
ChunkLatencyHistogram.Record(Math.Max(0, duration.TotalMilliseconds), tags);
|
||||
}
|
||||
|
||||
public static bool IsWithdrawn(JsonElement content)
|
||||
{
|
||||
if (content.ValueKind != JsonValueKind.Object)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (content.TryGetProperty("withdrawn", out var withdrawnElement) &&
|
||||
withdrawnElement.ValueKind == JsonValueKind.True)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
if (content.TryGetProperty("withdrawn_at", out var withdrawnAtElement) &&
|
||||
withdrawnAtElement.ValueKind is JsonValueKind.String)
|
||||
{
|
||||
return !string.IsNullOrWhiteSpace(withdrawnAtElement.GetString());
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -7,7 +7,7 @@ using StellaOps.Concelier.Normalization.SemVer;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
internal static class AdvisoryLinksetNormalization
|
||||
public static class AdvisoryLinksetNormalization
|
||||
{
|
||||
public static AdvisoryLinksetNormalized? FromRawLinkset(RawLinkset linkset)
|
||||
{
|
||||
|
||||
@@ -5,192 +5,194 @@ using StellaOps.Concelier.Models;
|
||||
using StellaOps.Concelier.Models.Observations;
|
||||
using StellaOps.Concelier.RawModels;
|
||||
using StellaOps.Concelier.Core.Linksets;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Observations;
|
||||
|
||||
/// <summary>
|
||||
/// Default implementation of <see cref="IAdvisoryObservationQueryService"/> that projects raw observations for overlay consumers.
|
||||
/// </summary>
|
||||
public sealed class AdvisoryObservationQueryService : IAdvisoryObservationQueryService
|
||||
{
|
||||
private const int DefaultPageSize = 200;
|
||||
private const int MaxPageSize = 500;
|
||||
private readonly IAdvisoryObservationLookup _lookup;
|
||||
|
||||
public AdvisoryObservationQueryService(IAdvisoryObservationLookup lookup)
|
||||
{
|
||||
_lookup = lookup ?? throw new ArgumentNullException(nameof(lookup));
|
||||
}
|
||||
|
||||
public async ValueTask<AdvisoryObservationQueryResult> QueryAsync(
|
||||
AdvisoryObservationQueryOptions options,
|
||||
CancellationToken cancellationToken)
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(options);
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
|
||||
var normalizedTenant = NormalizeTenant(options.Tenant);
|
||||
var normalizedObservationIds = NormalizeSet(options.ObservationIds, static value => value, StringComparer.Ordinal);
|
||||
using StellaOps.Concelier.Core.Diagnostics;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Observations;
|
||||
|
||||
/// <summary>
|
||||
/// Default implementation of <see cref="IAdvisoryObservationQueryService"/> that projects raw observations for overlay consumers.
|
||||
/// </summary>
|
||||
public sealed class AdvisoryObservationQueryService : IAdvisoryObservationQueryService
|
||||
{
|
||||
private const int DefaultPageSize = 200;
|
||||
private const int MaxPageSize = 500;
|
||||
private readonly IAdvisoryObservationLookup _lookup;
|
||||
|
||||
public AdvisoryObservationQueryService(IAdvisoryObservationLookup lookup)
|
||||
{
|
||||
_lookup = lookup ?? throw new ArgumentNullException(nameof(lookup));
|
||||
}
|
||||
|
||||
/// <summary>
/// Executes an observation query: normalizes the tenant and filter sets, pages through the
/// lookup with a cursor, and projects the results plus an aggregate linkset for overlay consumers.
/// </summary>
/// <param name="options">Query filters, paging limit, and optional cursor; must not be null.</param>
/// <param name="cancellationToken">Token used to abort the lookup.</param>
/// <returns>The ordered page of observations, its linkset, and continuation metadata.</returns>
public async ValueTask<AdvisoryObservationQueryResult> QueryAsync(
    AdvisoryObservationQueryOptions options,
    CancellationToken cancellationToken)
{
    ArgumentNullException.ThrowIfNull(options);
    cancellationToken.ThrowIfCancellationRequested();

    var tenant = NormalizeTenant(options.Tenant);
    var observationIds = NormalizeSet(options.ObservationIds, static value => value, StringComparer.Ordinal);
    var aliases = NormalizeSet(options.Aliases, static value => value, StringComparer.OrdinalIgnoreCase);
    var purls = NormalizeSet(options.Purls, static value => value, StringComparer.Ordinal);
    var cpes = NormalizeSet(options.Cpes, static value => value, StringComparer.Ordinal);

    var pageSize = NormalizeLimit(options.Limit);
    // Fetch one row beyond the page size so we can tell whether another page exists.
    var fetchSize = checked(pageSize + 1);
    var cursor = DecodeCursor(options.Cursor);

    var fetched = await _lookup
        .FindByFiltersAsync(
            tenant,
            observationIds,
            aliases,
            purls,
            cpes,
            cursor,
            fetchSize,
            cancellationToken)
        .ConfigureAwait(false);

    // Re-apply the filters defensively in memory and impose a deterministic order:
    // newest first, ties broken by ordinal observation id.
    var ordered = fetched
        .Where(candidate => Matches(candidate, observationIds, aliases, purls, cpes))
        .OrderByDescending(static candidate => candidate.CreatedAt)
        .ThenBy(static candidate => candidate.ObservationId, StringComparer.Ordinal)
        .ToImmutableArray();

    var hasMore = ordered.Length > pageSize;
    var page = hasMore ? ordered.Take(pageSize).ToImmutableArray() : ordered;
    // Encode the last row of the page as the continuation cursor when more rows remain.
    var nextCursor = hasMore ? EncodeCursor(page[^1]) : null;

    var linkset = BuildAggregateLinkset(page);
    return new AdvisoryObservationQueryResult(page, linkset, nextCursor, hasMore);
}
|
||||
|
||||
/// <summary>
/// Returns true when the observation satisfies every non-empty filter set.
/// An empty set means "no constraint" for that dimension.
/// </summary>
private static bool Matches(
    AdvisoryObservation observation,
    ImmutableHashSet<string> observationIds,
    ImmutableHashSet<string> aliases,
    ImmutableHashSet<string> purls,
    ImmutableHashSet<string> cpes)
{
    ArgumentNullException.ThrowIfNull(observation);

    // Short-circuit order mirrors the filter precedence: id, alias, purl, cpe.
    return (observationIds.Count == 0 || observationIds.Contains(observation.ObservationId))
        && (aliases.Count == 0 || observation.Linkset.Aliases.Any(aliases.Contains))
        && (purls.Count == 0 || observation.Linkset.Purls.Any(purls.Contains))
        && (cpes.Count == 0 || observation.Linkset.Cpes.Any(cpes.Contains));
}
|
||||
|
||||
/// <summary>Validates that the tenant is non-blank, then lower-cases it for comparison.</summary>
private static string NormalizeTenant(string tenant)
{
    var validated = Validation.EnsureNotNullOrWhiteSpace(tenant, nameof(tenant));
    return validated.ToLowerInvariant();
}
|
||||
|
||||
/// <summary>
/// Builds a de-duplicated set from the values: trims each entry, drops blanks,
/// applies the projector, and compares with the supplied comparer.
/// Returns an empty set when the input sequence is null.
/// </summary>
private static ImmutableHashSet<string> NormalizeSet(
    IEnumerable<string>? values,
    Func<string, string> projector,
    StringComparer comparer)
{
    if (values is null)
    {
        return ImmutableHashSet<string>.Empty;
    }

    return values
        .Select(value => Validation.TrimToNull(value))
        .Where(static trimmed => trimmed is not null)
        .Select(trimmed => projector(trimmed!))
        .ToImmutableHashSet(comparer);
}
|
||||
|
||||
/// <summary>
/// Resolves the effective page size: missing or non-positive requests fall back to
/// <see cref="DefaultPageSize"/>, and anything larger than <see cref="MaxPageSize"/> is capped.
/// </summary>
private static int NormalizeLimit(int? requestedLimit)
{
    var limit = requestedLimit.GetValueOrDefault();
    if (limit <= 0)
    {
        return DefaultPageSize;
    }

    return Math.Min(limit, MaxPageSize);
}
|
||||
|
||||
/// <summary>
/// Decodes an opaque paging cursor of the form base64(UTF8("ticks:observationId")).
/// Returns null for absent cursors; throws <see cref="FormatException"/> for malformed ones
/// (any unexpected failure is wrapped so callers see a single exception type).
/// </summary>
private static AdvisoryObservationCursor? DecodeCursor(string? cursor)
{
    if (string.IsNullOrWhiteSpace(cursor))
    {
        return null;
    }

    try
    {
        var payload = Encoding.UTF8.GetString(Convert.FromBase64String(cursor.Trim()));

        var separator = payload.IndexOf(':');
        // The separator must leave both a non-empty ticks prefix and id suffix.
        if (separator <= 0 || separator >= payload.Length - 1)
        {
            throw new FormatException("Cursor is malformed.");
        }

        if (!long.TryParse(payload.AsSpan(0, separator), NumberStyles.Integer, CultureInfo.InvariantCulture, out var ticks))
        {
            throw new FormatException("Cursor timestamp is invalid.");
        }

        // Out-of-range ticks throw here and are wrapped as "Cursor is malformed." below.
        var createdAt = new DateTimeOffset(new DateTime(ticks, DateTimeKind.Utc));

        var observationId = payload[(separator + 1)..];
        if (string.IsNullOrWhiteSpace(observationId))
        {
            throw new FormatException("Cursor observation id is missing.");
        }

        return new AdvisoryObservationCursor(createdAt, observationId);
    }
    catch (FormatException)
    {
        throw;
    }
    catch (Exception ex)
    {
        throw new FormatException("Cursor is malformed.", ex);
    }
}
|
||||
|
||||
private static string? EncodeCursor(AdvisoryObservation observation)
|
||||
{
|
||||
if (observation is null)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
var normalizedPurls = NormalizeSet(options.Purls, static value => value, StringComparer.Ordinal);
|
||||
var normalizedCpes = NormalizeSet(options.Cpes, static value => value, StringComparer.Ordinal);
|
||||
|
||||
var limit = NormalizeLimit(options.Limit);
|
||||
var fetchSize = checked(limit + 1);
|
||||
|
||||
var cursor = DecodeCursor(options.Cursor);
|
||||
|
||||
var observations = await _lookup
|
||||
.FindByFiltersAsync(
|
||||
normalizedTenant,
|
||||
normalizedObservationIds,
|
||||
normalizedAliases,
|
||||
normalizedPurls,
|
||||
normalizedCpes,
|
||||
cursor,
|
||||
fetchSize,
|
||||
cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
var ordered = observations
|
||||
.Where(observation => Matches(observation, normalizedObservationIds, normalizedAliases, normalizedPurls, normalizedCpes))
|
||||
.OrderByDescending(static observation => observation.CreatedAt)
|
||||
.ThenBy(static observation => observation.ObservationId, StringComparer.Ordinal)
|
||||
.ToImmutableArray();
|
||||
|
||||
var hasMore = ordered.Length > limit;
|
||||
var page = hasMore ? ordered.Take(limit).ToImmutableArray() : ordered;
|
||||
var nextCursor = hasMore ? EncodeCursor(page[^1]) : null;
|
||||
|
||||
var linkset = BuildAggregateLinkset(page);
|
||||
RecordIdentifierCollisions(normalizedTenant, linkset);
|
||||
return new AdvisoryObservationQueryResult(page, linkset, nextCursor, hasMore);
|
||||
}
|
||||
|
||||
/// <summary>
/// Returns true when the observation satisfies every non-empty filter set.
/// An empty set means "no constraint" for that dimension.
/// </summary>
private static bool Matches(
    AdvisoryObservation observation,
    ImmutableHashSet<string> observationIds,
    ImmutableHashSet<string> aliases,
    ImmutableHashSet<string> purls,
    ImmutableHashSet<string> cpes)
{
    ArgumentNullException.ThrowIfNull(observation);

    // Short-circuit order mirrors the filter precedence: id, alias, purl, cpe.
    return (observationIds.Count == 0 || observationIds.Contains(observation.ObservationId))
        && (aliases.Count == 0 || observation.Linkset.Aliases.Any(aliases.Contains))
        && (purls.Count == 0 || observation.Linkset.Purls.Any(purls.Contains))
        && (cpes.Count == 0 || observation.Linkset.Cpes.Any(cpes.Contains));
}
|
||||
|
||||
/// <summary>Validates that the tenant is non-blank, then lower-cases it for comparison.</summary>
private static string NormalizeTenant(string tenant)
{
    var validated = Validation.EnsureNotNullOrWhiteSpace(tenant, nameof(tenant));
    return validated.ToLowerInvariant();
}
|
||||
|
||||
/// <summary>
/// Builds a de-duplicated set from the values: trims each entry, drops blanks,
/// applies the projector, and compares with the supplied comparer.
/// Returns an empty set when the input sequence is null.
/// </summary>
private static ImmutableHashSet<string> NormalizeSet(
    IEnumerable<string>? values,
    Func<string, string> projector,
    StringComparer comparer)
{
    if (values is null)
    {
        return ImmutableHashSet<string>.Empty;
    }

    return values
        .Select(value => Validation.TrimToNull(value))
        .Where(static trimmed => trimmed is not null)
        .Select(trimmed => projector(trimmed!))
        .ToImmutableHashSet(comparer);
}
|
||||
|
||||
/// <summary>
/// Resolves the effective page size: missing or non-positive requests fall back to
/// <see cref="DefaultPageSize"/>, and anything larger than <see cref="MaxPageSize"/> is capped.
/// </summary>
private static int NormalizeLimit(int? requestedLimit)
{
    var limit = requestedLimit.GetValueOrDefault();
    if (limit <= 0)
    {
        return DefaultPageSize;
    }

    return Math.Min(limit, MaxPageSize);
}
|
||||
|
||||
/// <summary>
/// Decodes an opaque paging cursor of the form base64(UTF8("ticks:observationId")).
/// Returns null for absent cursors; throws <see cref="FormatException"/> for malformed ones
/// (any unexpected failure is wrapped so callers see a single exception type).
/// </summary>
private static AdvisoryObservationCursor? DecodeCursor(string? cursor)
{
    if (string.IsNullOrWhiteSpace(cursor))
    {
        return null;
    }

    try
    {
        var payload = Encoding.UTF8.GetString(Convert.FromBase64String(cursor.Trim()));

        var separator = payload.IndexOf(':');
        // The separator must leave both a non-empty ticks prefix and id suffix.
        if (separator <= 0 || separator >= payload.Length - 1)
        {
            throw new FormatException("Cursor is malformed.");
        }

        if (!long.TryParse(payload.AsSpan(0, separator), NumberStyles.Integer, CultureInfo.InvariantCulture, out var ticks))
        {
            throw new FormatException("Cursor timestamp is invalid.");
        }

        // Out-of-range ticks throw here and are wrapped as "Cursor is malformed." below.
        var createdAt = new DateTimeOffset(new DateTime(ticks, DateTimeKind.Utc));

        var observationId = payload[(separator + 1)..];
        if (string.IsNullOrWhiteSpace(observationId))
        {
            throw new FormatException("Cursor observation id is missing.");
        }

        return new AdvisoryObservationCursor(createdAt, observationId);
    }
    catch (FormatException)
    {
        throw;
    }
    catch (Exception ex)
    {
        throw new FormatException("Cursor is malformed.", ex);
    }
}
|
||||
|
||||
/// <summary>
/// Encodes an observation's paging position as base64(UTF8("ticks:observationId")),
/// the inverse of <see cref="DecodeCursor"/>. Returns null when no observation is supplied.
/// </summary>
private static string? EncodeCursor(AdvisoryObservation observation)
{
    if (observation is null)
    {
        return null;
    }

    var ticks = observation.CreatedAt.UtcTicks.ToString(CultureInfo.InvariantCulture);
    var payloadBytes = Encoding.UTF8.GetBytes($"{ticks}:{observation.ObservationId}");
    return Convert.ToBase64String(payloadBytes);
}
|
||||
@@ -283,4 +285,18 @@ public sealed class AdvisoryObservationQueryService : IAdvisoryObservationQueryS
|
||||
.ThenBy(static c => string.Join('|', c.Values ?? Array.Empty<string>()), StringComparer.Ordinal)
|
||||
.ToImmutableArray());
|
||||
}
|
||||
|
||||
/// <summary>
/// Emits telemetry for alias identifier collisions found in the aggregate linkset's conflicts.
/// No-op when the conflict list is unset or empty.
/// </summary>
private static void RecordIdentifierCollisions(string tenant, AdvisoryObservationLinksetAggregate linkset)
{
    if (linkset.Conflicts.IsDefaultOrEmpty)
    {
        return;
    }

    // Only "aliases" field conflicts whose reason mentions an alias count as collisions;
    // a zero count is still recorded once conflicts exist.
    var aliasCollisions = linkset.Conflicts.Count(static conflict =>
        string.Equals(conflict.Field, "aliases", StringComparison.OrdinalIgnoreCase) &&
        conflict.Reason.Contains("alias", StringComparison.OrdinalIgnoreCase));

    VulnExplorerTelemetry.RecordIdentifierCollisions(tenant, source: null, aliasCollisions);
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using StellaOps.Concelier.Core.Diagnostics;
|
||||
|
||||
namespace StellaOps.Concelier.Core.Risk;
|
||||
|
||||
@@ -177,6 +178,7 @@ public sealed class AdvisoryFieldChangeEmitter : IAdvisoryFieldChangeEmitter
|
||||
_logger.LogInformation(
|
||||
"Emitted withdrawn observation notification for {ObservationId}",
|
||||
previousSignal.ObservationId);
|
||||
VulnExplorerTelemetry.RecordWithdrawnStatement(tenantId, previousSignal.Provenance.Vendor);
|
||||
|
||||
return notification;
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user