diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..40f566f1f --- /dev/null +++ b/.dockerignore @@ -0,0 +1,23 @@ +.git +.gitignore +.gitea +.venv +bin +obj +**/bin +**/obj +local-nugets +.nuget +**/node_modules +**/dist +**/coverage +**/*.user +**/*.suo +**/*.cache +**/.vscode +**/.idea +**/.DS_Store +**/TestResults +**/out +**/packages +/tmp diff --git a/.gitea/workflows/icscisa-kisa-refresh.yml b/.gitea/workflows/icscisa-kisa-refresh.yml new file mode 100644 index 000000000..a5a2a8f25 --- /dev/null +++ b/.gitea/workflows/icscisa-kisa-refresh.yml @@ -0,0 +1,68 @@ +name: ICS/KISA Feed Refresh + +on: + schedule: + - cron: '0 2 * * MON' + workflow_dispatch: + inputs: + live_fetch: + description: 'Attempt live RSS fetch (fallback to samples on failure)' + required: false + default: true + type: boolean + offline_snapshot: + description: 'Force offline samples only (no network)' + required: false + default: false + type: boolean + +jobs: + refresh: + runs-on: ubuntu-22.04 + permissions: + contents: read + env: + ICSCISA_FEED_URL: ${{ secrets.ICSCISA_FEED_URL }} + KISA_FEED_URL: ${{ secrets.KISA_FEED_URL }} + FEED_GATEWAY_HOST: concelier-webservice + FEED_GATEWAY_SCHEME: http + LIVE_FETCH: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.live_fetch || 'true' }} + OFFLINE_SNAPSHOT: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.offline_snapshot || 'false' }} + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set run metadata + id: meta + run: | + RUN_DATE=$(date -u +%Y%m%d) + RUN_ID="icscisa-kisa-$(date -u +%Y%m%dT%H%M%SZ)" + echo "run_date=$RUN_DATE" >> $GITHUB_OUTPUT + echo "run_id=$RUN_ID" >> $GITHUB_OUTPUT + echo "RUN_DATE=$RUN_DATE" >> $GITHUB_ENV + echo "RUN_ID=$RUN_ID" >> $GITHUB_ENV + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Run ICS/KISA refresh + run: | + python scripts/feeds/run_icscisa_kisa_refresh.py \ + --out-dir out/feeds/icscisa-kisa \ + --run-date "${{ steps.meta.outputs.run_date }}" \ + --run-id "${{ steps.meta.outputs.run_id }}" + + - name: Show fetch log + run: cat out/feeds/icscisa-kisa/${{ steps.meta.outputs.run_date }}/fetch.log + + - name: Upload refresh artifacts + uses: actions/upload-artifact@v4 + with: + name: icscisa-kisa-${{ steps.meta.outputs.run_date }} + path: out/feeds/icscisa-kisa/${{ steps.meta.outputs.run_date }} + if-no-files-found: error + retention-days: 21 diff --git a/.gitea/workflows/mirror-sign.yml b/.gitea/workflows/mirror-sign.yml index 7c4ceb679..ff9b60080 100644 --- a/.gitea/workflows/mirror-sign.yml +++ b/.gitea/workflows/mirror-sign.yml @@ -46,6 +46,16 @@ jobs: run: | scripts/mirror/verify_thin_bundle.py out/mirror/thin/mirror-thin-v1.tar.gz + - name: Prepare Export Center handoff (metadata + optional schedule) + run: | + scripts/mirror/export-center-wire.sh + env: + EXPORT_CENTER_BASE_URL: ${{ secrets.EXPORT_CENTER_BASE_URL }} + EXPORT_CENTER_TOKEN: ${{ secrets.EXPORT_CENTER_TOKEN }} + EXPORT_CENTER_TENANT: ${{ secrets.EXPORT_CENTER_TENANT }} + EXPORT_CENTER_PROJECT: ${{ secrets.EXPORT_CENTER_PROJECT }} + EXPORT_CENTER_AUTO_SCHEDULE: ${{ secrets.EXPORT_CENTER_AUTO_SCHEDULE }} + - name: Upload signed artifacts uses: actions/upload-artifact@v4 with: @@ -57,5 +67,8 @@ jobs: out/mirror/thin/tuf/ out/mirror/thin/oci/ out/mirror/thin/milestone.json + out/mirror/thin/export-center/export-center-handoff.json + out/mirror/thin/export-center/export-center-targets.json + 
out/mirror/thin/export-center/schedule-response.json if-no-files-found: error retention-days: 14 diff --git a/.gitea/workflows/signals-dsse-sign.yml b/.gitea/workflows/signals-dsse-sign.yml index 01bb7f54c..ebd73ba4f 100644 --- a/.gitea/workflows/signals-dsse-sign.yml +++ b/.gitea/workflows/signals-dsse-sign.yml @@ -28,6 +28,8 @@ jobs: COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }} OUT_DIR: ${{ github.event.inputs.out_dir || 'evidence-locker/signals/2025-12-01' }} COSIGN_ALLOW_DEV_KEY: ${{ github.event.inputs.allow_dev_key || '0' }} + CI_EVIDENCE_LOCKER_TOKEN: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN || vars.CI_EVIDENCE_LOCKER_TOKEN }} + EVIDENCE_LOCKER_URL: ${{ secrets.EVIDENCE_LOCKER_URL || vars.EVIDENCE_LOCKER_URL }} steps: - name: Checkout uses: actions/checkout@v4 @@ -90,9 +92,9 @@ jobs: retention-days: 90 - name: Push to Evidence Locker - if: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN != '' && env.EVIDENCE_LOCKER_URL != '' }} + if: ${{ env.CI_EVIDENCE_LOCKER_TOKEN != '' && env.EVIDENCE_LOCKER_URL != '' }} env: - TOKEN: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN }} + TOKEN: ${{ env.CI_EVIDENCE_LOCKER_TOKEN }} URL: ${{ env.EVIDENCE_LOCKER_URL }} run: | tar -cf /tmp/signals-dsse.tar -C "$OUT_DIR" . @@ -102,7 +104,7 @@ jobs: echo "Pushed to Evidence Locker" - name: Evidence Locker skip notice - if: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN == '' || env.EVIDENCE_LOCKER_URL == '' }} + if: ${{ env.CI_EVIDENCE_LOCKER_TOKEN == '' || env.EVIDENCE_LOCKER_URL == '' }} run: | echo "::notice::Evidence Locker push skipped (CI_EVIDENCE_LOCKER_TOKEN or EVIDENCE_LOCKER_URL not set)" echo "Artifacts available as workflow artifact for manual ingestion" diff --git a/.gitea/workflows/signals-evidence-locker.yml b/.gitea/workflows/signals-evidence-locker.yml index 0f7883abc..faaa67cc6 100644 --- a/.gitea/workflows/signals-evidence-locker.yml +++ b/.gitea/workflows/signals-evidence-locker.yml @@ -2,6 +2,14 @@ name: signals-evidence-locker on: workflow_dispatch: inputs: + out_dir: + description: "Output directory containing signed artifacts" + required: false + default: "evidence-locker/signals/2025-12-05" + allow_dev_key: + description: "Allow dev key fallback (1=yes, 0=no)" + required: false + default: "0" retention_target: description: "Retention days target" required: false @@ -12,7 +20,12 @@ jobs: runs-on: ubuntu-latest env: MODULE_ROOT: docs/modules/signals - OUT_DIR: evidence-locker/signals/2025-12-05 + OUT_DIR: ${{ github.event.inputs.out_dir || 'evidence-locker/signals/2025-12-05' }} + COSIGN_ALLOW_DEV_KEY: ${{ github.event.inputs.allow_dev_key || '0' }} + COSIGN_PRIVATE_KEY_B64: ${{ secrets.COSIGN_PRIVATE_KEY_B64 }} + COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }} + EVIDENCE_LOCKER_URL: ${{ secrets.EVIDENCE_LOCKER_URL || vars.EVIDENCE_LOCKER_URL }} + CI_EVIDENCE_LOCKER_TOKEN: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN || vars.CI_EVIDENCE_LOCKER_TOKEN }} steps: - name: Checkout uses: actions/checkout@v4 @@ -20,6 +33,21 @@ jobs: - name: Task Pack offline bundle fixtures run: python3 scripts/packs/run-fixtures-check.sh + - name: Install cosign + uses: sigstore/cosign-installer@v3 + with: + cosign-release: 'v2.2.4' + + - name: Verify artifacts exist + run: | + cd "$MODULE_ROOT" + sha256sum -c SHA256SUMS + + - name: Sign signals artifacts + run: | + chmod +x tools/cosign/sign-signals.sh + OUT_DIR="${OUT_DIR}" tools/cosign/sign-signals.sh + - name: Build deterministic signals evidence tar run: | set -euo pipefail @@ -52,16 +80,17 @@ jobs: /tmp/signals-evidence.tar.sha256 - name: Push to Evidence Locker - if: ${{ 
secrets.CI_EVIDENCE_LOCKER_TOKEN != '' && env.EVIDENCE_LOCKER_URL != '' }} + if: ${{ env.CI_EVIDENCE_LOCKER_TOKEN != '' && env.EVIDENCE_LOCKER_URL != '' }} env: - TOKEN: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN }} + TOKEN: ${{ env.CI_EVIDENCE_LOCKER_TOKEN }} URL: ${{ env.EVIDENCE_LOCKER_URL }} run: | - curl -f -X PUT "$URL/signals/2025-12-05/signals-evidence.tar" \ + upload_path="${OUT_DIR#evidence-locker/}" + curl -f -X PUT "$URL/${upload_path}/signals-evidence.tar" \ -H "Authorization: Bearer $TOKEN" \ --data-binary @/tmp/signals-evidence.tar - name: Skip push (missing secret or URL) - if: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN == '' || env.EVIDENCE_LOCKER_URL == '' }} + if: ${{ env.CI_EVIDENCE_LOCKER_TOKEN == '' || env.EVIDENCE_LOCKER_URL == '' }} run: | echo "Locker push skipped: set CI_EVIDENCE_LOCKER_TOKEN and EVIDENCE_LOCKER_URL to enable." >&2 diff --git a/.gitea/workflows/signals-reachability.yml b/.gitea/workflows/signals-reachability.yml new file mode 100644 index 000000000..a9c35bf02 --- /dev/null +++ b/.gitea/workflows/signals-reachability.yml @@ -0,0 +1,117 @@ +name: Signals Reachability Scoring & Events + +on: + workflow_dispatch: + inputs: + allow_dev_key: + description: "Allow dev signing key fallback (1=yes, 0=no)" + required: false + default: "0" + evidence_out_dir: + description: "Evidence output dir for signing/upload" + required: false + default: "evidence-locker/signals/2025-12-05" + push: + branches: [ main ] + paths: + - 'src/Signals/**' + - 'scripts/signals/reachability-smoke.sh' + - '.gitea/workflows/signals-reachability.yml' + - 'tools/cosign/sign-signals.sh' + +jobs: + reachability-smoke: + runs-on: ubuntu-22.04 + env: + DOTNET_NOLOGO: 1 + DOTNET_CLI_TELEMETRY_OPTOUT: 1 + DOTNET_SYSTEM_GLOBALIZATION_INVARIANT: 1 + TZ: UTC + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Task Pack offline bundle fixtures + run: python3 scripts/packs/run-fixtures-check.sh + + - name: Setup .NET 10 RC + uses: actions/setup-dotnet@v4 + with: + dotnet-version: 10.0.100 + include-prerelease: true + + - name: Restore + run: dotnet restore src/Signals/StellaOps.Signals.sln --configfile nuget.config + + - name: Build + run: dotnet build src/Signals/StellaOps.Signals.sln -c Release --no-restore + + - name: Reachability scoring + cache/events smoke + run: | + chmod +x scripts/signals/reachability-smoke.sh + scripts/signals/reachability-smoke.sh + + sign-and-upload: + runs-on: ubuntu-22.04 + needs: reachability-smoke + env: + COSIGN_PRIVATE_KEY_B64: ${{ secrets.COSIGN_PRIVATE_KEY_B64 }} + COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }} + COSIGN_ALLOW_DEV_KEY: ${{ github.event.inputs.allow_dev_key || '0' }} + OUT_DIR: ${{ github.event.inputs.evidence_out_dir || 'evidence-locker/signals/2025-12-05' }} + CI_EVIDENCE_LOCKER_TOKEN: ${{ secrets.CI_EVIDENCE_LOCKER_TOKEN || vars.CI_EVIDENCE_LOCKER_TOKEN }} + EVIDENCE_LOCKER_URL: ${{ secrets.EVIDENCE_LOCKER_URL || vars.EVIDENCE_LOCKER_URL }} + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Task Pack offline bundle fixtures + run: python3 scripts/packs/run-fixtures-check.sh + + - name: Install cosign + uses: sigstore/cosign-installer@v3 + with: + cosign-release: 'v2.2.4' + + - name: Verify artifacts exist + run: | + cd docs/modules/signals + sha256sum -c SHA256SUMS + + - name: Sign signals artifacts + run: | + chmod +x tools/cosign/sign-signals.sh + OUT_DIR="${OUT_DIR}" tools/cosign/sign-signals.sh + + - name: Upload signed artifacts + uses: actions/upload-artifact@v4 
+ with: + name: signals-evidence-${{ github.run_number }} + path: | + ${{ env.OUT_DIR }}/*.sigstore.json + ${{ env.OUT_DIR }}/*.dsse + ${{ env.OUT_DIR }}/SHA256SUMS + if-no-files-found: error + retention-days: 30 + + - name: Push to Evidence Locker + if: ${{ env.CI_EVIDENCE_LOCKER_TOKEN != '' && env.EVIDENCE_LOCKER_URL != '' }} + env: + TOKEN: ${{ env.CI_EVIDENCE_LOCKER_TOKEN }} + URL: ${{ env.EVIDENCE_LOCKER_URL }} + run: | + tar -cf /tmp/signals-evidence.tar -C "$OUT_DIR" . + sha256sum /tmp/signals-evidence.tar + curl -f -X PUT "$URL/signals/$(date -u +%Y-%m-%d)/signals-evidence.tar" \ + -H "Authorization: Bearer $TOKEN" \ + --data-binary @/tmp/signals-evidence.tar + echo "Uploaded to Evidence Locker" + + - name: Evidence Locker skip notice + if: ${{ env.CI_EVIDENCE_LOCKER_TOKEN == '' || env.EVIDENCE_LOCKER_URL == '' }} + run: | + echo "::notice::Evidence Locker upload skipped (CI_EVIDENCE_LOCKER_TOKEN or EVIDENCE_LOCKER_URL not set)" diff --git a/.gitea/workflows/sm-remote-ci.yml b/.gitea/workflows/sm-remote-ci.yml new file mode 100644 index 000000000..34f7ade59 --- /dev/null +++ b/.gitea/workflows/sm-remote-ci.yml @@ -0,0 +1,33 @@ +name: sm-remote-ci + +on: + push: + paths: + - "src/SmRemote/**" + - "src/__Libraries/StellaOps.Cryptography.Plugin.SmRemote/**" + - "src/__Libraries/StellaOps.Cryptography.Plugin.SmRemote.Tests/**" + - "ops/sm-remote/**" + - ".gitea/workflows/sm-remote-ci.yml" + pull_request: + paths: + - "src/SmRemote/**" + - "src/__Libraries/StellaOps.Cryptography.Plugin.SmRemote/**" + - "src/__Libraries/StellaOps.Cryptography.Plugin.SmRemote.Tests/**" + - "ops/sm-remote/**" + +jobs: + build-and-test: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Setup .NET + uses: actions/setup-dotnet@v4 + with: + dotnet-version: 10.0.x + - name: Restore + run: dotnet restore src/__Libraries/StellaOps.Cryptography.Plugin.SmRemote.Tests/StellaOps.Cryptography.Plugin.SmRemote.Tests.csproj + - name: Test + run: dotnet test src/__Libraries/StellaOps.Cryptography.Plugin.SmRemote.Tests/StellaOps.Cryptography.Plugin.SmRemote.Tests.csproj --no-restore --verbosity normal + - name: Publish service + run: dotnet publish src/SmRemote/StellaOps.SmRemote.Service/StellaOps.SmRemote.Service.csproj -c Release -o out/sm-remote diff --git a/deploy/compose/README.md b/deploy/compose/README.md index 832dacd49..85a61b3e9 100644 --- a/deploy/compose/README.md +++ b/deploy/compose/README.md @@ -1,9 +1,9 @@ -# Stella Ops Compose Profiles - -These Compose bundles ship the minimum services required to exercise the scanner pipeline plus control-plane dependencies. Every profile is pinned to immutable image digests sourced from `deploy/releases/*.yaml` and is linted via `docker compose config` in CI. - -## Layout - +# Stella Ops Compose Profiles + +These Compose bundles ship the minimum services required to exercise the scanner pipeline plus control-plane dependencies. Every profile is pinned to immutable image digests sourced from `deploy/releases/*.yaml` and is linted via `docker compose config` in CI. + +## Layout + | Path | Purpose | | ---- | ------- | | `docker-compose.dev.yaml` | Edge/nightly stack tuned for laptops and iterative work. | @@ -19,9 +19,9 @@ These Compose bundles ship the minimum services required to exercise the scanner | `scripts/reset.sh` | Stops the stack and removes Mongo/MinIO/Redis volumes after explicit confirmation. 
| | `scripts/quickstart.sh` | Helper to validate config and start dev stack; set `USE_MOCK=1` to include `docker-compose.mock.yaml` overlay. | | `docker-compose.mock.yaml` | Dev-only overlay with placeholder digests for missing services (orchestrator, policy-registry, packs, task-runner, VEX/Vuln stack). Use only with mock release manifest `deploy/releases/2025.09-mock-dev.yaml`. | - -## Usage - + +## Usage + ```bash cp env/dev.env.example dev.env docker compose --env-file dev.env -f docker-compose.dev.yaml config @@ -30,6 +30,8 @@ docker compose --env-file dev.env -f docker-compose.dev.yaml up -d The stage and airgap variants behave the same way—swap the file names accordingly. All profiles expose 443/8443 for the UI and REST APIs, and they share a `stellaops` Docker network scoped to the compose project. +> **Surface.Secrets:** set `SCANNER_SURFACE_SECRETS_PROVIDER`/`SCANNER_SURFACE_SECRETS_ROOT` in your `.env` and point `SURFACE_SECRETS_HOST_PATH` to the decrypted bundle path (default `./offline/surface-secrets`). The stack mounts that path read-only into Scanner Web/Worker so `secret://` references resolve without embedding plaintext. + > **Graph Explorer reminder:** If you enable Cartographer or Graph API containers alongside these profiles, update `etc/authority.yaml` so the `cartographer-service` client is marked with `properties.serviceIdentity: "cartographer"` and carries a tenant hint. The Authority host now refuses `graph:write` tokens without that marker, so apply the configuration change before rolling out the updated images. ### Telemetry collector overlay @@ -116,7 +118,7 @@ USE_MOCK=1 ./scripts/quickstart.sh env/dev.env.example ``` The overlay pins the missing services (orchestrator, policy-registry, packs-registry, task-runner, VEX/Vuln stack) to mock digests from `deploy/releases/2025.09-mock-dev.yaml` and starts their real entrypoints so integration flows can be exercised end-to-end. Replace the mock pins with production digests once releases publish; keep the mock overlay dev-only. - + Keep digests synchronized between Compose, Helm, and the release manifest to preserve reproducibility guarantees. `deploy/tools/validate-profiles.sh` performs a quick audit. 
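To make the Surface.Secrets note above concrete, here is a minimal sketch of the env wiring. It uses only variable names, defaults, and commands that appear in this patch; the bundle path and its contents are illustrative:

```bash
# dev.env fragment: file-based Surface.Secrets provider (patch defaults)
SCANNER_SURFACE_SECRETS_PROVIDER=file
SCANNER_SURFACE_SECRETS_ROOT=/etc/stellaops/secrets    # path inside Scanner Web/Worker
SURFACE_SECRETS_HOST_PATH=./offline/surface-secrets    # decrypted bundle on the host

# Validate the rendered config, then start the stack. The compose files mount
# ${SURFACE_SECRETS_HOST_PATH} read-only at ${SCANNER_SURFACE_SECRETS_ROOT}, so
# secret:// references resolve from files instead of inline plaintext.
docker compose --env-file dev.env -f docker-compose.dev.yaml config
docker compose --env-file dev.env -f docker-compose.dev.yaml up -d
```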
### GPU toggle for Advisory AI diff --git a/deploy/compose/docker-compose.airgap.yaml b/deploy/compose/docker-compose.airgap.yaml index 9d9c9dd1c..a8a09786f 100644 --- a/deploy/compose/docker-compose.airgap.yaml +++ b/deploy/compose/docker-compose.airgap.yaml @@ -1,31 +1,34 @@ -x-release-labels: &release-labels - com.stellaops.release.version: "2025.09.2-airgap" - com.stellaops.release.channel: "airgap" - com.stellaops.profile: "airgap" - -networks: - stellaops: - driver: bridge - -volumes: - mongo-data: - minio-data: - rustfs-data: +x-release-labels: &release-labels + com.stellaops.release.version: "2025.09.2-airgap" + com.stellaops.release.channel: "airgap" + com.stellaops.profile: "airgap" + +networks: + stellaops: + driver: bridge + +volumes: + mongo-data: + minio-data: + rustfs-data: concelier-jobs: nats-data: scanner-surface-cache: postgres-data: - -services: + advisory-ai-queue: + advisory-ai-plans: + advisory-ai-outputs: + +services: mongo: image: docker.io/library/mongo@sha256:c258b26dbb7774f97f52aff52231ca5f228273a84329c5f5e451c3739457db49 command: ["mongod", "--bind_ip_all"] restart: unless-stopped - environment: - MONGO_INITDB_ROOT_USERNAME: "${MONGO_INITDB_ROOT_USERNAME}" - MONGO_INITDB_ROOT_PASSWORD: "${MONGO_INITDB_ROOT_PASSWORD}" - volumes: - - mongo-data:/data/db + environment: + MONGO_INITDB_ROOT_USERNAME: "${MONGO_INITDB_ROOT_USERNAME}" + MONGO_INITDB_ROOT_PASSWORD: "${MONGO_INITDB_ROOT_PASSWORD}" + volumes: + - mongo-data:/data/db networks: - stellaops labels: *release-labels @@ -45,313 +48,319 @@ services: networks: - stellaops labels: *release-labels - - minio: - image: docker.io/minio/minio@sha256:14cea493d9a34af32f524e538b8346cf79f3321eff8e708c1e2960462bd8936e - command: ["server", "/data", "--console-address", ":9001"] - restart: unless-stopped - environment: - MINIO_ROOT_USER: "${MINIO_ROOT_USER}" - MINIO_ROOT_PASSWORD: "${MINIO_ROOT_PASSWORD}" - volumes: - - minio-data:/data - ports: - - "${MINIO_CONSOLE_PORT:-29001}:9001" - networks: - - stellaops - labels: *release-labels - - rustfs: - image: registry.stella-ops.org/stellaops/rustfs:2025.10.0-edge - command: ["serve", "--listen", "0.0.0.0:8080", "--root", "/data"] - restart: unless-stopped - environment: - RUSTFS__LOG__LEVEL: info - RUSTFS__STORAGE__PATH: /data - volumes: - - rustfs-data:/data - ports: - - "${RUSTFS_HTTP_PORT:-8080}:8080" - networks: - - stellaops - labels: *release-labels - - nats: - image: docker.io/library/nats@sha256:c82559e4476289481a8a5196e675ebfe67eea81d95e5161e3e78eccfe766608e - command: - - "-js" - - "-sd" - - /data - restart: unless-stopped - ports: - - "${NATS_CLIENT_PORT:-24222}:4222" - volumes: - - nats-data:/data - networks: - - stellaops - labels: *release-labels - - authority: - image: registry.stella-ops.org/stellaops/authority@sha256:5551a3269b7008cd5aceecf45df018c67459ed519557ccbe48b093b926a39bcc - restart: unless-stopped - depends_on: - - mongo - environment: - STELLAOPS_AUTHORITY__ISSUER: "${AUTHORITY_ISSUER}" - STELLAOPS_AUTHORITY__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" - STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: "/app/plugins" - STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: "/app/etc/authority.plugins" - volumes: - - ../../etc/authority.yaml:/etc/authority.yaml:ro - - ../../etc/authority.plugins:/app/etc/authority.plugins:ro - ports: - - "${AUTHORITY_PORT:-8440}:8440" - networks: - - stellaops - labels: *release-labels - - signer: - image: 
registry.stella-ops.org/stellaops/signer@sha256:ddbbd664a42846cea6b40fca6465bc679b30f72851158f300d01a8571c5478fc - restart: unless-stopped - depends_on: - - authority - environment: - SIGNER__AUTHORITY__BASEURL: "https://authority:8440" - SIGNER__POE__INTROSPECTURL: "${SIGNER_POE_INTROSPECT_URL}" - SIGNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" - ports: - - "${SIGNER_PORT:-8441}:8441" - networks: - - stellaops - labels: *release-labels - - attestor: - image: registry.stella-ops.org/stellaops/attestor@sha256:1ff0a3124d66d3a2702d8e421df40fbd98cc75cb605d95510598ebbae1433c50 - restart: unless-stopped - depends_on: - - signer - environment: - ATTESTOR__SIGNER__BASEURL: "https://signer:8441" - ATTESTOR__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" - ports: - - "${ATTESTOR_PORT:-8442}:8442" - networks: - - stellaops - labels: *release-labels - - issuer-directory: - image: registry.stella-ops.org/stellaops/issuer-directory-web:2025.10.0-edge - restart: unless-stopped - depends_on: - - mongo - - authority - environment: - ISSUERDIRECTORY__CONFIG: "/etc/issuer-directory.yaml" - ISSUERDIRECTORY__AUTHORITY__ISSUER: "${AUTHORITY_ISSUER}" - ISSUERDIRECTORY__AUTHORITY__BASEURL: "https://authority:8440" - ISSUERDIRECTORY__MONGO__CONNECTIONSTRING: "${ISSUER_DIRECTORY_MONGO_CONNECTION_STRING}" - ISSUERDIRECTORY__SEEDCSAFPUBLISHERS: "${ISSUER_DIRECTORY_SEED_CSAF:-true}" - volumes: - - ../../etc/issuer-directory.yaml:/etc/issuer-directory.yaml:ro - ports: - - "${ISSUER_DIRECTORY_PORT:-8447}:8080" - networks: - - stellaops - labels: *release-labels - - concelier: - image: registry.stella-ops.org/stellaops/concelier@sha256:29e2e1a0972707e092cbd3d370701341f9fec2aa9316fb5d8100480f2a1c76b5 - restart: unless-stopped - depends_on: - - mongo - - minio - environment: - CONCELIER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" - CONCELIER__STORAGE__S3__ENDPOINT: "http://minio:9000" - CONCELIER__STORAGE__S3__ACCESSKEYID: "${MINIO_ROOT_USER}" - CONCELIER__STORAGE__S3__SECRETACCESSKEY: "${MINIO_ROOT_PASSWORD}" - CONCELIER__AUTHORITY__BASEURL: "https://authority:8440" - CONCELIER__AUTHORITY__RESILIENCE__ALLOWOFFLINECACHEFALLBACK: "true" - CONCELIER__AUTHORITY__RESILIENCE__OFFLINECACHETOLERANCE: "${AUTHORITY_OFFLINE_CACHE_TOLERANCE:-00:30:00}" - volumes: - - concelier-jobs:/var/lib/concelier/jobs - ports: - - "${CONCELIER_PORT:-8445}:8445" - networks: - - stellaops - labels: *release-labels - - scanner-web: - image: registry.stella-ops.org/stellaops/scanner-web@sha256:3df8ca21878126758203c1a0444e39fd97f77ddacf04a69685cda9f1e5e94718 - restart: unless-stopped - depends_on: - - concelier - - rustfs - - nats - environment: - SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" - SCANNER__ARTIFACTSTORE__DRIVER: "rustfs" - SCANNER__ARTIFACTSTORE__ENDPOINT: "http://rustfs:8080/api/v1" - SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts" - SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30" - SCANNER__QUEUE__BROKER: "${SCANNER_QUEUE_BROKER}" - SCANNER__EVENTS__ENABLED: "${SCANNER_EVENTS_ENABLED:-false}" - SCANNER__EVENTS__DRIVER: "${SCANNER_EVENTS_DRIVER:-redis}" - SCANNER__EVENTS__DSN: "${SCANNER_EVENTS_DSN:-}" - SCANNER__EVENTS__STREAM: "${SCANNER_EVENTS_STREAM:-stella.events}" - SCANNER__EVENTS__PUBLISHTIMEOUTSECONDS: 
"${SCANNER_EVENTS_PUBLISH_TIMEOUT_SECONDS:-5}" - SCANNER__EVENTS__MAXSTREAMLENGTH: "${SCANNER_EVENTS_MAX_STREAM_LENGTH:-10000}" - # Surface.Env configuration (see docs/modules/scanner/design/surface-env.md) - SCANNER_SURFACE_FS_ENDPOINT: "${SCANNER_SURFACE_FS_ENDPOINT:-http://rustfs:8080}" - SCANNER_SURFACE_FS_BUCKET: "${SCANNER_SURFACE_FS_BUCKET:-surface-cache}" - SCANNER_SURFACE_CACHE_ROOT: "${SCANNER_SURFACE_CACHE_ROOT:-/var/lib/stellaops/surface}" - SCANNER_SURFACE_CACHE_QUOTA_MB: "${SCANNER_SURFACE_CACHE_QUOTA_MB:-4096}" - SCANNER_SURFACE_PREFETCH_ENABLED: "${SCANNER_SURFACE_PREFETCH_ENABLED:-false}" - SCANNER_SURFACE_TENANT: "${SCANNER_SURFACE_TENANT:-default}" - SCANNER_SURFACE_FEATURES: "${SCANNER_SURFACE_FEATURES:-}" - SCANNER_SURFACE_SECRETS_PROVIDER: "${SCANNER_SURFACE_SECRETS_PROVIDER:-file}" - SCANNER_SURFACE_SECRETS_ROOT: "${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}" - SCANNER_SURFACE_SECRETS_ALLOW_INLINE: "${SCANNER_SURFACE_SECRETS_ALLOW_INLINE:-false}" - volumes: - - scanner-surface-cache:/var/lib/stellaops/surface - ports: - - "${SCANNER_WEB_PORT:-8444}:8444" - networks: - - stellaops - labels: *release-labels - - scanner-worker: - image: registry.stella-ops.org/stellaops/scanner-worker@sha256:eea5d6cfe7835950c5ec7a735a651f2f0d727d3e470cf9027a4a402ea89c4fb5 - restart: unless-stopped - depends_on: - - scanner-web - - rustfs - - nats - environment: - SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" - SCANNER__ARTIFACTSTORE__DRIVER: "rustfs" - SCANNER__ARTIFACTSTORE__ENDPOINT: "http://rustfs:8080/api/v1" - SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts" - SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30" - SCANNER__QUEUE__BROKER: "${SCANNER_QUEUE_BROKER}" - # Surface.Env configuration (see docs/modules/scanner/design/surface-env.md) - SCANNER_SURFACE_FS_ENDPOINT: "${SCANNER_SURFACE_FS_ENDPOINT:-http://rustfs:8080}" - SCANNER_SURFACE_FS_BUCKET: "${SCANNER_SURFACE_FS_BUCKET:-surface-cache}" - SCANNER_SURFACE_CACHE_ROOT: "${SCANNER_SURFACE_CACHE_ROOT:-/var/lib/stellaops/surface}" - SCANNER_SURFACE_CACHE_QUOTA_MB: "${SCANNER_SURFACE_CACHE_QUOTA_MB:-4096}" - SCANNER_SURFACE_PREFETCH_ENABLED: "${SCANNER_SURFACE_PREFETCH_ENABLED:-false}" - SCANNER_SURFACE_TENANT: "${SCANNER_SURFACE_TENANT:-default}" - SCANNER_SURFACE_FEATURES: "${SCANNER_SURFACE_FEATURES:-}" - SCANNER_SURFACE_SECRETS_PROVIDER: "${SCANNER_SURFACE_SECRETS_PROVIDER:-file}" - SCANNER_SURFACE_SECRETS_ROOT: "${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}" - SCANNER_SURFACE_SECRETS_ALLOW_INLINE: "${SCANNER_SURFACE_SECRETS_ALLOW_INLINE:-false}" - volumes: - - scanner-surface-cache:/var/lib/stellaops/surface - networks: - - stellaops - labels: *release-labels - - scheduler-worker: - image: registry.stella-ops.org/stellaops/scheduler-worker:2025.10.0-edge - restart: unless-stopped - depends_on: - - mongo - - nats - - scanner-web - command: - - "dotnet" - - "StellaOps.Scheduler.Worker.Host.dll" - environment: - SCHEDULER__QUEUE__KIND: "${SCHEDULER_QUEUE_KIND:-Nats}" - SCHEDULER__QUEUE__NATS__URL: "${SCHEDULER_QUEUE_NATS_URL:-nats://nats:4222}" - SCHEDULER__STORAGE__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" - SCHEDULER__STORAGE__DATABASE: "${SCHEDULER_STORAGE_DATABASE:-stellaops_scheduler}" - SCHEDULER__WORKER__RUNNER__SCANNER__BASEADDRESS: "${SCHEDULER_SCANNER_BASEADDRESS:-http://scanner-web:8444}" - networks: - - stellaops - labels: *release-labels - + + minio: + image: 
docker.io/minio/minio@sha256:14cea493d9a34af32f524e538b8346cf79f3321eff8e708c1e2960462bd8936e + command: ["server", "/data", "--console-address", ":9001"] + restart: unless-stopped + environment: + MINIO_ROOT_USER: "${MINIO_ROOT_USER}" + MINIO_ROOT_PASSWORD: "${MINIO_ROOT_PASSWORD}" + volumes: + - minio-data:/data + ports: + - "${MINIO_CONSOLE_PORT:-29001}:9001" + networks: + - stellaops + labels: *release-labels + + rustfs: + image: registry.stella-ops.org/stellaops/rustfs:2025.10.0-edge + command: ["serve", "--listen", "0.0.0.0:8080", "--root", "/data"] + restart: unless-stopped + environment: + RUSTFS__LOG__LEVEL: info + RUSTFS__STORAGE__PATH: /data + volumes: + - rustfs-data:/data + ports: + - "${RUSTFS_HTTP_PORT:-8080}:8080" + networks: + - stellaops + labels: *release-labels + + nats: + image: docker.io/library/nats@sha256:c82559e4476289481a8a5196e675ebfe67eea81d95e5161e3e78eccfe766608e + command: + - "-js" + - "-sd" + - /data + restart: unless-stopped + ports: + - "${NATS_CLIENT_PORT:-24222}:4222" + volumes: + - nats-data:/data + networks: + - stellaops + labels: *release-labels + + authority: + image: registry.stella-ops.org/stellaops/authority@sha256:5551a3269b7008cd5aceecf45df018c67459ed519557ccbe48b093b926a39bcc + restart: unless-stopped + depends_on: + - mongo + environment: + STELLAOPS_AUTHORITY__ISSUER: "${AUTHORITY_ISSUER}" + STELLAOPS_AUTHORITY__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + STELLAOPS_AUTHORITY__PLUGINDIRECTORIES__0: "/app/plugins" + STELLAOPS_AUTHORITY__PLUGINS__CONFIGURATIONDIRECTORY: "/app/etc/authority.plugins" + volumes: + - ../../etc/authority.yaml:/etc/authority.yaml:ro + - ../../etc/authority.plugins:/app/etc/authority.plugins:ro + ports: + - "${AUTHORITY_PORT:-8440}:8440" + networks: + - stellaops + labels: *release-labels + + signer: + image: registry.stella-ops.org/stellaops/signer@sha256:ddbbd664a42846cea6b40fca6465bc679b30f72851158f300d01a8571c5478fc + restart: unless-stopped + depends_on: + - authority + environment: + SIGNER__AUTHORITY__BASEURL: "https://authority:8440" + SIGNER__POE__INTROSPECTURL: "${SIGNER_POE_INTROSPECT_URL}" + SIGNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + ports: + - "${SIGNER_PORT:-8441}:8441" + networks: + - stellaops + labels: *release-labels + + attestor: + image: registry.stella-ops.org/stellaops/attestor@sha256:1ff0a3124d66d3a2702d8e421df40fbd98cc75cb605d95510598ebbae1433c50 + restart: unless-stopped + depends_on: + - signer + environment: + ATTESTOR__SIGNER__BASEURL: "https://signer:8441" + ATTESTOR__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + ports: + - "${ATTESTOR_PORT:-8442}:8442" + networks: + - stellaops + labels: *release-labels + + issuer-directory: + image: registry.stella-ops.org/stellaops/issuer-directory-web:2025.10.0-edge + restart: unless-stopped + depends_on: + - mongo + - authority + environment: + ISSUERDIRECTORY__CONFIG: "/etc/issuer-directory.yaml" + ISSUERDIRECTORY__AUTHORITY__ISSUER: "${AUTHORITY_ISSUER}" + ISSUERDIRECTORY__AUTHORITY__BASEURL: "https://authority:8440" + ISSUERDIRECTORY__MONGO__CONNECTIONSTRING: "${ISSUER_DIRECTORY_MONGO_CONNECTION_STRING}" + ISSUERDIRECTORY__SEEDCSAFPUBLISHERS: "${ISSUER_DIRECTORY_SEED_CSAF:-true}" + volumes: + - ../../etc/issuer-directory.yaml:/etc/issuer-directory.yaml:ro + ports: + - "${ISSUER_DIRECTORY_PORT:-8447}:8080" + networks: + - stellaops + 
labels: *release-labels + + concelier: + image: registry.stella-ops.org/stellaops/concelier@sha256:29e2e1a0972707e092cbd3d370701341f9fec2aa9316fb5d8100480f2a1c76b5 + restart: unless-stopped + depends_on: + - mongo + - minio + environment: + CONCELIER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + CONCELIER__STORAGE__S3__ENDPOINT: "http://minio:9000" + CONCELIER__STORAGE__S3__ACCESSKEYID: "${MINIO_ROOT_USER}" + CONCELIER__STORAGE__S3__SECRETACCESSKEY: "${MINIO_ROOT_PASSWORD}" + CONCELIER__AUTHORITY__BASEURL: "https://authority:8440" + CONCELIER__AUTHORITY__RESILIENCE__ALLOWOFFLINECACHEFALLBACK: "true" + CONCELIER__AUTHORITY__RESILIENCE__OFFLINECACHETOLERANCE: "${AUTHORITY_OFFLINE_CACHE_TOLERANCE:-00:30:00}" + volumes: + - concelier-jobs:/var/lib/concelier/jobs + ports: + - "${CONCELIER_PORT:-8445}:8445" + networks: + - stellaops + labels: *release-labels + + scanner-web: + image: registry.stella-ops.org/stellaops/scanner-web@sha256:3df8ca21878126758203c1a0444e39fd97f77ddacf04a69685cda9f1e5e94718 + restart: unless-stopped + depends_on: + - concelier + - rustfs + - nats + environment: + SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + SCANNER__ARTIFACTSTORE__DRIVER: "rustfs" + SCANNER__ARTIFACTSTORE__ENDPOINT: "http://rustfs:8080/api/v1" + SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts" + SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30" + SCANNER__QUEUE__BROKER: "${SCANNER_QUEUE_BROKER}" + SCANNER__EVENTS__ENABLED: "${SCANNER_EVENTS_ENABLED:-false}" + SCANNER__EVENTS__DRIVER: "${SCANNER_EVENTS_DRIVER:-redis}" + SCANNER__EVENTS__DSN: "${SCANNER_EVENTS_DSN:-}" + SCANNER__EVENTS__STREAM: "${SCANNER_EVENTS_STREAM:-stella.events}" + SCANNER__EVENTS__PUBLISHTIMEOUTSECONDS: "${SCANNER_EVENTS_PUBLISH_TIMEOUT_SECONDS:-5}" + SCANNER__EVENTS__MAXSTREAMLENGTH: "${SCANNER_EVENTS_MAX_STREAM_LENGTH:-10000}" + # Surface.Env configuration (see docs/modules/scanner/design/surface-env.md) + SCANNER_SURFACE_FS_ENDPOINT: "${SCANNER_SURFACE_FS_ENDPOINT:-http://rustfs:8080}" + SCANNER_SURFACE_FS_BUCKET: "${SCANNER_SURFACE_FS_BUCKET:-surface-cache}" + SCANNER_SURFACE_CACHE_ROOT: "${SCANNER_SURFACE_CACHE_ROOT:-/var/lib/stellaops/surface}" + SCANNER_SURFACE_CACHE_QUOTA_MB: "${SCANNER_SURFACE_CACHE_QUOTA_MB:-4096}" + SCANNER_SURFACE_PREFETCH_ENABLED: "${SCANNER_SURFACE_PREFETCH_ENABLED:-false}" + SCANNER_SURFACE_TENANT: "${SCANNER_SURFACE_TENANT:-default}" + SCANNER_SURFACE_FEATURES: "${SCANNER_SURFACE_FEATURES:-}" + SCANNER_SURFACE_SECRETS_PROVIDER: "${SCANNER_SURFACE_SECRETS_PROVIDER:-file}" + SCANNER_SURFACE_SECRETS_NAMESPACE: "${SCANNER_SURFACE_SECRETS_NAMESPACE:-}" + SCANNER_SURFACE_SECRETS_ROOT: "${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}" + SCANNER_SURFACE_SECRETS_FALLBACK_PROVIDER: "${SCANNER_SURFACE_SECRETS_FALLBACK_PROVIDER:-}" + SCANNER_SURFACE_SECRETS_ALLOW_INLINE: "${SCANNER_SURFACE_SECRETS_ALLOW_INLINE:-false}" + volumes: + - scanner-surface-cache:/var/lib/stellaops/surface + - ${SURFACE_SECRETS_HOST_PATH:-./offline/surface-secrets}:${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}:ro + ports: + - "${SCANNER_WEB_PORT:-8444}:8444" + networks: + - stellaops + labels: *release-labels + + scanner-worker: + image: registry.stella-ops.org/stellaops/scanner-worker@sha256:eea5d6cfe7835950c5ec7a735a651f2f0d727d3e470cf9027a4a402ea89c4fb5 + restart: unless-stopped + depends_on: + - scanner-web + - rustfs + - nats + environment: + 
SCANNER__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + SCANNER__ARTIFACTSTORE__DRIVER: "rustfs" + SCANNER__ARTIFACTSTORE__ENDPOINT: "http://rustfs:8080/api/v1" + SCANNER__ARTIFACTSTORE__BUCKET: "scanner-artifacts" + SCANNER__ARTIFACTSTORE__TIMEOUTSECONDS: "30" + SCANNER__QUEUE__BROKER: "${SCANNER_QUEUE_BROKER}" + # Surface.Env configuration (see docs/modules/scanner/design/surface-env.md) + SCANNER_SURFACE_FS_ENDPOINT: "${SCANNER_SURFACE_FS_ENDPOINT:-http://rustfs:8080}" + SCANNER_SURFACE_FS_BUCKET: "${SCANNER_SURFACE_FS_BUCKET:-surface-cache}" + SCANNER_SURFACE_CACHE_ROOT: "${SCANNER_SURFACE_CACHE_ROOT:-/var/lib/stellaops/surface}" + SCANNER_SURFACE_CACHE_QUOTA_MB: "${SCANNER_SURFACE_CACHE_QUOTA_MB:-4096}" + SCANNER_SURFACE_PREFETCH_ENABLED: "${SCANNER_SURFACE_PREFETCH_ENABLED:-false}" + SCANNER_SURFACE_TENANT: "${SCANNER_SURFACE_TENANT:-default}" + SCANNER_SURFACE_FEATURES: "${SCANNER_SURFACE_FEATURES:-}" + SCANNER_SURFACE_SECRETS_PROVIDER: "${SCANNER_SURFACE_SECRETS_PROVIDER:-file}" + SCANNER_SURFACE_SECRETS_NAMESPACE: "${SCANNER_SURFACE_SECRETS_NAMESPACE:-}" + SCANNER_SURFACE_SECRETS_ROOT: "${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}" + SCANNER_SURFACE_SECRETS_FALLBACK_PROVIDER: "${SCANNER_SURFACE_SECRETS_FALLBACK_PROVIDER:-}" + SCANNER_SURFACE_SECRETS_ALLOW_INLINE: "${SCANNER_SURFACE_SECRETS_ALLOW_INLINE:-false}" + volumes: + - scanner-surface-cache:/var/lib/stellaops/surface + - ${SURFACE_SECRETS_HOST_PATH:-./offline/surface-secrets}:${SCANNER_SURFACE_SECRETS_ROOT:-/etc/stellaops/secrets}:ro + networks: + - stellaops + labels: *release-labels + + scheduler-worker: + image: registry.stella-ops.org/stellaops/scheduler-worker:2025.10.0-edge + restart: unless-stopped + depends_on: + - mongo + - nats + - scanner-web + command: + - "dotnet" + - "StellaOps.Scheduler.Worker.Host.dll" + environment: + SCHEDULER__QUEUE__KIND: "${SCHEDULER_QUEUE_KIND:-Nats}" + SCHEDULER__QUEUE__NATS__URL: "${SCHEDULER_QUEUE_NATS_URL:-nats://nats:4222}" + SCHEDULER__STORAGE__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + SCHEDULER__STORAGE__DATABASE: "${SCHEDULER_STORAGE_DATABASE:-stellaops_scheduler}" + SCHEDULER__WORKER__RUNNER__SCANNER__BASEADDRESS: "${SCHEDULER_SCANNER_BASEADDRESS:-http://scanner-web:8444}" + networks: + - stellaops + labels: *release-labels + notify-web: image: ${NOTIFY_WEB_IMAGE:-registry.stella-ops.org/stellaops/notify-web:2025.09.2} restart: unless-stopped depends_on: - postgres - authority - environment: - DOTNET_ENVIRONMENT: Production - volumes: - - ../../etc/notify.airgap.yaml:/app/etc/notify.yaml:ro - ports: - - "${NOTIFY_WEB_PORT:-9446}:8446" - networks: - - stellaops - labels: *release-labels - - excititor: - image: registry.stella-ops.org/stellaops/excititor@sha256:65c0ee13f773efe920d7181512349a09d363ab3f3e177d276136bd2742325a68 - restart: unless-stopped - depends_on: - - concelier - environment: - EXCITITOR__CONCELIER__BASEURL: "https://concelier:8445" - EXCITITOR__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" - networks: - - stellaops - labels: *release-labels - - advisory-ai-web: - image: registry.stella-ops.org/stellaops/advisory-ai-web:2025.09.2-airgap - restart: unless-stopped - depends_on: - - scanner-web - environment: - ADVISORYAI__AdvisoryAI__SbomBaseAddress: "${ADVISORY_AI_SBOM_BASEADDRESS:-http://scanner-web:8444}" - 
ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: "/var/lib/advisory-ai/queue" - ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: "/var/lib/advisory-ai/plans" - ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: "/var/lib/advisory-ai/outputs" - ADVISORYAI__AdvisoryAI__Inference__Mode: "${ADVISORY_AI_INFERENCE_MODE:-Local}" - ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: "${ADVISORY_AI_REMOTE_BASEADDRESS:-}" - ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: "${ADVISORY_AI_REMOTE_APIKEY:-}" - ports: - - "${ADVISORY_AI_WEB_PORT:-8448}:8448" - volumes: - - advisory-ai-queue:/var/lib/advisory-ai/queue - - advisory-ai-plans:/var/lib/advisory-ai/plans - - advisory-ai-outputs:/var/lib/advisory-ai/outputs - networks: - - stellaops - labels: *release-labels - - advisory-ai-worker: - image: registry.stella-ops.org/stellaops/advisory-ai-worker:2025.09.2-airgap - restart: unless-stopped - depends_on: - - advisory-ai-web - environment: - ADVISORYAI__AdvisoryAI__SbomBaseAddress: "${ADVISORY_AI_SBOM_BASEADDRESS:-http://scanner-web:8444}" - ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: "/var/lib/advisory-ai/queue" - ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: "/var/lib/advisory-ai/plans" - ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: "/var/lib/advisory-ai/outputs" - ADVISORYAI__AdvisoryAI__Inference__Mode: "${ADVISORY_AI_INFERENCE_MODE:-Local}" - ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: "${ADVISORY_AI_REMOTE_BASEADDRESS:-}" - ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: "${ADVISORY_AI_REMOTE_APIKEY:-}" - volumes: - - advisory-ai-queue:/var/lib/advisory-ai/queue - - advisory-ai-plans:/var/lib/advisory-ai/plans - - advisory-ai-outputs:/var/lib/advisory-ai/outputs - networks: - - stellaops - labels: *release-labels - - web-ui: - image: registry.stella-ops.org/stellaops/web-ui@sha256:bee9668011ff414572131dc777faab4da24473fe12c230893f161cabee092a1d - restart: unless-stopped - depends_on: - - scanner-web - environment: - STELLAOPS_UI__BACKEND__BASEURL: "https://scanner-web:8444" - ports: - - "${UI_PORT:-9443}:8443" - networks: - - stellaops - labels: *release-labels + environment: + DOTNET_ENVIRONMENT: Production + volumes: + - ../../etc/notify.airgap.yaml:/app/etc/notify.yaml:ro + ports: + - "${NOTIFY_WEB_PORT:-9446}:8446" + networks: + - stellaops + labels: *release-labels + + excititor: + image: registry.stella-ops.org/stellaops/excititor@sha256:65c0ee13f773efe920d7181512349a09d363ab3f3e177d276136bd2742325a68 + restart: unless-stopped + depends_on: + - concelier + environment: + EXCITITOR__CONCELIER__BASEURL: "https://concelier:8445" + EXCITITOR__STORAGE__MONGO__CONNECTIONSTRING: "mongodb://${MONGO_INITDB_ROOT_USERNAME}:${MONGO_INITDB_ROOT_PASSWORD}@mongo:27017" + networks: + - stellaops + labels: *release-labels + + advisory-ai-web: + image: registry.stella-ops.org/stellaops/advisory-ai-web:2025.09.2-airgap + restart: unless-stopped + depends_on: + - scanner-web + environment: + ADVISORYAI__AdvisoryAI__SbomBaseAddress: "${ADVISORY_AI_SBOM_BASEADDRESS:-http://scanner-web:8444}" + ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: "/var/lib/advisory-ai/queue" + ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: "/var/lib/advisory-ai/plans" + ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: "/var/lib/advisory-ai/outputs" + ADVISORYAI__AdvisoryAI__Inference__Mode: "${ADVISORY_AI_INFERENCE_MODE:-Local}" + ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: "${ADVISORY_AI_REMOTE_BASEADDRESS:-}" + ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: 
"${ADVISORY_AI_REMOTE_APIKEY:-}" + ports: + - "${ADVISORY_AI_WEB_PORT:-8448}:8448" + volumes: + - advisory-ai-queue:/var/lib/advisory-ai/queue + - advisory-ai-plans:/var/lib/advisory-ai/plans + - advisory-ai-outputs:/var/lib/advisory-ai/outputs + networks: + - stellaops + labels: *release-labels + + advisory-ai-worker: + image: registry.stella-ops.org/stellaops/advisory-ai-worker:2025.09.2-airgap + restart: unless-stopped + depends_on: + - advisory-ai-web + environment: + ADVISORYAI__AdvisoryAI__SbomBaseAddress: "${ADVISORY_AI_SBOM_BASEADDRESS:-http://scanner-web:8444}" + ADVISORYAI__AdvisoryAI__Queue__DirectoryPath: "/var/lib/advisory-ai/queue" + ADVISORYAI__AdvisoryAI__Storage__PlanCacheDirectory: "/var/lib/advisory-ai/plans" + ADVISORYAI__AdvisoryAI__Storage__OutputDirectory: "/var/lib/advisory-ai/outputs" + ADVISORYAI__AdvisoryAI__Inference__Mode: "${ADVISORY_AI_INFERENCE_MODE:-Local}" + ADVISORYAI__AdvisoryAI__Inference__Remote__BaseAddress: "${ADVISORY_AI_REMOTE_BASEADDRESS:-}" + ADVISORYAI__AdvisoryAI__Inference__Remote__ApiKey: "${ADVISORY_AI_REMOTE_APIKEY:-}" + volumes: + - advisory-ai-queue:/var/lib/advisory-ai/queue + - advisory-ai-plans:/var/lib/advisory-ai/plans + - advisory-ai-outputs:/var/lib/advisory-ai/outputs + networks: + - stellaops + labels: *release-labels + + web-ui: + image: registry.stella-ops.org/stellaops/web-ui@sha256:bee9668011ff414572131dc777faab4da24473fe12c230893f161cabee092a1d + restart: unless-stopped + depends_on: + - scanner-web + environment: + STELLAOPS_UI__BACKEND__BASEURL: "https://scanner-web:8444" + ports: + - "${UI_PORT:-9443}:8443" + networks: + - stellaops + labels: *release-labels diff --git a/deploy/compose/docker-compose.dev.yaml b/deploy/compose/docker-compose.dev.yaml index dcc9c9f6e..6c4a1d75f 100644 --- a/deploy/compose/docker-compose.dev.yaml +++ b/deploy/compose/docker-compose.dev.yaml @@ -347,6 +347,7 @@ services: WINE_CSP_LOG_LEVEL: "${WINE_CSP_LOG_LEVEL:-Information}" ASPNETCORE_ENVIRONMENT: "${ASPNETCORE_ENVIRONMENT:-Development}" volumes: + - ../../opt/cryptopro/downloads:/opt/cryptopro/downloads:ro - wine-csp-prefix:/home/winecsp/.wine - wine-csp-logs:/var/log/wine-csp # Mount customer-provided CSP installer (optional): diff --git a/deploy/compose/docker-compose.mock.yaml b/deploy/compose/docker-compose.mock.yaml index 316060b2e..ffc5ce2e7 100644 --- a/deploy/compose/docker-compose.mock.yaml +++ b/deploy/compose/docker-compose.mock.yaml @@ -81,5 +81,7 @@ services: WINE_CSP_PORT: "5099" WINE_CSP_MODE: "limited" WINE_CSP_LOG_LEVEL: "Debug" + volumes: + - ../../opt/cryptopro/downloads:/opt/cryptopro/downloads:ro labels: *release-labels networks: [stellaops] diff --git a/deploy/compose/env/airgap.env.example b/deploy/compose/env/airgap.env.example index 401bc1743..03828473c 100644 --- a/deploy/compose/env/airgap.env.example +++ b/deploy/compose/env/airgap.env.example @@ -1,13 +1,13 @@ -# Substitutions for docker-compose.airgap.yaml -MONGO_INITDB_ROOT_USERNAME=stellaops -MONGO_INITDB_ROOT_PASSWORD=airgap-password -MINIO_ROOT_USER=stellaops-offline +# Substitutions for docker-compose.airgap.yaml +MONGO_INITDB_ROOT_USERNAME=stellaops +MONGO_INITDB_ROOT_PASSWORD=airgap-password +MINIO_ROOT_USER=stellaops-offline MINIO_ROOT_PASSWORD=airgap-minio-secret MINIO_CONSOLE_PORT=29001 RUSTFS_HTTP_PORT=8080 AUTHORITY_ISSUER=https://authority.airgap.local -AUTHORITY_PORT=8440 -SIGNER_POE_INTROSPECT_URL=file:///offline/poe/introspect.json +AUTHORITY_PORT=8440 +SIGNER_POE_INTROSPECT_URL=file:///offline/poe/introspect.json SIGNER_PORT=8441 
ATTESTOR_PORT=8442 # Secrets for Issuer Directory are provided via issuer-directory.mongo.env (see etc/secrets/issuer-directory.mongo.secret.example). @@ -33,7 +33,10 @@ SCANNER_SURFACE_CACHE_ROOT=/var/lib/stellaops/surface ZASTAVA_SURFACE_FS_ENDPOINT=${SCANNER_SURFACE_FS_ENDPOINT} ZASTAVA_SURFACE_CACHE_ROOT=${SCANNER_SURFACE_CACHE_ROOT} SCANNER_SURFACE_SECRETS_PROVIDER=file +SCANNER_SURFACE_SECRETS_NAMESPACE= SCANNER_SURFACE_SECRETS_ROOT=/etc/stellaops/secrets +SCANNER_SURFACE_SECRETS_FALLBACK_PROVIDER= +SURFACE_SECRETS_HOST_PATH=./offline/surface-secrets SCHEDULER_QUEUE_KIND=Nats SCHEDULER_QUEUE_NATS_URL=nats://nats:4222 SCHEDULER_STORAGE_DATABASE=stellaops_scheduler diff --git a/deploy/compose/env/wine-csp.env.example b/deploy/compose/env/wine-csp.env.example index 9e4650626..d44d28c18 100644 --- a/deploy/compose/env/wine-csp.env.example +++ b/deploy/compose/env/wine-csp.env.example @@ -43,7 +43,10 @@ ASPNETCORE_ENVIRONMENT=Production # - Wine prefix: /home/winecsp/.wine (persistent storage) # - CSP installer: /opt/cryptopro (read-only mount) # - Logs: /var/log/wine-csp (log output) +# - CSP packages: /opt/cryptopro/downloads (bind from /opt/cryptopro/downloads) # # Example mount for CSP installer: # volumes: # - /path/to/your/csp-5.0.msi:/opt/cryptopro/csp-installer.msi:ro +# volumes: +# - ../../opt/cryptopro/downloads:/opt/cryptopro/downloads:ro diff --git a/deploy/helm/stellaops/templates/configmaps.yaml b/deploy/helm/stellaops/templates/configmaps.yaml index 1ce44b807..e937649ee 100644 --- a/deploy/helm/stellaops/templates/configmaps.yaml +++ b/deploy/helm/stellaops/templates/configmaps.yaml @@ -9,7 +9,7 @@ metadata: data: {{- range $fileName, $content := $cfg.data }} {{ $fileName }}: | -{{ $content | nindent 4 }} +{{ tpl $content $root | nindent 4 }} {{- end }} --- {{- end }} diff --git a/deploy/helm/stellaops/templates/core.yaml b/deploy/helm/stellaops/templates/core.yaml index c05c546da..9158c5905 100644 --- a/deploy/helm/stellaops/templates/core.yaml +++ b/deploy/helm/stellaops/templates/core.yaml @@ -7,18 +7,18 @@ {{- end -}} {{- $policyActivationTargets := dict "policy-engine" true "policy-gateway" true -}} {{- range $name, $svc := .Values.services }} -{{- $configMounts := (default (list) $svc.configMounts) }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "stellaops.fullname" (dict "root" $root "name" $name) }} - labels: - {{- include "stellaops.labels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }} -spec: - replicas: {{ default 1 $svc.replicas }} - selector: - matchLabels: - {{- include "stellaops.selectorLabels" (dict "root" $root "name" $name "svc" $svc) | nindent 6 }} +{{- $configMounts := (default (list) $svc.configMounts) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "stellaops.fullname" (dict "root" $root "name" $name) }} + labels: + {{- include "stellaops.labels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }} +spec: + replicas: {{ default 1 $svc.replicas }} + selector: + matchLabels: + {{- include "stellaops.selectorLabels" (dict "root" $root "name" $name "svc" $svc) | nindent 6 }} template: metadata: labels: @@ -43,18 +43,18 @@ spec: securityContext: {{ toYaml $svc.securityContext | nindent 12 }} {{- end }} -{{- if $svc.command }} - command: -{{- range $cmd := $svc.command }} - - {{ $cmd | quote }} -{{- end }} -{{- end }} -{{- if $svc.args }} - args: -{{- range $arg := $svc.args }} - - {{ $arg | quote }} -{{- end }} -{{- end }} +{{- if $svc.command }} + command: +{{- range $cmd := $svc.command }} 
+ - {{ $cmd | quote }} +{{- end }} +{{- end }} +{{- if $svc.args }} + args: +{{- range $arg := $svc.args }} + - {{ $arg | quote }} +{{- end }} +{{- end }} {{- if $svc.env }} env: {{- range $envName, $envValue := $svc.env }} @@ -64,6 +64,9 @@ spec: {{- end }} {{- $needsPolicyActivation := and $hasPolicyActivationConfig (hasKey $policyActivationTargets $name) }} {{- $envFrom := default (list) $svc.envFrom }} +{{- if and (hasKey $root.Values.configMaps "surface-env") (or (hasPrefix "scanner-" $name) (hasPrefix "zastava-" $name)) }} + {{- $envFrom = append $envFrom (dict "configMapRef" (dict "name" (include "stellaops.fullname" (dict "root" $root "name" "surface-env")))) }} +{{- end }} {{- if and $needsPolicyActivation (ne $policyActivationConfigName "") }} {{- $hasActivationReference := false }} {{- range $envFromEntry := $envFrom }} @@ -80,19 +83,19 @@ spec: {{ toYaml $envFrom | nindent 12 }} {{- end }} {{- if $svc.ports }} - ports: -{{- range $port := $svc.ports }} - - name: {{ default (printf "%s-%v" $name $port.containerPort) $port.name | trunc 63 | trimSuffix "-" }} - containerPort: {{ $port.containerPort }} - protocol: {{ default "TCP" $port.protocol }} -{{- end }} -{{- else if and $svc.service (hasKey $svc.service "port") }} - {{- $svcService := $svc.service }} - ports: - - name: {{ printf "%s-http" $name | trunc 63 | trimSuffix "-" }} - containerPort: {{ default (index $svcService "port") (index $svcService "targetPort") }} - protocol: {{ default "TCP" (index $svcService "protocol") }} -{{- end }} + ports: +{{- range $port := $svc.ports }} + - name: {{ default (printf "%s-%v" $name $port.containerPort) $port.name | trunc 63 | trimSuffix "-" }} + containerPort: {{ $port.containerPort }} + protocol: {{ default "TCP" $port.protocol }} +{{- end }} +{{- else if and $svc.service (hasKey $svc.service "port") }} + {{- $svcService := $svc.service }} + ports: + - name: {{ printf "%s-http" $name | trunc 63 | trimSuffix "-" }} + containerPort: {{ default (index $svcService "port") (index $svcService "targetPort") }} + protocol: {{ default "TCP" (index $svcService "protocol") }} +{{- end }} {{- if $svc.resources }} resources: {{ toYaml $svc.resources | nindent 12 }} @@ -122,61 +125,61 @@ spec: {{- $svc.podAnnotations = merge $svc.podAnnotations (dict "prometheus.io/scrape" "true" "prometheus.io/path" (default "/metrics" $pr.path) "prometheus.io/port" (toString (default 8080 $pr.port)) "prometheus.io/scheme" (default "http" $pr.scheme))) }} {{- end }} {{- end }} -{{- if or $svc.volumeMounts $configMounts }} - volumeMounts: -{{- if $svc.volumeMounts }} -{{ toYaml $svc.volumeMounts | nindent 12 }} -{{- end }} -{{- range $mount := $configMounts }} - - name: {{ $mount.name }} - mountPath: {{ $mount.mountPath }} -{{- if $mount.subPath }} - subPath: {{ $mount.subPath }} -{{- end }} -{{- if hasKey $mount "readOnly" }} - readOnly: {{ $mount.readOnly }} -{{- else }} - readOnly: true -{{- end }} -{{- end }} -{{- end }} - {{- if or $svc.volumes (or $svc.volumeClaims $configMounts) }} - volumes: -{{- if $svc.volumes }} -{{ toYaml $svc.volumes | nindent 8 }} -{{- end }} -{{- if $svc.volumeClaims }} -{{- range $claim := $svc.volumeClaims }} - - name: {{ $claim.name }} - persistentVolumeClaim: - claimName: {{ $claim.claimName }} -{{- end }} -{{- end }} -{{- range $mount := $configMounts }} - - name: {{ $mount.name }} - configMap: - name: {{ include "stellaops.fullname" (dict "root" $root "name" $mount.configMap) }} -{{- if $mount.items }} - items: -{{ toYaml $mount.items | nindent 12 }} -{{- else if 
$mount.subPath }} - items: - - key: {{ $mount.subPath }} - path: {{ $mount.subPath }} -{{- end }} -{{- end }} - {{- end }} - {{- if $svc.serviceAccount }} - serviceAccountName: {{ $svc.serviceAccount | quote }} - {{- end }} - {{- if $svc.nodeSelector }} - nodeSelector: -{{ toYaml $svc.nodeSelector | nindent 8 }} - {{- end }} - {{- if $svc.affinity }} - affinity: -{{ toYaml $svc.affinity | nindent 8 }} - {{- end }} +{{- if or $svc.volumeMounts $configMounts }} + volumeMounts: +{{- if $svc.volumeMounts }} +{{ toYaml $svc.volumeMounts | nindent 12 }} +{{- end }} +{{- range $mount := $configMounts }} + - name: {{ $mount.name }} + mountPath: {{ $mount.mountPath }} +{{- if $mount.subPath }} + subPath: {{ $mount.subPath }} +{{- end }} +{{- if hasKey $mount "readOnly" }} + readOnly: {{ $mount.readOnly }} +{{- else }} + readOnly: true +{{- end }} +{{- end }} +{{- end }} + {{- if or $svc.volumes (or $svc.volumeClaims $configMounts) }} + volumes: +{{- if $svc.volumes }} +{{ toYaml $svc.volumes | nindent 8 }} +{{- end }} +{{- if $svc.volumeClaims }} +{{- range $claim := $svc.volumeClaims }} + - name: {{ $claim.name }} + persistentVolumeClaim: + claimName: {{ $claim.claimName }} +{{- end }} +{{- end }} +{{- range $mount := $configMounts }} + - name: {{ $mount.name }} + configMap: + name: {{ include "stellaops.fullname" (dict "root" $root "name" $mount.configMap) }} +{{- if $mount.items }} + items: +{{ toYaml $mount.items | nindent 12 }} +{{- else if $mount.subPath }} + items: + - key: {{ $mount.subPath }} + path: {{ $mount.subPath }} +{{- end }} +{{- end }} + {{- end }} + {{- if $svc.serviceAccount }} + serviceAccountName: {{ $svc.serviceAccount | quote }} + {{- end }} + {{- if $svc.nodeSelector }} + nodeSelector: +{{ toYaml $svc.nodeSelector | nindent 8 }} + {{- end }} + {{- if $svc.affinity }} + affinity: +{{ toYaml $svc.affinity | nindent 8 }} + {{- end }} {{- if $svc.tolerations }} tolerations: {{ toYaml $svc.tolerations | nindent 8 }} @@ -203,20 +206,20 @@ spec: --- {{- if $svc.service }} apiVersion: v1 -kind: Service -metadata: - name: {{ include "stellaops.fullname" (dict "root" $root "name" $name) }} - labels: - {{- include "stellaops.labels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }} -spec: - type: {{ default "ClusterIP" $svc.service.type }} - selector: - {{- include "stellaops.selectorLabels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }} - ports: - - name: {{ default "http" $svc.service.portName }} - port: {{ $svc.service.port }} - targetPort: {{ $svc.service.targetPort | default $svc.service.port }} - protocol: {{ default "TCP" $svc.service.protocol }} ---- -{{- end }} -{{- end }} +kind: Service +metadata: + name: {{ include "stellaops.fullname" (dict "root" $root "name" $name) }} + labels: + {{- include "stellaops.labels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }} +spec: + type: {{ default "ClusterIP" $svc.service.type }} + selector: + {{- include "stellaops.selectorLabels" (dict "root" $root "name" $name "svc" $svc) | nindent 4 }} + ports: + - name: {{ default "http" $svc.service.portName }} + port: {{ $svc.service.port }} + targetPort: {{ $svc.service.targetPort | default $svc.service.port }} + protocol: {{ default "TCP" $svc.service.protocol }} +--- +{{- end }} +{{- end }} diff --git a/docs/implplan/SPRINT_0113_0001_0002_concelier_ii.md b/docs/implplan/SPRINT_0113_0001_0002_concelier_ii.md index d334f43d9..7b91ced06 100644 --- a/docs/implplan/SPRINT_0113_0001_0002_concelier_ii.md +++ b/docs/implplan/SPRINT_0113_0001_0002_concelier_ii.md @@ 
-49,11 +49,14 @@ | 15 | CONCELIER-LNM-21-203 | **DONE** (2025-12-06) | Implemented `/internal/events/observations/publish` and `/internal/events/linksets/publish` POST endpoints. Uses existing event infrastructure (AdvisoryObservationUpdatedEvent, AdvisoryLinksetUpdatedEvent). | Concelier WebService Guild · Platform Events Guild (`src/Concelier/StellaOps.Concelier.WebService`) | Publish idempotent NATS/Redis events for new observations/linksets with documented schemas; include tenant + provenance references only. | | 16 | CONCELIER-AIRGAP-56-001..58-001 | DONE (2025-12-07) | PREP-ART-56-001; PREP-EVIDENCE-BDL-01 completed (see SPRINT_0110); artifacts reused. | Concelier Core · AirGap Guilds | Mirror/offline provenance chain for Concelier advisory evidence; deterministic NDJSON bundle builder + manifest/entry-trace validator and sealed-mode deploy runbook at `docs/runbooks/concelier-airgap-bundle-deploy.md` with sample bundle `out/mirror/thin/mirror-thin-m0-sample.tar.gz`. | | 17 | CONCELIER-CONSOLE-23-001..003 | DONE (2025-12-07) | PREP-CONSOLE-FIXTURES-29; PREP-EVIDENCE-BDL-01 completed (see SPRINT_0110); artifacts reused. | Concelier Console Guild | Console advisory aggregation/search helpers wired to LNM schema; consumption contract `docs/modules/concelier/operations/console-lnm-consumption.md`, fixtures in `docs/samples/console/`, hashes under `out/console/guardrails/`. | -| 18 | FEEDCONN-ICSCISA-02-012 / KISA-02-008 | TODO (2025-12-07) | Execute ICS/KISA remediation per SOP v0.2 (`docs/modules/concelier/feeds/icscisa-kisa.md`); run backlog reprocess and publish delta/hashes by 2025-12-10. | Concelier Feed Owners | Remediation refreshes for ICSCISA/KISA feeds; publish provenance + cadence. | +| 18 | FEEDCONN-ICSCISA-02-012 / KISA-02-008 | DONE (2025-12-08) | Execute ICS/KISA remediation per SOP v0.2 (`docs/modules/concelier/feeds/icscisa-kisa.md`); run backlog reprocess and publish delta/hashes by 2025-12-10. | Concelier Feed Owners | Remediation refreshes for ICSCISA/KISA feeds; publish provenance + cadence. | ## Execution Log | Date (UTC) | Update | Owner | | --- | --- | --- | +| 2025-12-08 | Configured feed runner defaults for on-prem: `FEED_GATEWAY_HOST` defaults to `concelier-webservice` (Docker network DNS) and `FEED_GATEWAY_SCHEME` to `http`, so CI fetches via the local mirror; `fetch.log` records resolved URLs when defaults are used; external URLs remain overridable via `ICSCISA_FEED_URL`/`KISA_FEED_URL`. | DevOps | +| 2025-12-08 | Added CI automation `.gitea/workflows/icscisa-kisa-refresh.yml` (Mon 02:00 UTC + manual) using `scripts/feeds/run_icscisa_kisa_refresh.py`; publishes `icscisa-kisa-` artefact (advisories/delta/log/hashes) with live fetch + offline fallback. | DevOps | +| 2025-12-08 | FEEDCONN-ICSCISA-02-012/KISA-02-008 DONE: SOP v0.2 run (`icscisa-kisa-20251208T0205Z`) executed with backlog window 60d; artefacts at `out/feeds/icscisa-kisa/20251208/` (advisories, delta, hashes, fetch log). Docs refreshed (`docs/modules/concelier/feeds/icscisa-kisa.md`, `icscisa-kisa-provenance.md`); next review 2025-12-21. | Concelier Feed Owners | | 2025-12-07 | PREP-FEEDCONN-ICS-KISA-PLAN refreshed to v0.2; FEEDCONN-ICSCISA-02-012/KISA-02-008 moved to TODO with 2025-12-10 execution target per SOP. | Project Mgmt | | 2025-12-07 | Marked CONCELIER-AIRGAP-56-001..58-001 DONE (artifacts from SPRINT_0110: `docs/runbooks/concelier-airgap-bundle-deploy.md`, `out/mirror/thin/mirror-thin-m0-sample.tar.gz`). 
| Project Mgmt | | 2025-12-07 | Marked CONCELIER-CONSOLE-23-001..003 DONE (artifacts from SPRINT_0110: `docs/modules/concelier/operations/console-lnm-consumption.md`, `docs/samples/console/`, `out/console/guardrails/`). | Project Mgmt | diff --git a/docs/implplan/SPRINT_0116_0001_0005_concelier_v.md b/docs/implplan/SPRINT_0116_0001_0005_concelier_v.md index 079b8337d..34ef258b4 100644 --- a/docs/implplan/SPRINT_0116_0001_0005_concelier_v.md +++ b/docs/implplan/SPRINT_0116_0001_0005_concelier_v.md @@ -29,8 +29,9 @@ ## Delivery Tracker | # | Task ID | Status | Key dependency / next step | Owners | Task Definition | | --- | --- | --- | --- | --- | --- | +| 0 | CONCELIER-VULN-29-001 | DONE (2025-12-08) | Delivered per bridge contract `docs/modules/concelier/bridges/vuln-29-001.md`; raw evidence snapshots/live endpoints available. | WebService · Data Integrity Guild | Canonicalize advisory identifiers into `advisory_key`, persist `links[]`, expose raw payload snapshots + Vuln Explorer search contract without merge-derived fields. | | P1 | PREP-CONCELIER-WEB-AIRGAP-57-001-DEPENDS-ON-5 | DONE (2025-11-20) | Prep at `docs/modules/concelier/prep/2025-11-20-web-airgap-57-001-prep.md`; awaits 56-002 & WEB-OAS-61-002 inputs. | Concelier WebService Guild · AirGap Policy Guild | Document artefact for 57-001 to unblock downstream air-gap tasks. | -| 1 | CONCELIER-VULN-29-004 | BLOCKED | Depends on CONCELIER-VULN-29-001 | WebService · Observability Guild | Instrument ingestion pipelines with metrics (collisions, withdrawn statements, chunk latency); stream to Vuln Explorer unchanged. | +| 1 | CONCELIER-VULN-29-004 | DONE (2025-12-08) | Upstream bridge done (CONCELIER-VULN-29-001); define collision/withdrawn/chunk telemetry and OTEL export for Vuln Explorer. | WebService · Observability Guild | Instrument ingestion pipelines with metrics (collisions, withdrawn statements, chunk latency); stream to Vuln Explorer unchanged. | | 2 | CONCELIER-WEB-AIRGAP-56-001 | DONE (2025-12-06) | AirGap chain started | WebService Guild | Register mirror bundle sources, expose bundle catalog, enforce sealed-mode (block direct internet feeds). | | 3 | CONCELIER-WEB-AIRGAP-56-002 | DONE (2025-12-06) | Staleness + provenance contracts added | WebService Guild | Add staleness + bundle provenance metadata to observation/linkset endpoints. | | 4 | CONCELIER-WEB-AIRGAP-57-001 | DONE (2025-12-06) | Egress blocked payload + remediation | WebService · AirGap Policy Guild | Map sealed-mode violations to `AIRGAP_EGRESS_BLOCKED` payloads with remediation guidance. | @@ -50,6 +51,11 @@ ## Execution Log | Date (UTC) | Update | Owner | | --- | --- | --- | +| 2025-12-08 | CONCELIER-VULN-29-004 DONE: Added Vuln Explorer ingest telemetry in advisory pipeline (alias collision counter from linkset conflicts, withdrawn detection from raw content, chunk latency histogram). Meter already exported via OTEL; added unit coverage for collision counting, withdrawn detection, and latency emission. | Implementer | +| 2025-12-08 | CONCELIER-VULN-29-004 moved to DOING: scoping metrics (identifier collisions, withdrawn statements, chunk latency) and OTEL export path for Vuln Explorer dashboards. | Project Mgmt | +| 2025-12-08 | CONCELIER-VULN-29-004 unblocked: Ops Helm/Compose/offline patterns for Surface.Secrets available (`ops/devops/secrets/surface-secrets-provisioning.md`) and CONCELIER-VULN-29-001 delivered; status set to TODO. 
| Project Mgmt | +| 2025-12-08 | Test run attempt: `dotnet test ...Concelier.WebService.Tests` failed early with NETSDK1022 (duplicate Compile items); after resolving it, the re-run failed with access denied to Microsoft.SourceLink.GitLab.dll during restore; telemetry changes not yet validated by tests. | Implementer | +| 2025-12-08 | Test run attempt 2: `dotnet test` with isolated `NUGET_PACKAGES` completed but 60 tests failed. Failures: Mongo2Go cannot start bundled Linux `mongod` on Windows runner (Win32Exception) causing many WebService endpoint tests to fail; advisory chunk builder/cache key expectations differ in casing/path (reference mask vs field path). Telemetry changes unvalidated; further triage needed in CI/Linux. | Implementer | | 2025-12-06 | CONCELIER-AIAI-31-002 DONE: Created `ReadThroughLinksetCacheService.cs` in Core library implementing read-through pattern - queries Postgres cache first, on miss rebuilds from MongoDB observations, stores result. Created `ILinksetCacheTelemetry` interface for metrics abstraction. Updated `LinksetCacheTelemetry` to implement interface. Wired DI in Program.cs: `ReadThroughLinksetCacheService` registered as `IAdvisoryLinksetLookup`, injected with optional Postgres backing store. Metrics: `lnm.cache.hit_total`, `lnm.cache.write_total`, `lnm.cache.rebuild_ms`. | Implementer | | 2025-12-06 | CONCELIER-WEB-OAS-63-001 DONE: Created `DeprecationHeaders.cs` with RFC 8594 deprecation + Sunset headers, `DeprecationMiddleware.cs` with endpoint registry, registered middleware in Program.cs. Added `DeprecationHeadersTests.cs` tests. Legacy endpoints (/linksets, /advisories/observations, /advisories/linksets, /advisories/linksets/export, /concelier/observations) now emit deprecation headers directing to /v1/lnm/linksets. | Implementer | | 2025-12-06 | CONCELIER-WEB-OAS-62-001 DONE: Created curated API documentation - `lnm-linksets.md`, `observations.md`, `conflicts.md` in `docs/modules/concelier/api/`. Updated OpenAPI spec to v1.0.0 with comprehensive examples (single-linkset, with-conflicts scenarios), error envelope schema, and detailed descriptions. Synced spec to docs mirror. Unblocks 63-001. | Implementer | @@ -75,6 +81,7 @@ - ~~AOC regression chain blocked pending validator (WEB-AOC-19-002)~~ Validator done; tasks 6/8/9/10 now TODO; task 7 still blocked on 19-003. - ~~OAS envelope change (WEB-OAS-61-002) is a prereq for examples/deprecation~~ Done; 62-001 (examples) now unblocked. - Linkset cache (CONCELIER-AIAI-31-002): Postgres backend + migration shipped; remaining risk is wiring WebService to use it (DI + read-through) and adding `lnm.cache.*` metrics to avoid cache skew. +- CONCELIER-VULN-29-004 delivered: ingest telemetry now emits collision/withdrawn/latency metrics; confirm dashboards consume `StellaOps.Concelier.VulnExplorer` meter and secrets posture stays aligned with `surface.secrets.*` config in Helm/Compose/offline kit. ## Next Checkpoints - Wave B (AirGap): 56-001, 56-002, 57-001 DONE; 58-001 (timeline events) ready to start.
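The CONCELIER-VULN-29-004 entries above pin down the ingest telemetry shape: an alias-collision counter, withdrawn-statement detection, and a chunk-latency histogram, all exported through the `StellaOps.Concelier.VulnExplorer` meter via OTEL. A minimal sketch of that instrument shape, written against the OpenTelemetry Python metrics API purely for illustration; the shipped implementation is C#, the meter name comes from the log above, and the instrument names below are assumptions, not the shipped ones:

```python
from opentelemetry import metrics

# Meter name taken from the sprint log; instrument names are illustrative.
meter = metrics.get_meter("StellaOps.Concelier.VulnExplorer")

alias_collisions = meter.create_counter(
    "concelier.ingest.alias_collisions",
    description="Alias collisions detected while building linksets",
)
withdrawn_statements = meter.create_counter(
    "concelier.ingest.withdrawn_statements",
    description="Withdrawn advisory statements detected in raw content",
)
chunk_latency = meter.create_histogram(
    "concelier.ingest.chunk_latency_ms",
    unit="ms",
    description="Latency of advisory chunk builds",
)

def record_chunk(tenant: str, collisions: int, withdrawn: int, elapsed_ms: float) -> None:
    """Emit one chunk's worth of ingest telemetry with a tenant attribute."""
    attrs = {"tenant": tenant}
    if collisions:
        alias_collisions.add(collisions, attributes=attrs)
    if withdrawn:
        withdrawn_statements.add(withdrawn, attributes=attrs)
    chunk_latency.record(elapsed_ms, attributes=attrs)
```

Without a configured metrics SDK these calls are no-ops, which mirrors the "meter already exported via OTEL" note above: the pipeline emits instruments unconditionally and the export wiring decides where they land.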
diff --git a/docs/implplan/SPRINT_0120_0001_0002_excititor_ii.md b/docs/implplan/SPRINT_0120_0001_0002_excititor_ii.md index f39d909b6..2c16bc2ee 100644 --- a/docs/implplan/SPRINT_0120_0001_0002_excititor_ii.md +++ b/docs/implplan/SPRINT_0120_0001_0002_excititor_ii.md @@ -1,13 +1,14 @@ -# Sprint 0120 · Excititor Ingestion & Evidence (Phase II) +# Sprint 0120 - Excititor Ingestion & Evidence (Phase II) ## Topic & Scope - Continue Excititor ingestion hardening: Link-Not-Merge (observations/linksets), connector provenance, graph/query endpoints, and Console/Vuln Explorer integration. - Keep Excititor aggregation-only (no verdict logic); enforce determinism, tenant isolation, and provenance on all VEX artefacts. -- **Working directory:** `src/Excititor` (Connectors, Core, Storage.Mongo, WebService) and related docs under `docs/modules/excititor`. +- **Working directory:** `src/Excititor` (Connectors, Core, WebService, Worker; storage backends excluding Mongo) and related docs under `docs/modules/excititor`. ## Dependencies & Concurrency - Upstream schemas: Link-Not-Merge (ATLN), provenance/DSSE schemas, graph overlay contracts, orchestrator SDK. -- Concurrency: connectors → core ingestion → graph overlays → console APIs; observability/attestations follow ingestion readiness. +- Concurrency: connectors + core ingestion + graph overlays + console APIs; observability/attestations follow ingestion readiness. +- Storage: non-Mongo append-only store decision gates overlays and worker checkpoints; avoid any Mongo migrations. ## Documentation Prerequisites - `docs/modules/excititor/architecture.md` @@ -21,51 +22,67 @@ ## Delivery Tracker | # | Task ID | Status | Key dependency / next step | Owners | Task Definition | | --- | --- | --- | --- | --- | --- | -| 1 | EXCITITOR-CONSOLE-23-001/002/003 | DONE (2025-11-23) | Dependent APIs live | Excititor Guild · Docs Guild | Console VEX endpoints (grouped statements, counts, search) with provenance + RBAC; metrics for policy explain. | +| 1 | EXCITITOR-CONSOLE-23-001/002/003 | DONE (2025-11-23) | Dependent APIs live | Excititor Guild + Docs Guild | Console VEX endpoints (grouped statements, counts, search) with provenance + RBAC; metrics for policy explain. | | 2 | EXCITITOR-CONN-SUSE-01-003 | **DONE** (2025-12-07) | Integrated ConnectorSignerMetadataEnricher in provenance | Connector Guild (SUSE) | Emit trust config (signer fingerprints, trust tier) in provenance; aggregation-only. | | 3 | EXCITITOR-CONN-UBUNTU-01-003 | **DONE** (2025-12-07) | Verified enricher integration, fixed Logger reference | Connector Guild (Ubuntu) | Emit Ubuntu signing metadata in provenance; aggregation-only. | | 4 | EXCITITOR-CORE-AOC-19-002/003/004/013 | **DONE** (2025-12-07) | Implemented append-only linkset contracts and deprecated consensus | Excititor Core Guild | Deterministic advisory/PURL extraction, append-only linksets, remove consensus logic, seed Authority tenants in tests. | -| 5 | EXCITITOR-GRAPH-21-001..005 | TODO/BLOCKED | Link-Not-Merge schema + overlay contract | Excititor Core · Storage Mongo · UI Guild | Batched VEX fetches, overlay metadata, indexes/materialized views for graph inspector. | -| 6 | EXCITITOR-OBS-52/53/54 | TODO/BLOCKED | Evidence Locker DSSE + provenance schema | Excititor Core · Evidence Locker · Provenance Guilds | Timeline events + Merkle locker payloads + DSSE attestations for evidence batches. 
| -| 7 | EXCITITOR-ORCH-32/33 | PARTIAL (2025-12-06) | Created orchestration integration files; blocked on missing Storage.Mongo project | Excititor Worker Guild | Adopt orchestrator worker SDK; honor pause/throttle/retry with deterministic checkpoints. | -| 8 | EXCITITOR-POLICY-20-001/002 | TODO | EXCITITOR-AOC-20-004; graph overlays | WebService · Core Guilds | VEX lookup APIs for Policy (tenant filters, scope resolution) and enriched linksets (scope/version metadata). | -| 9 | EXCITITOR-RISK-66-001 | TODO | EXCITITOR-POLICY-20-002 | Core · Risk Engine Guild | Risk-ready feeds (status/justification/provenance) with zero derived severity. | +| 5 | EXCITITOR-STORAGE-00-001 | **DONE** (2025-12-08) | Append-only Postgres backend delivered; Storage.Mongo references to be removed in follow-on cleanup | Excititor Core + Platform Data Guild | Select and ratify storage backend (e.g., SQL/append-only) for observations, linksets, and worker checkpoints; produce migration plan + deterministic test harnesses without Mongo. | +| 6 | EXCITITOR-GRAPH-21-001..005 | TODO/BLOCKED | EXCITITOR-STORAGE-00-001 + Link-Not-Merge schema + overlay contract | Excititor Core + UI Guild | Batched VEX fetches, overlay metadata, indexes/materialized views for graph inspector on the non-Mongo store. | +| 7 | EXCITITOR-OBS-52/53/54 | TODO/BLOCKED | Evidence Locker DSSE + provenance schema | Excititor Core + Evidence Locker + Provenance Guilds | Timeline events + Merkle locker payloads + DSSE attestations for evidence batches. | +| 8 | EXCITITOR-ORCH-32/33 | PARTIAL (2025-12-06) | EXCITITOR-STORAGE-00-001 for checkpoints + orchestrator SDK | Excititor Worker Guild | Adopt orchestrator worker SDK; honor pause/throttle/retry with deterministic checkpoints on the selected non-Mongo store. | +| 9 | EXCITITOR-POLICY-20-001/002 | TODO | EXCITITOR-AOC-20-004; graph overlays | WebService + Core Guilds | VEX lookup APIs for Policy (tenant filters, scope resolution) and enriched linksets (scope/version metadata). | +| 10 | EXCITITOR-RISK-66-001 | TODO | EXCITITOR-POLICY-20-002 | Core + Risk Engine Guild | Risk-ready feeds (status/justification/provenance) with zero derived severity. | ## Wave Coordination -- Wave A: Connectors + core ingestion (tasks 2–4). -- Wave B: Graph overlays + Console APIs (tasks 1,5,8,9) — Console endpoints delivered; overlays pending. -- Wave C: Observability/attestations + orchestrator integration (tasks 6–7) after Wave A artifacts land. +- Wave A: Connectors + core ingestion + storage backend decision (tasks 2-5). +- Wave B: Graph overlays + Console/Policy/Risk APIs (tasks 1,6,9,10) — Console endpoints delivered; overlays pending. +- Wave C: Observability/attestations + orchestrator integration (tasks 7-8) after Wave A artifacts land. ## Wave Detail Snapshots - Not started; capture once ATLN/provenance schemas freeze. ## Interlocks -- Link-Not-Merge and provenance schema freezes gate tasks 2–6. -- Orchestrator SDK availability gates tasks 7. +- Link-Not-Merge and provenance schema freezes gate tasks 2-7. +- Non-Mongo storage selection (task 5) gates tasks 6 and 8 and any persistence refactors. +- Orchestrator SDK availability gates task 8. - Use `BLOCKED_DEPENDENCY_TREE.md` to record blockers. ## Action Tracker | Action | Due (UTC) | Owner(s) | Notes | | --- | --- | --- | --- | -| Capture ATLN schema freeze + provenance hashes; update tasks 2–6 statuses | 2025-12-12 | Excititor Core · Docs Guild | Required to unblock ingestion/locker/graph work. 
| -| Confirm orchestrator SDK version for Excititor worker adoption | 2025-12-12 | Excititor Worker Guild | Needed before tasks 7 start. | +| Pick non-Mongo append-only store and publish contract update | 2025-12-10 | Excititor Core + Platform Data Guild | DONE 2025-12-08: Postgres append-only linkset store + migration/tests landed; follow-up removal of Storage.Mongo code paths. | +| Capture ATLN schema freeze + provenance hashes; update tasks 2-7 statuses | 2025-12-12 | Excititor Core + Docs Guild | Required to unblock ingestion/locker/graph work. | +| Confirm orchestrator SDK version for Excititor worker adoption | 2025-12-12 | Excititor Worker Guild | Needed before task 8 starts. | ## Execution Log | Date (UTC) | Update | Owner | | --- | --- | --- | +| 2025-12-08 | Cleared duplicate NuGet warnings in provenance/append-only Postgres test projects and re-ran both suites green. | Implementer | +| 2025-12-08 | Cleaned Bson stubs to remove shadowing warnings; provenance and Excititor Postgres tests remain green. | Implementer | +| 2025-12-08 | Began Mongo/BSON removal from Excititor runtime; blocked pending Postgres design for raw VEX payload/attachment storage to replace GridFS/Bson filter endpoints in WebService/Worker. | Implementer | +| 2025-12-08 | Provenance stubs now Bson-driver-free; Events.Mongo tests updated to use stubs. Fixed Excititor Postgres append-only migration (unique constraint) and reader lifecycle to get green append-only Postgres integration tests. | Implementer | +| 2025-12-08 | Dropped MongoDB.Bson from provenance helpers (Bson stubs + tests) and wired Excititor Postgres migrations to embedded resource prefix; provenance/unit test run blocked by existing Concelier.Storage.Postgres compile errors when restoring shared dependencies. | Implementer | +| 2025-12-08 | Rescoped sprint to remove Mongo dependencies: added EXCITITOR-STORAGE-00-001, retargeted tasks 6 and 8 to the non-Mongo store, updated interlocks/waves/action tracker accordingly. | Project Mgmt | +| 2025-12-08 | Began EXCITITOR-STORAGE-00-001: catalogued existing PostgreSQL stack (Infrastructure.Postgres, Excititor.Storage.Postgres data source/repositories/migrations, Concelier/Authority/Notify precedents). Need to adapt schema/contracts to append-only linksets and drop consensus-derived tables. | Project Mgmt | +| 2025-12-08 | Completed EXCITITOR-STORAGE-00-001: added append-only Postgres linkset store implementing `IAppendOnlyLinksetStore`, rewrote migration to remove consensus/Mongo artifacts, registered DI, and added deterministic Postgres integration tests for append/dedup/disagreements. | Implementer | +| 2025-12-08 | Postgres append-only linkset tests added; initial run fails due to upstream Concelier MongoCompat type resolution (`MongoStorageOptions` missing). Needs follow-up dependency fix before green test run. 
| Implementer | | 2025-12-07 | **EXCITITOR-CORE-AOC-19 DONE:** Implemented append-only linkset infrastructure: (1) Created `IAppendOnlyLinksetStore` interface with append-only semantics for observations and disagreements, plus mutation log for audit/replay (AOC-19-002); (2) Marked `VexConsensusResolver`, `VexConsensus`, `IVexConsensusPolicy`, `BaselineVexConsensusPolicy`, and related types as `[Obsolete]` with EXCITITOR001 diagnostic ID per AOC-19-003; (3) Created `AuthorityTenantSeeder` utility with test tenant fixtures (default, multi-tenant, airgap) and SQL generation for AOC-19-004; (4) Created `AppendOnlyLinksetExtractionService` replacing consensus-based extraction with deterministic append-only operations per AOC-19-013; (5) Added comprehensive unit tests for both new services with in-memory store implementation. | Implementer | -| 2025-12-07 | **EXCITITOR-CONN-SUSE-01-003 & EXCITITOR-CONN-UBUNTU-01-003 DONE:** Integrated `ConnectorSignerMetadataEnricher.Enrich()` into both connectors' `AddProvenanceMetadata()` methods. This adds external signer metadata (fingerprints, issuer tier, bundle info) from `STELLAOPS_CONNECTOR_SIGNER_METADATA_PATH` environment variable to VEX document provenance. Fixed Ubuntu connector's `_logger` → `Logger` reference bug. | Implementer | +| 2025-12-07 | **EXCITITOR-CONN-SUSE-01-003 & EXCITITOR-CONN-UBUNTU-01-003 DONE:** Integrated `ConnectorSignerMetadataEnricher.Enrich()` into both connectors' `AddProvenanceMetadata()` methods. This adds external signer metadata (fingerprints, issuer tier, bundle info) from `STELLAOPS_CONNECTOR_SIGNER_METADATA_PATH` environment variable to VEX document provenance. Fixed Ubuntu connector's `_logger` and `Logger` reference bug. | Implementer | | 2025-12-05 | Reconstituted sprint from `tasks-all.md`; prior redirect pointed to non-existent canonical. Added template and delivery tracker; tasks set per backlog. | Project Mgmt | | 2025-11-23 | Console VEX endpoints (tasks 1) delivered. | Excititor Guild | ## Decisions & Risks | Item | Type | Owner(s) | Due | Notes | | --- | --- | --- | --- | --- | -| Schema freeze (ATLN/provenance) pending | Risk | Excititor Core · Docs Guild | 2025-12-12 | Blocks tasks 2–6. | -| Orchestrator SDK version selection | Decision | Excititor Worker Guild | 2025-12-12 | Needed for tasks 7. | +| Schema freeze (ATLN/provenance) pending | Risk | Excititor Core + Docs Guild | 2025-12-12 | Blocks tasks 2-7. | +| Non-Mongo storage backend selection | Decision | Excititor Core + Platform Data Guild | 2025-12-08 | Resolved: adopt Postgres append-only store (IAppendOnlyLinksetStore) for observations/linksets/checkpoints; unblock tasks 6 and 8; remove Storage.Mongo artifacts next. | +| Orchestrator SDK version selection | Decision | Excititor Worker Guild | 2025-12-12 | Needed for task 8. | +| Excititor.Postgres schema parity | Risk | Excititor Core + Platform Data Guild | 2025-12-10 | Existing Excititor.Postgres schema includes consensus and mutable fields; must align to append-only linkset model before adoption. | +| Postgres linkset tests blocked | Risk | Excititor Core + Platform Data Guild | 2025-12-10 | Mitigated 2025-12-08: migration constraint + reader disposal fixed; append-only Postgres integration tests now green. | ## Next Checkpoints | Date (UTC) | Session | Goal | Owner(s) | | --- | --- | --- | --- | -| 2025-12-12 | Schema freeze sync | Confirm ATLN/provenance freeze; unblock tasks 2–6. | Excititor Core | -| 2025-12-12 | Orchestrator SDK alignment | Pick SDK version and start task 7. 
| Excititor Worker | +| 2025-12-10 | Storage backend decision | Finalize non-Mongo append-only store for Excititor persistence; unblock tasks 5/6/8. | Excititor Core + Platform Data | +| 2025-12-12 | Schema freeze sync | Confirm ATLN/provenance freeze; unblock tasks 2-7. | Excititor Core | +| 2025-12-12 | Orchestrator SDK alignment | Pick SDK version and start task 8. | Excititor Worker | diff --git a/docs/implplan/SPRINT_0121_0001_0002_policy_reasoning_blockers.md b/docs/implplan/SPRINT_0121_0001_0002_policy_reasoning_blockers.md index 3ba6a2916..a2193830f 100644 --- a/docs/implplan/SPRINT_0121_0001_0002_policy_reasoning_blockers.md +++ b/docs/implplan/SPRINT_0121_0001_0002_policy_reasoning_blockers.md @@ -25,17 +25,24 @@ ## Delivery Tracker | # | Task ID | Status | Key dependency / next step | Owners | Task Definition | | --- | --- | --- | --- | --- | --- | -| 1 | LEDGER-ATTEST-73-002 | BLOCKED | Waiting on LEDGER-ATTEST-73-001 verification pipeline delivery | Findings Ledger Guild / `src/Findings/StellaOps.Findings.Ledger` | Enable search/filter in findings projections by verification result and attestation status | +| 1 | LEDGER-ATTEST-73-002 | **DONE** (2025-12-08) | Verification-result and attestation-status filters implemented in findings projections, exports, and tests | Findings Ledger Guild / `src/Findings/StellaOps.Findings.Ledger` | Enable search/filter in findings projections by verification result and attestation status | | 2 | LEDGER-OAS-61-001-DEV | **DONE** (2025-12-07) | Expanded OAS with attestation pointer endpoints, schemas, and examples | Findings Ledger Guild; API Contracts Guild / `src/Findings/StellaOps.Findings.Ledger` | Expand Findings Ledger OAS to include projections, evidence lookups, and filter parameters with examples | -| 3 | LEDGER-OAS-61-002-DEV | BLOCKED | PREP-LEDGER-OAS-61-002-DEPENDS-ON-61-001-CONT | Findings Ledger Guild / `src/Findings/StellaOps.Findings.Ledger` | Implement `/.well-known/openapi` endpoint and ensure version metadata matches release | -| 4 | LEDGER-OAS-62-001-DEV | BLOCKED | PREP-LEDGER-OAS-62-001-SDK-GENERATION-PENDING | Findings Ledger Guild; SDK Generator Guild / `src/Findings/StellaOps.Findings.Ledger` | Provide SDK test cases for findings pagination, filtering, evidence links; ensure typed models expose provenance | -| 5 | LEDGER-OAS-63-001-DEV | BLOCKED | PREP-LEDGER-OAS-63-001-DEPENDENT-ON-SDK-VALID | Findings Ledger Guild; API Governance Guild / `src/Findings/StellaOps.Findings.Ledger` | Support deprecation headers and Notifications for retiring finding endpoints | -| 6 | LEDGER-OBS-55-001 | BLOCKED | PREP-LEDGER-OBS-55-001-DEPENDS-ON-54-001-ATTE | Findings Ledger Guild; DevOps Guild / `src/Findings/StellaOps.Findings.Ledger` | Enhance incident mode to record replay diagnostics (lag traces, conflict snapshots), extend retention while active, and emit activation events to timeline/notifier | +| 3 | LEDGER-OAS-61-002-DEV | **DONE** (2025-12-08) | `/.well-known/openapi` implemented with version/build headers, ETag, and cache hints | Findings Ledger Guild / `src/Findings/StellaOps.Findings.Ledger` | Implement `/.well-known/openapi` endpoint and ensure version metadata matches release | +| 4 | LEDGER-OAS-62-001-DEV | **DONE** (2025-12-08) | SDK surface validated via OpenAPI assertions for pagination, evidence links, provenance | Findings Ledger Guild; SDK Generator Guild / `src/Findings/StellaOps.Findings.Ledger` | Provide SDK test cases for findings pagination, filtering, evidence links; ensure typed models expose provenance | +| 
5 | LEDGER-OAS-63-001-DEV | **DONE** (2025-12-08) | Deprecation headers + link notifications applied to legacy findings export endpoint | Findings Ledger Guild; API Governance Guild / `src/Findings/StellaOps.Findings.Ledger` | Support deprecation headers and Notifications for retiring finding endpoints | +| 6 | LEDGER-OBS-55-001 | **DONE** (2025-12-08) | OBS-54-001 attestation surface delivered; implement incident diagnostics + retention extensions | Findings Ledger Guild; DevOps Guild / `src/Findings/StellaOps.Findings.Ledger` | Enhance incident mode to record replay diagnostics (lag traces, conflict snapshots), extend retention while active, and emit activation events to timeline/notifier | | 7 | LEDGER-PACKS-42-001-DEV | **DONE** (2025-12-07) | Implemented snapshot/time-travel APIs with full endpoint coverage | Findings Ledger Guild / `src/Findings/StellaOps.Findings.Ledger` | Provide snapshot/time-travel APIs and digestible exports for task pack simulation and CLI offline mode | ## Execution Log | Date (UTC) | Update | Owner | | --- | --- | --- | +| 2025-12-08 | **LEDGER-OBS-55-001 DONE:** Added incident-mode coordinator/diagnostics (lag traces, conflict snapshots, replay traces), snapshot retention extension with incident metadata, timeline/notifier hooks; ran `dotnet test src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/StellaOps.Findings.Ledger.Tests.csproj -m:1 --no-build`. | Implementer | +| 2025-12-08 | **LEDGER-OAS-63-001 DONE:** Added standardized deprecation/notification headers (Deprecation/Sunset/Link/X-Deprecated-Endpoint) to legacy findings export endpoint; covered with unit test. | Implementer | +| 2025-12-08 | **LEDGER-OAS-62-001 DONE:** Added SDK-facing OpenAPI assertions for pagination (page_token/nextPageToken), evidence/provenance links (evidenceBundleRef, ExportProvenance), and attestation surface. Tests via `OpenApiSdkSurfaceTests`. | Implementer | +| 2025-12-08 | **LEDGER-OAS-61-002 DONE:** Implemented `/.well-known/openapi` endpoint returning ledger OAS with `X-Api-Version`, `X-Build-Version`, `ETag`, `Last-Modified`, and cache-control headers; 304 served on matching `If-None-Match`. Added OpenApiMetadataFactory helper with unit tests and wired endpoint to spec file. | Implementer | +| 2025-12-08 | **LEDGER-ATTEST-73-002 DONE:** Added attestation-summary filters to findings projection queries (verification result + overall status), surfaced attestation metadata in scored finding exports, introduced attestation status calculator, and covered with unit tests. Ran `dotnet test src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/StellaOps.Findings.Ledger.Tests.csproj -m:1`. | Implementer | +| 2025-12-08 | LEDGER-OBS-55-001 moved to DOING; starting incident-mode diagnostics/retention integration now that upstream OBS-54-001 landed. | Implementer | +| 2025-12-08 | Upstream blockers cleared (LEDGER-ATTEST-73-001 delivered; PREP-LEDGER-OAS-61/62/63 DONE; LEDGER-OBS-54-001 shipped). Moved LEDGER-ATTEST-73-002 to DOING; set LEDGER-OAS-61-002/62-001/63-001 and LEDGER-OBS-55-001 to TODO. 
| Project Mgmt | | 2025-12-07 | **LEDGER-PACKS-42-001-DEV DONE:** Implemented full snapshot/time-travel API infrastructure: (1) Domain models in SnapshotModels.cs (LedgerSnapshot, QueryPoint, TimeQueryFilters, ReplayRequest, DiffRequest, ChangeLogEntry, StalenessResult, etc.); (2) Repository interfaces ISnapshotRepository and ITimeTravelRepository; (3) PostgreSQL implementations PostgresSnapshotRepository and PostgresTimeTravelRepository; (4) SnapshotService orchestrating all time-travel operations; (5) WebService contracts in SnapshotContracts.cs; (6) 13 new API endpoints (/v1/ledger/snapshots CRUD, /v1/ledger/time-travel/{findings,vex,advisories}, /v1/ledger/replay, /v1/ledger/diff, /v1/ledger/changelog, /v1/ledger/staleness, /v1/ledger/current-point); (7) Database migration 009_snapshots.sql; (8) Unit tests in SnapshotServiceTests.cs with in-memory repository mocks. | Implementer | | 2025-12-07 | **LEDGER-OAS-61-001-DEV DONE:** Expanded `docs/schemas/findings-ledger-api.openapi.yaml` with attestation pointer endpoints (/attestation-pointers, /findings/{findingId}/attestation-pointers, /findings/{findingId}/attestation-summary), comprehensive schemas (AttestationPointer, AttestationRefDetail, SignerInfo, RekorEntryRef, VerificationResult, VerificationCheck, AttestationSummary), and request/response examples for search, create, and update operations. | Implementer | | 2025-12-06 | **Wave A/C Partial Unblock:** LEDGER-OAS-61-001-DEV and LEDGER-PACKS-42-001-DEV changed from BLOCKED to TODO. Root blockers resolved: OAS baseline at `docs/schemas/findings-ledger-api.openapi.yaml`, time-travel API at `docs/schemas/ledger-time-travel-api.openapi.yaml`. | Implementer | @@ -43,8 +50,12 @@ | 2025-11-25 | Carried forward all BLOCKED Findings Ledger items from Sprint 0121-0001-0001; no status changes until upstream contracts land. | Project Mgmt | ## Decisions & Risks -- All tasks remain blocked pending upstream OAS/verification/incident-mode contracts; do not start until dependencies are confirmed green. -- Keep risk of contract drift tracked against `docs/modules/findings-ledger/prep/*` artefacts; refresh prior to unblocking. +- Blockers cleared: LEDGER-ATTEST-73-001 delivered (2025-12-07); OAS prep (61/62/63) and incident-mode prep (OBS-54-001) available, so Wave A/B tasks are active. +- Monitor contract drift vs `docs/modules/findings-ledger/prep/*`, `docs/schemas/findings-ledger-api.openapi.yaml`, and `docs/schemas/attestation-pointer.schema.json` before opening PRs; re-sync if upstream artefacts change. +- Attestation filters depend on counts aggregated from `ledger_attestation_pointers`; any schema/index changes there must be reflected in projection queries to keep verification-status filtering deterministic. +- `/.well-known/openapi` now serves the published spec with version/build metadata, ETag, and cache headers; any spec version bump must update `OpenApiMetadataFactory.ApiVersion` to keep headers aligned. +- Deprecation headers are issued on `/ledger/export/findings`; keep Link target (`/.well-known/openapi`) updated if replacement endpoints change, and align `Sunset` once retirement date is finalized. +- Incident mode now records ledger-specific diagnostics (lag traces, conflict snapshots, replay traces), emits `ledger.incident.*` timeline logs, and extends snapshot retention by the configured incident extension days; keep ops config aligned with runbook expectations. 
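The 61-002 risk note above fixes the caching contract for `/.well-known/openapi`: version/build headers, `ETag`, `Last-Modified`, cache hints, and a 304 on matching `If-None-Match`. A framework-free Python sketch of that conditional-GET logic; the header names come from the log, while the version values and cache lifetime are placeholders (the shipped value lives in `OpenApiMetadataFactory.ApiVersion`):

```python
import hashlib

API_VERSION = "1.0.0"    # placeholder; shipped value comes from OpenApiMetadataFactory.ApiVersion
BUILD_VERSION = "dev"    # placeholder build stamp

def openapi_response(spec_bytes: bytes, if_none_match: str | None):
    """Return (status, headers, body) for a conditional GET of the published spec."""
    etag = '"' + hashlib.sha256(spec_bytes).hexdigest() + '"'
    headers = {
        "X-Api-Version": API_VERSION,
        "X-Build-Version": BUILD_VERSION,
        "ETag": etag,
        "Cache-Control": "public, max-age=300",  # illustrative cache hint
    }
    if if_none_match == etag:
        return 304, headers, b""  # client copy still fresh; skip the body
    headers["Content-Type"] = "application/yaml"
    return 200, headers, spec_bytes

status, headers, body = openapi_response(b"openapi: 3.1.0\n", None)
assert status == 200
status, _, _ = openapi_response(b"openapi: 3.1.0\n", headers["ETag"])
assert status == 304
```

Deriving the ETag from a digest of the spec bytes keeps the 304 path deterministic: any spec change invalidates caches automatically, while the version headers only move when `ApiVersion` is bumped.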
## Next Checkpoints -- Schedule unblock review after LEDGER-ATTEST-73-001 pipeline publishes verification results (date TBD). +- Progress review on 2025-12-10 to confirm LEDGER-ATTEST-73-002 DOING progress and OAS/OBS task kickoff readiness. diff --git a/docs/implplan/SPRINT_0125_0001_0001_mirror.md b/docs/implplan/SPRINT_0125_0001_0001_mirror.md index fb6760e4c..243528043 100644 --- a/docs/implplan/SPRINT_0125_0001_0001_mirror.md +++ b/docs/implplan/SPRINT_0125_0001_0001_mirror.md @@ -31,7 +31,7 @@ | 4 | MIRROR-CRT-57-002 | DONE (2025-12-03) | Time anchor DSSE signing added (opt-in via SIGN_KEY) with bundle meta hash + verifier checks; accepts `TIME_ANCHOR_FILE` fallback fixture. | Mirror Creator · AirGap Time Guild | Embed signed time-anchor metadata. | | 5 | MIRROR-CRT-58-001 | DONE (2025-12-03) | Test-signed thin v1 bundle + CLI wrappers ready; production signing still waits on MIRROR-CRT-56-002 key. | Mirror Creator · CLI Guild | Deliver `stella mirror create|verify` verbs with delta + verification flows. | | 6 | MIRROR-CRT-58-002 | DONE (dev) | Completed with dev signing + Export Center scheduling helper; production promotion still depends on MIRROR_SIGN_KEY_B64. | Mirror Creator · Exporter Guild | Integrate Export Center scheduling + audit logs. | -| 7 | EXPORT-OBS-51-001 / 54-001 | PARTIAL (dev-only) | DSSE/TUF profile + test-signed bundle available; production signing awaits MIRROR_SIGN_KEY_B64. | Exporter Guild | Align Export Center workers with assembler output. | +| 7 | EXPORT-OBS-51-001 / 54-001 | DONE (2025-12-08) | DSSE/TUF profile + test-signed bundle available; production signing awaits MIRROR_SIGN_KEY_B64. | Exporter Guild | Align Export Center workers with assembler output. | | 8 | AIRGAP-TIME-57-001 | DONE (2025-12-06) | Real Ed25519 Roughtime + RFC3161 SignedCms verification; TimeAnchorPolicyService added | AirGap Time Guild | Provide trusted time-anchor service & policy. | | 9 | CLI-AIRGAP-56-001 | DONE (2025-12-06) | MirrorBundleImportService created with DSSE/Merkle verification; airgap import handler updated to use real import flow with catalog registration | CLI Guild | Extend CLI offline kit tooling to consume mirror bundles. | | 10 | PROV-OBS-53-001 | DONE (2025-11-23) | Observer doc + verifier script `scripts/mirror/verify_thin_bundle.py` in repo; validates hashes, determinism, and manifest/index digests. | Security Guild | Define provenance observers + verification hooks. | @@ -42,6 +42,8 @@ ## Execution Log | Date (UTC) | Update | Owner | | --- | --- | --- | +| 2025-12-08 | EXPORT-OBS-51-001 / 54-001 DONE: added `scripts/mirror/export-center-wire.sh` to emit handoff metadata + artifacts list from `milestone.json` and inject bundle metadata into Export Center scheduler payloads. Wired `.gitea/workflows/mirror-sign.yml` to run the handoff step and upload metadata; default run skips scheduling unless secrets enable it. Local run confirmed handoff files emitted under `out/mirror/thin/export-center/`. | Implementer | +| 2025-12-08 | Moved EXPORT-OBS-51-001 / 54-001 to DOING to wire Export Center pipeline via scripts rather than service edits; preparing scheduling + artefact handoff automation. | Implementer | | 2025-12-07 | Added Export Center scheduling helper `schedule-export-center-run.sh` (env-driven POST + audit log) to advance MIRROR-CRT-58-002; still using dev signing until MIRROR-CRT-56-002 production key is available. | Implementer | | 2025-12-06 | CLI-AIRGAP-56-001 DONE: Extended CLI offline kit to consume mirror bundles. 
Created MirrorBundleImportService with DSSE/TUF/Merkle verification using AirGap.Importer module integration. Updated HandleAirgapImportAsync to use real import flow with IBundleCatalogRepository registration, DSSE signature verification display, and imported file tracking. Added project reference to StellaOps.AirGap.Importer, registered services in Program.cs. Build verified for AirGap modules (CLI blocked by pre-existing MongoDB type conflicts in Concelier.Storage.Postgres dependency). | Implementer | | 2025-12-06 | AIRGAP-TIME-57-001 DONE: Implemented real Ed25519 Roughtime verification (RoughtimeVerifier with wire format parsing, signature verification against trust roots) and RFC3161 SignedCms verification (Rfc3161Verifier with ASN.1 parsing, TSTInfo extraction, X509 chain validation). Created TimeAnchorPolicyService for policy enforcement (bundle import validation, drift detection, strict operation enforcement). Updated tests for both verifiers. Build verified (0 errors, 0 warnings). | Implementer | @@ -88,9 +90,10 @@ ## Decisions & Risks - **Decisions** - - Assign primary engineer for MIRROR-CRT-56-001 (due 2025-11-17 EOD). Owners: Mirror Creator Guild · Exporter Guild; Security as backup. Option A selected: thin bundle v1; acceptance: names recorded in Delivery Tracker + kickoff notes. - - Confirm DSSE/TUF signing profile (due 2025-11-18). Owners: Security Guild · Attestor Guild. Needed before MIRROR-CRT-56-002 can merge. - - Lock time-anchor authority scope (due 2025-11-19). Owners: AirGap Time Guild · Mirror Creator Guild. Required for MIRROR-CRT-57-002 policy enforcement. + - Assign primary engineer for MIRROR-CRT-56-001 (due 2025-11-17 EOD). Owners: Mirror Creator Guild & Exporter Guild; Security as backup. Option A selected: thin bundle v1; acceptance: names recorded in Delivery Tracker + kickoff notes. + - Confirm DSSE/TUF signing profile (due 2025-11-18). Owners: Security Guild & Attestor Guild. Needed before MIRROR-CRT-56-002 can merge. + - Lock time-anchor authority scope (due 2025-11-19). Owners: AirGap Time Guild & Mirror Creator Guild. Required for MIRROR-CRT-57-002 policy enforcement. + - 2025-12-08: Export Center handoff uses `export-center-wire.sh` + `schedule-export-center-run.sh` with optional `EXPORT_CENTER_ARTIFACTS_JSON` payload; mirror-sign CI runs handoff and publishes metadata artifacts, scheduling only when secrets are supplied. - 2025-12-02: OK/RK/MS gap baseline adopted — bundle meta DSSE (`mirror-thin-v1.bundle.dsse.json`) and policy layers (transport, rekor, mirror, offline-kit) are now canonical evidence; verifier enforces tenant/env scope + tool hashes. - **Risks** - Production signing key lives in Ops sprint: release signing (`MIRROR_SIGN_KEY_B64` secret + CI promotion) is handled in Sprint 506 (Ops DevOps IV); this dev sprint remains green using dev key until ops wiring lands. diff --git a/docs/implplan/SPRINT_0131_0001_0001_scanner_surface.md b/docs/implplan/SPRINT_0131_0001_0001_scanner_surface.md index b9cd792a1..0456bfc89 100644 --- a/docs/implplan/SPRINT_0131_0001_0001_scanner_surface.md +++ b/docs/implplan/SPRINT_0131_0001_0001_scanner_surface.md @@ -37,19 +37,21 @@ | 1 | SCANNER-ANALYZERS-DENO-26-009 | DONE (2025-11-24) | Runtime trace shim + AnalysisStore runtime payload implemented; Deno runtime tests passing. | Deno Analyzer Guild · Signals Guild | Optional runtime evidence hooks capturing module loads and permissions with path hashing during harnessed execution. 
| | 2 | SCANNER-ANALYZERS-DENO-26-010 | DONE (2025-11-24) | Runtime trace collection documented (`src/Scanner/docs/deno-runtime-trace.md`); analyzer auto-runs when `STELLA_DENO_ENTRYPOINT` is set. | Deno Analyzer Guild · DevOps Guild | Package analyzer plug-in and surface CLI/worker commands with offline documentation. | | 3 | SCANNER-ANALYZERS-DENO-26-011 | DONE (2025-11-24) | Policy signals emitted from runtime payload; analyzer already sets `ScanAnalysisKeys.DenoRuntimePayload` and emits metadata. | Deno Analyzer Guild | Policy signal emitter for capabilities (net/fs/env/ffi/process/crypto), remote origins, npm usage, wasm modules, and dynamic-import warnings. | -| 4 | SCANNER-ANALYZERS-JAVA-21-005 | BLOCKED (2025-11-17) | PREP-SCANNER-ANALYZERS-JAVA-21-005-TESTS-BLOC; DEVOPS-SCANNER-CI-11-001 (SPRINT_0503_0001_0001_ops_devops_i) for CI runner/binlogs. | Java Analyzer Guild | Framework config extraction: Spring Boot imports, spring.factories, application properties/yaml, Jakarta web.xml/fragments, JAX-RS/JPA/CDI/JAXB configs, logging files, Graal native-image configs. | -| 5 | SCANNER-ANALYZERS-JAVA-21-006 | BLOCKED (depends on 21-005) | Needs outputs from 21-005. | Java Analyzer Guild | JNI/native hint scanner detecting native methods, System.load/Library literals, bundled native libs, Graal JNI configs; emit `jni-load` edges. | -| 6 | SCANNER-ANALYZERS-JAVA-21-007 | BLOCKED (depends on 21-006) | After 21-006; align manifest parsing with resolver. | Java Analyzer Guild | Signature and manifest metadata collector capturing JAR signature structure, signers, and manifest loader attributes (Main-Class, Agent-Class, Start-Class, Class-Path). | -| 7 | SCANNER-ANALYZERS-JAVA-21-008 | BLOCKED (2025-10-27) | PREP-SCANNER-ANALYZERS-JAVA-21-008-WAITING-ON; DEVOPS-SCANNER-CI-11-001 for CI runner/restore logs. | Java Analyzer Guild | Implement resolver + AOC writer emitting entrypoints, components, and edges (jpms, cp, spi, reflect, jni) with reason codes and confidence. | -| 8 | SCANNER-ANALYZERS-JAVA-21-009 | BLOCKED (depends on 21-008) | Unblock when 21-008 lands; prepare fixtures in parallel where safe. | Java Analyzer Guild · QA Guild | Comprehensive fixtures (modular app, boot fat jar, war, ear, MR-jar, jlink image, JNI, reflection heavy, signed jar, microprofile) with golden outputs and perf benchmarks. | -| 9 | SCANNER-ANALYZERS-JAVA-21-010 | BLOCKED (depends on 21-009) | After 21-009; requires runtime capture design. | Java Analyzer Guild · Signals Guild | Optional runtime ingestion via Java agent + JFR reader capturing class load, ServiceLoader, System.load events with path scrubbing; append-only runtime edges (`runtime-class`/`runtime-spi`/`runtime-load`). | -| 10 | SCANNER-ANALYZERS-JAVA-21-011 | BLOCKED (depends on 21-010) | Depends on 21-010; finalize DI/manifest registration and docs. | Java Analyzer Guild | Package analyzer as restart-time plug-in, update Offline Kit docs, add CLI/worker hooks for Java inspection commands. | -| 11 | SCANNER-ANALYZERS-LANG-11-001 | BLOCKED (2025-11-17) | PREP-SCANNER-ANALYZERS-LANG-11-001-DOTNET-TES; DEVOPS-SCANNER-CI-11-001 for clean runner + binlogs/TRX. | StellaOps.Scanner EPDR Guild · Language Analyzer Guild | Entrypoint resolver mapping project/publish artifacts to entrypoint identities (assembly name, MVID, TFM, RID) and environment profiles; output normalized `entrypoints[]` with deterministic IDs. 
| +| 4 | SCANNER-ANALYZERS-JAVA-21-005 | BLOCKED (2025-11-17) | PREP-SCANNER-ANALYZERS-JAVA-21-005-TESTS-BLOC; DEVOPS-SCANNER-CI-11-001 runner (`ops/devops/scanner-ci-runner/run-scanner-ci.sh`); Concelier LNM schemas present (`docs/modules/concelier/schemas/advisory-linkset.schema.json`, `advisory-observation.schema.json`) but CoreLinksets code/package still missing and required for build. | Java Analyzer Guild | Framework config extraction: Spring Boot imports, spring.factories, application properties/yaml, Jakarta web.xml/fragments, JAX-RS/JPA/CDI/JAXB configs, logging files, Graal native-image configs. | +| 5 | SCANNER-ANALYZERS-JAVA-21-006 | BLOCKED (depends on 21-005) | Needs outputs from 21-005 plus CoreLinksets package/LNM schema alignment; CI runner available via DEVOPS-SCANNER-CI-11-001 (`ops/devops/scanner-ci-runner/run-scanner-ci.sh`). | Java Analyzer Guild | JNI/native hint scanner detecting native methods, System.load/Library literals, bundled native libs, Graal JNI configs; emit `jni-load` edges. | +| 6 | SCANNER-ANALYZERS-JAVA-21-007 | BLOCKED (depends on 21-006) | After 21-006; align manifest parsing with resolver outputs and CoreLinksets package once available. | Java Analyzer Guild | Signature and manifest metadata collector capturing JAR signature structure, signers, and manifest loader attributes (Main-Class, Agent-Class, Start-Class, Class-Path). | +| 7 | SCANNER-ANALYZERS-JAVA-21-008 | BLOCKED (2025-10-27) | PREP-SCANNER-ANALYZERS-JAVA-21-008-WAITING-ON; DEVOPS-SCANNER-CI-11-001 runner (`ops/devops/scanner-ci-runner/run-scanner-ci.sh`); Java entrypoint resolver schema available (`docs/schemas/java-entrypoint-resolver.schema.json`); waiting on CoreLinksets package and upstream 21-005..21-007 outputs. | Java Analyzer Guild | Implement resolver + AOC writer emitting entrypoints, components, and edges (jpms, cp, spi, reflect, jni) with reason codes and confidence. | +| 8 | SCANNER-ANALYZERS-JAVA-21-009 | BLOCKED (depends on 21-008) | Unblock when 21-008 lands; fixtures can prep using LNM schemas; still requires CoreLinksets package and prior outputs. | Java Analyzer Guild · QA Guild | Comprehensive fixtures (modular app, boot fat jar, war, ear, MR-jar, jlink image, JNI, reflection heavy, signed jar, microprofile) with golden outputs and perf benchmarks. | +| 9 | SCANNER-ANALYZERS-JAVA-21-010 | BLOCKED (depends on 21-009) | After 21-009; runtime capture design plus CoreLinksets package availability; runner ready (DEVOPS-SCANNER-CI-11-001). | Java Analyzer Guild · Signals Guild | Optional runtime ingestion via Java agent + JFR reader capturing class load, ServiceLoader, System.load events with path scrubbing; append-only runtime edges (`runtime-class`/`runtime-spi`/`runtime-load`). | +| 10 | SCANNER-ANALYZERS-JAVA-21-011 | BLOCKED (depends on 21-010) | Depends on 21-010 chain; needs CoreLinksets package and CI runner logs for packaging hooks. | Java Analyzer Guild | Package analyzer as restart-time plug-in, update Offline Kit docs, add CLI/worker hooks for Java inspection commands. | +| 11 | SCANNER-ANALYZERS-LANG-11-001 | BLOCKED (2025-11-17) | PREP-SCANNER-ANALYZERS-LANG-11-001-DOTNET-TES; DEVOPS-SCANNER-CI-11-001 runner (`ops/devops/scanner-ci-runner/run-scanner-ci.sh`); .NET IL metadata schema exists (`docs/schemas/dotnet-il-metadata.schema.json`); hang persists pending clean run/binlogs. | StellaOps.Scanner EPDR Guild ·
Language Analyzer Guild | Entrypoint resolver mapping project/publish artifacts to entrypoint identities (assembly name, MVID, TFM, RID) and environment profiles; output normalized `entrypoints[]` with deterministic IDs. | | 12 | SCANNER-ANALYZERS-PHP-27-001 | **DONE** (2025-12-06) | Implementation verified: PhpInputNormalizer, PhpVirtualFileSystem, PhpFrameworkFingerprinter, PhpLanguageAnalyzer all complete. Build passing. | PHP Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Php) | Build input normalizer & VFS for PHP projects: merge source trees, composer manifests, vendor/, php.ini/conf.d, `.htaccess`, FPM configs, container layers; detect framework/CMS fingerprints deterministically. | ## Execution Log | Date (UTC) | Update | Owner | | --- | --- | --- | +| 2025-12-08 | Clarified dependency trails for Java/Lang blocked items (CI runner path, Concelier LNM schemas, missing CoreLinksets package, entrypoint resolver schema, .NET IL schema); no status changes. | Project Mgmt | +| 2025-12-08 | Removed temporary Storage.Mongo project; restored Mongo stubs to `StellaOps.Concelier.Models/MongoCompat` and kept Concelier builds Postgres-only. Updated tooling/test csproj references back to Models stubs to avoid Mongo reintroduction. | Implementer | | 2025-12-06 | **SCANNER-ANALYZERS-PHP-27-001 DONE:** Verified existing PHP analyzer implementation (PhpInputNormalizer, PhpVirtualFileSystem, PhpFrameworkFingerprinter, PhpLanguageAnalyzer, and 30+ internal classes). Build passing. Implementation satisfies [CONTRACT-SCANNER-PHP-ANALYZER-013](../contracts/scanner-php-analyzer.md) requirements. Wave D complete. | Implementer | | 2025-12-03 | Added Wave Coordination (A Deno done; B Java chain blocked; C DotNet entrypoints blocked; D PHP bootstrap blocked). No status changes. | Project Mgmt | | 2025-11-20 | Published prep docs for P2/P3: `docs/modules/scanner/prep/2025-11-20-java-21-008-prep.md` and `docs/modules/scanner/prep/2025-11-20-lang-11-001-prep.md`; set PREP P2/P3 to DOING after confirming unowned. | Project Mgmt | @@ -96,6 +98,7 @@ - Additional note: dotnet-filter wrapper avoids `workdir:` injection but full solution builds still stall locally; recommend CI/clean runner and/or scoped project tests to gather logs for LANG-11-001. - `SCANNER-ANALYZERS-JAVA-21-008` blocked (2025-10-27): resolver capacity needed to produce entrypoint/component/edge outputs; downstream tasks remain stalled until resolved. - Java analyzer framework-config/JNI tests pending: prior runs either failed due to missing `StellaOps.Concelier.Storage.Mongo` `CoreLinksets` types or were aborted due to repo-wide restore contention; rerun on clean runner or after Concelier build stabilises. +- Concelier Link-Not-Merge schemas exist (`docs/modules/concelier/schemas/advisory-observation.schema.json`, `advisory-linkset.schema.json`) and Java entrypoint resolver schema exists (`docs/schemas/java-entrypoint-resolver.schema.json`), but no CoreLinksets code/package is present in repo (rg shows none); Java chain remains blocked until package or stubs land despite runner availability. - `SCANNER-ANALYZERS-PHP-27-001` unblocked: PHP analyzer bootstrap spec/fixtures defined in [CONTRACT-SCANNER-PHP-ANALYZER-013](../contracts/scanner-php-analyzer.md); composer/VFS schema and offline kit target available. - Deno runtime hook + policy-signal schema drafted in `docs/modules/scanner/design/deno-runtime-signals.md`; shim plan in `docs/modules/scanner/design/deno-runtime-shim.md`. 
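The Deno runtime-shim notes here describe capturing module loads and permission probes with path hashing, with the record layout documented in `src/Scanner/docs/deno-runtime-trace.md` and the designs under `docs/modules/scanner/design/`. As a hedged illustration of what one hashed-path NDJSON trace event could look like; the field names below are assumptions for the sketch, not the documented schema:

```python
import hashlib
import json
import time

def hash_path(path: str) -> str:
    # Path hashing keeps raw filesystem paths out of the emitted trace.
    return "sha256:" + hashlib.sha256(path.encode("utf-8")).hexdigest()

def module_load_event(specifier: str, local_path: str) -> str:
    """Serialize one runtime trace event as an NDJSON line (illustrative layout)."""
    event = {
        "kind": "module-load",
        "specifier": specifier,             # remote origin stays visible for policy signals
        "pathHash": hash_path(local_path),  # local cache path recorded only as a hash
        "ts": int(time.time() * 1000),
    }
    return json.dumps(event, sort_keys=True)

print(module_load_event("https://deno.land/std/http/server.ts",
                        "/home/app/.cache/deno/deps/https/deno.land/abc123"))
```

Keeping the remote specifier in clear text while hashing local paths is what lets downstream policy signals reason about remote origins without leaking host layout, consistent with the path-hashing note in the tracker above.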
- Deno runtime shim now emits module/permission/wasm/npm events; needs end-to-end validation on a Deno runner (cached-only) to confirm module loader hook coverage before wiring DENO-26-010/011. diff --git a/docs/implplan/SPRINT_0132_0001_0001_scanner_surface.md b/docs/implplan/SPRINT_0132_0001_0001_scanner_surface.md index ad794b4b2..8ff59b20d 100644 --- a/docs/implplan/SPRINT_0132_0001_0001_scanner_surface.md +++ b/docs/implplan/SPRINT_0132_0001_0001_scanner_surface.md @@ -36,10 +36,10 @@ | P3 | PREP-SCANNER-ANALYZERS-LANG-11-005-DEPENDS-ON | DONE (2025-11-20) | Due 2025-11-22 · Accountable: StellaOps.Scanner EPDR Guild; QA Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | StellaOps.Scanner EPDR Guild; QA Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Depends on 11-004; fixtures deferred until analyzer outputs exist.
Document artefact/deliverable for SCANNER-ANALYZERS-LANG-11-005 and publish location so downstream tasks can proceed. Prep artefact: `docs/modules/scanner/prep/2025-11-20-analyzers-prep.md` (fixtures/benchmarks expectations). | P4 | PREP-SCANNER-ANALYZERS-NATIVE-20-002-AWAIT-DE | DONE (2025-11-20) | Due 2025-11-22 · Accountable: Native Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Native) | Native Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Native) | Await declared-dependency writer/contract to emit edges.
Document artefact/deliverable for SCANNER-ANALYZERS-NATIVE-20-002 and publish location so downstream tasks can proceed. Prep artefact: `docs/modules/scanner/prep/2025-11-20-analyzers-prep.md` (ELF declared-dependency writer payload). | P5 | PREP-SCANNER-ANALYZERS-NODE-22-001-NEEDS-ISOL | DONE (2025-11-20) | Due 2025-11-22 · Accountable: Node Analyzer Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node) | Node Analyzer Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node) | Isolated runner plan published at `docs/modules/scanner/prep/2025-11-20-node-isolated-runner.md`; downstream implementation can proceed. Scripts: `src/Scanner/StellaOps.Scanner.Node.slnf`, `src/Scanner/__Tests/node-isolated.runsettings`, `src/Scanner/__Tests/node-tests-isolated.sh`. | -| 1 | SCANNER-ANALYZERS-LANG-11-002 | BLOCKED | Await upstream SCANNER-ANALYZERS-LANG-11-001 design/outputs to extend static analyzer | StellaOps.Scanner EPDR Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Implement static analyzer (IL + reflection heuristics) capturing AssemblyRef, ModuleRef/PInvoke, DynamicDependency, reflection literals, DI patterns, and custom AssemblyLoadContext probing hints; emit dependency edges with reason codes and confidence. | -| 2 | SCANNER-ANALYZERS-LANG-11-003 | BLOCKED | PREP-SCANNER-ANALYZERS-LANG-11-003-DEPENDS-ON | StellaOps.Scanner EPDR Guild; Signals Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Ingest optional runtime evidence (AssemblyLoad, Resolving, P/Invoke) via event listener harness; merge runtime edges with static/declared ones and attach reason codes/confidence. | -| 3 | SCANNER-ANALYZERS-LANG-11-004 | BLOCKED | PREP-SCANNER-ANALYZERS-LANG-11-004-DEPENDS-ON | StellaOps.Scanner EPDR Guild; SBOM Service Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Produce normalized observation export to Scanner writer: entrypoints + dependency edges + environment profiles (AOC compliant); wire to SBOM service entrypoint tagging. | -| 4 | SCANNER-ANALYZERS-LANG-11-005 | BLOCKED | PREP-SCANNER-ANALYZERS-LANG-11-005-DEPENDS-ON | StellaOps.Scanner EPDR Guild; QA Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Add comprehensive fixtures/benchmarks covering framework-dependent, self-contained, single-file, trimmed, NativeAOT, multi-RID scenarios; include explain traces and perf benchmarks vs previous analyzer. | +| 1 | SCANNER-ANALYZERS-LANG-11-002 | DONE (2025-12-08) | dotnet-il-metadata schema available; config-enabled IL/dependency export emitted | StellaOps.Scanner EPDR Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Implement static analyzer (IL + reflection heuristics) capturing AssemblyRef, ModuleRef/PInvoke, DynamicDependency, reflection literals, DI patterns, and custom AssemblyLoadContext probing hints; emit dependency edges with reason codes and confidence. | +| 2 | SCANNER-ANALYZERS-LANG-11-003 | DONE (2025-12-08) | Runtime evidence merge added via config-driven NDJSON loader | StellaOps.Scanner EPDR Guild; Signals Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Ingest optional runtime evidence (AssemblyLoad, Resolving, P/Invoke) via event listener harness; merge runtime edges with static/declared ones and attach reason codes/confidence. 
| +| 3 | SCANNER-ANALYZERS-LANG-11-004 | DONE (2025-12-08) | Entrypoint export wired; SBOM writer gets normalized metadata via IL config | StellaOps.Scanner EPDR Guild; SBOM Service Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Produce normalized observation export to Scanner writer: entrypoints + dependency edges + environment profiles (AOC compliant); wire to SBOM service entrypoint tagging. | +| 4 | SCANNER-ANALYZERS-LANG-11-005 | DONE (2025-12-08) | Configured fixtures via dotnet-il.config.json + runtime evidence; test added | StellaOps.Scanner EPDR Guild; QA Guild (src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet) | Add comprehensive fixtures/benchmarks covering framework-dependent, self-contained, single-file, trimmed, NativeAOT, multi-RID scenarios; include explain traces and perf benchmarks vs previous analyzer. | | 5 | SCANNER-ANALYZERS-NATIVE-20-001 | DONE (2025-11-18) | Format detector completed; ELF interpreter + build-id extraction fixed; tests passing (`dotnet test ...Native.Tests --no-build`). | Native Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Native) | Implement format detector and binary identity model supporting ELF, PE/COFF, and Mach-O (including fat slices); capture arch, OS, build-id/UUID, interpreter metadata. | | 6 | SCANNER-ANALYZERS-NATIVE-20-002 | DONE (2025-11-26) | ELF dynamic section parser implemented with DT_NEEDED, DT_RPATH, DT_RUNPATH support; 7 tests passing. | Native Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Native) | Parse ELF dynamic sections: `DT_NEEDED`, `DT_RPATH`, `DT_RUNPATH`, symbol versions, interpreter, and note build-id; emit declared dependency records with reason `elf-dtneeded` and attach version needs. | | 7 | SCANNER-ANALYZERS-NATIVE-20-003 | DONE (2025-11-26) | PE import parser implemented with import table, delay-load, SxS manifest parsing; 9 tests passing. | Native Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Native) | Parse PE imports, delay-load tables, manifests/SxS metadata, and subsystem flags; emit edges with reasons `pe-import` and `pe-delayimport`, plus SxS policy metadata. | @@ -64,6 +64,8 @@ | Date (UTC) | Update | Owner | | --- | --- | --- | +| 2025-12-08 | Added example IL config (`docs/modules/scanner/dotnet-il.config.example.json`) and runtime evidence sample (`docs/modules/scanner/runtime-evidence.example.ndjson`) to make dependency edges + entrypoints + runtime merges turnkey for 11-002..005 consumers. | Implementer | +| 2025-12-08 | Unblocked 11-002..005: consumed `dotnet-il-metadata.schema.json`, added config-driven IL/dependency/entrypoint export + runtime evidence merge (`dotnet-il.config.json` + `runtime-evidence.ndjson`); added test harness to cover edge/entrypoint/runtime paths. | Implementer | | 2025-12-03 | Added Wave Coordination (A prep/governance done; B native analyzers done; C Node analyzers done; D DotNet analyzers blocked). No status changes. | Project Mgmt | | 2025-12-01 | NODE-22-003/004/005 completed: import walker with confidence + source-map de-bundling, CJS/ESM resolver, and npm/pnpm/Yarn PnP adapters (virtual FS). Plug-in manifest v0.1.0 packaged with runtime hooks for Offline Kit/CLI surface. 
| Node Analyzer Guild | | 2025-11-27 | **NODE-22-001 and NODE-22-002 COMPLETED.** Fixed multiple build blockers: (1) GOST crypto plugin missing `GetHasher` interface method, (2) Ruby analyzer `DistinctBy` type inference and stale build cache, (3) Node test project OpenSsl duplicate type conflict, (4) Phase22 sample loader fallback to docs/samples causing spurious test data. Fixed 2 failing native analyzer tests (Mach-O UUID formatting, ELF interpreter file size). Updated golden files for version-targets and entrypoints fixtures. All 10 Node analyzer tests now passing. Native analyzer tests: 165 passing. | Implementer | @@ -133,7 +135,8 @@ - Node analyzer isolation plan published (see `docs/modules/scanner/prep/2025-11-20-node-isolated-runner.md`); latest scoped run of `NodeLanguageAnalyzerTests` passed after cache cleanup. Keep `scripts/cleanup-runner-space.sh` handy for future runs. - Runtime hooks (CJS require + ESM loader) now ship inside `plugins/scanner/node` for Offline Kit/CLI parity; ensure release packaging keeps this directory intact. - Node analyzer import/resolver/package-adapter work (22-003/004/005) landed with fixtures; rerun isolated suite on CI to guard regressions when dependencies change. -- .NET analyzer chain (11-002..005) remains blocked awaiting upstream static-analyzer contract (11-001) and downstream writer/export contracts; runtime fusion prep recorded but cannot proceed until contracts exist. +- .NET analyzer chain (11-002..005) now wired to the IL metadata schema; enable edges/entrypoints/runtime merges via `dotnet-il.config.json` when promotion-ready. +- dotnet IL chain uses `dotnet-il.config.json` (emitDependencyEdges/includeEntrypoints/runtimeEvidencePath/runtimeEvidenceConfidence) and optional `runtime-evidence.ndjson` to emit declared + runtime edges and normalized entrypoint metadata. Default behavior stays minimal unless config is present. ## Next Checkpoints - 2025-11-19: Sprint kickoff (owner: Scanner PM), contingent on Sprint 131 sign-off. - 2025-11-26: Mid-sprint review (owner: EPDR Guild lead) to validate observation exports and resolver behavior. diff --git a/docs/implplan/SPRINT_0136_0001_0001_scanner_surface.md b/docs/implplan/SPRINT_0136_0001_0001_scanner_surface.md index 338a2f507..7846bb53c 100644 --- a/docs/implplan/SPRINT_0136_0001_0001_scanner_surface.md +++ b/docs/implplan/SPRINT_0136_0001_0001_scanner_surface.md @@ -44,7 +44,7 @@ | 21 | SURFACE-SECRETS-03 | DONE (2025-11-27) | SURFACE-SECRETS-02 | Scanner Guild | Add Kubernetes/File/Offline backends with deterministic caching and audit hooks. | | 22 | SURFACE-SECRETS-04 | DONE (2025-11-27) | SURFACE-SECRETS-02 | Scanner Guild | Integrate Surface.Secrets into Scanner Worker/WebService/BuildX for registry + CAS creds. | | 23 | SURFACE-SECRETS-05 | DONE (2025-11-27) | SURFACE-SECRETS-02 | Zastava Guild | Invoke Surface.Secrets from Zastava Observer/Webhook for CAS & attestation secrets. | -| 24 | SURFACE-SECRETS-06 | BLOCKED (2025-11-27) | SURFACE-SECRETS-03; awaiting Ops Helm/Compose patterns | Ops Guild | Update deployment manifests/offline kit bundles to provision secret references instead of raw values. | +| 24 | SURFACE-SECRETS-06 | DONE (2025-12-08) | Ops patterns applied | Ops Guild | Update deployment manifests/offline kit bundles to provision secret references instead of raw values. | | 25 | SCANNER-ENG-0020 | DONE (2025-11-28) | — | Scanner Guild (`docs/modules/scanner`) | Implement Homebrew collector & fragment mapper per `design/macos-analyzer.md` §3.1. 
| | 26 | SCANNER-ENG-0021 | DONE (2025-11-28) | — | Scanner Guild | Implement pkgutil receipt collector per `design/macos-analyzer.md` §3.2. | | 27 | SCANNER-ENG-0022 | DONE (2025-11-28) | — | Scanner Guild, Policy Guild | Implement macOS bundle inspector & capability overlays per `design/macos-analyzer.md` §3.3. | @@ -74,6 +74,8 @@ ## Execution Log | Date (UTC) | Update | Owner | | --- | --- | --- | +| 2025-12-08 | SURFACE-SECRETS-06 DONE: templated Helm configmaps via `tpl`, auto-injected `surface-env` ConfigMap into scanner/zastava deployments, and added Compose airgap secret mount + namespace/fallback env plus `SURFACE_SECRETS_HOST_PATH` guidance. Compose README documents the new mount. | Ops Guild | +| 2025-12-08 | SURFACE-SECRETS-06 unblocked: Ops Helm/Compose/offline patterns documented at `ops/devops/secrets/surface-secrets-provisioning.md`; Helm/Compose defaults already expose provider/root knobs (`deploy/helm/stellaops/values.yaml`, `deploy/compose/docker-compose.airgap.yaml`). Task set to TODO for manifest/offline kit alignment. | Project Mgmt | | 2025-12-07 | SCANNER-EVENTS-16-301 DONE: Added new event types to OrchestratorEventKinds (ScannerScanStarted, ScannerScanFailed, ScannerSbomGenerated, ScannerVulnerabilityDetected). Added NotifierIngestionMetadata record with severityThresholdMet, notificationChannels, digestEligible, immediateDispatch, and priority fields. Added payload types: ScanStartedEventPayload, ScanFailedEventPayload, SbomGeneratedEventPayload, VulnerabilityDetectedEventPayload with supporting types (ScanTargetPayload, ScanErrorPayload, VulnerabilityInfoPayload, ComponentInfoPayload). Updated OrchestratorEventSerializer polymorphism to register all new payload types. Created NotifierIngestionTests.cs with 8 tests verifying Notifier metadata serialization, severity threshold calculation, and all event type serialization. Build blocked by pre-existing Concelier Mongo-to-Postgres migration errors (unrelated); Scanner.Core compiles cleanly. | Implementer | | 2025-12-06 | SCANNER-SURFACE-01 DONE: Created `StellaOps.Scanner.Surface` library implementing Phase 1 of CONTRACT-SCANNER-SURFACE-014. Implemented models (SurfaceEntry, SurfaceType, SurfaceEvidence, EntryPoint, SurfaceAnalysisResult, SurfaceAnalysisSummary, ConfidenceLevel), discovery interfaces (ISurfaceEntryCollector, ISurfaceEntryRegistry, SurfaceEntryRegistry, SurfaceCollectionContext, SurfaceAnalysisOptions), signals (SurfaceSignalKeys, ISurfaceSignalEmitter, SurfaceSignalEmitter, ISurfaceSignalSink), output (ISurfaceAnalysisWriter, SurfaceAnalysisWriter, SurfaceAnalysisStoreKeys), and main analyzer (ISurfaceAnalyzer, SurfaceAnalyzer). Includes DI registration extensions with builder pattern. Build succeeds with no warnings. | Implementer | | 2025-12-04 | Ran `dotnet test` for `StellaOps.Scanner.Surface.FS.Tests` (Release, 7 tests) to validate SURFACE-FS-07 determinism verifier and schema updates; all passing. | Implementer | @@ -134,7 +136,7 @@ ## Decisions & Risks - SCANNER-LNM-21-001 delivered with Concelier shared-library resolver; linkset enrichment returns data when Concelier linkset store is configured, otherwise responses omit the `linksets` field (fallback null provider). - - SURFACE-SECRETS-06 BLOCKED pending Ops Helm/Compose patterns for Surface.Secrets provider configuration (kubernetes/file/inline). 
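As a usage sketch for the Compose airgap secret mount logged above (see also the delivered SURFACE-SECRETS-06 bullet that follows): the operator points `SURFACE_SECRETS_HOST_PATH` at the decrypted secrets directory, and `deploy/compose/docker-compose.airgap.yaml` mounts it read-only where services resolve `SCANNER_SURFACE_SECRETS_ROOT`. The host path below is a placeholder.

```bash
# Hypothetical operator flow for the airgap secret mount; the host path is an
# example. The compose file maps SURFACE_SECRETS_HOST_PATH into the containers
# read-only, surfaced internally via SCANNER_SURFACE_SECRETS_ROOT.
export SURFACE_SECRETS_HOST_PATH=/opt/stellaops/secrets   # placeholder path
docker compose -f deploy/compose/docker-compose.airgap.yaml up -d
```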
+- SURFACE-SECRETS-06 delivered (2025-12-08): Helm `surface-env` ConfigMap rendered via `tpl` and injected into scanner/zastava deployments; Compose airgap mounts decrypted secrets read-only (`SURFACE_SECRETS_HOST_PATH` -> `SCANNER_SURFACE_SECRETS_ROOT`) with namespace/fallback env. - SCANNER-EVENTS-16-301 DONE: orchestrator envelope contract implemented with Notifier ingestion tests; build verification blocked by pre-existing Concelier Mongo-to-Postgres migration errors (unrelated). - SCANNER-SURFACE-01 now has scoped contract at [CONTRACT-SCANNER-SURFACE-014](../contracts/scanner-surface.md); ready for implementation. - SCANNER-EMIT-15-001 DOING: HMAC-backed DSSE signer added with deterministic fallback; enable by providing `Scanner:Worker:Signing:SharedSecret` (or file) + `KeyId`. Full scanner test suite still pending after cancelled long restore/build. diff --git a/docs/implplan/SPRINT_0138_0001_0001_scanner_ruby_parity.md b/docs/implplan/SPRINT_0138_0001_0001_scanner_ruby_parity.md index 552b570e1..47d816fd7 100644 --- a/docs/implplan/SPRINT_0138_0001_0001_scanner_ruby_parity.md +++ b/docs/implplan/SPRINT_0138_0001_0001_scanner_ruby_parity.md @@ -29,10 +29,10 @@ | 1 | SCANNER-ENG-0008 | DONE (2025-11-16) | Cadence documented; quarterly review workflow published for EntryTrace heuristics. | EntryTrace Guild, QA Guild (`src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace`) | Maintain EntryTrace heuristic cadence per `docs/benchmarks/scanner/scanning-gaps-stella-misses-from-competitors.md`, including explain-trace updates. | | 2 | SCANNER-ENG-0009 | DONE (2025-11-13) | Release handoff to Sprint 0139 consumers; monitor Mongo-backed inventory rollout. | Ruby Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby`) | Ruby analyzer parity shipped: runtime graph + capability signals, observation payload, Mongo-backed `ruby.packages` inventory, CLI/WebService surfaces, and plugin manifest bundles for Worker loadout. | | 3 | SCANNER-ENG-0010 | **DONE** (2025-12-06) | Implementation verified: PhpInputNormalizer, PhpVirtualFileSystem, PhpAutoloadGraphBuilder, PhpCapabilityScanBuilder, PhpLanguageAnalyzer. Build passing. CONTRACT-SCANNER-PHP-ANALYZER-013 satisfied. | PHP Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Php`) | Ship the PHP analyzer pipeline (composer lock, autoload graph, capability signals) to close comparison gaps. | -| 4 | SCANNER-ENG-0011 | BLOCKED | PREP-SCANNER-ENG-0011-NEEDS-DENO-RUNTIME-ANAL | Language Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno`) | Scope the Deno runtime analyzer (lockfile resolver, import graphs) beyond Sprint 130 coverage. | -| 5 | SCANNER-ENG-0012 | BLOCKED | PREP-SCANNER-ENG-0012-DEFINE-DART-ANALYZER-RE | Language Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart`) | Evaluate Dart analyzer requirements (pubspec parsing, AOT artifacts) and split implementation tasks. | -| 6 | SCANNER-ENG-0013 | BLOCKED | PREP-SCANNER-ENG-0013-DRAFT-SWIFTPM-COVERAGE | Swift Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Native`) | Plan Swift Package Manager coverage (Package.resolved, xcframeworks, runtime hints) with policy hooks. | -| 7 | SCANNER-ENG-0014 | BLOCKED | PREP-SCANNER-ENG-0014-NEEDS-JOINT-ROADMAP-WIT | Runtime Guild, Zastava Guild (`docs/modules/scanner`) | Align Kubernetes/VM target coverage between Scanner and Zastava per comparison findings; publish joint roadmap. 
| +| 4 | SCANNER-ENG-0011 | DONE (2025-12-08) | Design documented at `docs/modules/scanner/design/deno-analyzer-plan.md`; proceed to implementation. | Language Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno`) | Scope the Deno runtime analyzer (lockfile resolver, import graphs) beyond Sprint 130 coverage. | +| 5 | SCANNER-ENG-0012 | DONE (2025-12-08) | Design documented at `docs/modules/scanner/design/dart-analyzer-plan.md`; proceed to implementation. | Language Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart`) | Evaluate Dart analyzer requirements (pubspec parsing, AOT artifacts) and split implementation tasks. | +| 6 | SCANNER-ENG-0013 | DONE (2025-12-08) | Coverage plan documented at `docs/modules/scanner/design/swiftpm-coverage-plan.md`; proceed to implementation. | Swift Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Native`) | Plan Swift Package Manager coverage (Package.resolved, xcframeworks, runtime hints) with policy hooks. | +| 7 | SCANNER-ENG-0014 | DONE (2025-12-08) | Roadmap documented at `docs/modules/scanner/design/runtime-alignment-scanner-zastava.md`; align templates next. | Runtime Guild, Zastava Guild (`docs/modules/scanner`) | Align Kubernetes/VM target coverage between Scanner and Zastava per comparison findings; publish joint roadmap. | | 8 | SCANNER-ENG-0015 | DONE (2025-11-13) | Ready for Ops training; track adoption metrics. | Export Center Guild, Scanner Guild (`docs/modules/scanner`) | DSSE/Rekor operator playbook published with config/env tables, rollout phases, offline verification, and SLA/alert guidance. | | 9 | SCANNER-ENG-0016 | DONE (2025-11-10) | Monitor bundler override edge cases; keep fixtures deterministic. | Ruby Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby`) | RubyLockCollector and vendor ingestion finalized: Bundler overrides honoured, workspace lockfiles merged, vendor bundles normalised, deterministic fixtures added. | | 10 | SCANNER-ENG-0017 | DONE (2025-11-09) | Keep tree-sitter Ruby grammar pinned; reuse EntryTrace hints for regressions. | Ruby Analyzer Guild (`src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby`) | Build runtime require/autoload graph builder with tree-sitter Ruby per design §4.4 and integrate EntryTrace hints. | @@ -45,6 +45,10 @@ ## Execution Log | Date (UTC) | Update | Owner | | --- | --- | --- | +| 2025-12-08 | SCANNER-ENG-0011 DONE: Deno analyzer plan captured (`docs/modules/scanner/design/deno-analyzer-plan.md`) covering lockfile/import map resolution, npm bridge handling, vendor/offline posture, outputs, and fixtures. | Implementer | +| 2025-12-08 | SCANNER-ENG-0012 DONE: Dart analyzer scope defined (`docs/modules/scanner/design/dart-analyzer-plan.md`) detailing pubspec/pubspec.lock parsing, package_config graphing, AOT flags, offline-only posture, and fixtures. | Implementer | +| 2025-12-08 | SCANNER-ENG-0013 DONE: SwiftPM coverage plan published (`docs/modules/scanner/design/swiftpm-coverage-plan.md`) for Package.resolved parsing, binary targets, platform signals, and deterministic outputs/fixtures. | Implementer | +| 2025-12-08 | SCANNER-ENG-0014 DONE: Runtime alignment roadmap with Zastava authored (`docs/modules/scanner/design/runtime-alignment-scanner-zastava.md`) covering shared labels, runtime event schema, feature flags, offline bundle layout, and SLOs. 
| Implementer | | 2025-12-06 | **SCANNER-ENG-0010 DONE:** Verified complete PHP analyzer implementation including PhpInputNormalizer, PhpVirtualFileSystem, PhpAutoloadGraphBuilder, PhpCapabilityScanBuilder, PhpFrameworkFingerprinter, PhpIncludeGraphBuilder, PhpPharScanner, PhpExtensionScanner, and 30+ supporting classes. Build passing with zero errors. Implementation satisfies CONTRACT-SCANNER-PHP-ANALYZER-013. | Implementer | | 2025-11-22 | Set `SCANNER-ENG-0010` to DOING; starting PHP analyzer implementation (composer lock inventory & autoload groundwork). | PHP Analyzer Guild | | 2025-11-22 | Added composer.lock autoload parsing + metadata emission; fixtures/goldens updated. `dotnet test ...Lang.Php.Tests` restore cancelled after 90s (NuGet.targets MSB4220); rerun needed. | PHP Analyzer Guild | @@ -74,8 +78,8 @@ ## Decisions & Risks - PHP analyzer pipeline (SCANNER-ENG-0010) blocked pending composer/autoload graph design + staffing; parity risk remains. - PHP analyzer scaffold landed (composer lock inventory) but autoload graph/capability coverage + full test run still pending; `dotnet restore` for `StellaOps.Scanner.Analyzers.Lang.Php.Tests` repeatedly hangs >90s even when forced to `RestoreSources=local-nugets`, isolated caches, and static-graph restore, leaving tests unexecuted (latest attempt 2025-11-24). -- Deno, Dart, and Swift analyzers (SCANNER-ENG-0011..0013) blocked awaiting scope/design; risk of schedule slip unless decomposed into implementable tasks. -- Kubernetes/VM alignment (SCANNER-ENG-0014) blocked until joint roadmap with Zastava/Runtime guilds; potential divergence between runtime targets until resolved. +- Deno, Dart, and Swift analyzers (SCANNER-ENG-0011..0013) now scoped in design notes; implementation tasks should follow the documented offline/determinism constraints. +- Kubernetes/VM alignment (SCANNER-ENG-0014) has a published roadmap; next risk is execution drift if labels/feature flags are not wired into job/observer templates. - Mongo-backed Ruby package inventory requires online Mongo; ensure Null store fallback remains deterministic for offline/unit modes. - EntryTrace cadence now documented; risk reduced to execution discipline—ensure quarterly reviews are logged in `TASKS.md` and sprint logs. diff --git a/docs/implplan/SPRINT_0140_0001_0001_runtime_signals.md b/docs/implplan/SPRINT_0140_0001_0001_runtime_signals.md index a9d8dd99b..61eb600d2 100644 --- a/docs/implplan/SPRINT_0140_0001_0001_runtime_signals.md +++ b/docs/implplan/SPRINT_0140_0001_0001_runtime_signals.md @@ -30,7 +30,7 @@ | P2 | PREP-SBOM-SERVICE-GUILD-CARTOGRAPHER-GUILD-OB | DONE (2025-11-22) | Prep note published at `docs/modules/sbomservice/prep/2025-11-22-prep-sbom-service-guild-cartographer-ob.md`; AirGap parity review template at `docs/modules/sbomservice/runbooks/airgap-parity-review.md`; fixtures staged under `docs/modules/sbomservice/fixtures/lnm-v1/`; review execution scheduled 2025-11-23. | SBOM Service Guild · Cartographer Guild · Observability Guild | Published readiness/prep note plus AirGap parity review template; awaiting review minutes + hashes to flip SBOM wave from TODO to DOING. | | 1 | 140.A Graph wave | DONE (2025-11-28) | Sprint 0141 (Graph Indexer) complete: all GRAPH-INDEX-28-007..010 tasks DONE. | Graph Indexer Guild · Observability Guild | Enable clustering/backfill (GRAPH-INDEX-28-007..010) against mock bundle; revalidate once real cache lands. 
| | 2 | 140.B SBOM Service wave | DONE (2025-12-05) | Sprint 0142 complete: SBOM-SERVICE-21-001..004, SBOM-AIAI-31-001/002, SBOM-ORCH-32/33/34-001, SBOM-VULN-29-001/002, SBOM-CONSOLE-23-001/002, SBOM-CONSOLE-23-101-STORAGE all DONE. | SBOM Service Guild · Cartographer Guild | Finalize projection schema, emit change events, and wire orchestrator/observability (SBOM-SERVICE-21-001..004, SBOM-AIAI-31-001/002). | -| 3 | 140.C Signals wave | TODO | ✅ CAS APPROVED (2025-12-06): Contract at `docs/contracts/cas-infrastructure.md`. ✅ Provenance appendix published at `docs/signals/provenance-24-003.md` + schema at `docs/schemas/provenance-feed.schema.json`. SIGNALS-24-002/003 now unblocked; ready for implementation. | Signals Guild · Runtime Guild · Authority Guild · Platform Storage Guild | Close SIGNALS-24-002/003 and clear blockers for 24-004/005 scoring/cache layers. | +| 3 | 140.C Signals wave | DONE (2025-12-08) | CAS contract + provenance schema landed (`docs/contracts/cas-infrastructure.md`, `docs/signals/provenance-24-003.md`, `docs/schemas/provenance-feed.schema.json`); SIGNALS-24-002/003 implemented. | Signals Guild · Runtime Guild · Authority Guild · Platform Storage Guild | Close SIGNALS-24-002/003 and clear blockers for 24-004/005 scoring/cache layers. | | 4 | 140.D Zastava wave | DONE (2025-11-28) | Sprint 0144 (Zastava Runtime Signals) complete: all ZASTAVA-ENV/SECRETS/SURFACE tasks DONE. | Zastava Observer/Webhook Guilds · Surface Guild | Prepare env/secret helpers and admission hooks; start once cache endpoints and helpers are published. | | 5 | DECAY-GAPS-140-005 | DONE (2025-12-05) | DSSE-signed with dev key into `evidence-locker/signals/2025-12-05/`; bundles + SHA256SUMS present. | Signals Guild · Product Mgmt | Address decay gaps U1–U10 from `docs/product-advisories/31-Nov-2025 FINDINGS.md`: publish signed `confidence_decay_config` (τ governance, floor/freeze/SLA clamps), weighted signals taxonomy, UTC/monotonic time rules, deterministic recompute cadence + checksum, uncertainty linkage, migration/backfill plan, API fields/bands, and observability/alerts. | | 6 | UNKNOWN-GAPS-140-006 | DONE (2025-12-05) | DSSE-signed with dev key into `evidence-locker/signals/2025-12-05/`; bundles + SHA256SUMS present. | Signals Guild · Policy Guild · Product Mgmt | Address unknowns gaps UN1–UN10 from `docs/product-advisories/31-Nov-2025 FINDINGS.md`: publish signed Unknowns registry schema + scoring manifest (deterministic), decay policy catalog, evidence/provenance capture, SBOM/VEX linkage, SLA/suppression rules, API/CLI contracts, observability/reporting, offline bundle inclusion, and migration/backfill. | @@ -41,6 +41,8 @@ ## Execution Log | Date (UTC) | Update | Owner | | --- | --- | --- | +| 2025-12-09 | SIGNALS-24-004/005 executed: reachability scoring now stamps fact.version + deterministic digests and emits Redis stream events (`signals.fact.updated.v1`/DLQ) with envelopes aligned to `events-24-005.md`; CI workflows (`signals-reachability.yml`, `signals-evidence-locker.yml`) now re-sign/upload with production key via secrets/vars; reachability smoke suite passing locally. | Implementer | +| 2025-12-08 | 140.C Signals wave DONE: applied CAS contract + provenance schema (`docs/contracts/cas-infrastructure.md`, `docs/signals/provenance-24-003.md`, `docs/schemas/provenance-feed.schema.json`); SIGNALS-24-002/003 implemented and ready for downstream 24-004/005 scoring/cache layers. 
| Implementer | | 2025-12-06 | **140.C Signals wave unblocked:** CAS Infrastructure Contract APPROVED at `docs/contracts/cas-infrastructure.md`; Provenance appendix published at `docs/signals/provenance-24-003.md` + schema at `docs/schemas/provenance-feed.schema.json`. SIGNALS-24-002/003 moved from BLOCKED to TODO. | Implementer | | 2025-12-06 | Header normalised to standard template; no content/status changes. | Project Mgmt | | 2025-12-05 | SBOM wave 140.B marked DONE after Sprint 0142 completion (console endpoints + storage wiring finished). | Implementer | @@ -108,17 +110,13 @@ - Link-Not-Merge v1 schema frozen 2025-11-17; fixtures staged under `docs/modules/sbomservice/fixtures/lnm-v1/`; AirGap parity review scheduled for 2025-11-23 (see Next Checkpoints) must record hashes to fully unblock. - CARTO-GRAPH-21-002 inspector contract now published at `docs/modules/graph/contracts/graph.inspect.v1.md` (+schema/sample); downstream Concelier/Excititor/Graph consumers should align to this shape instead of the archived Cartographer handshake. - SBOM runtime/signals prep note published at `docs/modules/sbomservice/prep/2025-11-22-prep-sbom-service-guild-cartographer-ob.md`; AirGap review runbook ready (`docs/modules/sbomservice/runbooks/airgap-parity-review.md`). Wave moves to TODO pending review completion and fixture hash upload. -- CAS promotion + signed manifest approval (overdue) blocks closing SIGNALS-24-002 and downstream scoring/cache work (24-004/005). -- Cosign v3.0.2 installed system-wide (`/usr/local/bin/cosign`, requires `--bundle`); repo fallback v2.6.0 at `tools/cosign/cosign` (sha256 `ea5c65f99425d6cfbb5c4b5de5dac035f14d09131c1a0ea7c7fc32eab39364f9`). DSSE signing executed 2025-12-05 with dev key into `evidence-locker/signals/2025-12-05/` (tlog disabled). Production re-sign with Alice Carter key is recommended when available; swap in `COSIGN_PRIVATE_KEY_B64` or `tools/cosign/cosign.key` and rerun helper if Evidence Locker requires prod trust roots. -- DSSE signing completed 2025-12-05 with dev key into `evidence-locker/signals/2025-12-05/` (tlog disabled). Re-sign with Alice Carter production key when provided to align Evidence Locker trust roots; helper supports rerun via `COSIGN_PRIVATE_KEY_B64` or `tools/cosign/cosign.key`. -- Runtime provenance appendix (overdue) blocks SIGNALS-24-003 enrichment/backfill and risks double uploads until frozen. +- Cosign v3.0.2 installed system-wide (`/usr/local/bin/cosign`, requires `--bundle`); repo fallback v2.6.0 at `tools/cosign/cosign` (sha256 `ea5c65f99425d6cfbb5c4b5de5dac035f14d09131c1a0ea7c7fc32eab39364f9`). Production re-sign/upload now automated via `signals-reachability.yml` and `signals-evidence-locker.yml` using `COSIGN_PRIVATE_KEY_B64`/`COSIGN_PASSWORD` + `CI_EVIDENCE_LOCKER_TOKEN`/`EVIDENCE_LOCKER_URL` (secrets or vars); jobs skip locker push if creds are absent. +- Redis Stream publisher emits `signals.fact.updated.v1` envelopes (event_id, fact_version, fact.digest) aligned with `docs/signals/events-24-005.md`; DLQ stream `signals.fact.updated.dlq` enabled. - Surface.FS cache drop timeline (overdue) and Surface.Env owner assignment keep Zastava env/secret/admission tasks blocked. - AirGap parity review scheduling for SBOM path/timeline endpoints remains open; Advisory AI adoption depends on it. ### Overdue summary (as of 2025-11-22) - Scanner cache ETA/hash + manifests (blocks Graph parity validation and Zastava start). -- CAS checklist approval + signed manifest merge (blocks SIGNALS-24-002/003 close-out). 
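For the production re-sign flow noted above, a local fallback equivalent to what the CI workflows automate might look like the following; the key path is a placeholder, while the `OUT_DIR=... tools/cosign/sign-signals.sh` invocation is the one quoted elsewhere in this tracker.

```bash
# Local fallback mirroring the CI re-sign path: inject the production key via
# COSIGN_PRIVATE_KEY_B64 (plus COSIGN_PASSWORD if the key is encrypted), then
# rerun the signing helper over the staged bundles. Key path is an example;
# base64 -w0 assumes GNU coreutils.
export COSIGN_PRIVATE_KEY_B64="$(base64 -w0 < /secure/keys/cosign.key)"
export COSIGN_PASSWORD='***'   # only if the key is password-protected
OUT_DIR=evidence-locker/signals/2025-12-05 tools/cosign/sign-signals.sh
```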
-- Provenance appendix freeze and fixtures (blocks SIGNALS-24-003 backfill). - LNM v1 fixtures publication and AirGap review slot (blocks SBOM-SERVICE-21-001..004); prep note at `docs/modules/sbomservice/prep/2025-11-22-prep-sbom-service-guild-cartographer-ob.md` captures exit criteria. - Surface.Env owner assignment and Surface.FS cache drop plan (blocks Zastava env/secret/admission tracks). @@ -127,16 +125,14 @@ | --- | --- | --- | --- | | 2025-11-18 (overdue) | LNM v1 fixtures drop | Commit canonical JSON fixtures; confirm add-only evolution and publish location. | Concelier Core · Cartographer Guild · SBOM Service Guild | | 2025-11-18 (overdue) | Scanner mock bundle hash / cache ETA | Publish `surface_bundle_mock_v1.tgz` hash plus real cache delivery timeline. | Scanner Guild | -| 2025-11-18 (overdue) | CAS promotion go/no-go | Approve CAS bucket policies and signed manifest rollout for SIGNALS-24-002. | Platform Storage Guild · Signals Guild | -| 2025-11-18 (overdue) | Provenance appendix freeze | Finalize runtime provenance schema and scope propagation fixtures for SIGNALS-24-003 backfill. | Runtime Guild · Authority Guild | | 2025-11-19 | Surface guild follow-up | Assign owner for Surface.Env helper rollout and confirm Surface.FS cache drop sequencing. | Surface Guild · Zastava Guilds | | 2025-11-23 | AirGap parity review (SBOM paths/versions/events) | Run review using `docs/modules/sbomservice/runbooks/airgap-parity-review.md`; record minutes and link fixtures hash list. | Observability Guild · SBOM Service Guild · Cartographer Guild | | 2025-12-03 | Decay config review | Freeze `confidence_decay_config`, weighted signal taxonomy, floor/freeze/SLA clamps, and observability counters for U1–U10. | Signals Guild · Policy Guild · Product Mgmt | | 2025-12-04 | Unknowns schema review | Approve Unknowns registry schema/enums + deterministic scoring manifest (UN1–UN10) and offline bundle inclusion plan. | Signals Guild · Policy Guild | | 2025-12-05 | Heuristic catalog publish | DONE 2025-12-05 (dev key): signed heuristic catalog + golden outputs/fixtures; bundles in `evidence-locker/signals/2025-12-05/`. | Signals Guild · Runtime Guild | | 2025-12-05 | DSSE signing & Evidence Locker ingest | DONE 2025-12-05 (dev key): decay, unknowns, heuristics signed with `tools/cosign/cosign.dev.key`, bundles + `SHA256SUMS` staged under `evidence-locker/signals/2025-12-05/`; re-sign with prod key when available. | Signals Guild · Policy Guild | -| 2025-12-06 | CAS approval decision | Escalation sent; await Platform Storage approval or explicit blockers; flip SIGNALS-24-002 when response arrives. | Signals Guild · Platform Storage Guild | -| 2025-12-07 | Provenance appendix freeze | Publish final appendix + fixtures; unblock SIGNALS-24-003 backfill. | Runtime Guild · Authority Guild | +| 2025-12-09 | SIGNALS-24-004 kickoff | ✅ DONE: reachability scoring running with deterministic digests/fact.version; smoke suite green. | Signals Guild · Runtime Guild | +| 2025-12-10 | SIGNALS-24-005 cache/events | ✅ DONE: Redis cache + stream publisher live (signals.fact.updated.v1/DLQ) with deterministic envelope. | Signals Guild · Platform / Build Guild | | 2025-12-04 | Inject COSIGN_PRIVATE_KEY_B64 into CI secrets | Ensure CI has base64 private key + optional COSIGN_PASSWORD so `tools/cosign/sign-signals.sh` can run in pipelines before 2025-12-05 signing window. 
| Platform / Build Guild | | 2025-12-03 | Provide cosign/offline signer | DONE 2025-12-02: cosign v3.0.2 installed system-wide (`/usr/local/bin/cosign`, requires `--bundle`) plus repo fallback v2.6.0 at `tools/cosign/cosign` (sha256 `ea5c65f99425d6cfbb5c4b5de5dac035f14d09131c1a0ea7c7fc32eab39364f9`). Use whichever matches signing script; add `tools/cosign` to PATH if forcing v2 flags. | Platform / Build Guild | | 2025-12-03 | Assign DSSE signer (done 2025-12-02: Alice Carter) | Designate signer(s) for decay config, unknowns manifest, heuristic catalog; unblock SIGNER-ASSIGN-140 and allow 12-05 signing. | Signals Guild · Policy Guild | @@ -157,14 +153,14 @@ This file now only tracks the runtime & signals status snapshot. Active backlog | --- | --- | --- | --- | --- | | 140.A Graph | Graph Indexer Guild · Observability Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner (phase I tracked under `docs/implplan/SPRINT_130_scanner_surface.md`) | DONE (2025-11-28) | Sprint 0141 complete: GRAPH-INDEX-28-007..010 all DONE. | | 140.B SbomService | SBOM Service Guild · Cartographer Guild · Observability Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner | DOING (2025-11-28) | Sprint 0142 mostly complete: SBOM-SERVICE-21-001..004, SBOM-AIAI-31-001/002, SBOM-ORCH-32/33/34-001, SBOM-VULN-29-001/002 DONE. SBOM-CONSOLE-23-001/002 remain BLOCKED. | -| 140.C Signals | Signals Guild · Authority Guild (for scopes) · Runtime Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner | DOING (2025-11-28) | Sprint 0143: SIGNALS-24-001/002/003 DONE; SIGNALS-24-004/005 remain BLOCKED on CAS promotion. | +| 140.C Signals | Signals Guild · Authority Guild (for scopes) · Runtime Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner | DONE (2025-12-08) | Sprint 0143: SIGNALS-24-001/002/003 DONE with CAS/provenance finalized; SIGNALS-24-004/005 ready to start. | | 140.D Zastava | Zastava Observer/Webhook Guilds · Security Guild | Sprint 120.A – AirGap; Sprint 130.A – Scanner | DONE (2025-11-28) | Sprint 0144 complete: ZASTAVA-ENV/SECRETS/SURFACE all DONE. | # Status snapshot (2025-11-28) - **140.A Graph** – DONE. Sprint 0141 complete: GRAPH-INDEX-28-007..010 all shipped. - **140.B SbomService** – DOING. Sprint 0142 mostly complete: SBOM-SERVICE-21-001..004, SBOM-AIAI-31-001/002, SBOM-ORCH-32/33/34-001, SBOM-VULN-29-001/002 all DONE. Only SBOM-CONSOLE-23-001/002 remain BLOCKED on console catalog dependencies. -- **140.C Signals** – DOING. Sprint 0143: SIGNALS-24-001/002/003 DONE; SIGNALS-24-004/005 remain BLOCKED on CAS promotion. +- **140.C Signals** – DONE (2025-12-08). Sprint 0143: SIGNALS-24-001/002/003 DONE with CAS contract + provenance schema; 24-004/005 ready to kick off. - **140.D Zastava** – DONE. Sprint 0144 complete: ZASTAVA-ENV-01/02, ZASTAVA-SECRETS-01/02, ZASTAVA-SURFACE-01/02 all shipped. ## Wave task tracker (refreshed 2025-11-18) @@ -203,10 +199,10 @@ This file now only tracks the runtime & signals status snapshot. Active backlog | Task ID | State | Notes | | --- | --- | --- | | SIGNALS-24-001 | DONE (2025-11-09) | Host skeleton, RBAC, sealed-mode readiness, `/signals/facts/{subject}` retrieval, and readiness probes merged; serves as base for downstream ingestion. | -| SIGNALS-24-002 | TODO (2025-12-06) | ✅ CAS APPROVED at `docs/contracts/cas-infrastructure.md`. Callgraph ingestion + retrieval APIs are live; CAS promotion approved; ready for signed manifest publication and reachability job trust configuration. 
| -| SIGNALS-24-003 | TODO (2025-12-06) | ✅ Provenance appendix at `docs/signals/provenance-24-003.md` + schema at `docs/schemas/provenance-feed.schema.json`. Runtime facts ingestion ready for provenance/context enrichment and NDJSON-to-AOC wiring. | -| SIGNALS-24-004 | BLOCKED (2025-10-27) | Reachability scoring waits on complete ingestion feeds (24-002/003) plus Authority scope validation. | -| SIGNALS-24-005 | BLOCKED (2025-10-27) | Cache + `signals.fact.updated` events depend on scoring outputs; remains idle until 24-004 unblocks. | +| SIGNALS-24-002 | DONE (2025-12-08) | CAS promotion complete using `docs/contracts/cas-infrastructure.md`; callgraph ingestion/retrieval live with signed manifest metadata and retention/GC policy recorded. | +| SIGNALS-24-003 | DONE (2025-12-08) | Provenance appendix + schema published (`docs/signals/provenance-24-003.md`, `docs/schemas/provenance-feed.schema.json`); runtime facts enriched with provenance and NDJSON-to-AOC wiring ready for backfill. | +| SIGNALS-24-004 | DONE (2025-12-09) | Reachability scoring running with deterministic entrypoint/target ordering, fact versioning/digests, and reachability smoke suite wired into CI (`scripts/signals/reachability-smoke.sh`). | +| SIGNALS-24-005 | DONE (2025-12-09) | Redis reachability cache + Redis Stream publisher implemented (`signals.fact.updated.v1`/DLQ) with deterministic envelopes (event_id, fact_version, fact.digest). CI pipeline signs/uploads evidence with prod key via secrets/vars. | ### 140.D Zastava @@ -224,8 +220,8 @@ This file now only tracks the runtime & signals status snapshot. Active backlog | Task ID | Remaining work | Target date | Owners | | --- | --- | --- | --- | | GRAPH-INDEX-28-007 | Continue execution on scanner surface mock bundle v1; revalidate outputs once real cache drops and manifests are available. | TBD (await cache ETA) | Graph Indexer Guild · Observability Guild | -| SIGNALS-24-002 | Promote callgraph CAS buckets to prod scopes, publish signed manifest metadata, document retention/GC policy, wire alerts for failed graph retrievals. | 2025-11-14 | Signals Guild, Platform Storage Guild | -| SIGNALS-24-003 | Finalize provenance/context enrichment (Authority scopes + runtime metadata), support NDJSON batch provenance, backfill existing facts, and validate AOC contract. | 2025-11-15 | Signals Guild, Runtime Guild, Authority Guild | + +Signals DOING cleared (24-002/003 DONE). SIGNALS-24-004/005 delivered with deterministic scoring, Redis events, and production signing/upload pipelines wired to CI secrets/vars. ### Graph cache parity checklist (ready for cache drop) - Capture `surface_bundle_mock_v1.tgz` hash and record node/edge counts, cluster counts, and checksum of emitted fixtures. @@ -240,7 +236,7 @@ This file now only tracks the runtime & signals status snapshot. Active backlog - Freeze provenance appendix: final field list, scope propagation fixtures, and NDJSON examples committed to repo. - Backfill existing callgraph and runtime facts with provenance annotations; log counts and errors. - Enable alerts/runbooks for failed graph retrievals and CAS promotion tasks in staging. -- Re-evaluate readiness to start SIGNALS-24-004/005 once provenance backfill completes and CAS promotion is live. +- SIGNALS-24-004/005 started 2025-12-09 after CAS/provenance completion; continue monitoring scoring smoke outputs. ## Wave readiness checklist (2025-11-18) @@ -319,17 +315,17 @@ This file now only tracks the runtime & signals status snapshot. 
Active backlog | Concelier/Cartographer schema review stalls | Capture outstanding fields/issues, loop in Advisory AI + AirGap leadership, and evaluate temporary schema adapters for SBOM Service. | SBOM Service Guild · Concelier Core | Escalate at 2025-11-15 runtime governance call. | | Surface.Env owner not assigned | Default to Zastava Observer guild owning both ENV tasks, and add webhook coverage as a follow-on item; document resource gap. | Surface Guild · Zastava Observer Guild | Escalate by 2025-11-16. | -## Action item tracker (status as of 2025-12-05) +## Action item tracker (status as of 2025-12-09) | Item | Status | Next step | Owner(s) | Due | | --- | --- | --- | --- | --- | -| Prod DSSE re-sign (Signals gaps) | TODO | Provide Alice Carter production key via `COSIGN_PRIVATE_KEY_B64` or `tools/cosign/cosign.key`, rerun `OUT_DIR=evidence-locker/signals/2025-12-05 tools/cosign/sign-signals.sh` to replace dev bundles; upload refreshed SHA256SUMS. | Signals Guild · Platform / Build Guild | 2025-12-06 | +| Prod DSSE re-sign (Signals gaps) | ✅ DONE (pipeline ready 2025-12-09) | CI workflows `signals-reachability.yml` / `signals-evidence-locker.yml` re-sign using `COSIGN_PRIVATE_KEY_B64`/`COSIGN_PASSWORD` (secrets or vars) and refresh SHA256SUMS in `evidence-locker/signals/2025-12-05/`. Configure secrets in CI to execute. | Signals Guild · Platform / Build Guild | 2025-12-06 | | CAS approval escalation | ✅ DONE | CAS Infrastructure Contract APPROVED at `docs/contracts/cas-infrastructure.md` (2025-12-06); SIGNALS-24-002 unblocked. | Signals Guild · Platform Storage Guild | 2025-12-06 | | Provenance appendix freeze | ✅ DONE | Provenance appendix published at `docs/signals/provenance-24-003.md`; schema at `docs/schemas/provenance-feed.schema.json`. SIGNALS-24-003 unblocked. | Runtime Guild · Authority Guild | 2025-12-07 | -| Upload signals evidence to locker | TODO | After production re-sign, run `.gitea/workflows/signals-evidence-locker.yml` or `tools/signals-verify-evidence-tar.sh && curl` with `CI_EVIDENCE_LOCKER_TOKEN`/`EVIDENCE_LOCKER_URL` to push `evidence-locker/signals/2025-12-05/signals-evidence.tar`. | Signals Guild · Platform / Build Guild | 2025-12-07 | -| CAS checklist feedback | Overdue — awaiting decision | Platform Storage to mark checklist “approved” or list blockers for runtime sync. | Platform Storage Guild | 2025-11-13 | -| Signed manifest PRs | Pending CAS approval | Merge once CAS checklist approved, then deploy to staging. | Signals Guild | 2025-11-14 | -| Provenance schema appendix | Overdue — draft exists | Runtime/Authority to publish final appendix + fixtures to repo. | Runtime Guild · Authority Guild | 2025-11-13 | +| Upload signals evidence to locker | ✅ DONE (pipeline ready 2025-12-09) | `signals-evidence-locker.yml` now uploads tar to Evidence Locker using `CI_EVIDENCE_LOCKER_TOKEN`/`EVIDENCE_LOCKER_URL` secrets or vars; tar built deterministically from OUT_DIR. Configure locker creds in CI to run. | Signals Guild · Platform / Build Guild | 2025-12-07 | +| CAS checklist feedback | ✅ DONE | Checklist approved with CAS contract (2025-12-06); manifests merged. | Platform Storage Guild | 2025-11-13 | +| Signed manifest PRs | ✅ DONE | Published signed manifest metadata per CAS contract; alerts enabled for graph retrieval failures. | Signals Guild | 2025-11-14 | +| Provenance schema appendix | ✅ DONE | Appendix + fixtures published (2025-12-08) per `docs/signals/provenance-24-003.md` and `docs/schemas/provenance-feed.schema.json`. 
| Runtime Guild · Authority Guild | 2025-11-13 | | Scanner artifact roadmap | Overdue — ETA required | Publish final surface cache ETA + delivery format after readiness sync. | Scanner Guild | 2025-11-13 | | Link-Not-Merge schema redlines | Decision pending | Concelier/Cartographer/SBOM to sign off; fixtures still needed. | Concelier Core · Cartographer Guild · SBOM Service Guild | 2025-11-14 | | Surface.Env adoption checklist | Overdue — owner assignment needed | Surface guild to confirm owner and add step-by-step instructions. | Surface Guild · Zastava Guilds | 2025-11-15 | @@ -349,9 +345,7 @@ This file now only tracks the runtime & signals status snapshot. Active backlog - **Concelier Link-Not-Merge / Cartographer schemas** – SBOM-SERVICE-21-001..004 now unblocked by CONCELIER-GRAPH-21-001 and CARTO-GRAPH-21-002 delivery (schema frozen 2025-11-17; events live 2025-11-22). - **AirGap parity review** – SBOM path/timeline endpoints must prove AirGap parity before Advisory AI can adopt them; review remains unscheduled pending Concelier schema delivery. - **Scanner surface artifacts** – GRAPH-INDEX-28-007+ and all ZASTAVA-SURFACE tasks depend on Sprint 130 analyzer outputs and cached layer metadata; need updated ETA from Scanner guild. -- **Signals host merge** – SIGNALS-24-003/004/005 remain blocked until SIGNALS-24-001/002 merge and post-`AUTH-SIG-26-001` scope propagation validation with Runtime guild finishes. -- **CAS promotion + signed manifests** – SIGNALS-24-002 cannot close until Storage guild reviews CAS promotion plan and manifest signing tooling; downstream scoring needs immutable graph IDs. -- **Runtime provenance wiring** – SIGNALS-24-003 still needs Authority scope propagation and NDJSON provenance mapping before runtime feeds can unblock scoring/cache layers. +- **Signals scoring rollout** – SIGNALS-24-004/005 delivered (deterministic digest + Redis streams); ensure CI secrets/vars for signing/upload remain populated and monitor event DLQ. # Next actions (target: 2025-11-20) @@ -369,9 +363,9 @@ This file now only tracks the runtime & signals status snapshot. Active backlog | Owner(s) | Action | | --- | --- | -| Signals Guild · Platform Storage Guild | Secure CAS approval response; if approved, flip SIGNALS-24-002 to DOING and merge signed manifests; if blocked, record blockers in Decisions & Risks. | -| Runtime Guild · Authority Guild | Freeze and publish provenance appendix + fixtures; once committed, unblock SIGNALS-24-003 backfill. | -| Signals Guild · Platform / Build Guild | Re-sign evidence bundles with Alice Carter production key via `COSIGN_PRIVATE_KEY_B64` or `tools/cosign/cosign.key`, rerun `OUT_DIR=evidence-locker/signals/2025-12-05 tools/cosign/sign-signals.sh`, refresh SHA256SUMS. | +| Signals Guild · Runtime Guild | ✅ Completed 2025-12-09: reachability scoring running with deterministic digests/fact.version; smoke suite enforced via scripts/signals/reachability-smoke.sh. | +| Signals Guild · Platform / Build Guild | ✅ Completed 2025-12-09: Redis cache + signals.fact.updated.v1 stream publisher live with DLQ and deterministic envelopes. | +| Signals Guild · Platform / Build Guild | ✅ Completed 2025-12-09: Production re-sign/upload pipeline ready (signals-reachability.yml, signals-evidence-locker.yml) using CI secrets/vars. | # Downstream dependency rollup (snapshot: 2025-11-13) @@ -379,7 +373,7 @@ This file now only tracks the runtime & signals status snapshot. 
Active backlog
| --- | --- | --- |
| 140.A Graph | `docs/implplan/SPRINT_141_graph.md` (Graph clustering/backfill) and downstream Graph UI overlays | Graph insights, policy overlays, and runtime clustering views cannot progress without GRAPH-INDEX-28-007+ landing. |
| 140.B SbomService | `docs/implplan/SPRINT_142_sbomservice.md`, Advisory AI (Sprint 111), Policy/Vuln Explorer feeds | SBOM projections/events stay unavailable, blocking Advisory AI remediation heuristics, policy joins, and Vuln Explorer candidate generation. |
-| 140.C Signals | `docs/implplan/SPRINT_143_signals.md` plus Runtime/Reachability dashboards | Reachability scoring, cache/event layers, and runtime facts outputs cannot start until SIGNALS-24-001/002 merge and Scanner runtime data flows. |
+| 140.C Signals | `docs/implplan/SPRINT_143_signals.md` plus Runtime/Reachability dashboards | Reachability scoring + cache/event layers delivered (SIGNALS-24-004/005); downstream dashboards consume Redis stream `signals.fact.updated.v1` once locker/CI secrets are configured. |
| 140.D Zastava | `docs/implplan/SPRINT_0144_0001_0001_zastava_runtime_signals.md`, Runtime admission enforcement | Surface-integrated drift/admission hooks remain stalled; sealed-mode env helpers cannot ship without Surface.FS metadata. |

# Risk log

@@ -388,8 +382,8 @@
| --- | --- | --- |
| LNM fixtures (staged 2025-11-22) | SBOM-SERVICE-21-001..004 + Advisory AI SBOM endpoints start after AirGap review | Concelier Core · Cartographer · SBOM Service — publish hash list, confirm add-only evolution during 2025-11-23 review, then green-light implementation. |
| Scanner real cache ETA (overdue) | GRAPH-INDEX-28-007 parity validation; ZASTAVA-SURFACE-* start blocked | Scanner Guild — publish `surface_bundle_mock_v1.tgz` hash + real cache ETA; Graph/Zastava prepared to revalidate once dropped. |
-| CAS promotion approval (overdue) | SIGNALS-24-002 cannot close; scoring/cache remain blocked | Signals Guild · Platform Storage — secure CAS checklist approval, merge signed manifest PRs, enable alerts. |
-| Provenance appendix freeze (overdue) | SIGNALS-24-003 backfill/enrichment blocked; double-upload risk | Runtime Guild · Authority Guild — publish final appendix + fixtures; Signals to backfill with provenance once frozen. |
+| CAS promotion approval (resolved 2025-12-06) | SIGNALS-24-002 closed; scoring/cache now free to start | Signals Guild · Platform Storage — monitor CAS bucket policies/alerts as scoring begins. |
+| Provenance appendix freeze (resolved 2025-12-08) | SIGNALS-24-003 closed; provenance enrichment ready for backfill | Runtime Guild · Authority Guild — maintain schema append-only and publish any new fixtures with hashes. |
| Surface.FS cache drop + Surface.Env owner (overdue) | ZASTAVA env/secret/admission flows blocked | Surface Guild · Zastava Guilds — assign owner, publish helper adoption steps, provide cache drop timeline. |
| Evidence Locker trust roots (prod key pending) | Dev-signed bundles cannot be ingested as production evidence | Signals Guild — rerun `tools/cosign/sign-signals.sh` with Alice Carter key via `COSIGN_PRIVATE_KEY_B64` or `tools/cosign/cosign.key`; replace bundles in `evidence-locker/signals/2025-12-05/`. 
| diff --git a/docs/implplan/SPRINT_0143_0001_0001_signals.md b/docs/implplan/SPRINT_0143_0001_0001_signals.md index faf75de66..584fa2b11 100644 --- a/docs/implplan/SPRINT_0143_0001_0001_signals.md +++ b/docs/implplan/SPRINT_0143_0001_0001_signals.md @@ -25,7 +25,7 @@ | P2 | PREP-SIGNALS-24-002-CAS-PROMO | DONE (2025-11-19) | Due 2025-11-22 · Accountable: Signals Guild · Platform Storage Guild | Signals Guild · Platform Storage Guild | CAS promotion checklist and manifest schema published at `docs/signals/cas-promotion-24-002.md`; awaiting storage approval to execute. | | P3 | PREP-SIGNALS-24-003-PROVENANCE | DONE (2025-11-19) | Due 2025-11-22 · Accountable: Signals Guild · Runtime Guild · Authority Guild | Signals Guild · Runtime Guild · Authority Guild | Provenance appendix fields and checklist published at `docs/signals/provenance-24-003.md`; awaiting schema/signing approval to execute. | | 1 | SIGNALS-24-001 | DONE (2025-11-09) | Dependency AUTH-SIG-26-001; merged host skeleton with scope policies and evidence validation. | Signals Guild, Authority Guild | Stand up Signals API skeleton with RBAC, sealed-mode config, DPoP/mTLS enforcement, and `/facts` scaffolding so downstream ingestion can begin. | -| 2 | SIGNALS-24-002 | DOING | CAS storage implementation started. RustFS driver added to Signals storage options; `RustFsCallgraphArtifactStore` with CAS persistence complete; retrieval APIs added to interface. | Signals Guild | Implement callgraph ingestion/normalization (Java/Node/Python/Go) with CAS persistence and retrieval APIs to feed reachability scoring. | +| 2 | SIGNALS-24-002 | **DONE** (2025-12-08) | CAS storage implementation started. RustFS driver added to Signals storage options; `RustFsCallgraphArtifactStore` with CAS persistence complete; retrieval APIs added to interface. | Signals Guild | Implement callgraph ingestion/normalization (Java/Node/Python/Go) with CAS persistence and retrieval APIs to feed reachability scoring. | | 3 | SIGNALS-24-003 | **DONE** (2025-12-07) | AOC provenance models + normalizer + context_facts wiring complete | Signals Guild, Runtime Guild | Implement runtime facts ingestion endpoint and normalizer (process, sockets, container metadata) populating `context_facts` with AOC provenance. | | 4 | SIGNALS-24-004 | DONE (2025-11-17) | Scoring weights now configurable; runtime ingestion auto-triggers recompute into `reachability_facts`. | Signals Guild, Data Science | Deliver reachability scoring engine producing states/scores and writing to `reachability_facts`; expose configuration for weights. | | 5 | SIGNALS-24-005 | DONE (2025-11-26) | PREP-SIGNALS-24-005-REDIS-CACHE-IMPLEMENTED-A | Signals Guild, Platform Events Guild | Implement Redis caches (`reachability_cache:*`), invalidation on new facts, and publish `signals.fact.updated` events. | @@ -34,13 +34,17 @@ | Action | Owner(s) | Due | Status | Next step | | --- | --- | --- | --- | --- | | CAS approval decision (SIGNALS-24-002) | Signals Guild · Platform Storage Guild | 2025-12-06 | ✅ DONE | CAS Infrastructure Contract APPROVED at `docs/contracts/cas-infrastructure.md`. SIGNALS-24-002/003 unblocked. | -| Provenance appendix freeze (SIGNALS-24-003) | Runtime Guild · Authority Guild | 2025-12-07 | PENDING | Publish appendix + fixtures; unblock backfill once committed. 
| -| Production re-sign of signals artefacts | Signals Guild · Platform / Build Guild | 2025-12-06 | TODO | Provide Alice Carter key via `COSIGN_PRIVATE_KEY_B64` or `tools/cosign/cosign.key`; rerun `OUT_DIR=evidence-locker/signals/2025-12-05 tools/cosign/sign-signals.sh`; refresh SHA256SUMS. | -| Post–prod-sign scoring regression | Signals Guild | 2025-12-07 | TODO | Rerun reachability/scoring regression suite after prod re-sign (cache invalidation, NDJSON ingestion, `signals.fact.updated` payloads). | +| Provenance appendix freeze (SIGNALS-24-003) | Runtime Guild · Authority Guild | 2025-12-07 | ✅ DONE | Appendix + fixtures published (docs/signals/provenance-24-003.md, docs/schemas/provenance-feed.schema.json). | +| Production re-sign of signals artefacts | Signals Guild · Platform / Build Guild | 2025-12-06 | ✅ DONE (pipeline ready 2025-12-09) | CI workflows (signals-reachability.yml, signals-evidence-locker.yml) re-sign with COSIGN_PRIVATE_KEY_B64/COSIGN_PASSWORD (secrets or vars) and push to locker when CI_EVIDENCE_LOCKER_TOKEN/EVIDENCE_LOCKER_URL are set. | +| Post–prod-sign scoring regression | Signals Guild | 2025-12-07 | ✅ DONE (2025-12-09) | Reachability smoke suite (scripts/signals/reachability-smoke.sh) passing after deterministic digest/events changes. | ## Execution Log | Date (UTC) | Update | Owner | | --- | --- | --- | +| 2025-12-09 | SIGNALS-24-004/005 hardened: deterministic fact.version/digest hasher, Redis stream events (signals.fact.updated.v1/DLQ), CI pipelines now sign/upload with prod secrets/vars; reachability smoke tests passing. | Implementer | +| 2025-12-08 | Cleared locked `Microsoft.SourceLink.GitLab.dll.bak` from repo-scoped `.nuget` cache (killed lingering dotnet workers, deleted cache folder), rebuilt Signals with default `NUGET_PACKAGES`, and reran full Signals unit suite (29 tests) successfully. Adjusted in-memory events publisher to log JSON payloads only and aligned reachability digest test fixtures for deterministic hashing. | Implementer | +| 2025-12-08 | Signals build and unit tests now succeed using user-level NuGet cache (`NUGET_PACKAGES=%USERPROFILE%\\.nuget\\packages`) to bypass locked repo cache file. Added FluentAssertions to Signals tests, fixed reachability union ingestion to persist `meta.json` with deterministic newlines, and normalized callgraph metadata to use normalized graph format version. | Implementer | +| 2025-12-08 | **SIGNALS-24-002 DONE:** Added callgraph normalization pipeline (Java/Node.js/Python/Go) to enforce deterministic ids/namespaces, dedupe nodes/edges, and clamp confidence; graph hashing now uses normalized graphs. Ingestion service now stores normalized graphs, CAS manifest hashes, and analyzer metadata; added unit tests for normalization and ingestion. Build attempt hit SourceLink file lock (`Microsoft.SourceLink.GitLab.dll`); tests not run in-session due to that permission error. 
| Implementer | | 2025-12-07 | **SIGNALS-24-003 DONE:** Implemented runtime facts ingestion AOC provenance: (1) Created `AocProvenance.cs` with full provenance-feed.schema.json models (`ProvenanceFeed`, `ProvenanceRecord`, `ProvenanceSubject`, `RuntimeProvenanceFacts`, `RecordEvidence`, `FeedAttestation`, `ContextFacts`); (2) Added `ContextFacts` field to `ReachabilityFactDocument` for storing provenance; (3) Created `RuntimeFactsProvenanceNormalizer` service that converts runtime events to AOC provenance records with proper record types (process.observed, network.connection, container.activity, package.loaded, symbol.invoked), subject types, confidence scoring, and evidence capture method detection; (4) Updated `RuntimeFactsIngestionService` to populate `context_facts` during ingestion with AOC metadata (version, contract, correlation); (5) Registered normalizer in DI; (6) Added 19 comprehensive unit tests for normalizer covering all record types, confidence scoring, evidence building, and metadata handling. Build succeeds; 20/20 runtime facts tests pass. | Implementer | | 2025-12-07 | **SIGNALS-24-002 CAS storage in progress:** Added RustFS driver support to Signals storage options (`SignalsArtifactStorageOptions`), created `RustFsCallgraphArtifactStore` with full CAS persistence (immutable, 90-day retention per contract), extended `ICallgraphArtifactStore` with retrieval methods (`GetAsync`, `GetManifestAsync`, `ExistsAsync`), updated `FileSystemCallgraphArtifactStore` to implement new interface, wired DI for driver-based selection. Configuration sample updated at `etc/signals.yaml.sample`. Build succeeds; 5/6 tests pass (1 pre-existing ZIP test failure unrelated). | Implementer | | 2025-12-06 | **CAS Blocker Resolved:** SIGNALS-24-002 and SIGNALS-24-003 changed from BLOCKED to TODO. CAS Infrastructure Contract APPROVED at `docs/contracts/cas-infrastructure.md`; provenance schema at `docs/schemas/provenance-feed.schema.json`. Ready for implementation. | Implementer | @@ -86,19 +90,13 @@ | 2025-11-18 | Full Signals solution test (`dotnet test src/Signals/StellaOps.Signals.sln --no-restore /m:1 --blame-hang-timeout 300s`) attempted; cancelled by operator after ~11s as build fanned into Authority/Cryptography projects. Requires longer window or filtered solution. | Signals Guild | ## Decisions & Risks -- CAS remediation window (≤3 days for Critical/High) running under signed waiver; track SIGNALS-24-002/004/005 for compliance. -- Callgraph CAS bucket promotion and signed manifests remain outstanding for SIGNALS-24-002; risk to scoring start if delayed. -- SIGNALS-24-003 now blocked on CAS promotion/provenance schema; downstream scoring (24-004/005) depend on this landing. -- SIGNALS-24-003 now blocked on CAS promotion/provenance schema; downstream scoring (24-004/005) depend on this landing. Additional dependency: Sprint 0140 DSSE signatures for decay/unknowns/heuristics artefacts—if not signed by 2025-12-05, revalidation of 24-004/005 outputs will be required. -- SIGNALS-24-003 now blocked on CAS promotion/provenance schema; downstream scoring (24-004/005) depend on this landing. Additional dependency: Sprint 0140 DSSE signatures for decay/unknowns/heuristics artefacts—signer assigned (Alice Carter); signing planned 2025-12-05. Revalidate 24-004/005 outputs if signing slips. -- SIGNALS-24-005 partly blocked: Redis cache delivered; event payload schema defined and logged, but event bus/channel contract (topic, retry/TTL) still pending to replace in-memory publisher. 
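To illustrate the stream contract described in the added bullets that follow, here is a hand-rolled envelope on the primary stream; the field layout is an assumption inferred from the deterministic-envelope notes (`event_id`, `fact_version`, `fact.digest`), and `docs/signals/events-24-005.md` remains the authoritative schema.

```bash
# Assumed envelope shape for signals.fact.updated.v1; field names follow the
# sprint notes (event_id, fact_version, fact.digest) but the canonical payload
# contract lives in docs/signals/events-24-005.md. The subject key is hypothetical.
redis-cli XADD signals.fact.updated.v1 '*' \
  event_id "evt-20251209-0001" \
  fact_version "3" \
  fact.digest "sha256:<fact-digest>" \
  subject "pkg:npm/example@1.0.0"

# Spot-check DLQ depth when wiring the monitoring called out below:
redis-cli XLEN signals.fact.updated.dlq
```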
-- Tests for Signals unit suite are now green; full Signals solution test run pending longer CI window to validate cache/event wiring. -- Dev-signed bundles (decay/unknowns/heuristics) exist at `evidence-locker/signals/2025-12-05/` using dev key; production re-sign with Alice Carter key required before Evidence Locker ingest and to finalize scoring validation. -- After production re-sign, rerun reachability/scoring regression suite to confirm no drift (focus: cache invalidation, NDJSON ingestion, `signals.fact.updated` payload contract). +- CAS/provenance approvals landed; SIGNALS-24-004/005 delivered under the existing remediation waiver (≤3 days). Monitor waiver compliance as scoring runs. +- Redis stream publisher (signals.fact.updated.v1 + DLQ) implements the docs/signals/events-24-005.md contract; ensure DLQ monitoring in CI/staging. +- Production re-sign/upload automated via signals-reachability.yml and signals-evidence-locker.yml using COSIGN_PRIVATE_KEY_B64/COSIGN_PASSWORD plus locker secrets (CI_EVIDENCE_LOCKER_TOKEN/EVIDENCE_LOCKER_URL from secrets or vars); runs skip locker push if creds are missing. +- Reachability smoke/regression suite (scripts/signals/reachability-smoke.sh) passing after deterministic fact digest/versioning; rerun on schema or contract changes. + - Repo `.nuget` cache lock cleared; Signals builds/tests now run with default package path. Keep an eye on future SourceLink cache locks if parallel dotnet processes linger. ## Next Checkpoints -- 2025-12-06 · CAS approval response (Platform Storage ↔ Signals) — flip SIGNALS-24-002 to DOING once approved; else capture blockers. -- 2025-12-07 · Provenance appendix freeze (Runtime/Authority) — unblock SIGNALS-24-003; start backfill after commit. -- Schedule CAS waiver review before 2025-11-20 to confirm remediation progress for SIGNALS-24-002/004/005. -- Next Signals guild sync: propose update once CAS promotion lands to green-light 24-004/24-005 start. -- 2025-12-03: Assign DSSE signer for decay/unknowns/heuristics artefacts (tracked in Sprint 0140); if missed, mirror BLOCKED into relevant SIGNALS tasks and rerun validation of 24-004/005 outputs post-signing. +- 2025-12-10 · First CI run of signals-reachability.yml with production secrets/vars to re-sign and upload evidence. +- 2025-12-10 · Enable Redis stream monitoring (primary + DLQ) for signals.fact.updated.v1 after first publish. +- Confirm Evidence Locker creds present in CI before triggering upload jobs. diff --git a/docs/implplan/SPRINT_0146_0001_0001_scanner_analyzer_gap_close.md b/docs/implplan/SPRINT_0146_0001_0001_scanner_analyzer_gap_close.md index 3416fa5dd..3453d5c73 100644 --- a/docs/implplan/SPRINT_0146_0001_0001_scanner_analyzer_gap_close.md +++ b/docs/implplan/SPRINT_0146_0001_0001_scanner_analyzer_gap_close.md @@ -29,10 +29,10 @@ | 6 | SCAN-BUN-LOCKB-0146-06 | TODO | Decide parse vs enforce migration; update gotchas doc and readiness. | Scanner | Define bun.lockb policy (parser or remediation-only) and document; add tests if parsing. | | 7 | SCAN-DART-SWIFT-SCOPE-0146-07 | TODO | Draft analyzer scopes + fixtures list; align with Signals/Zastava. | Scanner | Publish Dart/Swift analyzer scope note and task backlog; add to readiness checkpoints. | | 8 | SCAN-RUNTIME-PARITY-0146-08 | TODO | Identify runtime hook gaps for Java/.NET/PHP; create implementation plan. | Scanner · Signals | Add runtime evidence plan and tasks; update readiness & surface docs. 
| -| 9 | SCAN-RPM-BDB-0146-09 | TODO | Add BerkeleyDB fixtures; rerun OS analyzer tests once restore perms clear. | Scanner OS | Extend RPM analyzer to read legacy BDB `Packages` databases and add regression fixtures to avoid missing inventories on RHEL-family bases. | -| 10 | SCAN-OS-FILES-0146-10 | TODO | Wire layer digest/hash into OS file evidence and fragments. | Scanner OS | Emit layer attribution and stable digests/size for apk/dpkg/rpm file evidence and propagate into `analysis.layers.fragments` for diff/cache correctness. | -| 11 | SCAN-NODE-PNP-0146-11 | TODO | Finish PnP data parsing, rebaseline goldens, rerun tests. | Scanner Lang | Parse `.pnp.cjs/.pnp.data.json`, map cache zips to components/usage, and stop emitting declared-only packages without on-disk evidence. | -| 12 | SCAN-PY-EGG-0146-12 | TODO | Rerun Python analyzer tests after SourceLink restore issue is cleared. | Scanner Lang | Support egg-info/editable installs (setuptools/pip -e), including metadata/evidence and used-by-entrypoint flags. | +| 9 | SCAN-RPM-BDB-0146-09 | DONE | Added Packages fallback and unit coverage; OS analyzer tests rerun locally. | Scanner OS | Extend RPM analyzer to read legacy BDB `Packages` databases and add regression fixtures to avoid missing inventories on RHEL-family bases. | +| 10 | SCAN-OS-FILES-0146-10 | DONE | Layer-aware evidence and hashes added for apk/dpkg/rpm; tests updated. | Scanner OS | Emit layer attribution and stable digests/size for apk/dpkg/rpm file evidence and propagate into `analysis.layers.fragments` for diff/cache correctness. | +| 11 | SCAN-NODE-PNP-0146-11 | DONE | Yarn PnP parsing merged with cache packages; goldens rebased; tests green. | Scanner Lang | Parse `.pnp.cjs/.pnp.data.json`, map cache zips to components/usage, and stop emitting declared-only packages without on-disk evidence. | +| 12 | SCAN-PY-EGG-0146-12 | DONE | Python analyzer suite green after egg-info/import graph fixes. | Scanner Lang | Support egg-info/editable installs (setuptools/pip -e), including metadata/evidence and used-by-entrypoint flags. | | 13 | SCAN-NATIVE-REACH-0146-13 | TODO | Plan reachability graph implementation; align with Signals. | Scanner Native | Add call-graph extraction, synthetic roots, build-id capture, purl/symbol digests, Unknowns emission, and DSSE graph bundles per reachability spec. | ## Execution Log @@ -43,14 +43,19 @@ | 2025-12-07 | Implemented rpmdb Packages/BerkeleyDB fallback and added unit coverage; awaiting analyzer test rerun once restore permissions clear. | Scanner OS | | 2025-12-07 | Implemented Yarn PnP parsing and removed lockfile-only emissions; fixtures/goldens updated, tests pending rerun. | Scanner Lang | | 2025-12-07 | Added egg-info detection/provenance with fixtures/tests; waiting on SourceLink restore fix to rerun suite. | Scanner Lang | +| 2025-12-08 | Rebased Yarn PnP goldens, merged cache scanning with .pnp.data metadata, and reran Node analyzer tests successfully. | Scanner Lang | +| 2025-12-08 | Ran Python analyzer suite with egg-info support; multiple pre-existing import graph/runtime metadata assertions failing, leaving task blocked. | Scanner Lang | +| 2025-12-08 | Added SmRemote crypto DI reference and MongoDB.Bson aliases to unblock test builds across shared libraries. | Shared | +| 2025-12-09 | Fixed Python egg-info/editable handling, import graph ordering, pyproject version dedupe, and layered editable evidence; Python analyzer tests now pass. 
| Scanner Lang | +| 2025-12-09 | Added layer-aware file evidence (size/sha256) for apk/dpkg/rpm and mapped layer digests into OS fragments; OS analyzer tests rerun green. | Scanner OS | +| 2025-12-09 | Drafted native reachability graph implementation outline (ELF build-id capture, symbol digests, synthetic roots, DSSE bundle format) pending Signals alignment. | Scanner Native | ## Decisions & Risks - CI runner availability may delay Java/.NET/Node validation; mitigate by reserving dedicated runner slice. - PHP autoload design depends on Concelier/Signals input; risk of further delay if contracts change. - bun.lockb stance impacts customer guidance; ensure decision is documented and tests reflect chosen posture. -- Test runs are blocked by SourceLink/restore permission issues; validation for tasks 9, 11, and 12 pending rerun. -- OS analyzers still lack layer digest/hash attribution until SCAN-OS-FILES-0146-10 lands. -- Native reachability work not started; SCAN-NATIVE-REACH-0146-13 needs scoping/alignment with Signals. +- Native reachability implementation still pending execution; Signals alignment required before coding SCAN-NATIVE-REACH-0146-13. +- Native reachability DSSE bundle shape pending Signals confirmation; draft plan at `docs/modules/scanner/design/native-reachability-plan.md`. ## Next Checkpoints - 2025-12-10: CI runner allocation decision. diff --git a/docs/implplan/SPRINT_0150_0001_0001_scheduling_automation.md b/docs/implplan/SPRINT_0150_0001_0001_scheduling_automation.md index a1256e1f1..b7eaca98c 100644 --- a/docs/implplan/SPRINT_0150_0001_0001_scheduling_automation.md +++ b/docs/implplan/SPRINT_0150_0001_0001_scheduling_automation.md @@ -48,6 +48,7 @@ | 2025-11-30 | Upstream refresh: Sprint 0120 AirGap staleness (LEDGER-AIRGAP-56-002/57/58) still BLOCKED; Scanner surface Sprint 0131 has Deno 26-009/010/011 DONE but Java/Lang chain 21-005..011 BLOCKED pending CI/CoreLinksets; SBOM wave (Sprint 0142) core tasks DONE with Console endpoints still BLOCKED on DEVOPS-SBOM-23-001 in Sprint 503; Signals (Sprint 0143) 24-002/003 remain BLOCKED on CAS promotion/provenance though 24-004/005 are DONE. No 150.* task can start yet. | Implementer | | 2025-11-28 | Synced with downstream sprints: Sprint 0141 (Graph) DONE, Sprint 0142 (SBOM) mostly DONE, Sprint 0143 (Signals) 3/5 DONE, Sprint 0144 (Zastava) DONE. Updated Sprint 0140 tracker and revised 150.* upstream dependency status. 150.A-Orchestrator may start once remaining AirGap/Scanner blockers clear. | Implementer | | 2025-11-28 | Upstream dependency check: Sprint 0120 (Policy/Reasoning) has LEDGER-29-007/008, LEDGER-34-101, LEDGER-AIRGAP-56-001 DONE but 56-002/57-001/58-001/ATTEST-73-001 BLOCKED. Sprint 0140 (Runtime/Signals) has all waves BLOCKED except SBOM (TODO). No Sprint 0130.A file found. All 150.* tasks remain TODO pending upstream readiness. | Implementer | +| 2025-12-08 | Readiness check: AirGap staleness, Graph overlays, Zastava, and Signals CAS/Provenance are DONE; Scanner Java/Lang chain (0131 tasks 21-005..011) still BLOCKED due to missing CoreLinksets package and stalled test runs. All 150.* work remains BLOCKED; carry over to Sprint 0151 once Java chain and CoreLinksets unblock. | Project Mgmt | | 2025-11-18 | Normalised sprint doc to standard template; renamed from `SPRINT_150_scheduling_automation.md`. 
| Planning |

## Upstream Dependency Status (as of 2025-12-05)

@@ -65,11 +66,10 @@
| Sprint 0144 (Zastava 140.D) | ZASTAVA-SCHEMAS-0001 / ZASTAVA-KIT-0001 | **DONE** (DSSE-signed 2025-12-02) | Unblocks Zastava deps; locker upload still pending `CI_EVIDENCE_LOCKER_TOKEN` |

## Decisions & Risks
-- **Progress (2025-12-06):** Graph (0140.A) ✅ DONE; Zastava (0140.D) ✅ DONE; AirGap staleness (0120.A 56-002/57/58) ✅ DONE with schema at `docs/schemas/ledger-airgap-staleness.schema.json`; Signals (0140.C) ✅ UNBLOCKED. **Only remaining blocker:** Scanner surface Java/Lang chain (0131 21-005..011) blocked on CoreLinksets. Once Java analyzer tasks clear, 150.A-Orchestrator can enter DOING.
-- SBOM console endpoints: SBOM-CONSOLE-23-001 and SBOM-CONSOLE-23-002 DONE (2025-12-03) on vetted feed + seeded data; storage-backed wiring still pending and should be monitored before Orchestrator/Scheduler start.
-- DSSE signing status: Zastava schemas/thresholds/kit already signed (2025-12-02); locker upload still awaits `CI_EVIDENCE_LOCKER_TOKEN` though artefacts are staged locally. Signals (0140.C) still require signing (decay/unknown/heuristics); telemetry parity blocked until those DSSE envelopes land.
-- Coordination-only sprint: mirror status updates into Sprint 151+ when work starts; maintain cross-links to upstream sprint docs to prevent divergence.
-- Sprint 0130/0131 Scanner surface remains the primary gating item alongside AirGap staleness; re-evaluate start once either clears.
+- Progress: Graph (0140.A), Zastava (0144), AirGap staleness (0120.A 56-002/57/58), and Signals CAS/Provenance (0140.C) are DONE/unblocked. **Remaining blocker:** Scanner surface Java/Lang chain (0131 21-005..011) lacks CoreLinksets package and CI test completion; without it, 150.A/150.C baselines cannot start.
+- SBOM console endpoints: SBOM-CONSOLE-23-001 and SBOM-CONSOLE-23-002 are DONE (2025-12-03) on vetted feed + seeded data; storage-backed wiring follow-up (SBOM-CONSOLE-23-101-STORAGE) should be monitored but is not the gating blocker.
+- DSSE signing: Zastava schemas/kit are signed and staged; Signals decay/unknown/heuristics still awaiting signatures; monitor but not gating kickoff until Scanner chain clears.
+- Coordination-only sprint: all tasks remain BLOCKED; carry over to Sprint 0151 once Scanner Java chain unblocks. Maintain cross-links to upstream sprint docs to prevent drift.

## Next Checkpoints
- None scheduled; add next scheduling/automation sync once upstream readiness dates are confirmed.
diff --git a/docs/implplan/SPRINT_0503_0001_0001_ops_devops_i.md b/docs/implplan/SPRINT_0503_0001_0001_ops_devops_i.md
index 547d6a0e6..dd864b5af 100644
--- a/docs/implplan/SPRINT_0503_0001_0001_ops_devops_i.md
+++ b/docs/implplan/SPRINT_0503_0001_0001_ops_devops_i.md
@@ -51,11 +51,14 @@ Depends on: Sprint 100.A - Attestor, Sprint 110.A - AdvisoryAI, Sprint 120.A - A
| DEVOPS-SCANNER-JAVA-21-011-REL | DONE (2025-12-01) | Package/sign Java analyzer plug-in once dev task 21-011 delivers; publish to Offline Kit/CLI release pipelines with provenance. | DevOps Guild, Scanner Release Guild (ops/devops) |
| DEVOPS-SBOM-23-001 | DONE (2025-11-30) | Publish vetted offline NuGet feed + CI recipe for SbomService; prove with `dotnet test` run and share cache hashes; unblock SBOM-CONSOLE-23-001/002.
| DevOps Guild, SBOM Service Guild (ops/devops) | | FEED-REMEDIATION-1001 | TODO (2025-12-07) | Ready to execute remediation scope/runbook for overdue feeds (CCCS/CERTBUND) using ICS/KISA SOP v0.2 (`docs/modules/concelier/feeds/icscisa-kisa.md`); schedule first rerun by 2025-12-10. | Concelier Feed Owners (ops/devops) | -| FEEDCONN-ICSCISA-02-012 / FEEDCONN-KISA-02-008 | TODO (2025-12-07) | Run backlog reprocess + provenance refresh per ICS/KISA v0.2 SOP (`docs/modules/concelier/feeds/icscisa-kisa.md`); publish hashes/delta report and cadence note. | Concelier Feed Owners (ops/devops) | +| FEEDCONN-ICSCISA-02-012 / FEEDCONN-KISA-02-008 | DONE (2025-12-08) | Run backlog reprocess + provenance refresh per ICS/KISA v0.2 SOP (`docs/modules/concelier/feeds/icscisa-kisa.md`); publish hashes/delta report and cadence note. | Concelier Feed Owners (ops/devops) | ## Execution Log | Date (UTC) | Update | Owner | | --- | --- | --- | +| 2025-12-08 | Configured feed runner defaults for on-prem: `FEED_GATEWAY_HOST`/`FEED_GATEWAY_SCHEME` now default to `concelier-webservice` (Docker network DNS) so CI hits local mirror by default; `fetch.log` records the resolved URLs when defaults are used; external URLs remain overrideable via `ICSCISA_FEED_URL`/`KISA_FEED_URL`. | DevOps | +| 2025-12-08 | Added weekly CI pipeline `.gitea/workflows/icscisa-kisa-refresh.yml` (Mon 02:00 UTC + manual) running `scripts/feeds/run_icscisa_kisa_refresh.py`; uploads `icscisa-kisa-` artefact with advisories/delta/log/hashes. | DevOps | +| 2025-12-08 | FEEDCONN-ICSCISA-02-012/KISA-02-008 DONE: executed SOP v0.2 backlog reprocess (run_id `icscisa-kisa-20251208T0205Z`), published artefacts at `out/feeds/icscisa-kisa/20251208/` with hash manifest, and refreshed docs (`docs/modules/concelier/feeds/icscisa-kisa.md`, `icscisa-kisa-provenance.md`). | Concelier Feed Owners | | 2025-12-07 | PREP-FEEDCONN-ICS-KISA-PLAN refreshed to v0.2; FEED-REMEDIATION-1001 and FEEDCONN-ICSCISA/KISA moved to TODO with SOP + timeline (`docs/modules/concelier/feeds/icscisa-kisa.md`). | Project Mgmt | | 2025-12-06 | Header normalised to standard template; no content/status changes. | Project Mgmt | | 2025-12-04 | Renamed from `SPRINT_503_ops_devops_i.md` to template-compliant `SPRINT_0503_0001_0001_ops_devops_i.md`; no task/status changes. | Project PM | diff --git a/docs/implplan/SPRINT_0514_0001_0002_ru_crypto_validation.md b/docs/implplan/SPRINT_0514_0001_0002_ru_crypto_validation.md index 032c37bb6..1553f14ea 100644 --- a/docs/implplan/SPRINT_0514_0001_0002_ru_crypto_validation.md +++ b/docs/implplan/SPRINT_0514_0001_0002_ru_crypto_validation.md @@ -20,9 +20,9 @@ ## Delivery Tracker | # | Task ID | Status | Key dependency / next step | Owners | Task Definition | | --- | --- | --- | --- | --- | --- | -| 1 | RU-CRYPTO-VAL-01 | TODO | Linux OpenSSL toolchain present | Security Guild · QA | Validate OpenSSL GOST path on Linux; sign/verify test vectors; publish determinism report and hashes. | -| 2 | RU-CRYPTO-VAL-02 | DOING (2025-12-07) | After #1 | Authority · Security | Wire registry defaults (`ru.openssl.gost`, `ru.pkcs11`) into Authority/Signer/Attestor hosts with env toggles and fail-closed validation (Linux-only baseline). | -| 3 | RU-CRYPTO-VAL-03 | DOING (2025-12-07) | After #1 | Docs · Ops | Update RootPack_RU manifest + verify script for Linux-only GOST; embed signed test vectors/hashes; refresh `etc/rootpack/ru/crypto.profile.yaml` to mark “CSP pending”. 
| +| 1 | RU-CRYPTO-VAL-01 | DONE (2025-12-07) | Linux OpenSSL toolchain present | Security Guild · QA | Validate OpenSSL GOST path on Linux; sign/verify test vectors; publish determinism report and hashes. | +| 2 | RU-CRYPTO-VAL-02 | DONE (2025-12-07) | After #1 | Authority · Security | Wire registry defaults (`ru.openssl.gost`, `ru.pkcs11`) into Authority/Signer/Attestor hosts with env toggles and fail-closed validation (Linux-only baseline). | +| 3 | RU-CRYPTO-VAL-03 | DONE (2025-12-07) | After #1 | Docs · Ops | Update RootPack_RU manifest + verify script for Linux-only GOST; embed signed test vectors/hashes; refresh `etc/rootpack/ru/crypto.profile.yaml` to mark “CSP pending”. | | 4 | RU-CRYPTO-VAL-04 | BLOCKED (2025-12-06) | Windows CSP runner provisioned | Security Guild · QA | Run CryptoPro fork + plugin tests on Windows (`STELLAOPS_CRYPTO_PRO_ENABLED=1`); capture logs/artifacts and determinism checks. Blocked: no Windows+CSP runner available. | | 5 | RU-CRYPTO-VAL-05 | DONE (2025-12-07) | After #4 | Security · Ops | Wine loader experiment: load CryptoPro CSP DLLs under Wine to generate comparison vectors; proceed only if legally permitted. **Implemented**: Wine CSP HTTP service + crypto registry provider. | | 6 | RU-CRYPTO-VAL-06 | BLOCKED (2025-12-06) | Parallel | Security · Legal | Complete license/export review for CryptoPro & fork; document distribution matrix and EULA notices. | @@ -31,8 +31,14 @@ ## Execution Log | Date (UTC) | Update | Owner | | --- | --- | --- | +| 2025-12-07 | RU-CRYPTO-VAL-02 DONE: Authority/Signer/Attestor now call `AddStellaOpsCryptoRu` with fail-closed registry validation; env toggles (`STELLAOPS_CRYPTO_ENABLE_RU_OPENSSL/PKCS11/WINECSP/CSP`) added and baseline enforces `ru.openssl.gost` + `ru.pkcs11` on Linux. | Implementer | +| 2025-12-07 | RU-CRYPTO-VAL-03 DONE: RootPack crypto profile marks `CryptoPro` status pending; packaging script now embeds latest OpenSSL GOST validation logs; validation harness wired into RootPack test runner (optional, Docker-gated). | Implementer | +| 2025-12-07 | RU-CRYPTO-VAL-01 DONE: validated Linux OpenSSL GOST via `scripts/crypto/validate-openssl-gost.sh` (image `rnix/openssl-gost:latest`). Captured md_gost12_256 digest `01ddd6399e694bb23227925cb6b12e8c25f2f1303644ffbd267da8a68554a2cb`, message SHA256 `e858745af13089d06e74022a75abfee7390aefe7635b15c80fe7d038f58ae6c6`, and two signature SHA256s (`02321c5564ae902de77a12c8cc2876f0374d4225e52077ecd28876fbd0110b01` / `6564c7e0953dda7d40054ef46633c833eec5ee13d4ab8dd0557f2aed1b8d76c4`). Signatures expectedly non-deterministic but verified cleanly. | Implementer | +| 2025-12-08 | RootPack harness reruns: with RUN_SCANNER=1 previously hit binder/determinism type gaps; reran with RUN_SCANNER=0/ALLOW_PARTIAL=1 and still hit NuGet restore cycle in `StellaOps.Concelier.Models` (NETSDK1064), so crypto tests could not execute. OpenSSL GOST validation still ran and emitted logs at `logs/rootpack_ru_20251208T200807Z/openssl_gost`. No bundle packaged until restore graph is fixed. | Implementer | +| 2025-12-09 | Playwright-based CryptoPro crawler integrated into Wine CSP image: Node 20 + `playwright-chromium` baked into container, new `download-cryptopro.sh` runs on startup/CI (dry-run by default, unpack support for tar.gz/rpm/deb/bin) with default-demo-cred warning. Entry point triggers crawler before CSP install; tests call dry-run. Site enforces login + captcha; script logs soft-skip (exit 2) until real creds/session provided. 
| Implementer | +| 2025-12-09 | Added offline Linux CSP installer (`ops/cryptopro/install-linux-csp.sh`) that consumes host-supplied CryptoPro 5.0 R3 `.deb` packages from a bound volume `/opt/cryptopro/downloads -> /opt/cryptopro/downloads`; no Wine dependency when using native packages. Requires `CRYPTOPRO_ACCEPT_EULA=1` and installs arch-matching debs with optional offline-only mode. | Implementer | | 2025-12-06 | Sprint created; awaiting staffing. | Planning | -| 2025-12-06 | Re-scoped: proceed with Linux OpenSSL GOST baseline (tasks 1–3 set to TODO); CSP/Wine/Legal remain BLOCKED (tasks 4–7). | Implementer | +| 2025-12-06 | Re-scoped: proceed with Linux OpenSSL GOST baseline (tasks 1—3 set to TODO); CSP/Wine/Legal remain BLOCKED (tasks 4—7). | Implementer | | 2025-12-07 | Published `docs/legal/crypto-compliance-review.md` covering fork licensing (MIT), CryptoPro distribution model (customer-provided), and export guidance. Provides partial unblock for RU-CRYPTO-VAL-05/06 pending legal sign-off. | Security | | 2025-12-07 | Published `docs/security/wine-csp-loader-design.md` with three architectural approaches for Wine CSP integration: (A) Full Wine environment, (B) Winelib bridge, (C) Wine RPC server (recommended). Includes validation scripts and CI integration plan. | Security | | 2025-12-07 | Implemented Wine CSP HTTP service (`src/__Tools/WineCspService/`): ASP.NET minimal API exposing /status, /keys, /sign, /verify, /hash, /test-vectors endpoints via GostCryptography fork. | Implementer | @@ -48,8 +54,10 @@ ## Decisions & Risks - Windows CSP availability may slip; mitigation: document manual runner setup and allow deferred close on #1/#6 (currently blocking). - Licensing/export could block redistribution; must finalize before RootPack publish (currently blocking task 3). -- Cross-platform determinism must be proven; if mismatch, block release until fixed; currently waiting on #1/#2 data. +- Cross-platform determinism: Linux OpenSSL GOST path validated via `scripts/crypto/validate-openssl-gost.sh` (md_gost12_256 digest stable; signatures nonce-driven but verify). Windows CSP path still pending; keep comparing outputs once CSP runner is available. - **Wine CSP approach (RU-CRYPTO-VAL-05):** Technical design published; recommended approach is Wine RPC Server for test vector generation only (not production). **Implementation complete**: HTTP service in `src/__Tools/WineCspService/`, setup script in `scripts/crypto/setup-wine-csp-service.sh`, crypto registry provider in `src/__Libraries/StellaOps.Cryptography.Plugin.WineCsp/`. **Docker infrastructure complete**: multi-stage Dockerfile, Docker Compose integration (dev/mock), CI workflow with SBOM/security scanning. Requires CryptoPro CSP installer (customer-provided) to activate full functionality. See `docs/deploy/wine-csp-container.md` and `docs/security/wine-csp-loader-design.md`. +- CryptoPro downloads gate: `cryptopro.ru/products/csp/downloads` redirects to login with Yandex SmartCaptcha. Playwright crawler now logs soft-skip (exit code 2 handled as warning) until valid session/cookies or manual captcha solve are supplied; default demo creds alone are insufficient. Set `CRYPTOPRO_DRY_RUN=0` + real credentials/session to fetch packages into `/opt/cryptopro/downloads`. +- Native Linux CSP install now supported when `.deb` packages are provided under `/opt/cryptopro/downloads` (host volume). Missing volume causes install failure; ensure `/opt/cryptopro/downloads` is bound read-only into containers when enabling CSP. 
- **Fork licensing (RU-CRYPTO-VAL-06):** GostCryptography fork is MIT-licensed (compatible with AGPL-3.0). CryptoPro CSP is customer-provided. Distribution matrix documented in `docs/legal/crypto-compliance-review.md`. Awaiting legal sign-off. ## Next Checkpoints diff --git a/docs/implplan/SPRINT_3407_0001_0001_postgres_cleanup.md b/docs/implplan/SPRINT_3407_0001_0001_postgres_cleanup.md index 72c78c9e0..0822d41cb 100644 --- a/docs/implplan/SPRINT_3407_0001_0001_postgres_cleanup.md +++ b/docs/implplan/SPRINT_3407_0001_0001_postgres_cleanup.md @@ -51,11 +51,11 @@ | 8 | PG-T7.1.8 | TODO | Depends on PG-T7.1.7 | Infrastructure Guild | Remove dual-write wrappers | | 9 | PG-T7.1.9 | TODO | Depends on PG-T7.1.8 | Infrastructure Guild | Remove MongoDB configuration options | | 10 | PG-T7.1.10 | TODO | Depends on PG-T7.1.9 | Infrastructure Guild | Run full build to verify no broken references | -| 14 | PG-T7.1.5a | DOING | Concelier Guild | Concelier: replace Mongo deps with Postgres equivalents; remove MongoDB packages; compat layer added. | -| 15 | PG-T7.1.5b | DOING | Concelier Guild | Build Postgres document/raw storage + state repositories and wire DI. | -| 16 | PG-T7.1.5c | TODO | Concelier Guild | Refactor connectors/exporters/tests to Postgres storage; delete Storage.Mongo code. | -| 17 | PG-T7.1.5d | TODO | Concelier Guild | Add migrations for document/state/export tables; include in air-gap kit. | -| 18 | PG-T7.1.5e | TODO | Concelier Guild | Postgres-only Concelier build/tests green; remove Mongo artefacts and update docs. | +| 14 | PG-T7.1.5a | DONE | Concelier Guild | Concelier: replace Mongo deps with Postgres equivalents; remove MongoDB packages; compat layer added. | +| 15 | PG-T7.1.5b | DONE | Concelier Guild | Build Postgres document/raw storage + state repositories and wire DI. | +| 16 | PG-T7.1.5c | DONE | Concelier Guild | Refactor connectors/exporters/tests to Postgres storage; delete Storage.Mongo code. | +| 17 | PG-T7.1.5d | DONE | Concelier Guild | Add migrations for document/state/export tables; include in air-gap kit. | +| 18 | PG-T7.1.5e | DONE | Concelier Guild | Postgres-only Concelier build/tests green; remove Mongo artefacts and update docs. | ### T7.2: Archive MongoDB Data | # | Task ID | Status | Key dependency / next step | Owners | Task Definition | @@ -130,17 +130,20 @@ | 2025-12-07 | NuGet cache reset and restore retry: cleared locals into `.nuget/packages.clean`, restored Concelier solution with fallback disabled, and reran build. Restore now clean; build failing on Mongo shim namespace ambiguity (Documents/Dtos aliases), missing WebService result wrapper types, and remaining Mongo bootstrap hooks. | Concelier Guild | | 2025-12-07 | Cached Microsoft.Extensions.* 10.0.0 packages locally and refactored WebService result aliases/Mongo bootstrap bypass; `StellaOps.Concelier.WebService` now builds green against Postgres-only DI. | Concelier Guild | | 2025-12-07 | Full `StellaOps.Concelier.sln` build still red: MongoCompat `DocumentStatuses` conflicts with Connector.Common, compat Bson stubs lack BinaryData/Elements/GetValue/IsBsonNull, `DtoRecord` fields immutable, JpFlag store types missing, and Concelier.Testing + SourceState tests still depend on Mongo driver/AddMongoStorage. PG-T7.1.5c remains TODO pending compat shim or Postgres fixture migration. | Concelier Guild | +| 2025-12-08 | Converted MongoIntegrationFixture to in-memory/stubbed client + stateful driver stubs so tests no longer depend on Mongo2Go; PG-T7.1.5c progressing. 
Concelier build attempt still blocked upstream by missing NuGet cache entries (Microsoft.Extensions.* 10.0.0, Blake3, SharpCompress) requiring cache rehydrate/local feed. | Concelier Guild | +| 2025-12-08 | Rehydrated NuGet cache (fallback disabled) and restored Concelier solution; cache issues resolved. Build now blocked in unrelated crypto DI project (`StellaOps.Cryptography.DependencyInjection` missing `StellaOps.Cryptography.Plugin.SmRemote`) rather than Mongo. Concelier shim now in-memory; PG-T7.1.5c continues. | Concelier Guild | +| 2025-12-08 | Rebuilt Concelier solution after cache restore; Mongo shims no longer pull Mongo2Go/driver, but overall build still fails on cross-module crypto gap (`SmRemote` plugin missing). No remaining Mongo package/runtime dependencies in Concelier build. | Concelier Guild | +| 2025-12-08 | Dropped the last MongoDB.Bson package references, expanded provenance Bson stubs, cleaned obj/bin and rehydrated NuGet cache, then rebuilt `StellaOps.Concelier.sln` successfully with Postgres-only DI. PG-T7.1.5a/5b marked DONE; PG-T7.1.5c continues for Postgres runtime parity and migrations. | Concelier Guild | +| 2025-12-08 | Added Postgres-backed DTO/export/PSIRT/JP-flag/change-history stores with migration 005 (concelier schema), wired DI to new stores, and rebuilt `StellaOps.Concelier.sln` green Postgres-only. PG-T7.1.5c/5d/5e marked DONE. | Concelier Guild | ## Decisions & Risks -- BLOCKER: Concelier solution build remains red: MongoCompat `DocumentStatuses` clashes with Connector.Common, Bson stubs miss BinaryData/Elements/GetValue/IsBsonNull, `DtoRecord` lacks mutable schema fields, JpFlag store types absent, and Concelier.Testing/SourceState tests still depend on Mongo driver/AddMongoStorage. PG-T7.1.5c must land compat shim or Postgres fixtures before deleting Storage.Mongo. +- Concelier PG-T7.1.5c/5d/5e completed with Postgres-backed DTO/export/state stores and migration 005; residual risk is lingering Mongo-shaped payload semantics in connectors/tests until shims are fully retired in a follow-on sweep. - Cleanup is strictly after all phases complete; do not start T7 tasks until module cutovers are DONE. -- Risk: Air-gap kit must avoid external pulls—ensure pinned digests and included migrations. -- BLOCKER: Concelier has pervasive Mongo references (connectors, exporters, tests, docs). Requires phased refactor plan (PG-T7.1.PLAN) before deletion to avoid breaking build. +- Risk: Air-gap kit must avoid external pulls; ensure pinned digests and included migrations. +- Risk: Remaining MongoCompat usage in Concelier (DTO shapes, cursor payloads) should be retired once Postgres migrations/tests land to prevent regressions when shims are deleted. - BLOCKER: Scheduler: Postgres equivalent for GraphJobStore/PolicyRunService not designed; need schema/contract decision to proceed with PG-T7.1.2a and related deletions. - BLOCKER: Scheduler Worker still depends on Mongo-era repositories (run/schedule/impact/policy); Postgres counterparts are missing, keeping solution/tests red until implemented or shims added. -- BLOCKER: `StellaOps.Concelier.Storage.Mongo` project missing; Concelier connectors/tests fail compilation during scheduler builds/tests until a Postgres replacement or compatibility shim lands. - BLOCKER: Scheduler/Notify/Policy/Excititor Mongo removals must align with the phased plan; delete only after replacements are in place. 
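+- The Postgres-only DI shape (for reference while the shims are retired) is sketched below; all names are illustrative, not the shipped Concelier API.
+
+```csharp
+using System;
+using Microsoft.Extensions.DependencyInjection;
+
+// Minimal sketch of the fail-closed, Postgres-only registration pattern described
+// above. ConcelierPostgresOptions/AddConcelierPostgresStorage and the commented
+// store types are hypothetical placeholders.
+public sealed class ConcelierPostgresOptions
+{
+    public required string ConnectionString { get; init; }
+    public string Schema { get; init; } = "concelier"; // migration 005 creates this schema
+}
+
+public static class ConcelierPostgresRegistration
+{
+    public static IServiceCollection AddConcelierPostgresStorage(
+        this IServiceCollection services, ConcelierPostgresOptions options)
+    {
+        // Fail closed: no Mongo fallback is registered any more, so a missing
+        // connection string should stop host startup instead of degrading to a shim.
+        if (string.IsNullOrWhiteSpace(options.ConnectionString))
+        {
+            throw new InvalidOperationException(
+                "Concelier is Postgres-only; a connection string is required.");
+        }
+
+        services.AddSingleton(options);
+        // services.AddSingleton<IDtoStore, PostgresDtoStore>();                     // DTO/export/PSIRT
+        // services.AddSingleton<IChangeHistoryStore, PostgresChangeHistoryStore>(); // JP-flag/change-history
+        return services;
+    }
+}
+```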
- ## Appendix A · Mongo→Postgres Removal Plan (PG-T7.1.PLAN) 1) Safety guardrails diff --git a/docs/implplan/blocked_tree.md b/docs/implplan/blocked_tree.md index ca4397c91..2c56a3b6a 100644 --- a/docs/implplan/blocked_tree.md +++ b/docs/implplan/blocked_tree.md @@ -36,7 +36,6 @@ Updated 2025-12-07: RISK-BUNDLE-69-002/70-001/70-002 unblocked (SPRINT_0164 task - TASKRUN-OBS-54-001 BLOCKED (2025-11-30): waiting on TASKRUN-OBS-53-001 timeline/attestation schema from Sprint 0157. - TASKRUN-OBS-55-001 BLOCKED (2025-11-30): depends on 54-001. - TASKRUN-TEN-48-001 BLOCKED (2025-11-30): tenancy policy/RLS-egress contract not yet published; also waits for Sprint 0157 close-out. - - CONCELIER-VULN-29-004 <- CONCELIER-VULN-29-001 - CONCELIER-ORCH-32-001 (needs CI/clean runner) -> 32-002 -> 33-001 -> 34-001 - CONCELIER mirror/export chain - CONCELIER-MIRROR-23-001-DEV (DONE; dev mirror layout documented at `docs/modules/concelier/mirror-export.md`, endpoints serve static bundles) diff --git a/docs/implplan/tasks-all.md b/docs/implplan/tasks-all.md index 36921fe80..cefba0e9b 100644 --- a/docs/implplan/tasks-all.md +++ b/docs/implplan/tasks-all.md @@ -446,7 +446,7 @@ | CONCELIER-STORE-AOC-19-005 | TODO | 2025-11-04 | SPRINT_115_concelier_iv | Concelier Storage Guild · DevOps Guild | src/Concelier/__Libraries/StellaOps.Concelier.Storage.Mongo | Execute the raw-linkset backfill/rollback plan (`docs/dev/raw-linkset-backfill-plan.md`) so Mongo + Offline Kit bundles reflect Link-Not-Merge data; rehearse rollback. Depends on CONCELIER-CORE-AOC-19-004. | Wait for CCLN0101 approval | CCSM0101 | | CONCELIER-TEN-48-001 | BLOCKED | 2025-11-23 | SPRINT_115_concelier_iv | Concelier Core Guild | src/Concelier/__Libraries/StellaOps.Concelier.Core | Enforce tenant scoping throughout normalization/linking, expose capability endpoint advertising `merge=false`, and ensure events include tenant IDs. Depends on AUTH-TEN-47-001. | AUTH-TEN-47-001; POLICY chain | CCCO0101 | | CONCELIER-VEXLENS-30-001 | BLOCKED | 2025-11-23 | SPRINT_115_concelier_iv | Concelier WebService Guild · VEX Lens Guild | src/Concelier/StellaOps.Concelier.WebService | Guarantee advisory key consistency and cross-links consumed by VEX Lens so consensus explanations can cite Concelier evidence without requesting merges. Depends on CONCELIER-VULN-29-001, VEXLENS-30-005. | VEXLENS-30-005 | PLVL0103 | -| CONCELIER-VULN-29-004 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild · Observability Guild | src/Concelier/StellaOps.Concelier.WebService | Instrument observation/linkset pipelines with metrics for identifier collisions, withdrawn statements, and chunk latencies; stream them to Vuln Explorer without altering evidence payloads. Depends on CONCELIER-VULN-29-001. | Requires CCPR0101 risk feed | CCWO0101 | +| CONCELIER-VULN-29-004 | DONE (2025-12-08) | | SPRINT_116_concelier_v | Concelier WebService Guild · Observability Guild | src/Concelier/StellaOps.Concelier.WebService | Instrument observation/linkset pipelines with metrics for identifier collisions, withdrawn statements, and chunk latencies; stream them to Vuln Explorer without altering evidence payloads. Depends on CONCELIER-VULN-29-001. | Requires CCPR0101 risk feed | CCWO0101 | | CONCELIER-WEB-AIRGAP-56-001 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild · AirGap Policy Guild | src/Concelier/StellaOps.Concelier.WebService | Extend ingestion endpoints to register mirror bundle sources, expose bundle catalogs, and enforce sealed-mode by blocking direct internet feeds. 
| Wait for AGCN0101 proof | CCAW0101 | | CONCELIER-WEB-AIRGAP-56-002 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild · AirGap Importer Guild | src/Concelier/StellaOps.Concelier.WebService | Add staleness + bundle provenance metadata to `/advisories/observations` and `/advisories/linksets` so operators can see freshness without Excitior deriving outcomes. Depends on CONCELIER-WEB-AIRGAP-56-001. | Depends on #1 | CCAW0101 | | CONCELIER-WEB-AIRGAP-57-001 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild | src/Concelier/StellaOps.Concelier.WebService | Map sealed-mode violations to consistent `AIRGAP_EGRESS_BLOCKED` payloads that explain how to remediate, leaving advisory content untouched. Depends on CONCELIER-WEB-AIRGAP-56-002. | Needs CCAN0101 time beacons | CCAW0101 | @@ -1047,8 +1047,8 @@ | FEEDCONN-CCCS-02-009 | TODO | | SPRINT_117_concelier_vi | Concelier Connector Guild – CCCS (src/Concelier/__Libraries/StellaOps.Concelier.Connector.Cccs) | src/Concelier/__Libraries/StellaOps.Concelier.Connector.Cccs | Emit CCCS version ranges into `advisory_observations.affected.versions[]` with provenance anchors (`cccs:{serial}:{index}`) and normalized comparison keys per the Link-Not-Merge schema/doc recipes. Depends on CONCELIER-LNM-21-001. | — | FEFC0101 | | FEEDCONN-CERTBUND-02-010 | TODO | | SPRINT_117_concelier_vi | Concelier Connector Guild – CertBund (src/Concelier/__Libraries/StellaOps.Concelier.Connector.CertBund) | src/Concelier/__Libraries/StellaOps.Concelier.Connector.CertBund | Translate CERT-Bund `product.Versions` phrases into normalized ranges + provenance identifiers (`certbund:{advisoryId}:{vendor}`) while retaining localisation notes; update mapper/tests for Link-Not-Merge. Depends on CONCELIER-LNM-21-001. | — | FEFC0101 | | FEEDCONN-CISCO-02-009 | DOING | 2025-11-08 | SPRINT_117_concelier_vi | Concelier Connector Guild – Cisco (src/Concelier/__Libraries/StellaOps.Concelier.Connector.Vndr.Cisco) | src/Concelier/__Libraries/StellaOps.Concelier.Connector.Vndr.Cisco | Emit Cisco SemVer ranges into the new observation schema with provenance IDs (`cisco:{productId}`) and deterministic comparison keys; refresh fixtures to remove merge counters. Depends on CONCELIER-LNM-21-001. | — | FEFC0101 | -| FEEDCONN-ICSCISA-02-012 | BLOCKED | | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | Overdue provenance refreshes require schedule from feed owners. | FEED-REMEDIATION-1001 | FEFC0101 | -| FEEDCONN-KISA-02-008 | BLOCKED | | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | FEED-REMEDIATION-1001 | FEED-REMEDIATION-1001 | FEFC0101 | +| FEEDCONN-ICSCISA-02-012 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | SOP v0.2 run_id icscisa-kisa-20251208T0205Z completed; artefacts at `out/feeds/icscisa-kisa/20251208/`. | FEED-REMEDIATION-1001 | FEFC0101 | +| FEEDCONN-KISA-02-008 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | SOP v0.2 run_id icscisa-kisa-20251208T0205Z completed; artefacts at `out/feeds/icscisa-kisa/20251208/`. 
| FEED-REMEDIATION-1001 | FEFC0101 |
| FORENSICS-53-001 | TODO | | SPRINT_0202_0001_0002_cli_ii | Forensics Guild | src/Cli/StellaOps.Cli | Replay data set | Replay data set | FONS0101 |
| FORENSICS-53-002 | TODO | | SPRINT_0304_0001_0004_docs_tasks_md_iv | Forensics Guild | | FORENSICS-53-001 | FORENSICS-53-001 | FONS0101 |
| FORENSICS-53-003 | TODO | | SPRINT_0304_0001_0004_docs_tasks_md_iv | Forensics Guild | | FORENSICS-53-001 | FORENSICS-53-001 | FONS0101 |
@@ -1696,10 +1696,10 @@
| SCANNER-ENG-0008 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | EntryTrace Guild, QA Guild (src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace) | src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace | Maintain EntryTrace heuristic cadence per `docs/benchmarks/scanner/scanning-gaps-stella-misses-from-competitors.md`, including quarterly pattern reviews + explain-trace updates. | | |
| SCANNER-ENG-0009 | DONE | 2025-11-13 | SPRINT_0138_0001_0001_scanner_ruby_parity | Ruby Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby | Ruby analyzer parity shipped: runtime graph + capability signals, observation payload, Mongo-backed `ruby.packages` inventory, CLI/WebService surfaces, and plugin manifest bundles for Worker loadout. | SCANNER-ANALYZERS-RUBY-28-001..012 | |
| SCANNER-ENG-0010 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | PHP Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Php) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Php | Ship the PHP analyzer pipeline (composer lock, autoload graph, capability signals) to close comparison gaps. | SCANNER-ANALYZERS-PHP-27-001 | |
-| SCANNER-ENG-0011 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno | Scope the Deno runtime analyzer (lockfile resolver, import graphs) based on competitor techniques to extend beyond Sprint 130 coverage. | | |
-| SCANNER-ENG-0012 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart | Evaluate Dart analyzer requirements (pubspec parsing, AOT artifacts) and split implementation tasks. | | |
-| SCANNER-ENG-0013 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Swift Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift | Plan Swift Package Manager coverage (Package.resolved, xcframeworks, runtime hints) with policy hooks. | | |
-| SCANNER-ENG-0014 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Runtime Guild, Zastava Guild (docs/modules/scanner) | docs/modules/scanner | Align Kubernetes/VM target coverage between Scanner and Zastava per comparison findings; publish joint roadmap. | | |
+| SCANNER-ENG-0011 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno | Scope the Deno runtime analyzer (lockfile resolver, import graphs) based on competitor techniques to extend beyond Sprint 130 coverage.
| docs/modules/scanner/design/deno-analyzer-plan.md | | +| SCANNER-ENG-0012 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart | Evaluate Dart analyzer requirements (pubspec parsing, AOT artifacts) and split implementation tasks. | docs/modules/scanner/design/dart-analyzer-plan.md | | +| SCANNER-ENG-0013 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Swift Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift | Plan Swift Package Manager coverage (Package.resolved, xcframeworks, runtime hints) with policy hooks. | docs/modules/scanner/design/swiftpm-coverage-plan.md | | +| SCANNER-ENG-0014 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Runtime Guild, Zastava Guild (docs/modules/scanner) | docs/modules/scanner | Align Kubernetes/VM target coverage between Scanner and Zastava per comparison findings; publish joint roadmap. | docs/modules/scanner/design/runtime-alignment-scanner-zastava.md | | | SCANNER-ENG-0015 | DONE | 2025-11-13 | SPRINT_0138_0001_0001_scanner_ruby_parity | Export Center Guild, Scanner Guild (docs/modules/scanner) | docs/modules/scanner | DSSE/Rekor operator playbook published (`docs/modules/scanner/operations/dsse-rekor-operator-guide.md`) with config/env tables, rollout phases, runbook snippets, offline verification steps, and SLA/alert guidance. | | | | SCANNER-ENG-0016 | DONE | 2025-11-10 | SPRINT_0138_0001_0001_scanner_ruby_parity | Ruby Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby | RubyLockCollector and vendor ingestion finalized: Bundler config overrides honoured, workspace lockfiles merged, vendor bundles normalised, and deterministic fixtures added. | SCANNER-ENG-0009 | | | SCANNER-ENG-0017 | DONE | 2025-11-09 | SPRINT_0138_0001_0001_scanner_ruby_parity | Ruby Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby | Build the runtime require/autoload graph builder with tree-sitter Ruby per design §4.4 and integrate EntryTrace hints. | SCANNER-ENG-0016 | | @@ -1876,7 +1876,7 @@ | SURFACE-SECRETS-03 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Add Kubernetes/File/Offline backends with deterministic caching and audit hooks. | SURFACE-SECRETS-02 | SCSS0101 | | SURFACE-SECRETS-04 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Integrate Surface.Secrets into Scanner Worker/WebService/BuildX for registry + CAS creds. | SURFACE-SECRETS-02 | | | SURFACE-SECRETS-05 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Zastava Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Invoke Surface.Secrets from Zastava Observer/Webhook for CAS & attestation secrets. 
| SURFACE-SECRETS-02 | | -| SURFACE-SECRETS-06 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Ops Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Update deployment manifests/offline kit bundles to provision secret references instead of raw values. | SURFACE-SECRETS-03 | | +| SURFACE-SECRETS-06 | DONE (2025-12-08) | | SPRINT_0136_0001_0001_scanner_surface | Ops Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Update deployment manifests/offline kit bundles to provision secret references instead of raw values. | SURFACE-SECRETS-03 | | | SURFACE-VAL-01 | DOING | 2025-11-01 | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild, Security Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation | Define the Surface validation framework (`surface-validation.md`) covering env/cache/secret checks and extension hooks. | SURFACE-FS-01; SURFACE-ENV-01 | SCSS0102 | | SURFACE-VAL-02 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation | Implement base validation library with check registry and default validators for env/cached manifests/secret refs. | SURFACE-VAL-01; SURFACE-ENV-02; SURFACE-FS-02 | SCSS0102 | | SURFACE-VAL-03 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild, Analyzer Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation | Integrate validation pipeline into Scanner analyzers so checks run before processing. | SURFACE-VAL-02 | SCSS0102 | @@ -2660,7 +2660,7 @@ | CONCELIER-STORE-AOC-19-005 | TODO | 2025-11-04 | SPRINT_115_concelier_iv | Concelier Storage Guild · DevOps Guild | src/Concelier/__Libraries/StellaOps.Concelier.Storage.Mongo | Execute the raw-linkset backfill/rollback plan (`docs/dev/raw-linkset-backfill-plan.md`) so Mongo + Offline Kit bundles reflect Link-Not-Merge data; rehearse rollback. Depends on CONCELIER-CORE-AOC-19-004. | Wait for CCLN0101 approval | CCSM0101 | | CONCELIER-TEN-48-001 | BLOCKED | 2025-11-23 | SPRINT_115_concelier_iv | Concelier Core Guild | src/Concelier/__Libraries/StellaOps.Concelier.Core | Enforce tenant scoping throughout normalization/linking, expose capability endpoint advertising `merge=false`, and ensure events include tenant IDs. Depends on AUTH-TEN-47-001. | AUTH-TEN-47-001; POLICY chain | CCCO0101 | | CONCELIER-VEXLENS-30-001 | BLOCKED | 2025-11-23 | SPRINT_115_concelier_iv | Concelier WebService Guild · VEX Lens Guild | src/Concelier/StellaOps.Concelier.WebService | Guarantee advisory key consistency and cross-links consumed by VEX Lens so consensus explanations can cite Concelier evidence without requesting merges. Depends on CONCELIER-VULN-29-001, VEXLENS-30-005. | VEXLENS-30-005 | PLVL0103 | -| CONCELIER-VULN-29-004 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild · Observability Guild | src/Concelier/StellaOps.Concelier.WebService | Instrument observation/linkset pipelines with metrics for identifier collisions, withdrawn statements, and chunk latencies; stream them to Vuln Explorer without altering evidence payloads. Depends on CONCELIER-VULN-29-001. 
| Requires CCPR0101 risk feed | CCWO0101 | +| CONCELIER-VULN-29-004 | DONE (2025-12-08) | | SPRINT_116_concelier_v | Concelier WebService Guild · Observability Guild | src/Concelier/StellaOps.Concelier.WebService | Instrument observation/linkset pipelines with metrics for identifier collisions, withdrawn statements, and chunk latencies; stream them to Vuln Explorer without altering evidence payloads. Depends on CONCELIER-VULN-29-001. | Requires CCPR0101 risk feed | CCWO0101 | | CONCELIER-WEB-AIRGAP-56-001 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild · AirGap Policy Guild | src/Concelier/StellaOps.Concelier.WebService | Extend ingestion endpoints to register mirror bundle sources, expose bundle catalogs, and enforce sealed-mode by blocking direct internet feeds. | Wait for AGCN0101 proof | CCAW0101 | | CONCELIER-WEB-AIRGAP-56-002 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild · AirGap Importer Guild | src/Concelier/StellaOps.Concelier.WebService | Add staleness + bundle provenance metadata to `/advisories/observations` and `/advisories/linksets` so operators can see freshness without Excitior deriving outcomes. Depends on CONCELIER-WEB-AIRGAP-56-001. | Depends on #1 | CCAW0101 | | CONCELIER-WEB-AIRGAP-57-001 | TODO | | SPRINT_116_concelier_v | Concelier WebService Guild | src/Concelier/StellaOps.Concelier.WebService | Map sealed-mode violations to consistent `AIRGAP_EGRESS_BLOCKED` payloads that explain how to remediate, leaving advisory content untouched. Depends on CONCELIER-WEB-AIRGAP-56-002. | Needs CCAN0101 time beacons | CCAW0101 | @@ -3270,8 +3270,8 @@ | FEEDCONN-CCCS-02-009 | TODO | | SPRINT_117_concelier_vi | Concelier Connector Guild – CCCS (src/Concelier/__Libraries/StellaOps.Concelier.Connector.Cccs) | src/Concelier/__Libraries/StellaOps.Concelier.Connector.Cccs | Emit CCCS version ranges into `advisory_observations.affected.versions[]` with provenance anchors (`cccs:{serial}:{index}`) and normalized comparison keys per the Link-Not-Merge schema/doc recipes. Depends on CONCELIER-LNM-21-001. | — | FEFC0101 | | FEEDCONN-CERTBUND-02-010 | TODO | | SPRINT_117_concelier_vi | Concelier Connector Guild – CertBund (src/Concelier/__Libraries/StellaOps.Concelier.Connector.CertBund) | src/Concelier/__Libraries/StellaOps.Concelier.Connector.CertBund | Translate CERT-Bund `product.Versions` phrases into normalized ranges + provenance identifiers (`certbund:{advisoryId}:{vendor}`) while retaining localisation notes; update mapper/tests for Link-Not-Merge. Depends on CONCELIER-LNM-21-001. | — | FEFC0101 | | FEEDCONN-CISCO-02-009 | DOING | 2025-11-08 | SPRINT_117_concelier_vi | Concelier Connector Guild – Cisco (src/Concelier/__Libraries/StellaOps.Concelier.Connector.Vndr.Cisco) | src/Concelier/__Libraries/StellaOps.Concelier.Connector.Vndr.Cisco | Emit Cisco SemVer ranges into the new observation schema with provenance IDs (`cisco:{productId}`) and deterministic comparison keys; refresh fixtures to remove merge counters. Depends on CONCELIER-LNM-21-001. | — | FEFC0101 | -| FEEDCONN-ICSCISA-02-012 | BLOCKED | | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | Overdue provenance refreshes require schedule from feed owners. 
| FEED-REMEDIATION-1001 | FEFC0101 |
-| FEEDCONN-KISA-02-008 | BLOCKED | | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | FEED-REMEDIATION-1001 | FEED-REMEDIATION-1001 | FEFC0101 |
+| FEEDCONN-ICSCISA-02-012 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | SOP v0.2 run_id icscisa-kisa-20251208T0205Z completed; artefacts at `out/feeds/icscisa-kisa/20251208/`. | FEED-REMEDIATION-1001 | FEFC0101 |
+| FEEDCONN-KISA-02-008 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0503_0001_0001_ops_devops_i | Concelier Feed Owners | | SOP v0.2 run_id icscisa-kisa-20251208T0205Z completed; artefacts at `out/feeds/icscisa-kisa/20251208/`. | FEED-REMEDIATION-1001 | FEFC0101 |
| FORENSICS-53-001 | TODO | | SPRINT_0202_0001_0002_cli_ii | Forensics Guild | src/Cli/StellaOps.Cli | Replay data set | Replay data set | FONS0101 |
| FORENSICS-53-002 | TODO | | SPRINT_0304_0001_0004_docs_tasks_md_iv | Forensics Guild | | FORENSICS-53-001 | FORENSICS-53-001 | FONS0101 |
| FORENSICS-53-003 | TODO | | SPRINT_0304_0001_0004_docs_tasks_md_iv | Forensics Guild | | FORENSICS-53-001 | FORENSICS-53-001 | FONS0101 |
@@ -3896,10 +3896,10 @@
| SCANNER-ENG-0008 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | EntryTrace Guild, QA Guild (src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace) | src/Scanner/__Libraries/StellaOps.Scanner.EntryTrace | Maintain EntryTrace heuristic cadence per `docs/benchmarks/scanner/scanning-gaps-stella-misses-from-competitors.md`, including quarterly pattern reviews + explain-trace updates. | | |
| SCANNER-ENG-0009 | DONE | 2025-11-13 | SPRINT_0138_0001_0001_scanner_ruby_parity | Ruby Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby | Ruby analyzer parity shipped: runtime graph + capability signals, observation payload, Mongo-backed `ruby.packages` inventory, CLI/WebService surfaces, and plugin manifest bundles for Worker loadout. | SCANNER-ANALYZERS-RUBY-28-001..012 | |
| SCANNER-ENG-0010 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | PHP Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Php) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Php | Ship the PHP analyzer pipeline (composer lock, autoload graph, capability signals) to close comparison gaps. | SCANNER-ANALYZERS-PHP-27-001 | |
-| SCANNER-ENG-0011 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno | Scope the Deno runtime analyzer (lockfile resolver, import graphs) based on competitor techniques to extend beyond Sprint 130 coverage. | | |
-| SCANNER-ENG-0012 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart | Evaluate Dart analyzer requirements (pubspec parsing, AOT artifacts) and split implementation tasks. | | |
-| SCANNER-ENG-0013 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Swift Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift | Plan Swift Package Manager coverage (Package.resolved, xcframeworks, runtime hints) with policy hooks.
| | | -| SCANNER-ENG-0014 | TODO | | SPRINT_0138_0001_0001_scanner_ruby_parity | Runtime Guild, Zastava Guild (docs/modules/scanner) | docs/modules/scanner | Align Kubernetes/VM target coverage between Scanner and Zastava per comparison findings; publish joint roadmap. | | | +| SCANNER-ENG-0011 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Deno | Scope the Deno runtime analyzer (lockfile resolver, import graphs) based on competitor techniques to extend beyond Sprint 130 coverage. | docs/modules/scanner/design/deno-analyzer-plan.md | | +| SCANNER-ENG-0012 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Language Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Dart | Evaluate Dart analyzer requirements (pubspec parsing, AOT artifacts) and split implementation tasks. | docs/modules/scanner/design/dart-analyzer-plan.md | | +| SCANNER-ENG-0013 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Swift Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Swift | Plan Swift Package Manager coverage (Package.resolved, xcframeworks, runtime hints) with policy hooks. | docs/modules/scanner/design/swiftpm-coverage-plan.md | | +| SCANNER-ENG-0014 | DONE (2025-12-08) | 2025-12-08 | SPRINT_0138_0001_0001_scanner_ruby_parity | Runtime Guild, Zastava Guild (docs/modules/scanner) | docs/modules/scanner | Align Kubernetes/VM target coverage between Scanner and Zastava per comparison findings; publish joint roadmap. | docs/modules/scanner/design/runtime-alignment-scanner-zastava.md | | | SCANNER-ENG-0015 | DONE | 2025-11-13 | SPRINT_0138_0001_0001_scanner_ruby_parity | Export Center Guild, Scanner Guild (docs/modules/scanner) | docs/modules/scanner | DSSE/Rekor operator playbook published (`docs/modules/scanner/operations/dsse-rekor-operator-guide.md`) with config/env tables, rollout phases, runbook snippets, offline verification steps, and SLA/alert guidance. | | | | SCANNER-ENG-0016 | DONE | 2025-11-10 | SPRINT_0138_0001_0001_scanner_ruby_parity | Ruby Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby | RubyLockCollector and vendor ingestion finalized: Bundler config overrides honoured, workspace lockfiles merged, vendor bundles normalised, and deterministic fixtures added. | SCANNER-ENG-0009 | | | SCANNER-ENG-0017 | DONE | 2025-11-09 | SPRINT_0138_0001_0001_scanner_ruby_parity | Ruby Analyzer Guild (src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby) | src/Scanner/StellaOps.Scanner.Analyzers.Lang.Ruby | Build the runtime require/autoload graph builder with tree-sitter Ruby per design §4.4 and integrate EntryTrace hints. | SCANNER-ENG-0016 | | @@ -4076,7 +4076,7 @@ | SURFACE-SECRETS-03 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Add Kubernetes/File/Offline backends with deterministic caching and audit hooks. 
| SURFACE-SECRETS-02 | SCSS0101 | | SURFACE-SECRETS-04 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Integrate Surface.Secrets into Scanner Worker/WebService/BuildX for registry + CAS creds. | SURFACE-SECRETS-02 | | | SURFACE-SECRETS-05 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Zastava Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Invoke Surface.Secrets from Zastava Observer/Webhook for CAS & attestation secrets. | SURFACE-SECRETS-02 | | -| SURFACE-SECRETS-06 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Ops Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Update deployment manifests/offline kit bundles to provision secret references instead of raw values. | SURFACE-SECRETS-03 | | +| SURFACE-SECRETS-06 | DONE (2025-12-08) | | SPRINT_0136_0001_0001_scanner_surface | Ops Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Secrets | Update deployment manifests/offline kit bundles to provision secret references instead of raw values. | SURFACE-SECRETS-03 | | | SURFACE-VAL-01 | DOING | 2025-11-01 | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild, Security Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation | Define the Surface validation framework (`surface-validation.md`) covering env/cache/secret checks and extension hooks. | SURFACE-FS-01; SURFACE-ENV-01 | SCSS0102 | | SURFACE-VAL-02 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation | Implement base validation library with check registry and default validators for env/cached manifests/secret refs. | SURFACE-VAL-01; SURFACE-ENV-02; SURFACE-FS-02 | SCSS0102 | | SURFACE-VAL-03 | TODO | | SPRINT_0136_0001_0001_scanner_surface | Scanner Guild, Analyzer Guild (src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation) | src/Scanner/__Libraries/StellaOps.Scanner.Surface.Validation | Integrate validation pipeline into Scanner analyzers so checks run before processing. | SURFACE-VAL-02 | SCSS0102 | diff --git a/docs/modules/concelier/feeds/icscisa-kisa-provenance.md b/docs/modules/concelier/feeds/icscisa-kisa-provenance.md index e39922a5c..964c02fb3 100644 --- a/docs/modules/concelier/feeds/icscisa-kisa-provenance.md +++ b/docs/modules/concelier/feeds/icscisa-kisa-provenance.md @@ -1,7 +1,22 @@ -# ICSCISA / KISA Feed Provenance Notes (2025-11-19) +# ICSCISA / KISA Feed Provenance Notes (2025-12-08) -- Expected signing: not provided by sources; set `signature=null` and `skip_reason="unsigned"`. -- Hashing: sha256 of raw advisory payload before normalization. +- Expected signing: not provided by sources; record `signature` as `{ status: "missing", reason: "unsigned_source" }`. +- Hashing: sha256 of raw advisory payload before normalization (stored as `payload_sha256` per advisory) and sha256 of run artefacts (`hashes.sha256`). - Transport: HTTPS; mirror to internal cache; record `fetched_at` UTC and `source_url`. - Verification: compare hash vs previous run; emit delta report. - Staleness guard: alert if `fetched_at` >14 days. 
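+
+A minimal sketch of the two hashing rules above (per-advisory `payload_sha256` over the raw bytes, plus a `sha256sum`-style manifest for run artefacts; the two-space separator is an assumption). Helper names are illustrative, not the refresh runner's actual API:
+
+```csharp
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Security.Cryptography;
+
+static class FeedHashing
+{
+    // Per advisory: hash the raw payload *before* any normalization.
+    public static string PayloadSha256(byte[] rawPayload) =>
+        Convert.ToHexString(SHA256.HashData(rawPayload)).ToLowerInvariant();
+
+    // Per run: emit "<hex>  <name>" lines so the next run can diff manifests
+    // to drive the verification/delta report.
+    public static void WriteManifest(string runDir, IEnumerable<string> artefactNames)
+    {
+        var lines = artefactNames.Select(name =>
+            $"{PayloadSha256(File.ReadAllBytes(Path.Combine(runDir, name)))}  {name}");
+        File.WriteAllLines(Path.Combine(runDir, "hashes.sha256"), lines);
+    }
+}
+```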
+ +## Run 2025-12-08 (run_id=icscisa-kisa-20251208T0205Z) +- Artefacts: `out/feeds/icscisa-kisa/20251208/advisories.ndjson`, `delta.json`, `fetch.log`, `hashes.sha256`. +- Hashes: + - `0844c46c42461b8eeaf643c01d4cb74ef20d4eec8c984ad5e20c49d65dc57deb advisories.ndjson` + - `1273beb246754382d2e013fdc98b11b06965fb97fe9a63735b51cc949746418f delta.json` + - `8fedaa9fb2b146a1ef500b0d2e4c1592ddbc770a8f15b7d03723f8034fc12a75 fetch.log` +- Delta summary: added ICS CISA advisories `ICSA-25-123-01`, `ICSMA-25-045-01`; added KISA advisories `KISA-2025-5859`, `KISA-2025-5860`; no updates or removals; backlog window 60 days; retries 0 for both sources. +- Signature posture: both sources unsigned; all records marked `signature.missing` with reason `unsigned_source`. +- Next actions: maintain weekly cadence; staleness review on 2025-12-21 with refreshed hash manifest and retry histogram. + +## CI automation +- Scheduled workflow `.gitea/workflows/icscisa-kisa-refresh.yml` runs Mondays 02:00 UTC (manual dispatch enabled) and executes `scripts/feeds/run_icscisa_kisa_refresh.py` with live fetch + offline fallback. +- Configure feed endpoints via `ICSCISA_FEED_URL` / `KISA_FEED_URL`; set `LIVE_FETCH=false` or `OFFLINE_SNAPSHOT=true` to force offline-only mode when running in sealed CI. Host override for on-prem mirrors is available via `FEED_GATEWAY_HOST` / `FEED_GATEWAY_SCHEME` (default `concelier-webservice` on the Docker network). +- Fetch log traces: `fetch.log` captures gateway (`FEED_GATEWAY_*`), effective ICS/KISA URLs, live/offline flags, and statuses so operators can verify when defaults are used vs explicit endpoints. diff --git a/docs/modules/concelier/feeds/icscisa-kisa.md b/docs/modules/concelier/feeds/icscisa-kisa.md index 8e2abd6f4..0a864cc92 100644 --- a/docs/modules/concelier/feeds/icscisa-kisa.md +++ b/docs/modules/concelier/feeds/icscisa-kisa.md @@ -32,8 +32,8 @@ Define a minimal, actionable plan to refresh overdue ICSCISA and KISA connectors - Set to 2025-12-21 (two-week check from v0.2) and capture SIG verification status + open deltas. ## Actions & timeline (v0.2 refresh) -- T0 (2025-12-08): adopt SOP + field map; create delta report template; preflight cache paths. -- T0+2d (2025-12-10): run backlog reprocess, publish artefacts + hashes for both feeds; capture unsigned counts and retry reasons. +- T0 (2025-12-08): adopt SOP + field map; create delta report template; preflight cache paths. **Done** via run `icscisa-kisa-20251208T0205Z` (see run summary below). +- T0+2d (2025-12-10): run backlog reprocess, publish artefacts + hashes for both feeds; capture unsigned counts and retry reasons. **Done** in the 2025-12-08 execution (backlog window 60 days). - T0+14d (2025-12-21): review staleness, adjust cadence if needed; reset review date and owners. ## Artefact locations @@ -46,3 +46,18 @@ Define a minimal, actionable plan to refresh overdue ICSCISA and KISA connectors - Source downtime -> mirror last good snapshot; retry daily for 3 days. - Missing signatures -> record `signature=null`, log `skip_reason` in provenance note; do not infer validity. - Schema drift -> treat as new fields, store raw, add to field map after review (no drop). + +## Run summary (2025-12-08 · run_id=icscisa-kisa-20251208T0205Z) +- Backlog window: 60 days; cadence: weekly; start/end: 2025-12-08T02:05:00Z / 2025-12-08T02:09:30Z. +- Outputs: `out/feeds/icscisa-kisa/20251208/advisories.ndjson`, `delta.json`, `fetch.log`, `hashes.sha256`. 
+- Delta: ICS CISA added `ICSA-25-123-01`, `ICSMA-25-045-01`; KISA added `KISA-2025-5859`, `KISA-2025-5860`; no updates or removals. +- Hash manifest: `hashes.sha256` records advisories/delta/log digests (see provenance note). +- Signatures: none provided by sources; recorded as missing with reason `unsigned_source` (tracked in provenance note). +- Next review: 2025-12-21 (staleness guard <14 days remains satisfied after this run). + +## CI automation +- Workflow: `.gitea/workflows/icscisa-kisa-refresh.yml` (cron: Mondays 02:00 UTC; also manual dispatch) running `scripts/feeds/run_icscisa_kisa_refresh.py`. +- Outputs: uploads `icscisa-kisa-<run_date>` artifact with `advisories.ndjson`, `delta.json`, `fetch.log`, `hashes.sha256`. +- Live vs offline: defaults to live RSS fetch with offline-safe fallback; set `LIVE_FETCH=false` or `OFFLINE_SNAPSHOT=true` in dispatch inputs/environment to force offline samples. Optional feed URLs/secrets: `ICSCISA_FEED_URL`, `KISA_FEED_URL`. +- On-prem feed host: feeds are configurable via `FEED_GATEWAY_HOST`/`FEED_GATEWAY_SCHEME`. Default resolves to `http://concelier-webservice` (Docker network DNS) so on-prem deployments hit the local mirror/web service instead of the public internet. +- Fetch log traces defaults: `fetch.log` records the resolved gateway (`FEED_GATEWAY_*`) and the effective URLs used for ICS CISA and KISA. If env vars are absent, the log shows the Docker-network default so operators can confirm on-prem wiring without inspecting workflow inputs. diff --git a/docs/modules/excititor/architecture.md b/docs/modules/excititor/architecture.md index 934956aa0..d57920134 100644 --- a/docs/modules/excititor/architecture.md +++ b/docs/modules/excititor/architecture.md @@ -124,6 +124,62 @@ Excititor workers now hydrate signature metadata with issuer trust data retrieve `GET /v1/vex/statements/{advisory_key}` produces sorted JSON responses containing raw statement metadata (`issuer`, `content_hash`, `signature`), normalised tuples, and provenance pointers. Advisory AI consumes this endpoint to build retrieval contexts with explicit citations. +### 1.5 Postgres raw store (replaces Mongo/GridFS) + +> Mongo/BSON/GridFS are being removed. This is the canonical design for the Postgres-backed raw store that powers `/vex/raw` and ingestion. + +Schema: `vex` + +- **`vex_raw_documents`** (append-only) + - `digest TEXT PRIMARY KEY` — `sha256:{hex}` of canonical UTF-8 JSON bytes. + - `tenant TEXT NOT NULL` + - `provider_id TEXT NOT NULL` + - `format TEXT NOT NULL CHECK (format IN ('openvex','csaf','cyclonedx','custom'))` + - `source_uri TEXT NOT NULL`, `etag TEXT NULL` + - `retrieved_at TIMESTAMPTZ NOT NULL`, `recorded_at TIMESTAMPTZ NOT NULL DEFAULT NOW()` + - `supersedes_digest TEXT NULL REFERENCES vex_raw_documents(digest)` + - `content_json JSONB NOT NULL` — canonicalised payload (truncated when blobbed) + - `content_size_bytes INT NOT NULL` + - `metadata_json JSONB NOT NULL` — statement_id, issuer, spec_version, content_type, connector version, hashes, quarantine flags + - `provenance_json JSONB NOT NULL` — DSSE/chain/rekor/trust info + - `inline_payload BOOLEAN NOT NULL DEFAULT TRUE` + - UNIQUE (`tenant`, `provider_id`, `source_uri`, `etag`) + - Indexes: `(tenant, retrieved_at DESC)`, `(tenant, provider_id, retrieved_at DESC)`, `(tenant, supersedes_digest)`, GIN on `metadata_json`, GIN on `provenance_json`.
+ +- **`vex_raw_blobs`** (large payloads) + - `digest TEXT PRIMARY KEY REFERENCES vex_raw_documents(digest) ON DELETE CASCADE` + - `payload BYTEA NOT NULL` (canonical JSON bytes; no compression to preserve determinism) + - `payload_hash TEXT NOT NULL` (hash of stored bytes) + +- **`vex_raw_attachments`** (optional future) + - `digest TEXT REFERENCES vex_raw_documents(digest) ON DELETE CASCADE` + - `name TEXT NOT NULL`, `media_type TEXT NOT NULL` + - `payload BYTEA NOT NULL`, `payload_hash TEXT NOT NULL` + - PRIMARY KEY (`digest`, `name`) + +- **Observations/linksets** — use the append-only Postgres linkset schema already defined for `IAppendOnlyLinksetStore` (tables `vex_linksets`, `vex_linkset_observations`, `vex_linkset_disagreements`, `vex_linkset_mutations`) with indexes on `(tenant, vulnerability_id, product_key)` and `updated_at`. + +**Canonicalisation & hashing** + +1. Parse upstream JSON; sort keys; normalize newlines; encode UTF-8 without BOM. Preserve array order. +2. Compute `digest = "sha256:{hex}"` over canonical bytes. +3. If `size <= inline_threshold_bytes` (default 256 KiB) set `inline_payload=true` and store in `content_json`; otherwise store bytes in `vex_raw_blobs` and set `inline_payload=false`. +4. Persist `content_size_bytes` (pre-canonical length) and `payload_hash` for integrity. + +**API mapping (replaces Mongo/BSON)** +List/query `/vex/raw` via `SELECT ... FROM vex.vex_raw_documents WHERE tenant=@t ORDER BY retrieved_at DESC, digest LIMIT @n OFFSET @offset`; cursor uses `(retrieved_at, digest)`. `GET /vex/raw/{digest}` loads the row and optional blob; `GET /vex/raw/{digest}/provenance` projects `provenance_json` + `metadata_json`. Filters (`providerId`, `format`, `since`, `until`, `supersedes`, `hasAttachments`) map to indexed predicates; JSON subfields use `metadata_json ->> 'field'`. + +**Write semantics** + +- `IVexRawStore` Postgres implementation enforces append-only inserts; duplicate `digest` => no-op; duplicate (`tenant`, `provider_id`, `source_uri`, `etag`) with new digest inserts a new row and sets `supersedes_digest`. +- `IVexRawWriteGuard` runs before insert; tenant is mandatory on every query and write. + +**Rollout** + +1. Add migration under `src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Migrations` creating the tables/indexes above. +2. Implement `PostgresVexRawStore` and switch WebService/Worker DI to `AddExcititorPostgresStorage`; remove `VexMongoStorageOptions`, `IMongoDatabase`, and GridFS paths. +3. Update `/vex/raw` endpoints/tests to the Postgres store; delete Mongo fixtures once parity is green. Mark Mongo storage paths as deprecated and remove them in the next release. + --- ## 2) Inputs, outputs & canonical domain diff --git a/docs/modules/findings-ledger/observability.md b/docs/modules/findings-ledger/observability.md index 6e8256877..28ceffa0d 100644 --- a/docs/modules/findings-ledger/observability.md +++ b/docs/modules/findings-ledger/observability.md @@ -56,6 +56,7 @@ - **Correlation:** Each API request includes `requestId` + `traceId` logged with events. Projector logs capture `replayId` and `rebuildReason`. - **Timeline events:** `ledger.event.appended` and `ledger.projection.updated` are emitted as structured logs carrying `tenant`, `chainId`, `sequence`, `eventId`, `policyVersion`, `traceId`, and placeholder `evidence_ref` fields for downstream timeline consumers. - **Secrets:** Ensure `event_body` is never logged; log only metadata/hashes. 
+- **Incident mode:** When incident mode is active, emit `ledger.incident.mode`, `ledger.incident.lag_trace`, `ledger.incident.conflict_snapshot`, and `ledger.incident.replay_trace` logs (with activation id, retention extension days, lag seconds, conflict reason). Snapshot TTLs inherit an incident retention extension and are annotated with `incident.*` metadata. ## 4. Alerts diff --git a/docs/modules/scanner/design/dart-analyzer-plan.md b/docs/modules/scanner/design/dart-analyzer-plan.md new file mode 100644 index 000000000..bb0c3e0cb --- /dev/null +++ b/docs/modules/scanner/design/dart-analyzer-plan.md @@ -0,0 +1,41 @@ +# Dart Analyzer Scope · SCANNER-ENG-0012 (2025-12-08) + +## Goals +- Define Dart analyzer for pubspec/pub cache parity with other language analyzers. +- Keep offline-first (no `pub get`), deterministic inventories/graphs, and policy-ready signals. + +## Inputs +- `pubspec.yaml` + `pubspec.lock` (dependencies, sources, sdk constraints). +- `.dart_tool/package_config.json` (resolved packages, language version, root URIs). +- AOT artifacts: `*.aot`, `*.snapshot`, `build/` outputs (record presence only). +- Optional Flutter plugins: `ios/`/`android/` platform manifests (metadata only). + +## Pipeline (deterministic, offline) +1) **Normalize pubspec/pubspec.lock**: + - Parse lock entries; map sources: `hosted`, `sdk:flutter`, `git`, `path`. + - Emit PURLs (`pkg:pub/<name>@<version>`) with `source` metadata (`hosted.url`, `git.sha`, `path`). + - Enforce sorted components by name. +2) **Package config**: + - Read `.dart_tool/package_config.json`; map package `rootUri`/`packageUri` to build module graph roots. + - Capture `languageVersion` and `generated` timestamp (drop or normalize to `0001-01-01Z` for determinism). +3) **Graph builder**: + - Build dependency edges from `pubspec.lock` -> `package_config` packages; include `sdk:flutter` nodes when present. + - Record `sourceType` (hosted/git/path/sdk) for provenance. +4) **Signals**: + - `dart.sdk` requirement from `environment.sdk`; `flutter` channel/version when present. + - AOT snapshot presence flags (`aot=true`, `snapshot=true`); no binary parsing. +5) **Outputs**: + - Inventory: list of PURLs + source metadata + checksum if provided in lock (hosted `sha256`). + - Graph: edges `(package -> dependency)` sorted. + - Signals: `dart.sdkConstraint`, `flutter.sdk`, `flutter.plugins` (names only), `buildArtifacts` flags. + +## Tests & fixtures +- Fixtures under `src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Dart.Tests/Fixtures/`: + - Hosted-only lockfile, git dependency, path dependency, Flutter project with plugins. + - Determinism tests: stable ordering, normalized timestamps, no network. + - Signal tests: sdk constraint extraction, AOT/snapshot flagging. + +## Deliverables +- Design captured here; wire into implementation plan + sprint log. +- Analyzer to live under `StellaOps.Scanner.Analyzers.Lang.Dart` with tests mirroring fixtures. +- Offline posture: never invoke `dart pub`; rely solely on provided lock/config; error clearly when missing lock. diff --git a/docs/modules/scanner/design/deno-analyzer-plan.md b/docs/modules/scanner/design/deno-analyzer-plan.md new file mode 100644 index 000000000..488f40890 --- /dev/null +++ b/docs/modules/scanner/design/deno-analyzer-plan.md @@ -0,0 +1,44 @@ +# Deno Analyzer Scope · SCANNER-ENG-0011 (2025-12-08) + +## Goals +- Deliver offline-safe Deno analyzer (lockfile/import graph/runtime signals) that matches Ruby/PHP parity bar.
+- Provide deterministic SBOM/inventory outputs and capability signals consumable by Policy/Surface. + +## Inputs +- `deno.json` / `deno.jsonc` (tasks, import map refs, npm bridging). +- `deno.lock` v2/v3 (modules, npm section, integrity hashes). +- Optional `import_map.json`; vendor/cache roots (`$DENO_DIR`, `vendor/`). +- CLI flags via Surface.Env: `deno.disable_npm`, `deno.vendor`, `deno.lock_path`, `deno.import_map`. + +## Pipeline (deterministic, offline) +1) **Normalize config**: parse `deno.json`/jsonc; resolve `importMap` path; default to repo root import map if present. Sort keys. +2) **Lock resolver**: read `deno.lock`; emit components: + - `npm:` entries → PURL (`pkg:npm/<name>@<version>`) + integrity from `integrity`. + - `specifiers` → source→target map for transitive graph. + - `modules` (remote URLs) → canonical URL + content hash when present; mark `fetchSource: cache`. +3) **Import map & vendor**: + - Apply `imports`/`scopes` to rewrite edges before graph emission. + - If `vendor/` exists, prefer vendored paths; emit `provenance: vendor`. +4) **Graph builder**: + - Build module graph from `specifiers` + import map rewrites; emit edges `(from -> to, kind: import|dynamic|npm)`. + - Recognise `npm:` specifiers; map to npm package node. + - Stable ordering: sort by `from, to`. +5) **Runtime/capability signals**: + - Detect permissions from `tasks` (`--allow-*` flags) and `deno.json` `unstable`/`no-check`. + - Capture `nodeModulesDir` toggle to flag npm bridge. +6) **Outputs**: + - Inventory: npm components + remote module list (`digest`, `source`, `origin`). + - Graph: edges with provenance (`lockfile`, `import_map`, `vendor`). + - Signals: `deno.permissions[]`, `deno.node_compat`, `deno.unstable`. + +## Tests & fixtures +- Add fixtures under `src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Deno.Tests/Fixtures/`: + - lockfile v2 + import map, + - lockfile v3 with npm section, + - vendorized project (`vendor/` present). +- Determinism assertions: sorted edges, stable hash of inventory, no network calls (enforce via stubbed fetcher). + +## Deliverables +- Analyzer implementation + tests in `StellaOps.Scanner.Analyzers.Lang.Deno`. +- Doc cross-link to `docs/modules/scanner/implementation_plan.md` and sprint log. +- Offline posture: default `LIVE_FETCH=false` equivalent; rely solely on lock/import map/vendor. diff --git a/docs/modules/scanner/design/native-reachability-plan.md b/docs/modules/scanner/design/native-reachability-plan.md new file mode 100644 index 000000000..0d412851d --- /dev/null +++ b/docs/modules/scanner/design/native-reachability-plan.md @@ -0,0 +1,42 @@ +# Native Reachability Graph Plan (Scanner · Signals Alignment) + +## Goals +- Extract native reachability graphs from ELF binaries across layers (stripped and unstripped), emitting: + - Build IDs (`.note.gnu.build-id`) and code IDs per file. + - Symbol digests (purl+symbol) and edges (callgraph) with deterministic ordering. + - Synthetic roots for `_init`, `.init_array`, `.preinit_array`, entry points. + - DSSE graph bundle per layer for Signals ingestion. +- Offline-friendly, deterministic outputs (stable ordering, UTF-8, UTC). + +## Inputs +- Layered filesystem with ELF binaries and shared objects. +- Layer metadata: digests from `scanner.rootfs.layers` and `scanner.layer.archives` (when provided). +- Optional runtime proc snapshot for reconciliation (if available via Signals pipeline). + +## Approach +- **Discovery**: Walk layer directories; identify ELF binaries (`e_ident`, machine, class). Record per-layer path.
+- **Identifiers**: Capture build-id (hash of `.note.gnu.build-id`), fallback to SHA-256 of `.text` when absent; store code-id (PE/ELF-friendly string). +- **Symbols**: Parse `.symtab`/`.dynsym`; compute stable symbol digests (e.g., SHA-256 over symbol bytes + name); include size/address for ordering. +- **Edges**: Build callgraph from relocation/import tables and (when available) `.eh_frame`/`.plt` linkage; emit Unknown edges when target unresolved. +- **Synthetic Roots**: Insert edges from synthetic root nodes (per binary) to `_start`, `_init`, `.init_array` entries. +- **Layer Bundles**: Emit DSSE bundle per layer with edges, symbols, identifiers, and provenance (layer digest, path, sha256). +- **Determinism**: Sort by layer digest, path, symbol name; normalize paths to POSIX separators; timestamps fixed to generation time in UTC ISO-8601. + +## Deliverables +- Library: `StellaOps.Scanner.Analyzers.Native` (new) with ELF reader and graph builder. +- Tests: fixtures under `src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Native.Tests` using stripped/unstripped ELF samples (no network). +- DSSE bundle schema: shared constants/types reused by Signals ingestion. +- Sprint doc links: referenced from `SPRINT_0146_0001_0001_scanner_analyzer_gap_close.md`. + +## Task Backlog (initial) +1) Skeleton project `StellaOps.Scanner.Analyzers.Native` + plugin registration for scanner worker. +2) ELF reader: header detection, build-id extraction, code-id calculation, section loader with deterministic sorting. +3) Symbol digests: compute `sha256(name + addr + size + binding)`; emit per-symbol evidence and purl+symbol IDs. +4) Callgraph builder: edges from PLT/relocs/imports; Unknown targets captured; synthetic roots for init arrays. +5) Layer attribution: carry layer digest/source through evidence; emit DSSE bundle per layer with signatures stubbed for now. +6) Tests/fixtures: stripped+unstripped ELF, shared objects, missing build-id, init array edges; golden JSON/NDJSON bundles. +7) Signals alignment: finalize DSSE graph schema and bundle naming; hook into reachability ingestion contract. + +## Open Questions +- Final DSSE payload shape (Signals team) — currently assumed `graph.bundle` with edges, symbols, metadata. +- Whether to include debugline info for coverage (could add optional module later). diff --git a/docs/modules/scanner/design/runtime-alignment-scanner-zastava.md b/docs/modules/scanner/design/runtime-alignment-scanner-zastava.md new file mode 100644 index 000000000..89f43c376 --- /dev/null +++ b/docs/modules/scanner/design/runtime-alignment-scanner-zastava.md @@ -0,0 +1,40 @@ +# Runtime Alignment (Scanner ↔ Zastava) · SCANNER-ENG-0014 (2025-12-08) + +## Objective +Align Kubernetes/VM target coverage between Scanner and Zastava so runtime signals, job orchestration, and evidence exports stay consistent across clusters and on-prem installs. + +## Scope +- Scanner: Worker runtime capture (EntryTrace), Surface.Env/FS detectors, analyzer job manifests, and policy predicates that rely on runtime/container metadata. +- Zastava: runtime observation feeds (system call/ebpf), workload labeling, and admission hooks. + +## Alignment Plan +1) **Workload identity contract** + - Standardize labels/annotations for scan jobs and Zastava monitors: + - `stellaops.workload/id`, `tenant`, `project`, `component`, `channel`. + - Container image digest required; tag optional. + - Shared manifest snippet lives in `deploy/helm/stellaops` overlays; reuse in job templates.
+2) **Runtime evidence channels** + - Scanner EntryTrace publishes `runtime.events` with fields: `workloadId`, `namespace`, `node`, `edgeType` (syscall/net/fs), `timestamp` (UTC, ISO-8601), `code_id` (when available). + - Zastava observers mirror the same schema on `zastava.runtime.events`; controller stitches by `workloadId` and `imageDigest`. + - Determinism: sort edge batches by `(workloadId, timestamp, edgeType)`. +3) **Kubernetes defaults** + - Namespace allowlist `scanner-runtime`/`zastava-runtime`; service accounts share RBAC for `pods/exec`, `pods/log`, `nodes/proxy` (read-only). + - Feature flags: `scanner.runtime.capture.enabled` (default false), `zastava.attach.enabled` (default false) to keep sealed-mode/offline safe. +4) **VM/bare-metal** + - Use node agent mode: Scanner jobs emit host metadata `hostId`, `osRelease`; Zastava tailers tag events with same ids. + - Shared log shipper config uses file socket paths under `/var/log/stellaops/runtime/*.ndjson`. +5) **Evidence export** + - Export Center receives combined runtime bundle with two streams: `scanner.entrytrace.ndjson`, `zastava.runtime.ndjson`; manifest includes hash of each and workload identity table. + - Offline kit: bundle path `offline/runtime//`; deterministic manifests/hashes. +6) **SLOs & alerts** + - Target: runtime event lag < 30s P95; drop rate < 0.5%. + - Alerts wired via Prometheus: `stella_runtime_events_lag_seconds`, `stella_runtime_events_dropped_total`. + +## Deliverables +- Update job/observer templates (Helm/Compose) to include shared labels and feature flags. +- Documented schema alignment (this note) referenced from sprint log. +- Tests: determinism checks on merged runtime bundle; label presence asserted in integration harness. + +## Next Steps +- Wire labels/flags into `deploy/helm/stellaops` templates and Scanner Worker job manifests. +- Add integration test to ensure EntryTrace and Zastava events with same workload id are coalesced without reordering. diff --git a/docs/modules/scanner/design/swiftpm-coverage-plan.md b/docs/modules/scanner/design/swiftpm-coverage-plan.md new file mode 100644 index 000000000..2fa814ac4 --- /dev/null +++ b/docs/modules/scanner/design/swiftpm-coverage-plan.md @@ -0,0 +1,42 @@ +# SwiftPM Coverage Plan · SCANNER-ENG-0013 (2025-12-08) + +## Goals +- Plan Swift Package Manager coverage for Scanner: inventory, dependency graph, xcframework/binary target awareness, runtime hints. +- Keep processing offline and deterministic; no `swift package` execution. + +## Inputs +- `Package.swift` (manifest) and `Package.resolved` (v2/v3 lockfile). +- `.build/checkouts/**` (optional for checksum verification only). +- Binary targets: `binaryTarget` entries, xcframeworks under `.xcframework/`. +- Platform hints: `platforms`, `cLanguageStandard`, `cxxLanguageStandard`. + +## Pipeline (deterministic, offline) +1) **Resolve lockfile**: + - Parse `Package.resolved`; emit packages with identity, version, repo URL, checksum. + - PURL: `pkg:swift/<identity>@<version>`; include `vcs` metadata (git URL, revision). + - Sort packages by identity. +2) **Manifest signals**: + - Parse `Package.swift` (static parse via tree-sitter Swift or manifest JSON dump if available) to extract: + - products/targets (name, type library/test/executable). + - binary targets (path/url, checksum). + - platform minimum versions. +3) **Graph builder**: + - Edges from targets → dependencies; packages → transitive dependencies from lockfile pins. + - Mark binary targets with `provenance: binary-target` and attach checksum if supplied.
+4) **Runtime hints**: + - Collect `unsafeFlags`, linker settings, `swiftSettings`/`cSettings`/`cxxSettings` indicators (e.g., `-enable-library-evolution`). + - Emit `xcframework` presence for Apple platform binaries. +5) **Outputs**: + - Inventory: Swift packages (PURL + checksum/vcs), binary targets (type=binary, checksum/path). + - Graph: package dependency edges; target-to-target edges (optional). + - Signals: platform minimums, binary target flags, unsafe flags presence. + +## Tests & fixtures +- Fixtures under `src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Native.Tests/Fixtures/SwiftPM/`: + - Simple library/executable, binary target with checksum, mixed platform constraints. + - Determinism: stable ordering, normalized checksums, no filesystem time dependency. + +## Deliverables +- Implementation to land under `StellaOps.Scanner.Analyzers.Native` (SwiftPM module). +- Documentation cross-link to sprint log and `docs/modules/scanner/implementation_plan.md`. +- Offline posture: never invoke `swift build`; rely solely on `Package.resolved`/manifest; error clearly when lockfile missing. diff --git a/docs/modules/scanner/dotnet-il.config.example.json b/docs/modules/scanner/dotnet-il.config.example.json new file mode 100644 index 000000000..04372ed94 --- /dev/null +++ b/docs/modules/scanner/dotnet-il.config.example.json @@ -0,0 +1,10 @@ +{ + // Enable IL/dependency edge emission and entrypoint export. + "emitDependencyEdges": true, + "includeEntrypoints": true, + + // Optional runtime evidence merge (NDJSON lines with package/target/reason/confidence/source). + // When provided, runtime edges are appended with prefix "edge.runtime". + "runtimeEvidencePath": "runtime-evidence.ndjson", + "runtimeEvidenceConfidence": "medium" +} diff --git a/docs/modules/scanner/runtime-evidence.example.ndjson b/docs/modules/scanner/runtime-evidence.example.ndjson new file mode 100644 index 000000000..275a6c896 --- /dev/null +++ b/docs/modules/scanner/runtime-evidence.example.ndjson @@ -0,0 +1,2 @@ +{"package":"stellaops.toolkit","target":"native-lib","reason":"runtime-load","confidence":"medium","source":"trace"} +{"package":"microsoft.extensions.logging","target":"microsoft.extensions.dependencyinjection","reason":"runtime-resolve","confidence":"medium","source":"probe"} diff --git a/docs/modules/signals/evidence/README.md b/docs/modules/signals/evidence/README.md index 29ffec7a9..144397231 100644 --- a/docs/modules/signals/evidence/README.md +++ b/docs/modules/signals/evidence/README.md @@ -11,21 +11,24 @@ Artifacts prepared 2025-12-05 (UTC) for DSSE signing and Evidence Locker ingest: ## CI Automated Signing -The `.gitea/workflows/signals-dsse-sign.yml` workflow automates DSSE signing. +- `.gitea/workflows/signals-dsse-sign.yml` — DSSE signing of decay/unknowns/heuristics on push or manual dispatch. +- `.gitea/workflows/signals-reachability.yml` — reachability smoke (SIGNALS-24-004/005), DSSE signing, and optional Evidence Locker upload. +- `.gitea/workflows/signals-evidence-locker.yml` — production re-sign + deterministic tar upload; defaults to `evidence-locker/signals/2025-12-05`.
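+
+A minimal sketch of the deterministic evidence tar those workflows upload (illustrative only: the CI job builds the tar in shell, but the invariants shown here, sorted entries plus zeroed timestamps and ownership, are what keep the SHA256 reproducible):
+
+```python
+import hashlib
+import io
+import pathlib
+import tarfile
+
+def deterministic_tar(src: pathlib.Path) -> bytes:
+    buf = io.BytesIO()
+    with tarfile.open(fileobj=buf, mode="w") as tar:
+        for path in sorted(src.rglob("*")):            # stable entry order
+            info = tar.gettarinfo(str(path), arcname=path.relative_to(src).as_posix())
+            info.mtime = 0                              # fixed timestamp
+            info.uid = info.gid = 0                     # fixed ownership
+            info.uname = info.gname = ""
+            if path.is_file():
+                with path.open("rb") as fh:
+                    tar.addfile(info, fh)
+            else:
+                tar.addfile(info)
+    return buf.getvalue()
+
+data = deterministic_tar(pathlib.Path("evidence-locker/signals/2025-12-05"))
+print(hashlib.sha256(data).hexdigest())  # compare with the digest emitted in job logs
+```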
-### Prerequisites (CI Secrets) -| Secret | Description | +### Prerequisites (CI Secrets or Repo Vars) +| Secret/Var | Description | |--------|-------------| | `COSIGN_PRIVATE_KEY_B64` | Base64-encoded cosign private key (required for production) | | `COSIGN_PASSWORD` | Password for encrypted key (if applicable) | -| `CI_EVIDENCE_LOCKER_TOKEN` | Token for Evidence Locker push (optional) | +| `CI_EVIDENCE_LOCKER_TOKEN` | Token for Evidence Locker push | +| `EVIDENCE_LOCKER_URL` | Base URL for locker PUT (e.g., `https://locker.example.com`) | ### Trigger -- **Automatic**: Push to `main` affecting `docs/modules/signals/**` or `tools/cosign/sign-signals.sh` -- **Manual**: Workflow dispatch with `allow_dev_key=1` for testing +- **Automatic**: Push to `main` affecting `docs/modules/signals/**`, `tools/cosign/sign-signals.sh`, or Signals sources (reachability workflow). +- **Manual**: Workflow dispatch with `allow_dev_key=1` for testing; `out_dir` input defaults to `evidence-locker/signals/2025-12-05`. ### Output -Signed artifacts uploaded as workflow artifact `signals-dsse-signed-{run}` and optionally pushed to Evidence Locker. +Signed artifacts uploaded as workflow artifacts and, when secrets/vars are present, pushed to Evidence Locker. Evidence tar SHA256 is emitted in job logs. ## Development Signing (Local Testing) diff --git a/docs/security/crypto-compliance.md b/docs/security/crypto-compliance.md index 2f4b9e864..9f6bd9aab 100644 --- a/docs/security/crypto-compliance.md +++ b/docs/security/crypto-compliance.md @@ -100,6 +100,7 @@ HMAC operations use purpose-based selection similar to hashing: ## Simulation paths when hardware is missing - **RU / GOST**: Linux baseline uses `ru.openssl.gost`; CryptoPro CSP can be exercised from Linux via the Wine sidecar service (`ru.winecsp.http`) built from `scripts/crypto/setup-wine-csp-service.sh` when customers supply the CSP installer. Windows CSP remains blocked until licensed runners are available. +- **CN / SM2**: Software baseline (`cn.sm.soft`) plus a containerized remote microservice (`cn.sm.remote.http`) that simulates SM2 signing/verification; swap the endpoint to a hardware-backed service when licensed hardware is provided. - **CN / SM**: Software-only SM2/SM3 provider (`cn.sm.soft`) backed by BouncyCastle; enable with `SM_SOFT_ALLOWED=1`. Hardware PKCS#11 tokens can be added later without changing feature code because hosts resolve via `ICryptoProviderRegistry`. - **FIPS / eIDAS**: Software allow-lists (`fips.ecdsa.soft`, `eu.eidas.soft`) enforce ES256/ES384 + SHA-2. They are labeled non-certified until a CMVP/QSCD module is supplied. - **KCMVP**: Hash-only baseline (`kr.kcmvp.hash`) keeps SHA-256 available when ARIA/SEED/KCDSA hardware is absent. diff --git a/docs/security/rootpack_ru_validation.md b/docs/security/rootpack_ru_validation.md index 269f17614..e669f072b 100644 --- a/docs/security/rootpack_ru_validation.md +++ b/docs/security/rootpack_ru_validation.md @@ -29,6 +29,20 @@ This runbook documents the repeatable steps for validating the Russian sovereign 1. Install OpenSSL with the `gost` engine (or vendor equivalent) on the validation host and import the PEM key/cert that will back `StellaOps:Crypto:OpenSsl:Keys`. 2. Configure the `OpenSsl` section (PEM path plus `PrivateKeyPassphraseEnvVar`), keep `StellaOps:Crypto:Registry:ActiveProfile=ru-offline`, and restart the services. 3. Execute a signing workflow and confirm `CryptoProviderMetrics` records `ru.openssl.gost` activity. 
Linux nodes should no longer attempt to load `ru.cryptopro.csp`. +4. **2025-12-07 validation evidence (Linux, containerised OpenSSL GOST engine):** + - Ran `scripts/crypto/validate-openssl-gost.sh` (uses `rnix/openssl-gost:latest`) to generate deterministic digests and two md_gost12_256 signatures over a fixed message. Output folder: `logs/openssl_gost_validation_<timestamp>/`. + - Summary from the run at `20251207T220926Z`: + - Message SHA256: `e858745af13089d06e74022a75abfee7390aefe7635b15c80fe7d038f58ae6c6` + - md_gost12_256 digest: `01ddd6399e694bb23227925cb6b12e8c25f2f1303644ffbd267da8a68554a2cb` + - Signature SHA256 (run 1): `02321c5564ae902de77a12c8cc2876f0374d4225e52077ecd28876fbd0110b01` + - Signature SHA256 (run 2): `6564c7e0953dda7d40054ef46633c833eec5ee13d4ab8dd0557f2aed1b8d76c4` + - Determinism note: digests are stable; signatures vary per run (nonce-driven) but verify cleanly against the emitted public key. +5. **Host defaults and toggles:** Authority/Signer/Attestor now bind `StellaOps:Crypto` via `AddStellaOpsCryptoRu` and fail-closed on Linux if `ru.openssl.gost`/`ru.pkcs11` are missing. Environment overrides: + - `STELLAOPS_CRYPTO_ENABLE_RU_OPENSSL` (default: on for Linux) + - `STELLAOPS_CRYPTO_ENABLE_RU_PKCS11` (default: on) + - `STELLAOPS_CRYPTO_ENABLE_RU_WINECSP` (default: off) + - `STELLAOPS_CRYPTO_ENABLE_RU_CSP` (Windows only; default on) + Disable both OpenSSL and PKCS#11 only when an alternate provider is configured; otherwise startup will fail. ## 3. Hardware Validation (PKCS#11 Tokens) diff --git a/docs/signals/events-24-005.md b/docs/signals/events-24-005.md index b48ffb39a..1490b585b 100644 --- a/docs/signals/events-24-005.md +++ b/docs/signals/events-24-005.md @@ -45,3 +45,4 @@ ## Provenance - This contract supersedes the temporary log-based publisher referenced in Signals sprint 0143 Execution Log (2025-11-18). Aligns with `signals.fact.updated@v1` payload shape already covered by unit tests. +- Implementation: `Signals.Events` defaults to Redis Streams (`signals.fact.updated.v1` with `signals.fact.updated.dlq`), emitting envelopes that include `event_id`, `fact_version`, and deterministic `fact.digest` (sha256) generated by the reachability fact hasher. diff --git a/etc/rootpack/cn/crypto.profile.yaml b/etc/rootpack/cn/crypto.profile.yaml index 28e5f145c..32648f74b 100644 --- a/etc/rootpack/cn/crypto.profile.yaml +++ b/etc/rootpack/cn/crypto.profile.yaml @@ -3,13 +3,21 @@ StellaOps: Registry: ActiveProfile: cn-soft PreferredProviders: + - cn.sm.remote.http - cn.sm.soft Profiles: cn-soft: PreferredProviders: + - cn.sm.remote.http - cn.sm.soft SmSoft: RequireEnvironmentGate: true # Optional seed keys (PKCS#8 DER/PEM) Keys: [] + SmRemote: + BaseAddress: http://sm-remote:56080 + SkipProbe: false + Keys: + - KeyId: sm2-remote-default + RemoteKeyId: sm2-remote-default # Note: This SM profile is software-only (non-certified). Set SM_SOFT_ALLOWED=1 to enable.
diff --git a/etc/rootpack/ru/crypto.profile.yaml b/etc/rootpack/ru/crypto.profile.yaml index 52d7003be..5fc47cc14 100644 --- a/etc/rootpack/ru/crypto.profile.yaml +++ b/etc/rootpack/ru/crypto.profile.yaml @@ -19,6 +19,7 @@ StellaOps: - ru.openssl.gost - ru.pkcs11 CryptoPro: + Status: pending Keys: - KeyId: ru-csp-default Algorithm: GOST12-256 diff --git a/etc/signals.yaml.sample b/etc/signals.yaml.sample index 8196b5bd7..b11302a13 100644 --- a/etc/signals.yaml.sample +++ b/etc/signals.yaml.sample @@ -49,10 +49,23 @@ Signals: Cache: ConnectionString: "localhost:6379" DefaultTtlSeconds: 600 + Events: + Enabled: true + # Transport driver: "redis" (default) or "inmemory" for local smoke. + Driver: "redis" + ConnectionString: "localhost:6379" + Stream: "signals.fact.updated.v1" + DeadLetterStream: "signals.fact.updated.dlq" + PublishTimeoutSeconds: 5 + MaxStreamLength: 10000 + DefaultTenant: "tenant-default" + Producer: "StellaOps.Signals" + Pipeline: "signals" + Release: "" AirGap: # Optional override for fact-update event topic when signaling across air-gap boundaries. - # Defaults to "signals.fact.updated" when omitted. - EventTopic: "signals.fact.updated" + # Defaults to "signals.fact.updated.v1" when omitted. + EventTopic: "signals.fact.updated.v1" SealedMode: EnforcementEnabled: false EvidencePath: "../ops/devops/sealed-mode-ci/artifacts/sealed-mode-ci/latest/signals-sealed-ci.json" diff --git a/ops/cryptopro/install-linux-csp.sh b/ops/cryptopro/install-linux-csp.sh new file mode 100644 index 000000000..7f8851578 --- /dev/null +++ b/ops/cryptopro/install-linux-csp.sh @@ -0,0 +1,185 @@ +#!/bin/bash +# CryptoPro CSP 5.0 R3 Linux installer (deb packages) +# Uses locally provided .deb packages under /opt/cryptopro/downloads (host volume). +# No Wine dependency. Runs offline against the supplied packages only. +# +# Env: +# CRYPTOPRO_INSTALL_FROM Path to folder with .deb packages (default /opt/cryptopro/downloads) +# CRYPTOPRO_ACCEPT_EULA Must be 1 to proceed (default 0 -> hard stop with warning) +# CRYPTOPRO_SKIP_APT_FIX Set to 1 to skip `apt-get -f install` (offline strict) +# CRYPTOPRO_PACKAGE_FILTER Optional glob (e.g., "cprocsp*amd64.deb") to narrow selection +# +# Exit codes: +# 0 success; 1 missing dir/files; 2 incompatible arch; 3 EULA not accepted. + +set -euo pipefail + +INSTALL_FROM="${CRYPTOPRO_INSTALL_FROM:-/opt/cryptopro/downloads}" +PACKAGE_FILTER="${CRYPTOPRO_PACKAGE_FILTER:-*.deb}" +SKIP_APT_FIX="${CRYPTOPRO_SKIP_APT_FIX:-0}" +STAGING_DIR="/tmp/cryptopro-debs" +MINIMAL="${CRYPTOPRO_MINIMAL:-1}" +INCLUDE_PLUGIN="${CRYPTOPRO_INCLUDE_PLUGIN:-0}" + +arch_from_uname() { + local raw + raw="$(uname -m)" + case "${raw}" in + x86_64) echo "amd64" ;; + aarch64) echo "arm64" ;; + arm64) echo "arm64" ;; + i386|i686) echo "i386" ;; + *) echo "${raw}" ;; + esac +} + +HOST_ARCH="$(dpkg --print-architecture 2>/dev/null || arch_from_uname)" + +log() { + echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] [cryptopro-install] $*" +} + +log_err() { + echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] [cryptopro-install] [ERROR] $*" >&2 +} + +require_eula() { + if [[ "${CRYPTOPRO_ACCEPT_EULA:-0}" != "1" ]]; then + log_err "License not accepted. Set CRYPTOPRO_ACCEPT_EULA=1 only if you hold a valid CryptoPro license for these binaries and agree to the vendor EULA." 
+ exit 3 + fi +} + +maybe_extract_bundle() { + # Prefer a bundle that matches host arch in filename, otherwise first *.tgz + mapfile -t TGZ < <(find "${INSTALL_FROM}" -maxdepth 1 -type f -name "*.tgz" -print 2>/dev/null | sort) + if [[ ${#TGZ[@]} -eq 0 ]]; then + return + fi + local chosen="" + for candidate in "${TGZ[@]}"; do + if [[ "${candidate}" == *"${HOST_ARCH}"* ]]; then + chosen="${candidate}" + break + fi + done + if [[ -z "${chosen}" ]]; then + chosen="${TGZ[0]}" + fi + log "Extracting bundle ${chosen} into ${STAGING_DIR}" + rm -rf "${STAGING_DIR}" + mkdir -p "${STAGING_DIR}" + tar -xf "${chosen}" -C "${STAGING_DIR}" + # If bundle contains a single subfolder, use it as install root + local subdir + subdir="$(find "${STAGING_DIR}" -maxdepth 1 -type d ! -path "${STAGING_DIR}" | head -n1)" + if [[ -n "${subdir}" ]]; then + INSTALL_FROM="${subdir}" + else + INSTALL_FROM="${STAGING_DIR}" + fi +} + +gather_packages() { + if [[ ! -d "${INSTALL_FROM}" ]]; then + log_err "Package directory not found: ${INSTALL_FROM}" + exit 1 + fi + maybe_extract_bundle + mapfile -t PKGS < <(find "${INSTALL_FROM}" -maxdepth 2 -type f -name "${PACKAGE_FILTER}" -print 2>/dev/null | sort) + if [[ ${#PKGS[@]} -eq 0 ]]; then + log_err "No .deb packages found in ${INSTALL_FROM} (filter=${PACKAGE_FILTER})" + exit 1 + fi +} + +apply_minimal_filter() { + if [[ "${MINIMAL}" != "1" ]]; then + return + fi + local -a keep_exact=( + "lsb-cprocsp-base" + "lsb-cprocsp-ca-certs" + "lsb-cprocsp-capilite-64" + "lsb-cprocsp-kc1-64" + "lsb-cprocsp-pkcs11-64" + "lsb-cprocsp-rdr-64" + "cprocsp-curl-64" + "cprocsp-pki-cades-64" + "cprocsp-compat-debian" + ) + if [[ "${INCLUDE_PLUGIN}" == "1" ]]; then + keep_exact+=("cprocsp-pki-plugin-64" "cprocsp-rdr-gui-gtk-64") + fi + local -a filtered=() + for pkg in "${PKGS[@]}"; do + local name + name="$(dpkg-deb -f "${pkg}" Package 2>/dev/null || basename "${pkg}")" + for wanted in "${keep_exact[@]}"; do + if [[ "${name}" == "${wanted}" ]]; then + filtered+=("${pkg}") + break + fi + done + done + if [[ ${#filtered[@]} -gt 0 ]]; then + log "Applying minimal package set (CRYPTOPRO_MINIMAL=1); kept ${#filtered[@]} of ${#PKGS[@]}" + PKGS=("${filtered[@]}") + else + log "Minimal filter yielded no matches; using full package set" + fi +} + +filter_by_arch() { + FILTERED=() + for pkg in "${PKGS[@]}"; do + local pkg_arch + pkg_arch="$(dpkg-deb -f "${pkg}" Architecture 2>/dev/null || echo "unknown")" + if [[ "${pkg_arch}" == "all" || "${pkg_arch}" == "${HOST_ARCH}" ]]; then + FILTERED+=("${pkg}") + else + log "Skipping ${pkg} (arch=${pkg_arch}, host=${HOST_ARCH})" + fi + done + if [[ ${#FILTERED[@]} -eq 0 ]]; then + log_err "No packages match host architecture ${HOST_ARCH}" + exit 2 + fi +} + +print_matrix() { + log "Discovered packages (arch filter: host=${HOST_ARCH}):" + for pkg in "${FILTERED[@]}"; do + local name ver arch + name="$(dpkg-deb -f "${pkg}" Package 2>/dev/null || basename "${pkg}")" + ver="$(dpkg-deb -f "${pkg}" Version 2>/dev/null || echo "unknown")" + arch="$(dpkg-deb -f "${pkg}" Architecture 2>/dev/null || echo "unknown")" + echo " - ${name} ${ver} (${arch}) <- ${pkg}" + done +} + +install_packages() { + log "Installing ${#FILTERED[@]} package(s) from ${INSTALL_FROM}" + if ! dpkg -i "${FILTERED[@]}"; then + if [[ "${SKIP_APT_FIX}" == "1" ]]; then + log_err "dpkg reported errors and CRYPTOPRO_SKIP_APT_FIX=1; aborting." 
+ exit 1 + fi + log "Resolving dependencies with apt-get -f install (may require network if deps missing locally)" + apt-get update >/dev/null + DEBIAN_FRONTEND=noninteractive apt-get -y -f install + fi + log "CryptoPro packages installed. Verify with: dpkg -l | grep cprocsp" +} + +main() { + require_eula + gather_packages + apply_minimal_filter + filter_by_arch + print_matrix + install_packages + log "Installation finished. For headless/server use on Ubuntu 22.04 (amd64), the 'linux-amd64_deb.tgz' bundle is preferred and auto-selected." +} + +main "$@" diff --git a/ops/cryptopro/linux-csp-service/Dockerfile b/ops/cryptopro/linux-csp-service/Dockerfile new file mode 100644 index 000000000..dae99385f --- /dev/null +++ b/ops/cryptopro/linux-csp-service/Dockerfile @@ -0,0 +1,31 @@ +# syntax=docker/dockerfile:1.7 +FROM ubuntu:22.04 + +ENV DEBIAN_FRONTEND=noninteractive \ + CRYPTOPRO_ACCEPT_EULA=1 \ + CRYPTOPRO_MINIMAL=1 + +WORKDIR /app + +# System deps +RUN apt-get update && \ + apt-get install -y --no-install-recommends python3 python3-pip tar xz-utils && \ + rm -rf /var/lib/apt/lists/* + +# Copy CryptoPro packages (provided in repo) and installer +COPY opt/cryptopro/downloads/*.tgz /opt/cryptopro/downloads/ +COPY ops/cryptopro/install-linux-csp.sh /usr/local/bin/install-linux-csp.sh +RUN chmod +x /usr/local/bin/install-linux-csp.sh + +# Install CryptoPro CSP +RUN /usr/local/bin/install-linux-csp.sh + +# Python deps +COPY ops/cryptopro/linux-csp-service/requirements.txt /app/requirements.txt +RUN pip3 install --no-cache-dir -r /app/requirements.txt + +# App +COPY ops/cryptopro/linux-csp-service/app.py /app/app.py + +EXPOSE 8080 +CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8080"] diff --git a/ops/cryptopro/linux-csp-service/README.md b/ops/cryptopro/linux-csp-service/README.md new file mode 100644 index 000000000..060fc75aa --- /dev/null +++ b/ops/cryptopro/linux-csp-service/README.md @@ -0,0 +1,25 @@ +# CryptoPro Linux CSP Service (experimental) + +Minimal FastAPI wrapper around the Linux CryptoPro CSP binaries to prove installation and expose simple operations. + +## Build + +```bash +docker build -t cryptopro-linux-csp -f ops/cryptopro/linux-csp-service/Dockerfile . +``` + +## Run + +```bash +docker run --rm -p 8080:8080 cryptopro-linux-csp +``` + +Endpoints: +- `GET /health` — checks `csptest` presence. +- `GET /license` — runs `csptest -keyset -info` and returns the raw keyset/licence output. +- `POST /hash` with `{ "data_b64": "<base64 payload>" }` — runs `csptest -hash -hash_alg gost12_256` over the decoded bytes. + +## Notes +- Uses the provided CryptoPro `.tgz` bundles under `opt/cryptopro/downloads`. Ensure you have rights to these binaries; the image builds with `CRYPTOPRO_ACCEPT_EULA=1`. +- Default install is minimal (no browser/plugin). Set `CRYPTOPRO_INCLUDE_PLUGIN=1` if you need plugin packages. +- This is not a production service; intended for validation only.
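+
+Example request against a locally running container (stdlib-only sketch; adjust host/port to your deployment):
+
+```python
+import base64
+import json
+import urllib.request
+
+# Hash an arbitrary payload via the /hash endpoint.
+payload = json.dumps({"data_b64": base64.b64encode(b"hello gost").decode()}).encode()
+req = urllib.request.Request(
+    "http://localhost:8080/hash",
+    data=payload,
+    headers={"Content-Type": "application/json"},
+    method="POST",
+)
+with urllib.request.urlopen(req, timeout=30) as resp:
+    print(json.loads(resp.read())["output"])  # raw csptest gost12_256 output
+```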
diff --git a/ops/cryptopro/linux-csp-service/app.py b/ops/cryptopro/linux-csp-service/app.py new file mode 100644 index 000000000..e9cd5afa9 --- /dev/null +++ b/ops/cryptopro/linux-csp-service/app.py @@ -0,0 +1,57 @@ +import base64 +import subprocess +from pathlib import Path +from typing import Optional + +from fastapi import FastAPI, HTTPException +from pydantic import BaseModel + +app = FastAPI(title="CryptoPro Linux CSP Service", version="0.1.0") + +CSPTEST = Path("/opt/cprocsp/bin/amd64/csptest") + + +def run_cmd(cmd: list[str], input_bytes: Optional[bytes] = None, allow_fail: bool = False) -> str: + try: + proc = subprocess.run( + cmd, + input=input_bytes, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + check=True, + ) + return proc.stdout.decode("utf-8", errors="replace") + except subprocess.CalledProcessError as exc: + output = exc.stdout.decode("utf-8", errors="replace") if exc.stdout else "" + if allow_fail: + return output + raise HTTPException(status_code=500, detail={"cmd": cmd, "output": output}) + + +@app.get("/health") +def health(): + if not CSPTEST.exists(): + raise HTTPException(status_code=500, detail="csptest binary not found; ensure CryptoPro CSP is installed") + return {"status": "ok", "csptest": str(CSPTEST)} + + +@app.get("/license") +def license_info(): + output = run_cmd([str(CSPTEST), "-keyset", "-info"], allow_fail=True) + return {"output": output} + + +class HashRequest(BaseModel): + data_b64: str + + +@app.post("/hash") +def hash_data(body: HashRequest): + try: + data = base64.b64decode(body.data_b64) + except Exception: + raise HTTPException(status_code=400, detail="Invalid base64") + + cmd = [str(CSPTEST), "-hash", "-in", "-", "-hash_alg", "gost12_256"] + output = run_cmd(cmd, input_bytes=data) + return {"output": output} diff --git a/ops/cryptopro/linux-csp-service/requirements.txt b/ops/cryptopro/linux-csp-service/requirements.txt new file mode 100644 index 000000000..3af9cb384 --- /dev/null +++ b/ops/cryptopro/linux-csp-service/requirements.txt @@ -0,0 +1,2 @@ +fastapi==0.111.0 +uvicorn[standard]==0.30.1 diff --git a/ops/devops/secrets/surface-secrets-provisioning.md b/ops/devops/secrets/surface-secrets-provisioning.md index 9e297569c..2168f9a3b 100644 --- a/ops/devops/secrets/surface-secrets-provisioning.md +++ b/ops/devops/secrets/surface-secrets-provisioning.md @@ -54,7 +54,7 @@ SCANNER_SURFACE_SECRETS_ALLOW_INLINE=false ZASTAVA_SURFACE_SECRETS_PROVIDER=${SCANNER_SURFACE_SECRETS_PROVIDER} ZASTAVA_SURFACE_SECRETS_ROOT=${SCANNER_SURFACE_SECRETS_ROOT} ``` -4) Ensure docker-compose mounts the secrets path read-only to the services that need it. +4) Ensure docker-compose mounts the secrets path read-only to the services that need it. Use `SURFACE_SECRETS_HOST_PATH` to point at the decrypted bundle on the host (defaults to `./offline/surface-secrets` in the Compose profiles). ## Offline Kit workflow - The offline kit already ships encrypted `surface-secrets` bundles (see `docs/24_OFFLINE_KIT.md`). diff --git a/ops/sm-remote/Dockerfile b/ops/sm-remote/Dockerfile new file mode 100644 index 000000000..f7aa44b1a --- /dev/null +++ b/ops/sm-remote/Dockerfile @@ -0,0 +1,12 @@ +# Simulated SM2 remote microservice (software-only) +FROM mcr.microsoft.com/dotnet/sdk:10.0 AS build +WORKDIR /src +COPY . . +RUN dotnet publish src/SmRemote/StellaOps.SmRemote.Service/StellaOps.SmRemote.Service.csproj -c Release -o /app/publish + +FROM mcr.microsoft.com/dotnet/aspnet:10.0 +WORKDIR /app +COPY --from=build /app/publish . 
+ENV ASPNETCORE_URLS=http://0.0.0.0:56080 +ENV SM_SOFT_ALLOWED=1 +ENTRYPOINT ["dotnet", "StellaOps.SmRemote.Service.dll"] diff --git a/ops/wine-csp/Dockerfile b/ops/wine-csp/Dockerfile index 825fb192d..d8c413eee 100644 --- a/ops/wine-csp/Dockerfile +++ b/ops/wine-csp/Dockerfile @@ -80,6 +80,8 @@ ENV DEBIAN_FRONTEND=noninteractive \ WINE_CSP_MODE=limited \ WINE_CSP_INSTALLER_PATH=/opt/cryptopro/csp-installer.msi \ WINE_CSP_LOG_LEVEL=Information \ + NODE_PATH=/usr/local/lib/node_modules \ + PLAYWRIGHT_BROWSERS_PATH=/ms-playwright \ # Display for Wine (headless) DISPLAY=:99 @@ -117,6 +119,21 @@ RUN set -eux; \ apt-get clean; \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +# Install Node.js + Playwright (headless Chromium) for CryptoPro downloader +RUN set -eux; \ + curl -fsSL https://deb.nodesource.com/setup_20.x | bash -; \ + apt-get update; \ + apt-get install -y --no-install-recommends \ + nodejs \ + rpm2cpio \ + cpio; \ + npm install -g --no-progress playwright-chromium@1.48.2; \ + npx playwright install-deps chromium; \ + npx playwright install chromium; \ + chown -R ${APP_UID}:${APP_GID} /ms-playwright || true; \ + apt-get clean; \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + # Create non-root user for Wine service # Note: Wine requires writable home directory for prefix RUN groupadd -r -g ${APP_GID} ${APP_USER} && \ @@ -133,7 +150,10 @@ COPY --from=build --chown=${APP_UID}:${APP_GID} /app/publish/ ./ COPY --chown=${APP_UID}:${APP_GID} ops/wine-csp/entrypoint.sh /usr/local/bin/entrypoint.sh COPY --chown=${APP_UID}:${APP_GID} ops/wine-csp/healthcheck.sh /usr/local/bin/healthcheck.sh COPY --chown=${APP_UID}:${APP_GID} ops/wine-csp/install-csp.sh /usr/local/bin/install-csp.sh -RUN chmod +x /usr/local/bin/entrypoint.sh /usr/local/bin/healthcheck.sh /usr/local/bin/install-csp.sh +COPY --chown=${APP_UID}:${APP_GID} ops/wine-csp/fetch-cryptopro.py /usr/local/bin/fetch-cryptopro.py +COPY --chown=${APP_UID}:${APP_GID} ops/wine-csp/download-cryptopro.sh /usr/local/bin/download-cryptopro.sh +COPY --chown=${APP_UID}:${APP_GID} scripts/crypto/download-cryptopro-playwright.cjs /usr/local/bin/download-cryptopro-playwright.cjs +RUN chmod +x /usr/local/bin/entrypoint.sh /usr/local/bin/healthcheck.sh /usr/local/bin/install-csp.sh /usr/local/bin/fetch-cryptopro.py /usr/local/bin/download-cryptopro.sh /usr/local/bin/download-cryptopro-playwright.cjs # Switch to non-root user for Wine prefix initialization USER ${APP_UID}:${APP_GID} diff --git a/ops/wine-csp/download-cryptopro.sh b/ops/wine-csp/download-cryptopro.sh new file mode 100644 index 000000000..634688805 --- /dev/null +++ b/ops/wine-csp/download-cryptopro.sh @@ -0,0 +1,62 @@ +#!/bin/bash +# CryptoPro Linux package fetcher (Playwright-driven) +# Uses the Node-based Playwright crawler to authenticate (if required) and +# download Linux CSP installers. Intended to run once per container startup. + +set -euo pipefail + +OUTPUT_DIR="${CRYPTOPRO_OUTPUT_DIR:-/opt/cryptopro/downloads}" +MARKER="${CRYPTOPRO_DOWNLOAD_MARKER:-${OUTPUT_DIR}/.downloaded}" +FORCE="${CRYPTOPRO_FORCE_DOWNLOAD:-0}" +UNPACK="${CRYPTOPRO_UNPACK:-1}" +DRY_RUN="${CRYPTOPRO_DRY_RUN:-1}" + +log() { + echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] [crypto-fetch] $*" +} + +log_error() { + echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] [crypto-fetch] [ERROR] $*" >&2 +} + +if [[ -f "${MARKER}" && "${FORCE}" != "1" ]]; then + log "Download marker present at ${MARKER}; skipping (set CRYPTOPRO_FORCE_DOWNLOAD=1 to refresh)." 
+ exit 0 +fi + +log "Ensuring CryptoPro Linux packages are available (dry-run unless CRYPTOPRO_DRY_RUN=0)" +log " Output dir: ${OUTPUT_DIR}" +log " Unpack: ${UNPACK}" + +mkdir -p "${OUTPUT_DIR}" + +# Export defaults for the Playwright downloader +export CRYPTOPRO_OUTPUT_DIR="${OUTPUT_DIR}" +export CRYPTOPRO_UNPACK="${UNPACK}" +export CRYPTOPRO_DRY_RUN="${DRY_RUN}" +export CRYPTOPRO_URL="${CRYPTOPRO_URL:-https://cryptopro.ru/products/csp/downloads#latest_csp50r3_linux}" +export CRYPTOPRO_EMAIL="${CRYPTOPRO_EMAIL:-contact@stella-ops.org}" +export CRYPTOPRO_PASSWORD="${CRYPTOPRO_PASSWORD:-Hoko33JD3nj3aJD.}" + +if ! node /usr/local/bin/download-cryptopro-playwright.cjs; then + rc=$? + if [[ "${rc}" == "2" ]]; then + log "Playwright downloader blocked by auth/captcha; skipping download (set CRYPTOPRO_DEBUG=1 for details)." + exit 0 + fi + log_error "Playwright downloader failed (exit=${rc})" + exit "${rc}" +fi + +if [[ "${DRY_RUN}" == "0" ]]; then + touch "${MARKER}" + log "Download complete; marker written to ${MARKER}" +else + log "Dry-run mode; marker not written. Set CRYPTOPRO_DRY_RUN=0 to fetch binaries." +fi + +# List latest artifacts (best-effort) +if compgen -G "${OUTPUT_DIR}/*" > /dev/null; then + log "Artifacts in ${OUTPUT_DIR}:" + find "${OUTPUT_DIR}" -maxdepth 1 -type f -printf " %f (%s bytes)\n" | head -20 +fi diff --git a/ops/wine-csp/entrypoint.sh b/ops/wine-csp/entrypoint.sh index 634b2bf86..abbdc7ce7 100644 --- a/ops/wine-csp/entrypoint.sh +++ b/ops/wine-csp/entrypoint.sh @@ -15,6 +15,10 @@ WINE_CSP_INSTALLER_PATH="${WINE_CSP_INSTALLER_PATH:-/opt/cryptopro/csp-installer WINE_CSP_LOG_LEVEL="${WINE_CSP_LOG_LEVEL:-Information}" WINE_PREFIX="${WINEPREFIX:-$HOME/.wine}" DISPLAY="${DISPLAY:-:99}" +CSP_DOWNLOAD_MARKER="${WINE_CSP_INSTALLER_PATH}.downloaded" +CRYPTOPRO_DOWNLOAD_DIR="${CRYPTOPRO_DOWNLOAD_DIR:-/opt/cryptopro/downloads}" +CRYPTOPRO_DOWNLOAD_MARKER="${CRYPTOPRO_DOWNLOAD_MARKER:-${CRYPTOPRO_DOWNLOAD_DIR}/.downloaded}" +CRYPTOPRO_FETCH_ON_START="${CRYPTOPRO_FETCH_ON_START:-1}" # Marker files CSP_INSTALLED_MARKER="${WINE_PREFIX}/.csp_installed" @@ -73,6 +77,37 @@ initialize_wine() { log "Wine prefix initialized successfully" } +# ------------------------------------------------------------------------------ +# CryptoPro Linux Downloads (Playwright-driven) +# ------------------------------------------------------------------------------ +download_linux_packages() { + if [[ "${CRYPTOPRO_FETCH_ON_START}" == "0" ]]; then + log "Skipping CryptoPro Linux fetch (CRYPTOPRO_FETCH_ON_START=0)" + return 0 + fi + + if [[ -f "${CRYPTOPRO_DOWNLOAD_MARKER}" && "${CRYPTOPRO_FORCE_DOWNLOAD:-0}" != "1" ]]; then + log "CryptoPro download marker present at ${CRYPTOPRO_DOWNLOAD_MARKER}; skipping fetch" + return 0 + fi + + log "Ensuring CryptoPro Linux packages via Playwright (dry-run unless CRYPTOPRO_DRY_RUN=0)" + export CRYPTOPRO_DOWNLOAD_MARKER + export CRYPTOPRO_OUTPUT_DIR="${CRYPTOPRO_DOWNLOAD_DIR}" + export CRYPTOPRO_UNPACK="${CRYPTOPRO_UNPACK:-1}" + + if /usr/local/bin/download-cryptopro.sh; then + if [[ "${CRYPTOPRO_DRY_RUN:-1}" != "0" ]]; then + log "CryptoPro downloader ran in dry-run mode; set CRYPTOPRO_DRY_RUN=0 to fetch binaries" + else + [[ -f "${CRYPTOPRO_DOWNLOAD_MARKER}" ]] || touch "${CRYPTOPRO_DOWNLOAD_MARKER}" + log "CryptoPro Linux artifacts staged in ${CRYPTOPRO_DOWNLOAD_DIR}" + fi + else + log_error "CryptoPro Playwright download failed" + fi +} + # ------------------------------------------------------------------------------ # CryptoPro CSP Installation # 
------------------------------------------------------------------------------ @@ -83,6 +118,15 @@ install_cryptopro() { return 0 fi + # Attempt to download installer if missing (dry-run by default) + if [[ ! -f "${WINE_CSP_INSTALLER_PATH}" ]]; then + log "CryptoPro CSP installer not found at ${WINE_CSP_INSTALLER_PATH}; attempting crawl/download (dry-run unless CRYPTOPRO_DRY_RUN=0)." + if ! CRYPTOPRO_OUTPUT="${WINE_CSP_INSTALLER_PATH}" /usr/local/bin/fetch-cryptopro.py; then + log_error "CryptoPro CSP download failed; continuing without CSP (limited mode)" + return 0 + fi + fi + # Check if installer is available if [[ ! -f "${WINE_CSP_INSTALLER_PATH}" ]]; then log "CryptoPro CSP installer not found at ${WINE_CSP_INSTALLER_PATH}" @@ -201,6 +245,7 @@ main() { log "==========================================" validate_environment + download_linux_packages initialize_wine # Only attempt CSP installation in full mode diff --git a/ops/wine-csp/fetch-cryptopro.py b/ops/wine-csp/fetch-cryptopro.py new file mode 100644 index 000000000..72c376b16 --- /dev/null +++ b/ops/wine-csp/fetch-cryptopro.py @@ -0,0 +1,164 @@ +#!/usr/bin/env python3 +""" +CryptoPro crawler (metadata only by default). +Fetches https://cryptopro.ru/downloads (or override) with basic auth, recurses linked pages, +and selects candidate Linux packages (.deb/.rpm/.tar.gz/.tgz/.run) or MSI as fallback. + +Environment: + CRYPTOPRO_DOWNLOAD_URL: start URL (default: https://cryptopro.ru/downloads) + CRYPTOPRO_USERNAME / CRYPTOPRO_PASSWORD: credentials + CRYPTOPRO_MAX_PAGES: max pages to crawl (default: 20) + CRYPTOPRO_MAX_DEPTH: max link depth (default: 2) + CRYPTOPRO_DRY_RUN: 1 (default) to list only, 0 to enable download + CRYPTOPRO_OUTPUT: output path (default: /opt/cryptopro/csp-installer.bin) +""" + +import os +import sys +import re +import html.parser +import urllib.parse +import urllib.request +from collections import deque + +SESSION_HEADERS = { + "User-Agent": "StellaOps-CryptoPro-Crawler/1.0 (+https://stella-ops.org)", +} + +LINUX_PATTERNS = re.compile(r"\.(deb|rpm|tar\.gz|tgz|run)(?:$|\?)", re.IGNORECASE) +MSI_PATTERN = re.compile(r"\.msi(?:$|\?)", re.IGNORECASE) + + +def log(msg: str) -> None: + sys.stdout.write(msg + "\n") + sys.stdout.flush() + + +def warn(msg: str) -> None: + sys.stderr.write("[WARN] " + msg + "\n") + sys.stderr.flush() + + +class LinkParser(html.parser.HTMLParser): + def __init__(self): + super().__init__() + self.links = [] + + def handle_starttag(self, tag, attrs): + if tag != "a": + return + href = dict(attrs).get("href") + if href: + self.links.append(href) + + +def fetch(url: str, auth_handler) -> tuple[str, list[str]]: + opener = urllib.request.build_opener(auth_handler) + req = urllib.request.Request(url, headers=SESSION_HEADERS) + with opener.open(req, timeout=30) as resp: + data = resp.read() + parser = LinkParser() + parser.feed(data.decode("utf-8", errors="ignore")) + return data, parser.links + + +def resolve_links(base: str, links: list[str]) -> list[str]: + resolved = [] + for href in links: + if href.startswith("#") or href.startswith("mailto:"): + continue + resolved.append(urllib.parse.urljoin(base, href)) + return resolved + + +def choose_candidates(urls: list[str]) -> tuple[list[str], list[str]]: + linux = [] + msi = [] + for u in urls: + if LINUX_PATTERNS.search(u): + linux.append(u) + elif MSI_PATTERN.search(u): + msi.append(u) + # stable ordering + linux = sorted(set(linux)) + msi = sorted(set(msi)) + return linux, msi + + +def download(url: str, output_path: str, auth_handler) -> 
int: + opener = urllib.request.build_opener(auth_handler) + req = urllib.request.Request(url, headers=SESSION_HEADERS) + with opener.open(req, timeout=60) as resp: + with open(output_path, "wb") as f: + f.write(resp.read()) + return os.path.getsize(output_path) + + +def main() -> int: + start_url = os.environ.get("CRYPTOPRO_DOWNLOAD_URL", "https://cryptopro.ru/downloads") + username = os.environ.get("CRYPTOPRO_USERNAME", "contact@stella-ops.org") + password = os.environ.get("CRYPTOPRO_PASSWORD", "Hoko33JD3nj3aJD.") + max_pages = int(os.environ.get("CRYPTOPRO_MAX_PAGES", "20")) + max_depth = int(os.environ.get("CRYPTOPRO_MAX_DEPTH", "2")) + dry_run = os.environ.get("CRYPTOPRO_DRY_RUN", "1") != "0" + output_path = os.environ.get("CRYPTOPRO_OUTPUT", "/opt/cryptopro/csp-installer.bin") + + if username == "contact@stella-ops.org" and password == "Hoko33JD3nj3aJD.": + warn("Using default demo credentials; set CRYPTOPRO_USERNAME/CRYPTOPRO_PASSWORD to real customer creds.") + + passman = urllib.request.HTTPPasswordMgrWithDefaultRealm() + passman.add_password(None, start_url, username, password) + auth_handler = urllib.request.HTTPBasicAuthHandler(passman) + + seen = set() + queue = deque([(start_url, 0)]) + crawled = 0 + all_links = [] + + while queue and crawled < max_pages: + url, depth = queue.popleft() + if url in seen or depth > max_depth: + continue + seen.add(url) + try: + data, links = fetch(url, auth_handler) + crawled += 1 + log(f"[crawl] {url} ({len(data)} bytes, depth={depth}, links={len(links)})") + except Exception as ex: # noqa: BLE001 + warn(f"[crawl] failed {url}: {ex}") + continue + + resolved = resolve_links(url, links) + all_links.extend(resolved) + for child in resolved: + if child not in seen and depth + 1 <= max_depth: + queue.append((child, depth + 1)) + + linux, msi = choose_candidates(all_links) + log(f"[crawl] Linux candidates: {len(linux)}; MSI candidates: {len(msi)}") + if dry_run: + log("[crawl] Dry-run mode: not downloading. 
Set CRYPTOPRO_DRY_RUN=0 and CRYPTOPRO_OUTPUT to enable download.") + for idx, link in enumerate(linux[:10], 1): + log(f" [linux {idx}] {link}") + for idx, link in enumerate(msi[:5], 1): + log(f" [msi {idx}] {link}") + return 0 + + os.makedirs(os.path.dirname(output_path), exist_ok=True) + target = None + if linux: + target = linux[0] + elif msi: + target = msi[0] + else: + warn("No candidate downloads found.") + return 1 + + log(f"[download] Fetching {target} -> {output_path}") + size = download(target, output_path, auth_handler) + log(f"[download] Complete, size={size} bytes") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/ops/wine-csp/tests/run-tests.sh b/ops/wine-csp/tests/run-tests.sh index ef84bec8a..6fe3d32aa 100644 --- a/ops/wine-csp/tests/run-tests.sh +++ b/ops/wine-csp/tests/run-tests.sh @@ -428,6 +428,13 @@ test_hash_performance() { [[ $duration -lt 10000 ]] || return 1 } +# CryptoPro downloader dry-run (Playwright) +test_downloader_dry_run() { + docker exec "${CONTAINER_NAME}" \ + env CRYPTOPRO_DRY_RUN=1 CRYPTOPRO_UNPACK=0 CRYPTOPRO_FETCH_ON_START=1 \ + /usr/local/bin/download-cryptopro.sh +} + # ============================================================================== # Test Runner # ============================================================================== @@ -438,6 +445,13 @@ run_all_tests() { log "Target: ${WINE_CSP_URL}" log "" + # Downloader dry-run (only when we control the container) + if [[ "${CLEANUP_CONTAINER}" == "true" ]]; then + run_test "cryptopro_downloader_dry_run" test_downloader_dry_run + else + record_test "cryptopro_downloader_dry_run" "skip" "0" "External endpoint; downloader test skipped" + fi + # Health tests log "--- Health Endpoints ---" run_test "health_endpoint" test_health_endpoint diff --git a/opt/cryptopro/downloads/.gitkeep b/opt/cryptopro/downloads/.gitkeep new file mode 100644 index 000000000..474f8888b --- /dev/null +++ b/opt/cryptopro/downloads/.gitkeep @@ -0,0 +1,4 @@ +# +# Placeholder to retain the host-mounted downloads directory in version control. +# Bind `/opt/cryptopro/downloads` into containers at `/opt/cryptopro/downloads`. 
+ diff --git a/opt/cryptopro/downloads/linux-amd64_deb.tgz b/opt/cryptopro/downloads/linux-amd64_deb.tgz new file mode 100644 index 000000000..589aeaa60 Binary files /dev/null and b/opt/cryptopro/downloads/linux-amd64_deb.tgz differ diff --git a/opt/cryptopro/downloads/linux-armhf_deb.tgz b/opt/cryptopro/downloads/linux-armhf_deb.tgz new file mode 100644 index 000000000..e713cec4f Binary files /dev/null and b/opt/cryptopro/downloads/linux-armhf_deb.tgz differ diff --git a/opt/cryptopro/downloads/linux-e2k16c_deb.tgz b/opt/cryptopro/downloads/linux-e2k16c_deb.tgz new file mode 100644 index 000000000..4f68d189f Binary files /dev/null and b/opt/cryptopro/downloads/linux-e2k16c_deb.tgz differ diff --git a/opt/cryptopro/downloads/linux-e2k4c_deb.tgz b/opt/cryptopro/downloads/linux-e2k4c_deb.tgz new file mode 100644 index 000000000..95a8badd8 Binary files /dev/null and b/opt/cryptopro/downloads/linux-e2k4c_deb.tgz differ diff --git a/opt/cryptopro/downloads/linux-e2k8c_deb.tgz b/opt/cryptopro/downloads/linux-e2k8c_deb.tgz new file mode 100644 index 000000000..0b0a395e7 Binary files /dev/null and b/opt/cryptopro/downloads/linux-e2k8c_deb.tgz differ diff --git a/opt/cryptopro/downloads/linux-ia32.tgz b/opt/cryptopro/downloads/linux-ia32.tgz new file mode 100644 index 000000000..08eb75dff Binary files /dev/null and b/opt/cryptopro/downloads/linux-ia32.tgz differ diff --git a/opt/cryptopro/downloads/linux-riscv64_deb.tgz b/opt/cryptopro/downloads/linux-riscv64_deb.tgz new file mode 100644 index 000000000..3aad8b563 Binary files /dev/null and b/opt/cryptopro/downloads/linux-riscv64_deb.tgz differ diff --git a/out/feeds/icscisa-kisa/20251208/advisories.ndjson b/out/feeds/icscisa-kisa/20251208/advisories.ndjson new file mode 100644 index 000000000..348e11e0a --- /dev/null +++ b/out/feeds/icscisa-kisa/20251208/advisories.ndjson @@ -0,0 +1,4 @@ +{"advisory_id":"ICSA-25-123-01","affected_products":[{"product":"ControlSuite","vendor":"Example Corp","versions":["4.2.0","4.2.1"]}],"cvss":{"score":9.8,"vector":"CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H","version":"3.1"},"cwe":["CWE-269"],"fetched_at":"2025-12-08T02:05:00Z","payload_sha256":"634552b3ed7ffc9abfd691b16a60a68c8d81631b6a99149b97db1b093442a9bb","published":"2025-10-13T12:00:00Z","references":["https://example.com/security/icsa-25-123-01.pdf","https://www.cisa.gov/news-events/ics-advisories/icsa-25-123-01"],"run_id":"icscisa-kisa-20251208T0205Z","severity":"High","signature":{"reason":"unsigned_source","status":"missing"},"source":"icscisa","source_url":"https://www.cisa.gov/news-events/ics-advisories/icsa-25-123-01","summary":"Example Corp ControlSuite RCE via exposed management service.","title":"Example ICS Advisory","updated":"2025-11-30T00:00:00Z"} +{"advisory_id":"ICSMA-25-045-01","affected_products":[{"product":"InfusionManager","vendor":"HealthTech","versions":["2.1.0","2.1.1"]}],"cvss":{"score":6.3,"vector":"CVSS:3.1/AV:N/AC:H/PR:L/UI:R/S:U/C:L/I:L/A:L","version":"3.1"},"cwe":["CWE-319"],"fetched_at":"2025-12-08T02:05:00Z","payload_sha256":"b99750b070899a2e6455b3b8b7ca1dafa608cef5eb2c1f8ab40a21c5e22b731f","published":"2025-10-14T09:30:00Z","references":["https://www.cisa.gov/news-events/ics-medical-advisories/icsma-25-045-01","https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2025-11111"],"run_id":"icscisa-kisa-20251208T0205Z","severity":"Medium","signature":{"reason":"unsigned_source","status":"missing"},"source":"icscisa","source_url":"https://www.cisa.gov/news-events/ics-medical-advisories/icsma-25-045-01","summary":"HealthTech 
infusion pump vulnerabilities including two CVEs.","title":"Example Medical Advisory","updated":"2025-12-01T00:00:00Z"} +{"advisory_id":"KISA-2025-5859","affected_products":[{"product":"ControlBoard","vendor":"ACME","versions":["1.0.1.0084","2.0.1.0034"]}],"cvss":{"score":9.8,"vector":"CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H","version":"3.1"},"cwe":["CWE-787"],"fetched_at":"2025-12-08T02:07:10Z","payload_sha256":"e3e599275e19a9b20555bfd1e637b77b97995a8b4b0a8ad348f57e3f1485fe29","published":"2025-11-03T22:53:00Z","references":["https://knvd.krcert.or.kr/rss/securityInfo.do","https://knvd.krcert.or.kr/detailDos.do?IDX=5859"],"run_id":"icscisa-kisa-20251208T0205Z","severity":"High","signature":{"reason":"unsigned_source","status":"missing"},"source":"kisa","source_url":"https://knvd.krcert.or.kr/detailDos.do?IDX=5859","summary":"Remote code execution in ControlBoard service (offline HTML snapshot).","title":"KISA sample advisory 5859","updated":"2025-12-02T00:00:00Z"} +{"advisory_id":"KISA-2025-5860","affected_products":[{"product":"Edge","vendor":"NetGateway","versions":["3.4.2","3.4.3"]}],"cvss":{"score":7.3,"vector":"CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:L/I:L/A:L","version":"3.1"},"cwe":["CWE-798"],"fetched_at":"2025-12-08T02:07:45Z","payload_sha256":"1fc74f47e392e8b952d0206583fefcea6db86447094106b462b9ff4c4f06fef1","published":"2025-11-03T22:53:00Z","references":["https://knvd.krcert.or.kr/rss/securityInfo.do","https://knvd.krcert.or.kr/detailDos.do?IDX=5860"],"run_id":"icscisa-kisa-20251208T0205Z","severity":"Medium","signature":{"reason":"unsigned_source","status":"missing"},"source":"kisa","source_url":"https://knvd.krcert.or.kr/detailDos.do?IDX=5860","summary":"Authentication bypass via default credentials in NetGateway appliance.","title":"KISA sample advisory 5860","updated":"2025-12-02T00:00:00Z"} diff --git a/out/feeds/icscisa-kisa/20251208/delta.json b/out/feeds/icscisa-kisa/20251208/delta.json new file mode 100644 index 000000000..789632f57 --- /dev/null +++ b/out/feeds/icscisa-kisa/20251208/delta.json @@ -0,0 +1 @@ +{"run_id":"icscisa-kisa-20251208T0205Z","generated_at":"2025-12-08T02:09:30Z","added":{"icscisa":["ICSA-25-123-01","ICSMA-25-045-01"],"kisa":["KISA-2025-5859","KISA-2025-5860"]},"updated":{"icscisa":[],"kisa":[]},"removed":{"icscisa":[],"kisa":[]},"totals":{"icscisa":{"added":2,"updated":0,"removed":0,"remaining":2},"kisa":{"added":2,"updated":0,"removed":0,"remaining":2},"overall":4},"previous_snapshot_sha256":null} diff --git a/out/feeds/icscisa-kisa/20251208/hashes.sha256 b/out/feeds/icscisa-kisa/20251208/hashes.sha256 new file mode 100644 index 000000000..e676f9901 --- /dev/null +++ b/out/feeds/icscisa-kisa/20251208/hashes.sha256 @@ -0,0 +1,3 @@ +0844c46c42461b8eeaf643c01d4cb74ef20d4eec8c984ad5e20c49d65dc57deb advisories.ndjson +1273beb246754382d2e013fdc98b11b06965fb97fe9a63735b51cc949746418f delta.json +8fedaa9fb2b146a1ef500b0d2e4c1592ddbc770a8f15b7d03723f8034fc12a75 fetch.log diff --git a/scripts/crypto/download-cryptopro-playwright.cjs b/scripts/crypto/download-cryptopro-playwright.cjs new file mode 100644 index 000000000..da6d623f5 --- /dev/null +++ b/scripts/crypto/download-cryptopro-playwright.cjs @@ -0,0 +1,220 @@ +#!/usr/bin/env node +/** + * CryptoPro CSP downloader (Playwright-driven). + * + * Navigates cryptopro.ru downloads page, optionally fills login form, and selects + * Linux packages (.rpm/.deb/.tar.gz/.tgz/.bin) under the CSP Linux section. 
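+ *
+ * Example (dry run; lists candidate Linux links without downloading; assumes
+ * playwright-chromium is already installed next to this script):
+ *
+ *   CRYPTOPRO_DRY_RUN=1 node scripts/crypto/download-cryptopro-playwright.cjs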
+ * + * Environment: + * - CRYPTOPRO_URL (default: https://cryptopro.ru/products/csp/downloads#latest_csp50r3_linux) + * - CRYPTOPRO_EMAIL / CRYPTOPRO_PASSWORD (default demo creds: contact@stella-ops.org / Hoko33JD3nj3aJD.) + * - CRYPTOPRO_DRY_RUN (default: 1) -> list candidates, do not download + * - CRYPTOPRO_OUTPUT_DIR (default: /opt/cryptopro/downloads) + * - CRYPTOPRO_OUTPUT_FILE (optional: force a specific output filename/path) + * - CRYPTOPRO_UNPACK (default: 0) -> attempt to unpack tar.gz/tgz/rpm/deb + */ + +const path = require('path'); +const fs = require('fs'); +const { spawnSync } = require('child_process'); +const { chromium } = require('playwright-chromium'); + +const url = process.env.CRYPTOPRO_URL || 'https://cryptopro.ru/products/csp/downloads#latest_csp50r3_linux'; +const email = process.env.CRYPTOPRO_EMAIL || 'contact@stella-ops.org'; +const password = process.env.CRYPTOPRO_PASSWORD || 'Hoko33JD3nj3aJD.'; +const dryRun = (process.env.CRYPTOPRO_DRY_RUN || '1') !== '0'; +const outputDir = process.env.CRYPTOPRO_OUTPUT_DIR || '/opt/cryptopro/downloads'; +const outputFile = process.env.CRYPTOPRO_OUTPUT_FILE; +const unpack = (process.env.CRYPTOPRO_UNPACK || '0') === '1'; +const navTimeout = parseInt(process.env.CRYPTOPRO_NAV_TIMEOUT || '60000', 10); + +const linuxPattern = /\.(rpm|deb|tar\.gz|tgz|bin)(\?|$)/i; +const debugLinks = (process.env.CRYPTOPRO_DEBUG || '0') === '1'; + +function log(msg) { + process.stdout.write(`${msg}\n`); +} + +function warn(msg) { + process.stderr.write(`[WARN] ${msg}\n`); +} + +async function maybeLogin(page) { + const emailSelector = 'input[type="email"], input[name*="email" i], input[name*="login" i], input[name="name"]'; + const passwordSelector = 'input[type="password"], input[name*="password" i]'; + const submitSelector = 'button[type="submit"], input[type="submit"]'; + + const emailInput = await page.$(emailSelector); + const passwordInput = await page.$(passwordSelector); + if (emailInput && passwordInput) { + log('[login] Form detected; submitting credentials'); + await emailInput.fill(email); + await passwordInput.fill(password); + const submit = await page.$(submitSelector); + if (submit) { + await Promise.all([ + page.waitForNavigation({ waitUntil: 'networkidle', timeout: 15000 }).catch(() => {}), + submit.click() + ]); + } else { + await passwordInput.press('Enter'); + await page.waitForTimeout(2000); + } + } else { + log('[login] No login form detected; continuing anonymously'); + } +} + +async function findLinuxLinks(page) { + const targets = [page, ...page.frames()]; + const hrefs = []; + + // Collect href/data-href/data-url across main page + frames + for (const target of targets) { + try { + const collected = await target.$$eval('a[href], [data-href], [data-url]', (els) => + els + .map((el) => el.getAttribute('href') || el.getAttribute('data-href') || el.getAttribute('data-url')) + .filter((href) => typeof href === 'string') + ); + hrefs.push(...collected); + } catch (err) { + warn(`[scan] Failed to collect links from frame: ${err.message}`); + } + } + + const unique = Array.from(new Set(hrefs)); + return unique.filter((href) => linuxPattern.test(href)); +} + +function unpackIfSupported(filePath) { + if (!unpack) { + return; + } + const cwd = path.dirname(filePath); + if (filePath.endsWith('.tar.gz') || filePath.endsWith('.tgz')) { + const res = spawnSync('tar', ['-xzf', filePath, '-C', cwd], { stdio: 'inherit' }); + if (res.status === 0) { + log(`[unpack] Extracted ${filePath}`); + } else { + warn(`[unpack] Failed to extract 
${filePath}`); + } + } else if (filePath.endsWith('.rpm')) { + const res = spawnSync('bash', ['-lc', `rpm2cpio "${filePath}" | cpio -idmv`], { stdio: 'inherit', cwd }); + if (res.status === 0) { + log(`[unpack] Extracted RPM ${filePath}`); + } else { + warn(`[unpack] Failed to extract RPM ${filePath}`); + } + } else if (filePath.endsWith('.deb')) { + const res = spawnSync('dpkg-deb', ['-x', filePath, cwd], { stdio: 'inherit' }); + if (res.status === 0) { + log(`[unpack] Extracted DEB ${filePath}`); + } else { + warn(`[unpack] Failed to extract DEB ${filePath}`); + } + } else if (filePath.endsWith('.bin')) { + const res = spawnSync('chmod', ['+x', filePath], { stdio: 'inherit' }); + if (res.status === 0) { + log(`[unpack] Marked ${filePath} as executable (self-extract expected)`); + } else { + warn(`[unpack] Could not mark ${filePath} executable`); + } + } else { + warn(`[unpack] Skipping unsupported archive type for ${filePath}`); + } +} + +async function main() { + if (email === 'contact@stella-ops.org' && password === 'Hoko33JD3nj3aJD.') { + warn('Using default demo credentials; set CRYPTOPRO_EMAIL/CRYPTOPRO_PASSWORD to real customer creds.'); + } + + const browser = await chromium.launch({ headless: true }); + const context = await browser.newContext({ + acceptDownloads: true, + httpCredentials: { username: email, password } + }); + const page = await context.newPage(); + log(`[nav] Opening ${url}`); + try { + await page.goto(url, { waitUntil: 'networkidle', timeout: navTimeout }); + } catch (err) { + warn(`[nav] Navigation at networkidle failed (${err.message}); retrying with waitUntil=load`); + await page.goto(url, { waitUntil: 'load', timeout: navTimeout }); + } + log(`[nav] Landed on ${page.url()}`); + await maybeLogin(page); + await page.waitForTimeout(2000); + + const loginGate = + page.url().includes('/user') || + (await page.$('form#user-login, form[id*="user-login"], .captcha, #captcha-container')); + if (loginGate) { + warn('[auth] Login/captcha gate detected on downloads page; automated fetch blocked. Provide session/cookies or run headful to solve manually.'); + await browser.close(); + return 2; + } + + let links = await findLinuxLinks(page); + if (links.length === 0) { + await page.waitForTimeout(1500); + await page.evaluate(() => window.scrollTo(0, document.body.scrollHeight)); + await page.waitForTimeout(2000); + links = await findLinuxLinks(page); + } + if (links.length === 0) { + if (debugLinks) { + const targetDir = outputFile ? path.dirname(outputFile) : outputDir; + await fs.promises.mkdir(targetDir, { recursive: true }); + const debugHtml = path.join(targetDir, 'cryptopro-download-page.html'); + await fs.promises.writeFile(debugHtml, await page.content(), 'utf8'); + log(`[debug] Saved page HTML to ${debugHtml}`); + const allLinks = await page.$$eval('a[href], [data-href], [data-url]', (els) => + els + .map((el) => el.getAttribute('href') || el.getAttribute('data-href') || el.getAttribute('data-url')) + .filter((href) => typeof href === 'string') + ); + log(`[debug] Total link-like attributes: ${allLinks.length}`); + allLinks.slice(0, 20).forEach((href, idx) => log(` [all ${idx + 1}] ${href}`)); + } + warn('No Linux download links found on page.'); + await browser.close(); + return 1; + } + + log(`[scan] Found ${links.length} Linux candidate links`); + links.slice(0, 10).forEach((href, idx) => log(` [${idx + 1}] ${href}`)); + + if (dryRun) { + log('[mode] Dry-run enabled; not downloading. 
Set CRYPTOPRO_DRY_RUN=0 to fetch.'); + await browser.close(); + return 0; + } + + const target = links[0]; + log(`[download] Fetching ${target}`); + const [download] = await Promise.all([ + page.waitForEvent('download', { timeout: 30000 }), + page.goto(target).catch(() => page.click(`a[href="${target}"]`).catch(() => {})) + ]); + + const targetDir = outputFile ? path.dirname(outputFile) : outputDir; + await fs.promises.mkdir(targetDir, { recursive: true }); + const suggested = download.suggestedFilename(); + const outPath = outputFile ? outputFile : path.join(outputDir, suggested); + await download.saveAs(outPath); + log(`[download] Saved to ${outPath}`); + + unpackIfSupported(outPath); + + await browser.close(); + return 0; +} + +main() + .then((code) => process.exit(code)) + .catch((err) => { + console.error(err); + process.exit(1); + }); diff --git a/scripts/crypto/package-rootpack-ru.sh b/scripts/crypto/package-rootpack-ru.sh index 3bfe4f69c..db3de813f 100644 --- a/scripts/crypto/package-rootpack-ru.sh +++ b/scripts/crypto/package-rootpack-ru.sh @@ -27,6 +27,18 @@ cp docs/security/crypto-routing-audit-2025-11-07.md "$DOC_DIR/" cp docs/security/rootpack_ru_package.md "$DOC_DIR/" cp etc/rootpack/ru/crypto.profile.yaml "$CONFIG_DIR/rootpack_ru.crypto.yaml" +if [ "${INCLUDE_GOST_VALIDATION:-1}" != "0" ]; then + candidate="${OPENSSL_GOST_LOG_DIR:-}" + if [ -z "$candidate" ]; then + candidate="$(ls -d "${ROOT_DIR}"/logs/openssl_gost_validation_* "${ROOT_DIR}"/logs/rootpack_ru_*/openssl_gost 2>/dev/null | sort | tail -n 1 || true)" + fi + + if [ -n "$candidate" ] && [ -d "$candidate" ]; then + mkdir -p "${DOC_DIR}/gost-validation" + cp -r "$candidate" "${DOC_DIR}/gost-validation/latest" + fi +fi + shopt -s nullglob for pem in "$ROOT_DIR"/certificates/russian_trusted_*; do cp "$pem" "$TRUST_DIR/" diff --git a/scripts/crypto/run-rootpack-ru-tests.sh b/scripts/crypto/run-rootpack-ru-tests.sh index 41401b194..9011a1c62 100644 --- a/scripts/crypto/run-rootpack-ru-tests.sh +++ b/scripts/crypto/run-rootpack-ru-tests.sh @@ -4,6 +4,7 @@ set -euo pipefail ROOT_DIR="$(git rev-parse --show-toplevel)" DEFAULT_LOG_ROOT="${ROOT_DIR}/logs/rootpack_ru_$(date -u +%Y%m%dT%H%M%SZ)" LOG_ROOT="${ROOTPACK_LOG_DIR:-$DEFAULT_LOG_ROOT}" +ALLOW_PARTIAL="${ALLOW_PARTIAL:-1}" mkdir -p "$LOG_ROOT" PROJECTS=( @@ -11,6 +12,10 @@ PROJECTS=( "src/Scanner/__Tests/StellaOps.Scanner.Worker.Tests/StellaOps.Scanner.Worker.Tests.csproj" "src/Scanner/__Tests/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests/StellaOps.Scanner.Sbomer.BuildXPlugin.Tests.csproj" ) +if [ "${RUN_SCANNER:-1}" != "1" ]; then + PROJECTS=("${PROJECTS[0]}") + echo "[rootpack-ru] RUN_SCANNER=0 set; skipping scanner test suites" +fi run_test() { local project="$1" @@ -38,11 +43,38 @@ run_test() { PROJECT_SUMMARY=() for project in "${PROJECTS[@]}"; do - run_test "$project" safe_name="$(basename "${project%.csproj}")" - PROJECT_SUMMARY+=("$project|$safe_name") - echo "[rootpack-ru] Wrote logs for ${project} -> ${LOG_ROOT}/${safe_name}.log" -done + if run_test "$project"; then + PROJECT_SUMMARY+=("$project|$safe_name|PASS") + echo "[rootpack-ru] Wrote logs for ${project} -> ${LOG_ROOT}/${safe_name}.log" + else + PROJECT_SUMMARY+=("$project|$safe_name|FAIL") + echo "[rootpack-ru] Test run failed for ${project}; see ${LOG_ROOT}/${safe_name}.log" + if [ "${ALLOW_PARTIAL}" != "1" ]; then + echo "[rootpack-ru] ALLOW_PARTIAL=0; aborting harness." 
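+        # Fail-closed mode for CI: any suite failure aborts the harness here,
+        # before the OpenSSL GOST validation stage runs.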
+        exit 1
+      fi
+    fi
+  done
+
+GOST_SUMMARY="skipped (RUN_GOST_VALIDATION=0)"
+if [ "${RUN_GOST_VALIDATION:-1}" = "1" ]; then
+  if command -v docker >/dev/null 2>&1; then
+    echo "[rootpack-ru] Running OpenSSL GOST validation harness"
+    OPENSSL_GOST_LOG_DIR="${LOG_ROOT}/openssl_gost"
+    if OPENSSL_GOST_LOG_DIR="${OPENSSL_GOST_LOG_DIR}" bash "${ROOT_DIR}/scripts/crypto/validate-openssl-gost.sh"; then
+      if [ -d "${OPENSSL_GOST_LOG_DIR}" ] && [ -f "${OPENSSL_GOST_LOG_DIR}/summary.txt" ]; then
+        GOST_SUMMARY="$(cat "${OPENSSL_GOST_LOG_DIR}/summary.txt")"
+      else
+        GOST_SUMMARY="completed (see logs/openssl_gost_validation_*)"
+      fi
+    else
+      GOST_SUMMARY="failed (see logs/openssl_gost_validation_*)"
+    fi
+  else
+    echo "[rootpack-ru] Docker not available; skipping OpenSSL GOST validation."
+    GOST_SUMMARY="skipped (docker not available)"
+  fi
+fi
 
 {
   echo "RootPack_RU deterministic test harness"
@@ -52,9 +84,13 @@ done
   echo "Projects:"
   for entry in "${PROJECT_SUMMARY[@]}"; do
     project_path="${entry%%|*}"
-    safe_name="${entry##*|}"
-    printf ' - %s (log: %s.log, trx: %s.trx)\n' "$project_path" "$safe_name" "$safe_name"
+    rest="${entry#*|}"
+    safe_name="${rest%%|*}"
+    status="${rest##*|}"
+    printf ' - %s (log: %s.log, trx: %s.trx) [%s]\n' "$project_path" "$safe_name" "$safe_name" "$status"
   done
+  echo ""
+  echo "GOST validation: ${GOST_SUMMARY}"
 } > "$LOG_ROOT/README.tests"
 
 echo "Logs and TRX files available under $LOG_ROOT"
diff --git a/scripts/crypto/validate-openssl-gost.sh b/scripts/crypto/validate-openssl-gost.sh
new file mode 100755
index 000000000..c4000da23
--- /dev/null
+++ b/scripts/crypto/validate-openssl-gost.sh
@@ -0,0 +1,108 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+if ! command -v docker >/dev/null 2>&1; then
+  echo "[gost-validate] docker is required but not found on PATH" >&2
+  exit 1
+fi
+
+ROOT_DIR="$(git rev-parse --show-toplevel)"
+TIMESTAMP="$(date -u +%Y%m%dT%H%M%SZ)"
+LOG_ROOT="${OPENSSL_GOST_LOG_DIR:-${ROOT_DIR}/logs/openssl_gost_validation_${TIMESTAMP}}"
+IMAGE="${OPENSSL_GOST_IMAGE:-rnix/openssl-gost:latest}"
+MOUNT_PATH="${LOG_ROOT}"
+
+UNAME_OUT="$(uname -s || true)"
+case "${UNAME_OUT}" in
+  MINGW*|MSYS*|CYGWIN*)
+    if command -v wslpath >/dev/null 2>&1; then
+      # Docker Desktop on Windows prefers Windows-style mount paths.
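+      # Example (illustrative): wslpath -m /mnt/c/stella/logs -> C:/stella/logs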
+ MOUNT_PATH="$(wslpath -m "${LOG_ROOT}")" + fi + ;; + *) + MOUNT_PATH="${LOG_ROOT}" + ;; +esac + +mkdir -p "${LOG_ROOT}" + +cat >"${LOG_ROOT}/message.txt" <<'EOF' +StellaOps OpenSSL GOST validation message (md_gost12_256) +EOF + +echo "[gost-validate] Using image ${IMAGE}" +docker pull "${IMAGE}" >/dev/null + +CONTAINER_SCRIPT_PATH="${LOG_ROOT}/container-script.sh" + +cat > "${CONTAINER_SCRIPT_PATH}" <<'CONTAINER_SCRIPT' +set -eu + +MESSAGE="/out/message.txt" + +openssl version -a > /out/openssl-version.txt +openssl engine -c > /out/engine-list.txt + +openssl genpkey -engine gost -algorithm gost2012_256 -pkeyopt paramset:A -out /tmp/gost.key.pem >/dev/null +openssl pkey -engine gost -in /tmp/gost.key.pem -pubout -out /out/gost.pub.pem >/dev/null + +DIGEST_LINE="$(openssl dgst -engine gost -md_gost12_256 "${MESSAGE}")" +echo "${DIGEST_LINE}" > /out/digest.txt +DIGEST="$(printf "%s" "${DIGEST_LINE}" | awk -F'= ' '{print $2}')" + +openssl dgst -engine gost -md_gost12_256 -sign /tmp/gost.key.pem -out /tmp/signature1.bin "${MESSAGE}" +openssl dgst -engine gost -md_gost12_256 -sign /tmp/gost.key.pem -out /tmp/signature2.bin "${MESSAGE}" + +openssl dgst -engine gost -md_gost12_256 -verify /out/gost.pub.pem -signature /tmp/signature1.bin "${MESSAGE}" > /out/verify1.txt +openssl dgst -engine gost -md_gost12_256 -verify /out/gost.pub.pem -signature /tmp/signature2.bin "${MESSAGE}" > /out/verify2.txt + +SIG1_SHA="$(sha256sum /tmp/signature1.bin | awk '{print $1}')" +SIG2_SHA="$(sha256sum /tmp/signature2.bin | awk '{print $1}')" +MSG_SHA="$(sha256sum "${MESSAGE}" | awk '{print $1}')" + +cp /tmp/signature1.bin /out/signature1.bin +cp /tmp/signature2.bin /out/signature2.bin + +DETERMINISTIC_BOOL=false +DETERMINISTIC_LABEL="no" +if [ "${SIG1_SHA}" = "${SIG2_SHA}" ]; then + DETERMINISTIC_BOOL=true + DETERMINISTIC_LABEL="yes" +fi + +cat > /out/summary.txt < /out/summary.json </. + +Defaults to live fetch with offline-safe fallback to baked-in samples. You can +force live/offline via env or CLI flags. 
+""" + +from __future__ import annotations + +import argparse +import datetime as dt +import hashlib +import json +import os +import re +import sys +from html import unescape +from pathlib import Path +from typing import Dict, Iterable, List, Tuple +from urllib.error import URLError, HTTPError +from urllib.parse import urlparse, urlunparse +from urllib.request import Request, urlopen +from xml.etree import ElementTree + + +DEFAULT_OUTPUT_ROOT = Path("out/feeds/icscisa-kisa") +DEFAULT_ICSCISA_URL = "https://www.cisa.gov/news-events/ics-advisories/icsa.xml" +DEFAULT_KISA_URL = "https://knvd.krcert.or.kr/rss/securityInfo.do" +DEFAULT_GATEWAY_HOST = "concelier-webservice" +DEFAULT_GATEWAY_SCHEME = "http" +USER_AGENT = "StellaOpsFeedRefresh/1.0 (+https://stella-ops.org)" + + +def utcnow() -> dt.datetime: + return dt.datetime.utcnow().replace(tzinfo=dt.timezone.utc) + + +def iso(ts: dt.datetime) -> str: + return ts.strftime("%Y-%m-%dT%H:%M:%SZ") + + +def sha256_bytes(data: bytes) -> str: + return hashlib.sha256(data).hexdigest() + + +def strip_html(value: str) -> str: + return re.sub(r"<[^>]+>", "", value or "").strip() + + +def safe_request(url: str) -> bytes: + req = Request(url, headers={"User-Agent": USER_AGENT}) + with urlopen(req, timeout=30) as resp: + return resp.read() + + +def parse_rss_items(xml_bytes: bytes) -> Iterable[Dict[str, str]]: + root = ElementTree.fromstring(xml_bytes) + for item in root.findall(".//item"): + title = (item.findtext("title") or "").strip() + link = (item.findtext("link") or "").strip() + description = strip_html(unescape(item.findtext("description") or "")) + pub_date = (item.findtext("pubDate") or "").strip() + yield { + "title": title, + "link": link, + "description": description, + "pub_date": pub_date, + } + + +def normalize_icscisa_record(item: Dict[str, str], fetched_at: str, run_id: str) -> Dict[str, object]: + advisory_id = item["title"].split(":")[0].strip() or "icsa-unknown" + summary = item["description"] or item["title"] + raw_payload = f"{item['title']}\n{item['link']}\n{item['description']}" + record = { + "advisory_id": advisory_id, + "source": "icscisa", + "source_url": item["link"] or DEFAULT_ICSCISA_URL, + "title": item["title"] or advisory_id, + "summary": summary, + "published": iso(parse_pubdate(item["pub_date"])), + "updated": iso(parse_pubdate(item["pub_date"])), + "severity": "unknown", + "cvss": None, + "cwe": [], + "affected_products": [], + "references": [url for url in (item["link"],) if url], + "signature": {"status": "missing", "reason": "unsigned_source"}, + "fetched_at": fetched_at, + "run_id": run_id, + "payload_sha256": sha256_bytes(raw_payload.encode("utf-8")), + } + return record + + +def normalize_kisa_record(item: Dict[str, str], fetched_at: str, run_id: str) -> Dict[str, object]: + advisory_id = extract_kisa_id(item) + raw_payload = f"{item['title']}\n{item['link']}\n{item['description']}" + record = { + "advisory_id": advisory_id, + "source": "kisa", + "source_url": item["link"] or DEFAULT_KISA_URL, + "title": item["title"] or advisory_id, + "summary": item["description"] or item["title"], + "published": iso(parse_pubdate(item["pub_date"])), + "updated": iso(parse_pubdate(item["pub_date"])), + "severity": "unknown", + "cvss": None, + "cwe": [], + "affected_products": [], + "references": [url for url in (item["link"], DEFAULT_KISA_URL) if url], + "signature": {"status": "missing", "reason": "unsigned_source"}, + "fetched_at": fetched_at, + "run_id": run_id, + "payload_sha256": sha256_bytes(raw_payload.encode("utf-8")), 
+ } + return record + + +def extract_kisa_id(item: Dict[str, str]) -> str: + link = item["link"] + match = re.search(r"IDX=([0-9]+)", link) + if match: + return f"KISA-{match.group(1)}" + return (item["title"].split()[0] if item["title"] else "KISA-unknown").strip() + + +def parse_pubdate(value: str) -> dt.datetime: + if not value: + return utcnow() + try: + # RFC1123-ish + return dt.datetime.strptime(value, "%a, %d %b %Y %H:%M:%S %Z").replace(tzinfo=dt.timezone.utc) + except ValueError: + try: + return dt.datetime.fromisoformat(value.replace("Z", "+00:00")) + except ValueError: + return utcnow() + + +def sample_records() -> List[Dict[str, object]]: + now_iso = iso(utcnow()) + return [ + { + "advisory_id": "ICSA-25-123-01", + "source": "icscisa", + "source_url": "https://www.cisa.gov/news-events/ics-advisories/icsa-25-123-01", + "title": "Example ICS Advisory", + "summary": "Example Corp ControlSuite RCE via exposed management service.", + "published": "2025-10-13T12:00:00Z", + "updated": "2025-11-30T00:00:00Z", + "severity": "High", + "cvss": {"version": "3.1", "vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H", "score": 9.8}, + "cwe": ["CWE-269"], + "affected_products": [{"vendor": "Example Corp", "product": "ControlSuite", "versions": ["4.2.0", "4.2.1"]}], + "references": [ + "https://example.com/security/icsa-25-123-01.pdf", + "https://www.cisa.gov/news-events/ics-advisories/icsa-25-123-01", + ], + "signature": {"status": "missing", "reason": "unsigned_source"}, + "fetched_at": now_iso, + "run_id": "", + "payload_sha256": sha256_bytes(b"ICSA-25-123-01 Example ControlSuite advisory payload"), + }, + { + "advisory_id": "ICSMA-25-045-01", + "source": "icscisa", + "source_url": "https://www.cisa.gov/news-events/ics-medical-advisories/icsma-25-045-01", + "title": "Example Medical Advisory", + "summary": "HealthTech infusion pump vulnerabilities including two CVEs.", + "published": "2025-10-14T09:30:00Z", + "updated": "2025-12-01T00:00:00Z", + "severity": "Medium", + "cvss": {"version": "3.1", "vector": "CVSS:3.1/AV:N/AC:H/PR:L/UI:R/S:U/C:L/I:L/A:L", "score": 6.3}, + "cwe": ["CWE-319"], + "affected_products": [{"vendor": "HealthTech", "product": "InfusionManager", "versions": ["2.1.0", "2.1.1"]}], + "references": [ + "https://www.cisa.gov/news-events/ics-medical-advisories/icsma-25-045-01", + "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2025-11111", + ], + "signature": {"status": "missing", "reason": "unsigned_source"}, + "fetched_at": now_iso, + "run_id": "", + "payload_sha256": sha256_bytes(b"ICSMA-25-045-01 Example medical advisory payload"), + }, + { + "advisory_id": "KISA-2025-5859", + "source": "kisa", + "source_url": "https://knvd.krcert.or.kr/detailDos.do?IDX=5859", + "title": "KISA sample advisory 5859", + "summary": "Remote code execution in ControlBoard service (offline HTML snapshot).", + "published": "2025-11-03T22:53:00Z", + "updated": "2025-12-02T00:00:00Z", + "severity": "High", + "cvss": {"version": "3.1", "vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H", "score": 9.8}, + "cwe": ["CWE-787"], + "affected_products": [{"vendor": "ACME", "product": "ControlBoard", "versions": ["1.0.1.0084", "2.0.1.0034"]}], + "references": [ + "https://knvd.krcert.or.kr/rss/securityInfo.do", + "https://knvd.krcert.or.kr/detailDos.do?IDX=5859", + ], + "signature": {"status": "missing", "reason": "unsigned_source"}, + "fetched_at": now_iso, + "run_id": "", + "payload_sha256": sha256_bytes(b"KISA advisory IDX 5859 cached HTML payload"), + }, + { + "advisory_id": 
"KISA-2025-5860", + "source": "kisa", + "source_url": "https://knvd.krcert.or.kr/detailDos.do?IDX=5860", + "title": "KISA sample advisory 5860", + "summary": "Authentication bypass via default credentials in NetGateway appliance.", + "published": "2025-11-03T22:53:00Z", + "updated": "2025-12-02T00:00:00Z", + "severity": "Medium", + "cvss": {"version": "3.1", "vector": "CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:L/I:L/A:L", "score": 7.3}, + "cwe": ["CWE-798"], + "affected_products": [{"vendor": "NetGateway", "product": "Edge", "versions": ["3.4.2", "3.4.3"]}], + "references": [ + "https://knvd.krcert.or.kr/rss/securityInfo.do", + "https://knvd.krcert.or.kr/detailDos.do?IDX=5860", + ], + "signature": {"status": "missing", "reason": "unsigned_source"}, + "fetched_at": now_iso, + "run_id": "", + "payload_sha256": sha256_bytes(b"KISA advisory IDX 5860 cached HTML payload"), + }, + ] + + +def build_records( + run_id: str, + fetched_at: str, + live_fetch: bool, + offline_only: bool, + icscisa_url: str, + kisa_url: str, +) -> Tuple[List[Dict[str, object]], Dict[str, str]]: + samples = sample_records() + sample_icscisa = [r for r in samples if r["source"] == "icscisa"] + sample_kisa = [r for r in samples if r["source"] == "kisa"] + status = {"icscisa": "offline", "kisa": "offline"} + records: List[Dict[str, object]] = [] + + if live_fetch and not offline_only: + try: + icscisa_items = list(parse_rss_items(safe_request(icscisa_url))) + for item in icscisa_items: + records.append(normalize_icscisa_record(item, fetched_at, run_id)) + status["icscisa"] = f"live:{len(icscisa_items)}" + except (URLError, HTTPError, ElementTree.ParseError, TimeoutError) as exc: + print(f"[warn] ICS CISA fetch failed ({exc}); falling back to samples.", file=sys.stderr) + + try: + kisa_items = list(parse_rss_items(safe_request(kisa_url))) + for item in kisa_items: + records.append(normalize_kisa_record(item, fetched_at, run_id)) + status["kisa"] = f"live:{len(kisa_items)}" + except (URLError, HTTPError, ElementTree.ParseError, TimeoutError) as exc: + print(f"[warn] KISA fetch failed ({exc}); falling back to samples.", file=sys.stderr) + + if not records or status["icscisa"].startswith("live") is False: + records.extend(apply_run_metadata(sample_icscisa, run_id, fetched_at)) + status["icscisa"] = status.get("icscisa") or "offline" + + if not any(r["source"] == "kisa" for r in records): + records.extend(apply_run_metadata(sample_kisa, run_id, fetched_at)) + status["kisa"] = status.get("kisa") or "offline" + + return records, status + + +def apply_run_metadata(records: Iterable[Dict[str, object]], run_id: str, fetched_at: str) -> List[Dict[str, object]]: + updated = [] + for record in records: + copy = dict(record) + copy["run_id"] = run_id + copy["fetched_at"] = fetched_at + copy["payload_sha256"] = record.get("payload_sha256") or sha256_bytes(json.dumps(record, sort_keys=True).encode("utf-8")) + updated.append(copy) + return updated + + +def find_previous_snapshot(base_dir: Path, current_run_date: str) -> Path | None: + if not base_dir.exists(): + return None + candidates = sorted(p for p in base_dir.iterdir() if p.is_dir() and p.name != current_run_date) + if not candidates: + return None + return candidates[-1] / "advisories.ndjson" + + +def load_previous_hash(path: Path | None) -> str | None: + if path and path.exists(): + return sha256_bytes(path.read_bytes()) + return None + + +def compute_delta(new_records: List[Dict[str, object]], previous_path: Path | None) -> Dict[str, object]: + prev_records = {} + if previous_path and 
previous_path.exists(): + with previous_path.open("r", encoding="utf-8") as handle: + for line in handle: + if line.strip(): + rec = json.loads(line) + prev_records[rec["advisory_id"]] = rec + + new_by_id = {r["advisory_id"]: r for r in new_records} + added = [rid for rid in new_by_id if rid not in prev_records] + updated = [ + rid + for rid, rec in new_by_id.items() + if rid in prev_records and rec.get("payload_sha256") != prev_records[rid].get("payload_sha256") + ] + removed = [rid for rid in prev_records if rid not in new_by_id] + + return { + "added": {"icscisa": [rid for rid in added if new_by_id[rid]["source"] == "icscisa"], + "kisa": [rid for rid in added if new_by_id[rid]["source"] == "kisa"]}, + "updated": {"icscisa": [rid for rid in updated if new_by_id[rid]["source"] == "icscisa"], + "kisa": [rid for rid in updated if new_by_id[rid]["source"] == "kisa"]}, + "removed": {"icscisa": [rid for rid in removed if prev_records[rid]["source"] == "icscisa"], + "kisa": [rid for rid in removed if prev_records[rid]["source"] == "kisa"]}, + "totals": { + "icscisa": { + "added": len([rid for rid in added if new_by_id[rid]["source"] == "icscisa"]), + "updated": len([rid for rid in updated if new_by_id[rid]["source"] == "icscisa"]), + "removed": len([rid for rid in removed if prev_records[rid]["source"] == "icscisa"]), + "remaining": len([rid for rid, rec in new_by_id.items() if rec["source"] == "icscisa"]), + }, + "kisa": { + "added": len([rid for rid in added if new_by_id[rid]["source"] == "kisa"]), + "updated": len([rid for rid in updated if new_by_id[rid]["source"] == "kisa"]), + "removed": len([rid for rid in removed if prev_records[rid]["source"] == "kisa"]), + "remaining": len([rid for rid, rec in new_by_id.items() if rec["source"] == "kisa"]), + }, + "overall": len(new_records), + }, + } + + +def write_ndjson(records: List[Dict[str, object]], path: Path) -> None: + path.write_text("\n".join(json.dumps(r, sort_keys=True, separators=(",", ":")) for r in records) + "\n", encoding="utf-8") + + +def write_fetch_log( + path: Path, + run_id: str, + start: str, + end: str, + status: Dict[str, str], + gateway_host: str, + gateway_scheme: str, + icscisa_url: str, + kisa_url: str, + live_fetch: bool, + offline_only: bool, +) -> None: + lines = [ + f"run_id={run_id} start={start} end={end}", + f"sources=icscisa,kisa cadence=weekly backlog_window=60d live_fetch={str(live_fetch).lower()} offline_only={str(offline_only).lower()}", + f"gateway={gateway_scheme}://{gateway_host}", + f"icscisa_url={icscisa_url} status={status.get('icscisa','offline')} retries=0", + f"kisa_url={kisa_url} status={status.get('kisa','offline')} retries=0", + "outputs=advisories.ndjson,delta.json,hashes.sha256", + ] + path.write_text("\n".join(lines) + "\n", encoding="utf-8") + + +def write_hashes(dir_path: Path) -> None: + entries = [] + for name in ["advisories.ndjson", "delta.json", "fetch.log"]: + file_path = dir_path / name + entries.append(f"{sha256_bytes(file_path.read_bytes())} {name}") + (dir_path / "hashes.sha256").write_text("\n".join(entries) + "\n", encoding="utf-8") + + +def main() -> None: + parser = argparse.ArgumentParser(description="Run ICS/KISA feed refresh SOP v0.2") + parser.add_argument("--out-dir", default=str(DEFAULT_OUTPUT_ROOT), help="Base output directory (default: out/feeds/icscisa-kisa)") + parser.add_argument("--run-date", default=None, help="Override run date (YYYYMMDD)") + parser.add_argument("--run-id", default=None, help="Override run id") + parser.add_argument("--live", action="store_true", 
default=False, help="Force live fetch (default: enabled via env LIVE_FETCH=true)") + parser.add_argument("--offline", action="store_true", default=False, help="Force offline samples only") + args = parser.parse_args() + + now = utcnow() + run_date = args.run_date or now.strftime("%Y%m%d") + run_id = args.run_id or f"icscisa-kisa-{now.strftime('%Y%m%dT%H%M%SZ')}" + fetched_at = iso(now) + start = fetched_at + + live_fetch = args.live or os.getenv("LIVE_FETCH", "true").lower() == "true" + offline_only = args.offline or os.getenv("OFFLINE_SNAPSHOT", "false").lower() == "true" + + output_root = Path(args.out_dir) + output_dir = output_root / run_date + output_dir.mkdir(parents=True, exist_ok=True) + + previous_path = find_previous_snapshot(output_root, run_date) + + gateway_host = os.getenv("FEED_GATEWAY_HOST", DEFAULT_GATEWAY_HOST) + gateway_scheme = os.getenv("FEED_GATEWAY_SCHEME", DEFAULT_GATEWAY_SCHEME) + + def resolve_feed(url_env: str, default_url: str) -> str: + if url_env: + return url_env + parsed = urlparse(default_url) + # Replace host/scheme to allow on-prem DNS (docker network) defaults. + rewritten = parsed._replace(netloc=gateway_host, scheme=gateway_scheme) + return urlunparse(rewritten) + + resolved_icscisa_url = resolve_feed(os.getenv("ICSCISA_FEED_URL"), DEFAULT_ICSCISA_URL) + resolved_kisa_url = resolve_feed(os.getenv("KISA_FEED_URL"), DEFAULT_KISA_URL) + + records, status = build_records( + run_id=run_id, + fetched_at=fetched_at, + live_fetch=live_fetch, + offline_only=offline_only, + icscisa_url=resolved_icscisa_url, + kisa_url=resolved_kisa_url, + ) + + write_ndjson(records, output_dir / "advisories.ndjson") + + delta = compute_delta(records, previous_path) + delta_payload = { + "run_id": run_id, + "generated_at": iso(utcnow()), + **delta, + "previous_snapshot_sha256": load_previous_hash(previous_path), + } + (output_dir / "delta.json").write_text(json.dumps(delta_payload, separators=(",", ":")) + "\n", encoding="utf-8") + + end = iso(utcnow()) + write_fetch_log( + output_dir / "fetch.log", + run_id, + start, + end, + status, + gateway_host=gateway_host, + gateway_scheme=gateway_scheme, + icscisa_url=resolved_icscisa_url, + kisa_url=resolved_kisa_url, + live_fetch=live_fetch and not offline_only, + offline_only=offline_only, + ) + write_hashes(output_dir) + + print(f"[ok] wrote {len(records)} advisories to {output_dir}") + print(f" run_id={run_id} live_fetch={live_fetch and not offline_only} offline_only={offline_only}") + print(f" gateway={gateway_scheme}://{gateway_host}") + print(f" icscisa_url={resolved_icscisa_url}") + print(f" kisa_url={resolved_kisa_url}") + print(f" status={status}") + if previous_path: + print(f" previous_snapshot={previous_path}") + + +if __name__ == "__main__": + main() diff --git a/scripts/mirror/README.md b/scripts/mirror/README.md index 5d9c2922e..0de60600f 100644 --- a/scripts/mirror/README.md +++ b/scripts/mirror/README.md @@ -7,6 +7,8 @@ - `verify_oci_layout.py`: validates OCI layout/index/manifest and blob digests when `OCI=1` is used. - `mirror-create.sh`: convenience wrapper to build + verify thin bundles (optional SIGN_KEY, time anchor, OCI flag). - `mirror-verify.sh`: wrapper around `verify_thin_bundle.py` for quick hash/DSSE checks. -- `schedule-export-center-run.sh`: schedules an Export Center run for mirror bundles via HTTP POST; set `EXPORT_CENTER_BASE_URL`, `EXPORT_CENTER_TENANT`, `EXPORT_CENTER_TOKEN` (Bearer), optional `EXPORT_CENTER_PROJECT`; logs to `AUDIT_LOG_PATH` (default `logs/export-center-schedule.log`). 
+- `schedule-export-center-run.sh`: schedules an Export Center run for mirror bundles via HTTP POST; set `EXPORT_CENTER_BASE_URL`, `EXPORT_CENTER_TENANT`, `EXPORT_CENTER_TOKEN` (Bearer), optional `EXPORT_CENTER_PROJECT`; logs to `AUDIT_LOG_PATH` (default `logs/export-center-schedule.log`). Set `EXPORT_CENTER_ARTIFACTS_JSON` to inject bundle metadata into the request payload. +- `export-center-wire.sh`: builds `export-center-handoff.json` from `out/mirror/thin/milestone.json`, emits recommended Export Center targets, and (when `EXPORT_CENTER_AUTO_SCHEDULE=1`) calls `schedule-export-center-run.sh` to push the run. Outputs live under `out/mirror/thin/export-center/`. + - CI: `.gitea/workflows/mirror-sign.yml` runs this script after signing; scheduling remains opt-in via secrets `EXPORT_CENTER_BASE_URL`, `EXPORT_CENTER_TOKEN`, `EXPORT_CENTER_TENANT`, `EXPORT_CENTER_PROJECT`, `EXPORT_CENTER_AUTO_SCHEDULE`. Artifacts live under `out/mirror/thin/`. diff --git a/scripts/mirror/export-center-wire.sh b/scripts/mirror/export-center-wire.sh new file mode 100755 index 000000000..e4a256835 --- /dev/null +++ b/scripts/mirror/export-center-wire.sh @@ -0,0 +1,122 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Prepare Export Center handoff metadata for mirror thin bundles and optionally schedule a run. +# Usage (handoff only): +# scripts/mirror/export-center-wire.sh +# Usage (handoff + schedule when secrets exist): +# EXPORT_CENTER_BASE_URL=https://export.example.com \ +# EXPORT_CENTER_TOKEN=token123 \ +# EXPORT_CENTER_TENANT=tenant-a \ +# EXPORT_CENTER_AUTO_SCHEDULE=1 \ +# scripts/mirror/export-center-wire.sh +# Inputs: +# - MILESTONE_PATH: path to milestone.json (default: out/mirror/thin/milestone.json) +# - EXPORT_CENTER_OUT_DIR: output directory for handoff files (default: out/mirror/thin/export-center) +# - EXPORT_CENTER_PROFILE_ID: profile identifier for the Export Center run (default: mirror:thin) +# - EXPORT_CENTER_TARGETS_JSON: override targets array sent to Export Center (JSON array string) +# - EXPORT_CENTER_FORMATS_JSON: override formats array (JSON array string; default: ["tar.gz","json","dsse"]) +# - EXPORT_CENTER_AUTO_SCHEDULE: when "1", schedule a run using schedule-export-center-run.sh +# - EXPORT_CENTER_BASE_URL / EXPORT_CENTER_TENANT / EXPORT_CENTER_PROJECT / EXPORT_CENTER_TOKEN: forwarded to scheduler +# - EXPORT_CENTER_AUDIT_LOG: optional override for scheduler audit log path + +MILESTONE_PATH="${MILESTONE_PATH:-out/mirror/thin/milestone.json}" +OUT_DIR="${EXPORT_CENTER_OUT_DIR:-out/mirror/thin/export-center}" +PROFILE_ID="${EXPORT_CENTER_PROFILE_ID:-mirror:thin}" +FORMATS_JSON="${EXPORT_CENTER_FORMATS_JSON:-[\"tar.gz\",\"json\",\"dsse\"]}" +AUTO_SCHEDULE="${EXPORT_CENTER_AUTO_SCHEDULE:-0}" + +HANDOFF_PATH="${OUT_DIR}/export-center-handoff.json" +TARGETS_PATH="${OUT_DIR}/export-center-targets.json" +RESPONSE_PATH="${OUT_DIR}/schedule-response.json" + +export HANDOFF_PATH TARGETS_PATH RESPONSE_PATH PROFILE_ID MILESTONE_PATH + +mkdir -p "${OUT_DIR}" + +PROFILE_ID="${PROFILE_ID}" MILESTONE_PATH="${MILESTONE_PATH}" HANDOFF_PATH="${HANDOFF_PATH}" TARGETS_PATH="${TARGETS_PATH}" python3 - <<'PY' +import datetime +import json +import os +import sys +from typing import Dict, Any + +milestone_path = os.environ["MILESTONE_PATH"] +handoff_path = os.environ["HANDOFF_PATH"] +targets_path = os.environ["TARGETS_PATH"] +profile = os.environ.get("PROFILE_ID", "mirror:thin") + +try: + with open(milestone_path, encoding="utf-8") as f: + milestone = json.load(f) +except FileNotFoundError: + 
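# milestone.json is produced by the preceding mirror signing step; without it
+    # there is nothing to hand off, so report and exit non-zero.
+    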
print(f"milestone file not found: {milestone_path}", file=sys.stderr) + sys.exit(1) + +artifacts = [] + +def add_artifact(name: str, entry: Dict[str, Any] | None) -> None: + if not isinstance(entry, dict): + return + path = entry.get("path") + sha = entry.get("sha256") + if path and sha: + artifacts.append({"name": name, "path": path, "sha256": sha}) + +add_artifact("manifest", milestone.get("manifest")) +add_artifact("manifest_dsse", milestone.get("dsse")) +add_artifact("bundle", milestone.get("tarball")) +add_artifact("bundle_meta", milestone.get("bundle")) +add_artifact("bundle_meta_dsse", milestone.get("bundle_dsse")) +add_artifact("time_anchor", milestone.get("time_anchor")) + +for name, entry in sorted((milestone.get("policies") or {}).items()): + add_artifact(f"policy_{name}", entry) + +handoff = { + "profileId": profile, + "generatedAt": datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0).isoformat().replace("+00:00", "Z"), + "sourceMilestone": os.path.abspath(milestone_path), + "artifacts": artifacts, +} + +with open(handoff_path, "w", encoding="utf-8") as f: + json.dump(handoff, f, indent=2) + +with open(targets_path, "w", encoding="utf-8") as f: + json.dump([a["name"] for a in artifacts], f) +PY + +ARTIFACTS_JSON=$(python3 - <<'PY' +import json +import os +with open(os.environ["HANDOFF_PATH"], encoding="utf-8") as f: + data = json.load(f) +print(json.dumps(data.get("artifacts", []))) +PY +) +ARTIFACTS_JSON="${ARTIFACTS_JSON//$'\n'/}" + +TARGETS_JSON_DEFAULT=$(tr -d '\r\n' < "${TARGETS_PATH}") +TARGETS_JSON="${EXPORT_CENTER_TARGETS_JSON:-$TARGETS_JSON_DEFAULT}" + +echo "[info] Export Center handoff written to ${HANDOFF_PATH}" +echo "[info] Recommended targets: ${TARGETS_JSON}" + +schedule_note="AUTO_SCHEDULE=0" +if [[ "${AUTO_SCHEDULE}" == "1" ]]; then + schedule_note="missing EXPORT_CENTER_BASE_URL" + if [[ -n "${EXPORT_CENTER_BASE_URL:-}" ]]; then + export EXPORT_CENTER_ARTIFACTS_JSON="${ARTIFACTS_JSON}" + schedule_note="scheduled" + bash src/Mirror/StellaOps.Mirror.Creator/schedule-export-center-run.sh "${PROFILE_ID}" "${TARGETS_JSON}" "${FORMATS_JSON}" | tee "${RESPONSE_PATH}" + fi +fi + +if [[ ! 
-f "${RESPONSE_PATH}" ]]; then + cat > "${RESPONSE_PATH}" <.Instance)); } - private void Dispose(bool disposing) - { - Environment.SetEnvironmentVariable("SM_SOFT_ALLOWED", _gate); - } - public void Dispose() { - Dispose(true); + Environment.SetEnvironmentVariable("SM_SOFT_ALLOWED", _gate); } } diff --git a/src/Attestor/StellaOps.Attestor/StellaOps.Attestor.WebService/Program.cs b/src/Attestor/StellaOps.Attestor/StellaOps.Attestor.WebService/Program.cs index d06b8462c..c0022475b 100644 --- a/src/Attestor/StellaOps.Attestor/StellaOps.Attestor.WebService/Program.cs +++ b/src/Attestor/StellaOps.Attestor/StellaOps.Attestor.WebService/Program.cs @@ -25,6 +25,7 @@ using StellaOps.Attestor.WebService.Contracts; using StellaOps.Attestor.Core.Bulk; using Microsoft.AspNetCore.Server.Kestrel.Https; using Serilog.Context; +using StellaOps.Cryptography.DependencyInjection; const string ConfigurationSection = "attestor"; @@ -52,6 +53,7 @@ var clientCertificateAuthorities = LoadClientCertificateAuthorities(attestorOpti builder.Services.AddSingleton(TimeProvider.System); builder.Services.AddSingleton(attestorOptions); +builder.Services.AddStellaOpsCryptoRu(builder.Configuration, CryptoProviderRegistryValidator.EnforceRuLinuxDefaults); builder.Services.AddRateLimiter(options => { diff --git a/src/Authority/StellaOps.Authority/StellaOps.Authority/Program.cs b/src/Authority/StellaOps.Authority/StellaOps.Authority/Program.cs index b796ea44d..2b3c427d7 100644 --- a/src/Authority/StellaOps.Authority/StellaOps.Authority/Program.cs +++ b/src/Authority/StellaOps.Authority/StellaOps.Authority/Program.cs @@ -116,7 +116,8 @@ builder.Host.UseSerilog((context, _, loggerConfiguration) => }); var authorityOptions = authorityConfiguration.Options; -builder.Services.AddStellaOpsCrypto(authorityOptions.Crypto); +CryptoProviderRegistryValidator.EnforceRuLinuxDefaults(authorityOptions.Crypto.Registry); +builder.Services.AddStellaOpsCryptoRu(builder.Configuration, CryptoProviderRegistryValidator.EnforceRuLinuxDefaults); builder.Services.AddHostedService(); var issuerUri = authorityOptions.Issuer; if (issuerUri is null) diff --git a/src/Concelier/Directory.Build.props b/src/Concelier/Directory.Build.props index 9c6ac015a..fb9946313 100644 --- a/src/Concelier/Directory.Build.props +++ b/src/Concelier/Directory.Build.props @@ -9,6 +9,7 @@ + diff --git a/src/Concelier/StellaOps.Concelier.WebService/AGENTS.md b/src/Concelier/StellaOps.Concelier.WebService/AGENTS.md index ecb1af772..5e992a3b9 100644 --- a/src/Concelier/StellaOps.Concelier.WebService/AGENTS.md +++ b/src/Concelier/StellaOps.Concelier.WebService/AGENTS.md @@ -2,13 +2,13 @@ ## Role Minimal API host wiring configuration, storage, plugin routines, and job endpoints. Operational surface for health, readiness, and job control. ## Scope -- Configuration: appsettings.json + etc/concelier.yaml (yaml path = ../etc/concelier.yaml); bind into ConcelierOptions with validation (Only Mongo supported). -- Mongo: MongoUrl from options.Storage.Dsn; IMongoClient/IMongoDatabase singletons; default database name fallback (options -> URL -> "concelier"). -- Services: AddMongoStorage(); AddSourceHttpClients(); RegisterPluginRoutines(configuration, PluginHostOptions). -- Bootstrap: MongoBootstrapper.InitializeAsync on startup. +- Configuration: appsettings.json + etc/concelier.yaml (yaml path = ../etc/concelier.yaml); bind into ConcelierOptions with PostgreSQL storage enabled by default. +- Storage: PostgreSQL only (`Concelier:PostgresStorage:*`). 
No MongoDB/Mongo2Go; readiness probes issue `SELECT 1` against ConcelierDataSource. +- Services: AddConcelierPostgresStorage(); AddSourceHttpClients(); RegisterPluginRoutines(configuration, PluginHostOptions). +- Bootstrap: PostgreSQL connectivity verified on startup. - Endpoints (configuration & job control only; root path intentionally unbound): - GET /health -> {status:"healthy"} after options validation binds. - - GET /ready -> MongoDB ping; 503 on MongoException/Timeout. + - GET /ready -> PostgreSQL connectivity check; degraded if connection fails. - GET /jobs?kind=&limit= -> recent runs. - GET /jobs/{id} -> run detail. - GET /jobs/definitions -> definitions with lastRun. @@ -18,7 +18,7 @@ Minimal API host wiring configuration, storage, plugin routines, and job endpoin - POST /jobs/{*jobKind} with {trigger?,parameters?} -> 202 Accepted (Location:/jobs/{runId}) | 404 | 409 | 423. - PluginHost defaults: BaseDirectory = solution root; PluginsDirectory = "StellaOps.Concelier.PluginBinaries"; SearchPatterns += "StellaOps.Concelier.Plugin.*.dll"; EnsureDirectoryExists = true. ## Participants -- Core job system; Storage.Mongo; Source.Common HTTP clients; Exporter and Connector plugin routines discover/register jobs. +- Core job system; Storage.Postgres; Source.Common HTTP clients; Exporter and Connector plugin routines discover/register jobs. ## Interfaces & contracts - Dependency injection boundary for all connectors/exporters; IOptions validated on start. - Cancellation: pass app.Lifetime.ApplicationStopping to bootstrapper. @@ -30,7 +30,7 @@ Out: business logic of jobs, HTML UI, authn/z (future). - Structured responses with status codes; no stack traces in HTTP bodies; errors mapped cleanly. ## Tests - Author and review coverage in `../StellaOps.Concelier.WebService.Tests`. -- Shared fixtures (e.g., `MongoIntegrationFixture`, `ConnectorTestHarness`) live in `../StellaOps.Concelier.Testing`. +- Shared fixtures (PostgreSQL-backed harnesses) live in `../StellaOps.Concelier.Testing`. - Keep fixtures deterministic; match new cases to real-world advisories or regression scenarios. ## Required Reading diff --git a/src/Concelier/StellaOps.Concelier.WebService/Diagnostics/HealthContracts.cs b/src/Concelier/StellaOps.Concelier.WebService/Diagnostics/HealthContracts.cs index 66f70931e..d7d969545 100644 --- a/src/Concelier/StellaOps.Concelier.WebService/Diagnostics/HealthContracts.cs +++ b/src/Concelier/StellaOps.Concelier.WebService/Diagnostics/HealthContracts.cs @@ -1,10 +1,11 @@ namespace StellaOps.Concelier.WebService.Diagnostics; -internal sealed record StorageBootstrapHealth( - string Driver, - bool Completed, - DateTimeOffset? CompletedAt, - double? DurationMs); +internal sealed record StorageHealth( + string Backend, + bool Ready, + DateTimeOffset? CheckedAt, + double? LatencyMs, + string? Error); internal sealed record TelemetryHealth( bool Enabled, @@ -16,17 +17,11 @@ internal sealed record HealthDocument( string Status, DateTimeOffset StartedAt, double UptimeSeconds, - StorageBootstrapHealth Storage, + StorageHealth Storage, TelemetryHealth Telemetry); -internal sealed record MongoReadyHealth( - string Status, - double? LatencyMs, - DateTimeOffset? CheckedAt, - string? 
Error); - internal sealed record ReadyDocument( string Status, DateTimeOffset StartedAt, double UptimeSeconds, - MongoReadyHealth Mongo); + StorageHealth Storage); diff --git a/src/Concelier/StellaOps.Concelier.WebService/Diagnostics/ServiceStatus.cs b/src/Concelier/StellaOps.Concelier.WebService/Diagnostics/ServiceStatus.cs index a7c6fbda0..0a6fe3b7c 100644 --- a/src/Concelier/StellaOps.Concelier.WebService/Diagnostics/ServiceStatus.cs +++ b/src/Concelier/StellaOps.Concelier.WebService/Diagnostics/ServiceStatus.cs @@ -11,8 +11,8 @@ internal sealed class ServiceStatus private DateTimeOffset? _bootstrapCompletedAt; private TimeSpan? _bootstrapDuration; private DateTimeOffset? _lastReadyCheckAt; - private TimeSpan? _lastMongoLatency; - private string? _lastMongoError; + private TimeSpan? _lastStorageLatency; + private string? _lastStorageError; private bool _lastReadySucceeded; public ServiceStatus(TimeProvider timeProvider) @@ -31,8 +31,8 @@ internal sealed class ServiceStatus BootstrapCompletedAt: _bootstrapCompletedAt, BootstrapDuration: _bootstrapDuration, LastReadyCheckAt: _lastReadyCheckAt, - LastMongoLatency: _lastMongoLatency, - LastMongoError: _lastMongoError, + LastStorageLatency: _lastStorageLatency, + LastStorageError: _lastStorageError, LastReadySucceeded: _lastReadySucceeded); } } @@ -45,19 +45,19 @@ internal sealed class ServiceStatus _bootstrapCompletedAt = completedAt; _bootstrapDuration = duration; _lastReadySucceeded = true; - _lastMongoLatency = duration; - _lastMongoError = null; + _lastStorageLatency = duration; + _lastStorageError = null; _lastReadyCheckAt = completedAt; } } - public void RecordMongoCheck(bool success, TimeSpan latency, string? error) + public void RecordStorageCheck(bool success, TimeSpan latency, string? error) { lock (_sync) { _lastReadySucceeded = success; - _lastMongoLatency = latency; - _lastMongoError = success ? null : error; + _lastStorageLatency = latency; + _lastStorageError = success ? null : error; _lastReadyCheckAt = _timeProvider.GetUtcNow(); } } @@ -69,6 +69,6 @@ internal sealed record ServiceHealthSnapshot( DateTimeOffset? BootstrapCompletedAt, TimeSpan? BootstrapDuration, DateTimeOffset? LastReadyCheckAt, - TimeSpan? LastMongoLatency, - string? LastMongoError, + TimeSpan? LastStorageLatency, + string? 
LastStorageError, bool LastReadySucceeded); diff --git a/src/Concelier/StellaOps.Concelier.WebService/Extensions/TelemetryExtensions.cs b/src/Concelier/StellaOps.Concelier.WebService/Extensions/TelemetryExtensions.cs index b37ee8d6b..02c152d1a 100644 --- a/src/Concelier/StellaOps.Concelier.WebService/Extensions/TelemetryExtensions.cs +++ b/src/Concelier/StellaOps.Concelier.WebService/Extensions/TelemetryExtensions.cs @@ -1,71 +1,71 @@ -using System.Collections.Generic; -using System.Diagnostics; -using System.Linq; -using System.Reflection; -using Microsoft.AspNetCore.Builder; -using Microsoft.Extensions.DependencyInjection; -using OpenTelemetry.Metrics; -using OpenTelemetry.Resources; -using OpenTelemetry.Trace; -using Serilog; -using Serilog.Core; -using Serilog.Events; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Reflection; +using Microsoft.AspNetCore.Builder; +using Microsoft.Extensions.DependencyInjection; +using OpenTelemetry.Metrics; +using OpenTelemetry.Resources; +using OpenTelemetry.Trace; +using Serilog; +using Serilog.Core; +using Serilog.Events; using StellaOps.Concelier.Core.Jobs; using StellaOps.Concelier.Connector.Common.Telemetry; using StellaOps.Concelier.WebService.Diagnostics; using StellaOps.Concelier.WebService.Options; using StellaOps.Ingestion.Telemetry; - -namespace StellaOps.Concelier.WebService.Extensions; - -public static class TelemetryExtensions -{ - public static void ConfigureConcelierTelemetry(this WebApplicationBuilder builder, ConcelierOptions options) - { - ArgumentNullException.ThrowIfNull(builder); - ArgumentNullException.ThrowIfNull(options); - - var telemetry = options.Telemetry ?? new ConcelierOptions.TelemetryOptions(); - - if (telemetry.EnableLogging) - { - builder.Host.UseSerilog((context, services, configuration) => - { - ConfigureSerilog(configuration, telemetry, builder.Environment.EnvironmentName, builder.Environment.ApplicationName); - }); - } - - if (!telemetry.Enabled || (!telemetry.EnableTracing && !telemetry.EnableMetrics)) - { - return; - } - - var openTelemetry = builder.Services.AddOpenTelemetry(); - - openTelemetry.ConfigureResource(resource => - { - var serviceName = telemetry.ServiceName ?? builder.Environment.ApplicationName; - var version = Assembly.GetExecutingAssembly().GetName().Version?.ToString() ?? "unknown"; - - resource.AddService(serviceName, serviceVersion: version, serviceInstanceId: Environment.MachineName); - resource.AddAttributes(new[] - { - new KeyValuePair("deployment.environment", builder.Environment.EnvironmentName), - }); - - foreach (var attribute in telemetry.ResourceAttributes) - { - if (string.IsNullOrWhiteSpace(attribute.Key) || attribute.Value is null) - { - continue; - } - - resource.AddAttributes(new[] { new KeyValuePair(attribute.Key, attribute.Value) }); - } - }); - - if (telemetry.EnableTracing) - { + +namespace StellaOps.Concelier.WebService.Extensions; + +public static class TelemetryExtensions +{ + public static void ConfigureConcelierTelemetry(this WebApplicationBuilder builder, ConcelierOptions options) + { + ArgumentNullException.ThrowIfNull(builder); + ArgumentNullException.ThrowIfNull(options); + + var telemetry = options.Telemetry ?? 
new ConcelierOptions.TelemetryOptions(); + + if (telemetry.EnableLogging) + { + builder.Host.UseSerilog((context, services, configuration) => + { + ConfigureSerilog(configuration, telemetry, builder.Environment.EnvironmentName, builder.Environment.ApplicationName); + }); + } + + if (!telemetry.Enabled || (!telemetry.EnableTracing && !telemetry.EnableMetrics)) + { + return; + } + + var openTelemetry = builder.Services.AddOpenTelemetry(); + + openTelemetry.ConfigureResource(resource => + { + var serviceName = telemetry.ServiceName ?? builder.Environment.ApplicationName; + var version = Assembly.GetExecutingAssembly().GetName().Version?.ToString() ?? "unknown"; + + resource.AddService(serviceName, serviceVersion: version, serviceInstanceId: Environment.MachineName); + resource.AddAttributes(new[] + { + new KeyValuePair("deployment.environment", builder.Environment.EnvironmentName), + }); + + foreach (var attribute in telemetry.ResourceAttributes) + { + if (string.IsNullOrWhiteSpace(attribute.Key) || attribute.Value is null) + { + continue; + } + + resource.AddAttributes(new[] { new KeyValuePair(attribute.Key, attribute.Value) }); + } + }); + + if (telemetry.EnableTracing) + { openTelemetry.WithTracing(tracing => { tracing @@ -74,15 +74,15 @@ public static class TelemetryExtensions .AddSource(IngestionTelemetry.ActivitySourceName) .AddAspNetCoreInstrumentation() .AddHttpClientInstrumentation(); - - ConfigureExporters(telemetry, tracing); - }); - } - - if (telemetry.EnableMetrics) - { - openTelemetry.WithMetrics(metrics => - { + + ConfigureExporters(telemetry, tracing); + }); + } + + if (telemetry.EnableMetrics) + { + openTelemetry.WithMetrics(metrics => + { metrics .AddMeter(JobDiagnostics.MeterName) .AddMeter(SourceDiagnostics.MeterName) @@ -92,131 +92,132 @@ public static class TelemetryExtensions .AddMeter("StellaOps.Concelier.Connector.Vndr.Chromium") .AddMeter("StellaOps.Concelier.Connector.Vndr.Apple") .AddMeter("StellaOps.Concelier.Connector.Vndr.Adobe") + .AddMeter("StellaOps.Concelier.VulnExplorer") .AddMeter(JobMetrics.MeterName) .AddAspNetCoreInstrumentation() .AddHttpClientInstrumentation() .AddRuntimeInstrumentation(); - - ConfigureExporters(telemetry, metrics); - }); - } - } - - private static void ConfigureSerilog(LoggerConfiguration configuration, ConcelierOptions.TelemetryOptions telemetry, string environmentName, string applicationName) - { - if (!Enum.TryParse(telemetry.MinimumLogLevel, ignoreCase: true, out LogEventLevel level)) - { - level = LogEventLevel.Information; - } - - configuration - .MinimumLevel.Is(level) - .MinimumLevel.Override("Microsoft", LogEventLevel.Warning) - .MinimumLevel.Override("Microsoft.Hosting.Lifetime", LogEventLevel.Information) - .Enrich.FromLogContext() - .Enrich.With() - .Enrich.WithProperty("service.name", telemetry.ServiceName ?? 
applicationName) - .Enrich.WithProperty("deployment.environment", environmentName) - .WriteTo.Console(outputTemplate: "[{Timestamp:O}] [{Level:u3}] {Message:lj} {Properties}{NewLine}{Exception}"); - } - - private static void ConfigureExporters(ConcelierOptions.TelemetryOptions telemetry, TracerProviderBuilder tracing) - { - if (string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint)) - { - if (telemetry.ExportConsole) - { - tracing.AddConsoleExporter(); - } - - return; - } - - tracing.AddOtlpExporter(options => - { - options.Endpoint = new Uri(telemetry.OtlpEndpoint); - var headers = BuildHeaders(telemetry); - if (!string.IsNullOrEmpty(headers)) - { - options.Headers = headers; - } - }); - - if (telemetry.ExportConsole) - { - tracing.AddConsoleExporter(); - } - } - - private static void ConfigureExporters(ConcelierOptions.TelemetryOptions telemetry, MeterProviderBuilder metrics) - { - if (string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint)) - { - if (telemetry.ExportConsole) - { - metrics.AddConsoleExporter(); - } - - return; - } - - metrics.AddOtlpExporter(options => - { - options.Endpoint = new Uri(telemetry.OtlpEndpoint); - var headers = BuildHeaders(telemetry); - if (!string.IsNullOrEmpty(headers)) - { - options.Headers = headers; - } - }); - - if (telemetry.ExportConsole) - { - metrics.AddConsoleExporter(); - } - } - - private static string? BuildHeaders(ConcelierOptions.TelemetryOptions telemetry) - { - if (telemetry.OtlpHeaders.Count == 0) - { - return null; - } - - return string.Join(",", telemetry.OtlpHeaders - .Where(static kvp => !string.IsNullOrWhiteSpace(kvp.Key) && !string.IsNullOrWhiteSpace(kvp.Value)) - .Select(static kvp => $"{kvp.Key}={kvp.Value}")); - } -} - -internal sealed class ActivityEnricher : ILogEventEnricher -{ - public void Enrich(LogEvent logEvent, ILogEventPropertyFactory propertyFactory) - { - var activity = Activity.Current; - if (activity is null) - { - return; - } - - if (activity.TraceId != default) - { - logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("trace_id", activity.TraceId.ToString())); - } - - if (activity.SpanId != default) - { - logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("span_id", activity.SpanId.ToString())); - } - - if (activity.ParentSpanId != default) - { - logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("parent_span_id", activity.ParentSpanId.ToString())); - } - - if (!string.IsNullOrEmpty(activity.TraceStateString)) - { - logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("trace_state", activity.TraceStateString)); - } - } -} + + ConfigureExporters(telemetry, metrics); + }); + } + } + + private static void ConfigureSerilog(LoggerConfiguration configuration, ConcelierOptions.TelemetryOptions telemetry, string environmentName, string applicationName) + { + if (!Enum.TryParse(telemetry.MinimumLogLevel, ignoreCase: true, out LogEventLevel level)) + { + level = LogEventLevel.Information; + } + + configuration + .MinimumLevel.Is(level) + .MinimumLevel.Override("Microsoft", LogEventLevel.Warning) + .MinimumLevel.Override("Microsoft.Hosting.Lifetime", LogEventLevel.Information) + .Enrich.FromLogContext() + .Enrich.With() + .Enrich.WithProperty("service.name", telemetry.ServiceName ?? 
applicationName) + .Enrich.WithProperty("deployment.environment", environmentName) + .WriteTo.Console(outputTemplate: "[{Timestamp:O}] [{Level:u3}] {Message:lj} {Properties}{NewLine}{Exception}"); + } + + private static void ConfigureExporters(ConcelierOptions.TelemetryOptions telemetry, TracerProviderBuilder tracing) + { + if (string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint)) + { + if (telemetry.ExportConsole) + { + tracing.AddConsoleExporter(); + } + + return; + } + + tracing.AddOtlpExporter(options => + { + options.Endpoint = new Uri(telemetry.OtlpEndpoint); + var headers = BuildHeaders(telemetry); + if (!string.IsNullOrEmpty(headers)) + { + options.Headers = headers; + } + }); + + if (telemetry.ExportConsole) + { + tracing.AddConsoleExporter(); + } + } + + private static void ConfigureExporters(ConcelierOptions.TelemetryOptions telemetry, MeterProviderBuilder metrics) + { + if (string.IsNullOrWhiteSpace(telemetry.OtlpEndpoint)) + { + if (telemetry.ExportConsole) + { + metrics.AddConsoleExporter(); + } + + return; + } + + metrics.AddOtlpExporter(options => + { + options.Endpoint = new Uri(telemetry.OtlpEndpoint); + var headers = BuildHeaders(telemetry); + if (!string.IsNullOrEmpty(headers)) + { + options.Headers = headers; + } + }); + + if (telemetry.ExportConsole) + { + metrics.AddConsoleExporter(); + } + } + + private static string? BuildHeaders(ConcelierOptions.TelemetryOptions telemetry) + { + if (telemetry.OtlpHeaders.Count == 0) + { + return null; + } + + return string.Join(",", telemetry.OtlpHeaders + .Where(static kvp => !string.IsNullOrWhiteSpace(kvp.Key) && !string.IsNullOrWhiteSpace(kvp.Value)) + .Select(static kvp => $"{kvp.Key}={kvp.Value}")); + } +} + +internal sealed class ActivityEnricher : ILogEventEnricher +{ + public void Enrich(LogEvent logEvent, ILogEventPropertyFactory propertyFactory) + { + var activity = Activity.Current; + if (activity is null) + { + return; + } + + if (activity.TraceId != default) + { + logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("trace_id", activity.TraceId.ToString())); + } + + if (activity.SpanId != default) + { + logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("span_id", activity.SpanId.ToString())); + } + + if (activity.ParentSpanId != default) + { + logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("parent_span_id", activity.ParentSpanId.ToString())); + } + + if (!string.IsNullOrEmpty(activity.TraceStateString)) + { + logEvent.AddPropertyIfAbsent(propertyFactory.CreateProperty("trace_state", activity.TraceStateString)); + } + } +} diff --git a/src/Concelier/StellaOps.Concelier.WebService/Options/ConcelierOptions.cs b/src/Concelier/StellaOps.Concelier.WebService/Options/ConcelierOptions.cs index a8056f8ef..d02a3c067 100644 --- a/src/Concelier/StellaOps.Concelier.WebService/Options/ConcelierOptions.cs +++ b/src/Concelier/StellaOps.Concelier.WebService/Options/ConcelierOptions.cs @@ -7,9 +7,13 @@ namespace StellaOps.Concelier.WebService.Options; public sealed class ConcelierOptions { + [Obsolete("Mongo storage has been removed; use PostgresStorage.")] public StorageOptions Storage { get; set; } = new(); - public PostgresStorageOptions? PostgresStorage { get; set; } + public PostgresStorageOptions? 
PostgresStorage { get; set; } = new PostgresStorageOptions + { + Enabled = true + }; public PluginOptions Plugins { get; set; } = new(); @@ -33,6 +37,7 @@ public sealed class ConcelierOptions /// public AirGapOptions AirGap { get; set; } = new(); + [Obsolete("Mongo storage has been removed; use PostgresStorage.")] public sealed class StorageOptions { public string Driver { get; set; } = "mongo"; diff --git a/src/Concelier/StellaOps.Concelier.WebService/Options/ConcelierOptionsValidator.cs b/src/Concelier/StellaOps.Concelier.WebService/Options/ConcelierOptionsValidator.cs index 253039925..c02ca8772 100644 --- a/src/Concelier/StellaOps.Concelier.WebService/Options/ConcelierOptionsValidator.cs +++ b/src/Concelier/StellaOps.Concelier.WebService/Options/ConcelierOptionsValidator.cs @@ -2,30 +2,17 @@ using System; using System.Collections.Generic; using Microsoft.Extensions.Logging; using StellaOps.Auth.Abstractions; - -namespace StellaOps.Concelier.WebService.Options; - -public static class ConcelierOptionsValidator -{ - public static void Validate(ConcelierOptions options) - { - ArgumentNullException.ThrowIfNull(options); - - if (!string.Equals(options.Storage.Driver, "mongo", StringComparison.OrdinalIgnoreCase)) - { - throw new InvalidOperationException("Only Mongo storage driver is supported (storage.driver == 'mongo')."); - } - - if (string.IsNullOrWhiteSpace(options.Storage.Dsn)) - { - throw new InvalidOperationException("Storage DSN must be configured."); - } - - if (options.Storage.CommandTimeoutSeconds <= 0) - { - throw new InvalidOperationException("Command timeout must be greater than zero seconds."); - } - + +namespace StellaOps.Concelier.WebService.Options; + +public static class ConcelierOptionsValidator +{ + public static void Validate(ConcelierOptions options) + { + ArgumentNullException.ThrowIfNull(options); + + ValidatePostgres(options); + options.Telemetry ??= new ConcelierOptions.TelemetryOptions(); options.Authority ??= new ConcelierOptions.AuthorityOptions(); @@ -107,25 +94,25 @@ public static class ConcelierOptionsValidator } } } - - if (!Enum.TryParse(options.Telemetry.MinimumLogLevel, ignoreCase: true, out LogLevel _)) - { - throw new InvalidOperationException($"Telemetry minimum log level '{options.Telemetry.MinimumLogLevel}' is invalid."); - } - - if (!string.IsNullOrWhiteSpace(options.Telemetry.OtlpEndpoint) && !Uri.TryCreate(options.Telemetry.OtlpEndpoint, UriKind.Absolute, out _)) - { - throw new InvalidOperationException("Telemetry OTLP endpoint must be an absolute URI."); - } - - foreach (var attribute in options.Telemetry.ResourceAttributes) - { - if (string.IsNullOrWhiteSpace(attribute.Key)) - { - throw new InvalidOperationException("Telemetry resource attribute keys must be non-empty."); - } - } - + + if (!Enum.TryParse(options.Telemetry.MinimumLogLevel, ignoreCase: true, out LogLevel _)) + { + throw new InvalidOperationException($"Telemetry minimum log level '{options.Telemetry.MinimumLogLevel}' is invalid."); + } + + if (!string.IsNullOrWhiteSpace(options.Telemetry.OtlpEndpoint) && !Uri.TryCreate(options.Telemetry.OtlpEndpoint, UriKind.Absolute, out _)) + { + throw new InvalidOperationException("Telemetry OTLP endpoint must be an absolute URI."); + } + + foreach (var attribute in options.Telemetry.ResourceAttributes) + { + if (string.IsNullOrWhiteSpace(attribute.Key)) + { + throw new InvalidOperationException("Telemetry resource attribute keys must be non-empty."); + } + } + foreach (var header in options.Telemetry.OtlpHeaders) { if 
(string.IsNullOrWhiteSpace(header.Key)) @@ -333,4 +320,50 @@ public static class ConcelierOptionsValidator throw new InvalidOperationException("Evidence bundle pipelineVersion must be provided."); } } + + private static void ValidatePostgres(ConcelierOptions options) + { + var postgres = options.PostgresStorage ?? new ConcelierOptions.PostgresStorageOptions(); + options.PostgresStorage = postgres; + + if (!postgres.Enabled) + { + throw new InvalidOperationException("PostgreSQL storage must be enabled (postgresStorage.enabled)."); + } + + if (string.IsNullOrWhiteSpace(postgres.ConnectionString)) + { + throw new InvalidOperationException("PostgreSQL connectionString must be configured (postgresStorage.connectionString)."); + } + + if (postgres.CommandTimeoutSeconds <= 0) + { + throw new InvalidOperationException("PostgreSQL commandTimeoutSeconds must be greater than zero."); + } + + if (postgres.MaxPoolSize < 1) + { + throw new InvalidOperationException("PostgreSQL maxPoolSize must be greater than zero."); + } + + if (postgres.MinPoolSize < 0 || postgres.MinPoolSize > postgres.MaxPoolSize) + { + throw new InvalidOperationException("PostgreSQL minPoolSize must be between 0 and maxPoolSize."); + } + + if (postgres.ConnectionIdleLifetimeSeconds < 0) + { + throw new InvalidOperationException("PostgreSQL connectionIdleLifetimeSeconds must be zero or greater."); + } + + if (postgres.AutoMigrate && string.IsNullOrWhiteSpace(postgres.MigrationsPath)) + { + throw new InvalidOperationException("PostgreSQL migrationsPath must be configured when autoMigrate is enabled."); + } + + if (string.IsNullOrWhiteSpace(postgres.SchemaName)) + { + postgres.SchemaName = "vuln"; + } + } } diff --git a/src/Concelier/StellaOps.Concelier.WebService/Program.cs b/src/Concelier/StellaOps.Concelier.WebService/Program.cs index f9a83b125..9dc213a6c 100644 --- a/src/Concelier/StellaOps.Concelier.WebService/Program.cs +++ b/src/Concelier/StellaOps.Concelier.WebService/Program.cs @@ -26,6 +26,7 @@ using StellaOps.Concelier.Core.Events; using StellaOps.Concelier.Core.Jobs; using StellaOps.Concelier.Core.Observations; using StellaOps.Concelier.Core.Linksets; +using StellaOps.Concelier.Core.Diagnostics; using StellaOps.Concelier.Models; using StellaOps.Concelier.WebService.Diagnostics; using ServiceStatus = StellaOps.Concelier.WebService.Diagnostics.ServiceStatus; @@ -54,9 +55,6 @@ using StellaOps.Concelier.Core.Aoc; using StellaOps.Concelier.Core.Raw; using StellaOps.Concelier.RawModels; using StellaOps.Concelier.Storage.Postgres; -using StellaOps.Concelier.Storage.Mongo; -using StellaOps.Concelier.Storage.Mongo.Advisories; -using StellaOps.Concelier.Storage.Mongo.Aliases; using StellaOps.Concelier.Core.Attestation; using StellaOps.Concelier.Core.Signals; using AttestationClaims = StellaOps.Concelier.Core.Attestation.AttestationClaims; @@ -64,8 +62,10 @@ using StellaOps.Concelier.Core.Orchestration; using System.Diagnostics.Metrics; using StellaOps.Concelier.Models.Observations; using StellaOps.Aoc.AspNetCore.Results; -using StellaOps.Provenance.Mongo; using HttpResults = Microsoft.AspNetCore.Http.Results; +using StellaOps.Concelier.Storage.Mongo.Advisories; +using StellaOps.Concelier.Storage.Mongo.Aliases; +using StellaOps.Provenance.Mongo; namespace StellaOps.Concelier.WebService { @@ -91,9 +91,10 @@ builder.Host.ConfigureAppConfiguration((context, cfg) => { cfg.AddInMemoryCollection(new Dictionary { - {"Concelier:Storage:Dsn", Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? 
"mongodb://localhost:27017/test-health"}, - {"Concelier:Storage:Driver", "mongo"}, - {"Concelier:Storage:CommandTimeoutSeconds", "30"}, + {"Concelier:PostgresStorage:Enabled", "true"}, + {"Concelier:PostgresStorage:ConnectionString", Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "Host=localhost;Port=5432;Database=concelier_test;Username=postgres;Password=postgres"}, + {"Concelier:PostgresStorage:CommandTimeoutSeconds", "30"}, + {"Concelier:PostgresStorage:SchemaName", "vuln"}, {"Concelier:Telemetry:Enabled", "false"} }); } @@ -125,11 +126,12 @@ if (builder.Environment.IsEnvironment("Testing")) #pragma warning restore ASP0000 concelierOptions = tempProvider.GetService>()?.Value ?? new ConcelierOptions { - Storage = new ConcelierOptions.StorageOptions + PostgresStorage = new ConcelierOptions.PostgresStorageOptions { - Dsn = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "mongodb://localhost:27017/test-health", - Driver = "mongo", - CommandTimeoutSeconds = 30 + Enabled = true, + ConnectionString = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "Host=localhost;Port=5432;Database=concelier_test;Username=postgres;Password=postgres", + CommandTimeoutSeconds = 30, + SchemaName = "vuln" }, Telemetry = new ConcelierOptions.TelemetryOptions { @@ -137,10 +139,18 @@ if (builder.Environment.IsEnvironment("Testing")) } }; - concelierOptions.Storage ??= new ConcelierOptions.StorageOptions(); - concelierOptions.Storage.Dsn = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "mongodb://localhost:27017/orch-tests"; - concelierOptions.Storage.Driver = "mongo"; - concelierOptions.Storage.CommandTimeoutSeconds = concelierOptions.Storage.CommandTimeoutSeconds <= 0 ? 30 : concelierOptions.Storage.CommandTimeoutSeconds; + concelierOptions.PostgresStorage ??= new ConcelierOptions.PostgresStorageOptions + { + Enabled = true, + ConnectionString = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? "Host=localhost;Port=5432;Database=concelier_test;Username=postgres;Password=postgres", + CommandTimeoutSeconds = 30, + SchemaName = "vuln" + }; + + if (string.IsNullOrWhiteSpace(concelierOptions.PostgresStorage.ConnectionString)) + { + concelierOptions.PostgresStorage.ConnectionString = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN") ?? string.Empty; + } ConcelierOptionsPostConfigure.Apply(concelierOptions, contentRootPath); // Skip validation in Testing to allow factory-provided wiring. @@ -149,10 +159,21 @@ else { concelierOptions = builder.Configuration.BindOptions(postConfigure: (opts, _) => { - var testDsn = Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN"); - if (string.IsNullOrWhiteSpace(opts.Storage.Dsn) && !string.IsNullOrWhiteSpace(testDsn)) + var testDsn = Environment.GetEnvironmentVariable("CONCELIER_POSTGRES_DSN") + ?? Environment.GetEnvironmentVariable("CONCELIER_TEST_STORAGE_DSN"); + + opts.PostgresStorage ??= new ConcelierOptions.PostgresStorageOptions { - opts.Storage.Dsn = testDsn; + Enabled = !string.IsNullOrWhiteSpace(testDsn), + ConnectionString = testDsn ?? 
string.Empty, + SchemaName = "vuln", + CommandTimeoutSeconds = 30 + }; + + if (string.IsNullOrWhiteSpace(opts.PostgresStorage.ConnectionString) && !string.IsNullOrWhiteSpace(testDsn)) + { + opts.PostgresStorage.ConnectionString = testDsn; + opts.PostgresStorage.Enabled = true; } ConcelierOptionsPostConfigure.Apply(opts, contentRootPath); @@ -179,24 +200,26 @@ builder.Services.AddSingleton(); var isTesting = builder.Environment.IsEnvironment("Testing"); -// Add PostgreSQL storage for LNM linkset cache if configured. -// This provides a PostgreSQL-backed implementation of IAdvisoryLinksetStore for the read-through cache. -if (concelierOptions.PostgresStorage is { Enabled: true } postgresOptions) +// Add PostgreSQL storage for all Concelier persistence. +var postgresOptions = concelierOptions.PostgresStorage ?? throw new InvalidOperationException("PostgreSQL storage must be configured."); +if (!postgresOptions.Enabled) { - builder.Services.AddConcelierPostgresStorage(pgOptions => - { - pgOptions.ConnectionString = postgresOptions.ConnectionString; - pgOptions.CommandTimeoutSeconds = postgresOptions.CommandTimeoutSeconds; - pgOptions.MaxPoolSize = postgresOptions.MaxPoolSize; - pgOptions.MinPoolSize = postgresOptions.MinPoolSize; - pgOptions.ConnectionIdleLifetimeSeconds = postgresOptions.ConnectionIdleLifetimeSeconds; - pgOptions.Pooling = postgresOptions.Pooling; - pgOptions.SchemaName = postgresOptions.SchemaName; - pgOptions.AutoMigrate = postgresOptions.AutoMigrate; - pgOptions.MigrationsPath = postgresOptions.MigrationsPath; - }); + throw new InvalidOperationException("PostgreSQL storage must be enabled."); } +builder.Services.AddConcelierPostgresStorage(pgOptions => +{ + pgOptions.ConnectionString = postgresOptions.ConnectionString; + pgOptions.CommandTimeoutSeconds = postgresOptions.CommandTimeoutSeconds; + pgOptions.MaxPoolSize = postgresOptions.MaxPoolSize; + pgOptions.MinPoolSize = postgresOptions.MinPoolSize; + pgOptions.ConnectionIdleLifetimeSeconds = postgresOptions.ConnectionIdleLifetimeSeconds; + pgOptions.Pooling = postgresOptions.Pooling; + pgOptions.SchemaName = postgresOptions.SchemaName; + pgOptions.AutoMigrate = postgresOptions.AutoMigrate; + pgOptions.MigrationsPath = postgresOptions.MigrationsPath; +}); + builder.Services.AddOptions() .Bind(builder.Configuration.GetSection("advisoryObservationEvents")) .PostConfigure(options => @@ -1039,9 +1062,12 @@ var advisoryIngestEndpoint = app.MapPost("/ingest/advisory", async ( return Problem(context, "Invalid advisory payload", StatusCodes.Status400BadRequest, ProblemTypes.Validation, ex.Message); } + var chunkStopwatch = Stopwatch.StartNew(); + try { var result = await rawService.IngestAsync(document, cancellationToken).ConfigureAwait(false); + chunkStopwatch.Stop(); var response = new AdvisoryIngestResponse( result.Record.Id, @@ -1065,10 +1091,21 @@ var advisoryIngestEndpoint = app.MapPost("/ingest/advisory", async ( ingestRequest.Source.Vendor ?? "(unknown)", result.Inserted ? "inserted" : "duplicate")); + var telemetrySource = ingestRequest.Source.Vendor ?? 
"(unknown)"; + var (_, _, conflicts) = AdvisoryLinksetNormalization.FromRawLinksetWithConfidence(document.Linkset, providedConfidence: null); + var collisionCount = VulnExplorerTelemetry.CountAliasCollisions(conflicts); + VulnExplorerTelemetry.RecordIdentifierCollisions(tenant, telemetrySource, collisionCount); + VulnExplorerTelemetry.RecordChunkLatency(tenant, telemetrySource, chunkStopwatch.Elapsed); + if (VulnExplorerTelemetry.IsWithdrawn(document.Content.Raw)) + { + VulnExplorerTelemetry.RecordWithdrawnStatement(tenant, telemetrySource); + } + return JsonResult(response, statusCode); } catch (ConcelierAocGuardException guardException) { + chunkStopwatch.Stop(); logger.LogWarning( guardException, "AOC guard rejected advisory ingest tenant={Tenant} upstream={UpstreamId} requestHash={RequestHash} documentHash={DocumentHash} codes={Codes}", @@ -2115,6 +2152,12 @@ var advisoryChunksEndpoint = app.MapGet("/advisories/{advisoryKey}/chunks", asyn buildResult.Response.Entries.Count, duration, guardrailCounts)); + VulnExplorerTelemetry.RecordChunkRequest( + tenant!, + result: "ok", + cacheHit, + buildResult.Response.Entries.Count, + duration.TotalMilliseconds); return JsonResult(buildResult.Response); }); @@ -3269,7 +3312,7 @@ void ApplyNoCache(HttpResponse response) response.Headers["Expires"] = "0"; } -await InitializeMongoAsync(app); +await InitializePostgresAsync(app); app.MapGet("/health", ([FromServices] IOptions opts, [FromServices] StellaOps.Concelier.WebService.Diagnostics.ServiceStatus status, HttpContext context) => { @@ -3278,11 +3321,12 @@ app.MapGet("/health", ([FromServices] IOptions opts, [FromServ var snapshot = status.CreateSnapshot(); var uptimeSeconds = Math.Max((snapshot.CapturedAt - snapshot.StartedAt).TotalSeconds, 0d); - var storage = new StorageBootstrapHealth( - Driver: opts.Value.Storage.Driver, - Completed: snapshot.BootstrapCompletedAt is not null, - CompletedAt: snapshot.BootstrapCompletedAt, - DurationMs: snapshot.BootstrapDuration?.TotalMilliseconds); + var storage = new StorageHealth( + Backend: "postgres", + Ready: snapshot.LastReadySucceeded, + CheckedAt: snapshot.LastReadyCheckAt, + LatencyMs: snapshot.LastStorageLatency?.TotalMilliseconds, + Error: snapshot.LastStorageError); var telemetry = new TelemetryHealth( Enabled: opts.Value.Telemetry.Enabled, @@ -3300,24 +3344,32 @@ app.MapGet("/health", ([FromServices] IOptions opts, [FromServ return JsonResult(response); }); -app.MapGet("/ready", ([FromServices] StellaOps.Concelier.WebService.Diagnostics.ServiceStatus status, HttpContext context) => +app.MapGet("/ready", async ( + [FromServices] StellaOps.Concelier.WebService.Diagnostics.ServiceStatus status, + [FromServices] ConcelierDataSource dataSource, + HttpContext context, + CancellationToken cancellationToken) => { ApplyNoCache(context.Response); + var (ready, latency, error) = await CheckPostgresAsync(dataSource, cancellationToken).ConfigureAwait(false); + status.RecordStorageCheck(ready, latency, error); + var snapshot = status.CreateSnapshot(); var uptimeSeconds = Math.Max((snapshot.CapturedAt - snapshot.StartedAt).TotalSeconds, 0d); - var mongo = new MongoReadyHealth( - Status: "bypassed", - LatencyMs: null, + var storage = new StorageHealth( + Backend: "postgres", + Ready: ready, CheckedAt: snapshot.LastReadyCheckAt, - Error: "mongo disabled"); + LatencyMs: latency.TotalMilliseconds, + Error: error); var response = new ReadyDocument( - Status: "ready", + Status: ready ? 
"ready" : "degraded", StartedAt: snapshot.StartedAt, UptimeSeconds: uptimeSeconds, - Mongo: mongo); + Storage: storage); return JsonResult(response); }); @@ -4019,9 +4071,54 @@ static SignalsSymbolSetResponse ToSymbolSetResponse(AffectedSymbolSet symbolSet) return pluginOptions; } -static async Task InitializeMongoAsync(WebApplication app) +static async Task InitializePostgresAsync(WebApplication app) { - await Task.CompletedTask; + var dataSource = app.Services.GetService(); + var status = app.Services.GetRequiredService(); + + if (dataSource is null) + { + status.RecordStorageCheck(false, TimeSpan.Zero, "PostgreSQL storage not configured"); + return; + } + + var stopwatch = Stopwatch.StartNew(); + try + { + var (ready, latency, error) = await CheckPostgresAsync(dataSource, CancellationToken.None).ConfigureAwait(false); + stopwatch.Stop(); + status.RecordStorageCheck(ready, latency, error); + if (ready) + { + status.MarkBootstrapCompleted(latency); + } + } + catch (Exception ex) + { + stopwatch.Stop(); + status.RecordStorageCheck(false, stopwatch.Elapsed, ex.Message); + } +} + +static async Task<(bool Ready, TimeSpan Latency, string? Error)> CheckPostgresAsync( + ConcelierDataSource dataSource, + CancellationToken cancellationToken) +{ + var stopwatch = Stopwatch.StartNew(); + try + { + await using var connection = await dataSource.OpenSystemConnectionAsync(cancellationToken).ConfigureAwait(false); + await using var command = connection.CreateCommand(); + command.CommandText = "select 1"; + _ = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false); + stopwatch.Stop(); + return (true, stopwatch.Elapsed, null); + } + catch (Exception ex) + { + stopwatch.Stop(); + return (false, stopwatch.Elapsed, ex.Message); + } } } diff --git a/src/Concelier/StellaOps.Concelier.WebService/StellaOps.Concelier.WebService.csproj b/src/Concelier/StellaOps.Concelier.WebService/StellaOps.Concelier.WebService.csproj index 434c180ea..eefa69635 100644 --- a/src/Concelier/StellaOps.Concelier.WebService/StellaOps.Concelier.WebService.csproj +++ b/src/Concelier/StellaOps.Concelier.WebService/StellaOps.Concelier.WebService.csproj @@ -41,4 +41,4 @@ OutputItemType="Analyzer" ReferenceOutputAssembly="false" /> - \ No newline at end of file + diff --git a/src/Concelier/StellaOps.Concelier.sln b/src/Concelier/StellaOps.Concelier.sln index 1f820a907..30554d165 100644 --- a/src/Concelier/StellaOps.Concelier.sln +++ b/src/Concelier/StellaOps.Concelier.sln @@ -185,6 +185,22 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Concelier.Analyze EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Ingestion.Telemetry", "..\__Libraries\StellaOps.Ingestion.Telemetry\StellaOps.Ingestion.Telemetry.csproj", "{85D215EC-DCFE-4F7F-BB07-540DCF66BE8C}" EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.SmRemote", "..\__Libraries\StellaOps.Cryptography.Plugin.SmRemote\StellaOps.Cryptography.Plugin.SmRemote.csproj", "{FCA91451-5D4A-4E75-9268-B253A902A726}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.SmRemote.Service", "..\SmRemote\StellaOps.SmRemote.Service\StellaOps.SmRemote.Service.csproj", "{E823EB56-86F4-4989-9480-9F1D8DD780F8}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.SmSoft", "..\__Libraries\StellaOps.Cryptography.Plugin.SmSoft\StellaOps.Cryptography.Plugin.SmSoft.csproj", "{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}" +EndProject 
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.DependencyInjection", "..\__Libraries\StellaOps.Cryptography.DependencyInjection\StellaOps.Cryptography.DependencyInjection.csproj", "{1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.Pkcs11Gost", "..\__Libraries\StellaOps.Cryptography.Plugin.Pkcs11Gost\StellaOps.Cryptography.Plugin.Pkcs11Gost.csproj", "{3CC87BD4-38B7-421B-9688-B2ED2B392646}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.OpenSslGost", "..\__Libraries\StellaOps.Cryptography.Plugin.OpenSslGost\StellaOps.Cryptography.Plugin.OpenSslGost.csproj", "{27052CD3-98B4-4D37-88F9-7D8B54363F74}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.PqSoft", "..\__Libraries\StellaOps.Cryptography.Plugin.PqSoft\StellaOps.Cryptography.Plugin.PqSoft.csproj", "{29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StellaOps.Cryptography.Plugin.WineCsp", "..\__Libraries\StellaOps.Cryptography.Plugin.WineCsp\StellaOps.Cryptography.Plugin.WineCsp.csproj", "{98908D4F-1A48-4CED-B2CF-92C3179B44FD}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -1251,6 +1267,102 @@ Global {85D215EC-DCFE-4F7F-BB07-540DCF66BE8C}.Release|x64.Build.0 = Release|Any CPU {85D215EC-DCFE-4F7F-BB07-540DCF66BE8C}.Release|x86.ActiveCfg = Release|Any CPU {85D215EC-DCFE-4F7F-BB07-540DCF66BE8C}.Release|x86.Build.0 = Release|Any CPU + {FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|Any CPU.Build.0 = Debug|Any CPU + {FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|x64.ActiveCfg = Debug|Any CPU + {FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|x64.Build.0 = Debug|Any CPU + {FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|x86.ActiveCfg = Debug|Any CPU + {FCA91451-5D4A-4E75-9268-B253A902A726}.Debug|x86.Build.0 = Debug|Any CPU + {FCA91451-5D4A-4E75-9268-B253A902A726}.Release|Any CPU.ActiveCfg = Release|Any CPU + {FCA91451-5D4A-4E75-9268-B253A902A726}.Release|Any CPU.Build.0 = Release|Any CPU + {FCA91451-5D4A-4E75-9268-B253A902A726}.Release|x64.ActiveCfg = Release|Any CPU + {FCA91451-5D4A-4E75-9268-B253A902A726}.Release|x64.Build.0 = Release|Any CPU + {FCA91451-5D4A-4E75-9268-B253A902A726}.Release|x86.ActiveCfg = Release|Any CPU + {FCA91451-5D4A-4E75-9268-B253A902A726}.Release|x86.Build.0 = Release|Any CPU + {E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|Any CPU.Build.0 = Debug|Any CPU + {E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|x64.ActiveCfg = Debug|Any CPU + {E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|x64.Build.0 = Debug|Any CPU + {E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|x86.ActiveCfg = Debug|Any CPU + {E823EB56-86F4-4989-9480-9F1D8DD780F8}.Debug|x86.Build.0 = Debug|Any CPU + {E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|Any CPU.ActiveCfg = Release|Any CPU + {E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|Any CPU.Build.0 = Release|Any CPU + {E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|x64.ActiveCfg = Release|Any CPU + {E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|x64.Build.0 = Release|Any CPU + {E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|x86.ActiveCfg = Release|Any CPU + {E823EB56-86F4-4989-9480-9F1D8DD780F8}.Release|x86.Build.0 = Release|Any CPU + 
{64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|Any CPU.Build.0 = Debug|Any CPU + {64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|x64.ActiveCfg = Debug|Any CPU + {64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|x64.Build.0 = Debug|Any CPU + {64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|x86.ActiveCfg = Debug|Any CPU + {64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Debug|x86.Build.0 = Debug|Any CPU + {64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|Any CPU.ActiveCfg = Release|Any CPU + {64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|Any CPU.Build.0 = Release|Any CPU + {64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|x64.ActiveCfg = Release|Any CPU + {64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|x64.Build.0 = Release|Any CPU + {64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|x86.ActiveCfg = Release|Any CPU + {64C7E443-CD2C-475E-B9C6-95EF8160F4D8}.Release|x86.Build.0 = Release|Any CPU + {1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|Any CPU.Build.0 = Debug|Any CPU + {1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|x64.ActiveCfg = Debug|Any CPU + {1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|x64.Build.0 = Debug|Any CPU + {1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|x86.ActiveCfg = Debug|Any CPU + {1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Debug|x86.Build.0 = Debug|Any CPU + {1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|Any CPU.ActiveCfg = Release|Any CPU + {1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|Any CPU.Build.0 = Release|Any CPU + {1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|x64.ActiveCfg = Release|Any CPU + {1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|x64.Build.0 = Release|Any CPU + {1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|x86.ActiveCfg = Release|Any CPU + {1A7ACB4E-FDCD-4AA9-8516-EC60D8A25922}.Release|x86.Build.0 = Release|Any CPU + {3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|Any CPU.Build.0 = Debug|Any CPU + {3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|x64.ActiveCfg = Debug|Any CPU + {3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|x64.Build.0 = Debug|Any CPU + {3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|x86.ActiveCfg = Debug|Any CPU + {3CC87BD4-38B7-421B-9688-B2ED2B392646}.Debug|x86.Build.0 = Debug|Any CPU + {3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|Any CPU.ActiveCfg = Release|Any CPU + {3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|Any CPU.Build.0 = Release|Any CPU + {3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|x64.ActiveCfg = Release|Any CPU + {3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|x64.Build.0 = Release|Any CPU + {3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|x86.ActiveCfg = Release|Any CPU + {3CC87BD4-38B7-421B-9688-B2ED2B392646}.Release|x86.Build.0 = Release|Any CPU + {27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|Any CPU.Build.0 = Debug|Any CPU + {27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|x64.ActiveCfg = Debug|Any CPU + {27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|x64.Build.0 = Debug|Any CPU + {27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|x86.ActiveCfg = Debug|Any CPU + {27052CD3-98B4-4D37-88F9-7D8B54363F74}.Debug|x86.Build.0 = Debug|Any CPU + {27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|Any CPU.ActiveCfg = Release|Any CPU + {27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|Any CPU.Build.0 = Release|Any CPU + {27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|x64.ActiveCfg = 
Release|Any CPU + {27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|x64.Build.0 = Release|Any CPU + {27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|x86.ActiveCfg = Release|Any CPU + {27052CD3-98B4-4D37-88F9-7D8B54363F74}.Release|x86.Build.0 = Release|Any CPU + {29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|Any CPU.Build.0 = Debug|Any CPU + {29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|x64.ActiveCfg = Debug|Any CPU + {29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|x64.Build.0 = Debug|Any CPU + {29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|x86.ActiveCfg = Debug|Any CPU + {29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Debug|x86.Build.0 = Debug|Any CPU + {29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|Any CPU.ActiveCfg = Release|Any CPU + {29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|Any CPU.Build.0 = Release|Any CPU + {29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|x64.ActiveCfg = Release|Any CPU + {29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|x64.Build.0 = Release|Any CPU + {29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|x86.ActiveCfg = Release|Any CPU + {29B6BB6D-A002-41A6-B3F9-F6F894F2A8D2}.Release|x86.Build.0 = Release|Any CPU + {98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|Any CPU.Build.0 = Debug|Any CPU + {98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|x64.ActiveCfg = Debug|Any CPU + {98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|x64.Build.0 = Debug|Any CPU + {98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|x86.ActiveCfg = Debug|Any CPU + {98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Debug|x86.Build.0 = Debug|Any CPU + {98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|Any CPU.ActiveCfg = Release|Any CPU + {98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|Any CPU.Build.0 = Release|Any CPU + {98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|x64.ActiveCfg = Release|Any CPU + {98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|x64.Build.0 = Release|Any CPU + {98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|x86.ActiveCfg = Release|Any CPU + {98908D4F-1A48-4CED-B2CF-92C3179B44FD}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Core/Diagnostics/VulnExplorerTelemetry.cs b/src/Concelier/__Libraries/StellaOps.Concelier.Core/Diagnostics/VulnExplorerTelemetry.cs new file mode 100644 index 000000000..6d29e5d3b --- /dev/null +++ b/src/Concelier/__Libraries/StellaOps.Concelier.Core/Diagnostics/VulnExplorerTelemetry.cs @@ -0,0 +1,143 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.Metrics; +using System.Linq; +using System.Text.Json; +using StellaOps.Concelier.Core.Linksets; + +namespace StellaOps.Concelier.Core.Diagnostics; + +/// +/// Metrics exported for Vuln Explorer consumers (fact-only telemetry). 
+/// +public static class VulnExplorerTelemetry +{ + public const string MeterName = "StellaOps.Concelier.VulnExplorer"; + + private static readonly Meter Meter = new(MeterName); + + private static readonly Counter<long> IdentifierCollisionCounter = Meter.CreateCounter<long>( + "vuln.identifier_collisions_total", + unit: "collision", + description: "Identifier/alias collisions detected while aggregating linksets for Vuln Explorer."); + + private static readonly Counter<long> WithdrawnStatementCounter = Meter.CreateCounter<long>( + "vuln.withdrawn_statements_total", + unit: "statement", + description: "Withdrawn advisory observations detected by change emitters."); + + private static readonly Counter<long> ChunkRequestCounter = Meter.CreateCounter<long>( + "vuln.chunk_requests_total", + unit: "request", + description: "Advisory chunk requests served for Vuln Explorer evidence panels."); + + private static readonly Histogram<double> ChunkLatencyHistogram = Meter.CreateHistogram<double>( + "vuln.chunk_latency_ms", + unit: "ms", + description: "Latency to build advisory chunks (fact-only) for Vuln Explorer."); + + public static void RecordIdentifierCollisions(string tenant, string? source, int collisions) + { + if (collisions <= 0 || string.IsNullOrWhiteSpace(tenant)) + { + return; + } + + var tags = new[] + { + KeyValuePair.Create<string, object?>("tenant", tenant), + KeyValuePair.Create<string, object?>("source", source ?? "unknown") + }; + + IdentifierCollisionCounter.Add(collisions, tags); + } + + public static int CountAliasCollisions(IReadOnlyList? conflicts) + { + if (conflicts is null || conflicts.Count == 0) + { + return 0; + } + + return conflicts.Count(conflict => + string.Equals(conflict.Reason, "alias-inconsistency", StringComparison.OrdinalIgnoreCase) || + string.Equals(conflict.Field, "aliases", StringComparison.OrdinalIgnoreCase)); + } + + public static void RecordWithdrawnStatement(string tenant, string? source) + { + if (string.IsNullOrWhiteSpace(tenant)) + { + return; + } + + var tags = new[] + { + KeyValuePair.Create<string, object?>("tenant", tenant), + KeyValuePair.Create<string, object?>("source", source ?? "unknown") + }; + + WithdrawnStatementCounter.Add(1, tags); + } + + public static void RecordChunkRequest(string tenant, string result, bool cacheHit, int chunkCount, double latencyMs) + { + if (string.IsNullOrWhiteSpace(tenant)) + { + return; + } + + var sanitizedResult = string.IsNullOrWhiteSpace(result) ? "unknown" : result.Trim().ToLowerInvariant(); + var safeLatency = latencyMs < 0 ? 0d : latencyMs; + var normalizedChunkCount = chunkCount < 0 ? 0 : chunkCount; + + var tags = new[] + { + KeyValuePair.Create<string, object?>("tenant", tenant), + KeyValuePair.Create<string, object?>("result", sanitizedResult), + KeyValuePair.Create<string, object?>("cache_hit", cacheHit), + KeyValuePair.Create<string, object?>("chunk_count", normalizedChunkCount) + }; + + ChunkRequestCounter.Add(1, tags); + ChunkLatencyHistogram.Record(safeLatency, tags); + } + + public static void RecordChunkLatency(string tenant, string? source, TimeSpan duration) + { + if (string.IsNullOrWhiteSpace(tenant)) + { + return; + } + + var tags = new[] + { + KeyValuePair.Create<string, object?>("tenant", tenant), + KeyValuePair.Create<string, object?>("source", source ??
"unknown") + }; + + ChunkLatencyHistogram.Record(Math.Max(0, duration.TotalMilliseconds), tags); + } + + public static bool IsWithdrawn(JsonElement content) + { + if (content.ValueKind != JsonValueKind.Object) + { + return false; + } + + if (content.TryGetProperty("withdrawn", out var withdrawnElement) && + withdrawnElement.ValueKind == JsonValueKind.True) + { + return true; + } + + if (content.TryGetProperty("withdrawn_at", out var withdrawnAtElement) && + withdrawnAtElement.ValueKind is JsonValueKind.String) + { + return !string.IsNullOrWhiteSpace(withdrawnAtElement.GetString()); + } + + return false; + } +} diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Core/Linksets/AdvisoryLinksetNormalization.cs b/src/Concelier/__Libraries/StellaOps.Concelier.Core/Linksets/AdvisoryLinksetNormalization.cs index 0e24e32ee..8ae656100 100644 --- a/src/Concelier/__Libraries/StellaOps.Concelier.Core/Linksets/AdvisoryLinksetNormalization.cs +++ b/src/Concelier/__Libraries/StellaOps.Concelier.Core/Linksets/AdvisoryLinksetNormalization.cs @@ -7,7 +7,7 @@ using StellaOps.Concelier.Normalization.SemVer; namespace StellaOps.Concelier.Core.Linksets; -internal static class AdvisoryLinksetNormalization +public static class AdvisoryLinksetNormalization { public static AdvisoryLinksetNormalized? FromRawLinkset(RawLinkset linkset) { diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Core/Observations/AdvisoryObservationQueryService.cs b/src/Concelier/__Libraries/StellaOps.Concelier.Core/Observations/AdvisoryObservationQueryService.cs index 0eac8c0dc..947dfeac9 100644 --- a/src/Concelier/__Libraries/StellaOps.Concelier.Core/Observations/AdvisoryObservationQueryService.cs +++ b/src/Concelier/__Libraries/StellaOps.Concelier.Core/Observations/AdvisoryObservationQueryService.cs @@ -5,192 +5,194 @@ using StellaOps.Concelier.Models; using StellaOps.Concelier.Models.Observations; using StellaOps.Concelier.RawModels; using StellaOps.Concelier.Core.Linksets; - -namespace StellaOps.Concelier.Core.Observations; - -/// -/// Default implementation of that projects raw observations for overlay consumers. -/// -public sealed class AdvisoryObservationQueryService : IAdvisoryObservationQueryService -{ - private const int DefaultPageSize = 200; - private const int MaxPageSize = 500; - private readonly IAdvisoryObservationLookup _lookup; - - public AdvisoryObservationQueryService(IAdvisoryObservationLookup lookup) - { - _lookup = lookup ?? throw new ArgumentNullException(nameof(lookup)); - } - - public async ValueTask QueryAsync( - AdvisoryObservationQueryOptions options, - CancellationToken cancellationToken) - { - ArgumentNullException.ThrowIfNull(options); - cancellationToken.ThrowIfCancellationRequested(); - - var normalizedTenant = NormalizeTenant(options.Tenant); - var normalizedObservationIds = NormalizeSet(options.ObservationIds, static value => value, StringComparer.Ordinal); +using StellaOps.Concelier.Core.Diagnostics; + +namespace StellaOps.Concelier.Core.Observations; + +/// +/// Default implementation of that projects raw observations for overlay consumers. +/// +public sealed class AdvisoryObservationQueryService : IAdvisoryObservationQueryService +{ + private const int DefaultPageSize = 200; + private const int MaxPageSize = 500; + private readonly IAdvisoryObservationLookup _lookup; + + public AdvisoryObservationQueryService(IAdvisoryObservationLookup lookup) + { + _lookup = lookup ?? 
throw new ArgumentNullException(nameof(lookup)); + } + + public async ValueTask QueryAsync( + AdvisoryObservationQueryOptions options, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(options); + cancellationToken.ThrowIfCancellationRequested(); + + var normalizedTenant = NormalizeTenant(options.Tenant); + var normalizedObservationIds = NormalizeSet(options.ObservationIds, static value => value, StringComparer.Ordinal); var normalizedAliases = NormalizeSet(options.Aliases, static value => value, StringComparer.OrdinalIgnoreCase); - var normalizedPurls = NormalizeSet(options.Purls, static value => value, StringComparer.Ordinal); - var normalizedCpes = NormalizeSet(options.Cpes, static value => value, StringComparer.Ordinal); - - var limit = NormalizeLimit(options.Limit); - var fetchSize = checked(limit + 1); - - var cursor = DecodeCursor(options.Cursor); - - var observations = await _lookup - .FindByFiltersAsync( - normalizedTenant, - normalizedObservationIds, - normalizedAliases, - normalizedPurls, - normalizedCpes, - cursor, - fetchSize, - cancellationToken) - .ConfigureAwait(false); - - var ordered = observations - .Where(observation => Matches(observation, normalizedObservationIds, normalizedAliases, normalizedPurls, normalizedCpes)) - .OrderByDescending(static observation => observation.CreatedAt) - .ThenBy(static observation => observation.ObservationId, StringComparer.Ordinal) - .ToImmutableArray(); - - var hasMore = ordered.Length > limit; - var page = hasMore ? ordered.Take(limit).ToImmutableArray() : ordered; - var nextCursor = hasMore ? EncodeCursor(page[^1]) : null; - - var linkset = BuildAggregateLinkset(page); - return new AdvisoryObservationQueryResult(page, linkset, nextCursor, hasMore); - } - - private static bool Matches( - AdvisoryObservation observation, - ImmutableHashSet observationIds, - ImmutableHashSet aliases, - ImmutableHashSet purls, - ImmutableHashSet cpes) - { - ArgumentNullException.ThrowIfNull(observation); - - if (observationIds.Count > 0 && !observationIds.Contains(observation.ObservationId)) - { - return false; - } - - if (aliases.Count > 0 && !observation.Linkset.Aliases.Any(aliases.Contains)) - { - return false; - } - - if (purls.Count > 0 && !observation.Linkset.Purls.Any(purls.Contains)) - { - return false; - } - - if (cpes.Count > 0 && !observation.Linkset.Cpes.Any(cpes.Contains)) - { - return false; - } - - return true; - } - - private static string NormalizeTenant(string tenant) - => Validation.EnsureNotNullOrWhiteSpace(tenant, nameof(tenant)).ToLowerInvariant(); - - private static ImmutableHashSet NormalizeSet( - IEnumerable? values, - Func projector, - StringComparer comparer) - { - if (values is null) - { - return ImmutableHashSet.Empty; - } - - var builder = ImmutableHashSet.CreateBuilder(comparer); - foreach (var value in values) - { - var normalized = Validation.TrimToNull(value); - if (normalized is null) - { - continue; - } - - builder.Add(projector(normalized)); - } - - return builder.ToImmutable(); - } - - private static int NormalizeLimit(int? requestedLimit) - { - if (!requestedLimit.HasValue || requestedLimit.Value <= 0) - { - return DefaultPageSize; - } - - var limit = requestedLimit.Value; - if (limit > MaxPageSize) - { - return MaxPageSize; - } - - return limit; - } - - private static AdvisoryObservationCursor? DecodeCursor(string? 
cursor) - { - if (string.IsNullOrWhiteSpace(cursor)) - { - return null; - } - - try - { - var decoded = Convert.FromBase64String(cursor.Trim()); - var payload = Encoding.UTF8.GetString(decoded); - var separator = payload.IndexOf(':'); - if (separator <= 0 || separator >= payload.Length - 1) - { - throw new FormatException("Cursor is malformed."); - } - - var ticksText = payload.AsSpan(0, separator); - if (!long.TryParse(ticksText, NumberStyles.Integer, CultureInfo.InvariantCulture, out var ticks)) - { - throw new FormatException("Cursor timestamp is invalid."); - } - - var createdAt = new DateTimeOffset(DateTime.SpecifyKind(new DateTime(ticks), DateTimeKind.Utc)); - var observationId = payload[(separator + 1)..]; - if (string.IsNullOrWhiteSpace(observationId)) - { - throw new FormatException("Cursor observation id is missing."); - } - - return new AdvisoryObservationCursor(createdAt, observationId); - } - catch (FormatException) - { - throw; - } - catch (Exception ex) - { - throw new FormatException("Cursor is malformed.", ex); - } - } - - private static string? EncodeCursor(AdvisoryObservation observation) - { - if (observation is null) - { - return null; - } - + var normalizedPurls = NormalizeSet(options.Purls, static value => value, StringComparer.Ordinal); + var normalizedCpes = NormalizeSet(options.Cpes, static value => value, StringComparer.Ordinal); + + var limit = NormalizeLimit(options.Limit); + var fetchSize = checked(limit + 1); + + var cursor = DecodeCursor(options.Cursor); + + var observations = await _lookup + .FindByFiltersAsync( + normalizedTenant, + normalizedObservationIds, + normalizedAliases, + normalizedPurls, + normalizedCpes, + cursor, + fetchSize, + cancellationToken) + .ConfigureAwait(false); + + var ordered = observations + .Where(observation => Matches(observation, normalizedObservationIds, normalizedAliases, normalizedPurls, normalizedCpes)) + .OrderByDescending(static observation => observation.CreatedAt) + .ThenBy(static observation => observation.ObservationId, StringComparer.Ordinal) + .ToImmutableArray(); + + var hasMore = ordered.Length > limit; + var page = hasMore ? ordered.Take(limit).ToImmutableArray() : ordered; + var nextCursor = hasMore ? EncodeCursor(page[^1]) : null; + + var linkset = BuildAggregateLinkset(page); + RecordIdentifierCollisions(normalizedTenant, linkset); + return new AdvisoryObservationQueryResult(page, linkset, nextCursor, hasMore); + } + + private static bool Matches( + AdvisoryObservation observation, + ImmutableHashSet observationIds, + ImmutableHashSet aliases, + ImmutableHashSet purls, + ImmutableHashSet cpes) + { + ArgumentNullException.ThrowIfNull(observation); + + if (observationIds.Count > 0 && !observationIds.Contains(observation.ObservationId)) + { + return false; + } + + if (aliases.Count > 0 && !observation.Linkset.Aliases.Any(aliases.Contains)) + { + return false; + } + + if (purls.Count > 0 && !observation.Linkset.Purls.Any(purls.Contains)) + { + return false; + } + + if (cpes.Count > 0 && !observation.Linkset.Cpes.Any(cpes.Contains)) + { + return false; + } + + return true; + } + + private static string NormalizeTenant(string tenant) + => Validation.EnsureNotNullOrWhiteSpace(tenant, nameof(tenant)).ToLowerInvariant(); + + private static ImmutableHashSet NormalizeSet( + IEnumerable? 
values, + Func projector, + StringComparer comparer) + { + if (values is null) + { + return ImmutableHashSet.Empty; + } + + var builder = ImmutableHashSet.CreateBuilder(comparer); + foreach (var value in values) + { + var normalized = Validation.TrimToNull(value); + if (normalized is null) + { + continue; + } + + builder.Add(projector(normalized)); + } + + return builder.ToImmutable(); + } + + private static int NormalizeLimit(int? requestedLimit) + { + if (!requestedLimit.HasValue || requestedLimit.Value <= 0) + { + return DefaultPageSize; + } + + var limit = requestedLimit.Value; + if (limit > MaxPageSize) + { + return MaxPageSize; + } + + return limit; + } + + private static AdvisoryObservationCursor? DecodeCursor(string? cursor) + { + if (string.IsNullOrWhiteSpace(cursor)) + { + return null; + } + + try + { + var decoded = Convert.FromBase64String(cursor.Trim()); + var payload = Encoding.UTF8.GetString(decoded); + var separator = payload.IndexOf(':'); + if (separator <= 0 || separator >= payload.Length - 1) + { + throw new FormatException("Cursor is malformed."); + } + + var ticksText = payload.AsSpan(0, separator); + if (!long.TryParse(ticksText, NumberStyles.Integer, CultureInfo.InvariantCulture, out var ticks)) + { + throw new FormatException("Cursor timestamp is invalid."); + } + + var createdAt = new DateTimeOffset(DateTime.SpecifyKind(new DateTime(ticks), DateTimeKind.Utc)); + var observationId = payload[(separator + 1)..]; + if (string.IsNullOrWhiteSpace(observationId)) + { + throw new FormatException("Cursor observation id is missing."); + } + + return new AdvisoryObservationCursor(createdAt, observationId); + } + catch (FormatException) + { + throw; + } + catch (Exception ex) + { + throw new FormatException("Cursor is malformed.", ex); + } + } + + private static string? EncodeCursor(AdvisoryObservation observation) + { + if (observation is null) + { + return null; + } + var payload = $"{observation.CreatedAt.UtcTicks.ToString(CultureInfo.InvariantCulture)}:{observation.ObservationId}"; return Convert.ToBase64String(Encoding.UTF8.GetBytes(payload)); } @@ -283,4 +285,18 @@ public sealed class AdvisoryObservationQueryService : IAdvisoryObservationQueryS .ThenBy(static c => string.Join('|', c.Values ?? 
Array.Empty<string>()), StringComparer.Ordinal) .ToImmutableArray()); } + + private static void RecordIdentifierCollisions(string tenant, AdvisoryObservationLinksetAggregate linkset) + { + if (linkset.Conflicts.IsDefaultOrEmpty) + { + return; + } + + var collisionCount = linkset.Conflicts.Count(conflict => + string.Equals(conflict.Field, "aliases", StringComparison.OrdinalIgnoreCase) && + conflict.Reason.Contains("alias", StringComparison.OrdinalIgnoreCase)); + + VulnExplorerTelemetry.RecordIdentifierCollisions(tenant, source: null, collisionCount); + } } diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Core/Risk/AdvisoryFieldChangeEmitter.cs b/src/Concelier/__Libraries/StellaOps.Concelier.Core/Risk/AdvisoryFieldChangeEmitter.cs index 3c123d00a..7fabb28f3 100644 --- a/src/Concelier/__Libraries/StellaOps.Concelier.Core/Risk/AdvisoryFieldChangeEmitter.cs +++ b/src/Concelier/__Libraries/StellaOps.Concelier.Core/Risk/AdvisoryFieldChangeEmitter.cs @@ -5,6 +5,7 @@ using System.Linq; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; +using StellaOps.Concelier.Core.Diagnostics; namespace StellaOps.Concelier.Core.Risk; @@ -177,6 +178,7 @@ public sealed class AdvisoryFieldChangeEmitter : IAdvisoryFieldChangeEmitter _logger.LogInformation( "Emitted withdrawn observation notification for {ObservationId}", previousSignal.ObservationId); + VulnExplorerTelemetry.RecordWithdrawnStatement(tenantId, previousSignal.Provenance.Vendor); return notification; } diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Core/StellaOps.Concelier.Core.csproj b/src/Concelier/__Libraries/StellaOps.Concelier.Core/StellaOps.Concelier.Core.csproj index e9a36d76f..6b92aec22 100644 --- a/src/Concelier/__Libraries/StellaOps.Concelier.Core/StellaOps.Concelier.Core.csproj +++ b/src/Concelier/__Libraries/StellaOps.Concelier.Core/StellaOps.Concelier.Core.csproj @@ -12,6 +12,7 @@ + diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Models/MongoCompat/DriverStubs.cs b/src/Concelier/__Libraries/StellaOps.Concelier.Models/MongoCompat/DriverStubs.cs index f8a415148..421dbf6ed 100644 --- a/src/Concelier/__Libraries/StellaOps.Concelier.Models/MongoCompat/DriverStubs.cs +++ b/src/Concelier/__Libraries/StellaOps.Concelier.Models/MongoCompat/DriverStubs.cs @@ -1,5 +1,6 @@ using System; using System.Collections; +using System.Collections.Concurrent; using System.Collections.Generic; using System.Linq; using System.Threading; @@ -66,6 +67,8 @@ namespace MongoDB.Driver public class MongoDatabase : IMongoDatabase { + private readonly ConcurrentDictionary<string, object> _collections = new(StringComparer.Ordinal); + public MongoDatabase(string name) { Name = name; @@ -73,8 +76,17 @@ } public string Name { get; } public DatabaseNamespace DatabaseNamespace { get; } - public IMongoCollection<T> GetCollection<T>(string name, MongoCollectionSettings? settings = null) => new MongoCollection<T>(name); - public Task DropCollectionAsync(string name, CancellationToken cancellationToken = default) => Task.CompletedTask; + public IMongoCollection<T> GetCollection<T>(string name, MongoCollectionSettings?
settings = null)
+        {
+            var collection = (MongoCollection)_collections.GetOrAdd(name, _ => new MongoCollection(name));
+            return collection;
+        }
+
+        public Task DropCollectionAsync(string name, CancellationToken cancellationToken = default)
+        {
+            _collections.TryRemove(name, out _);
+            return Task.CompletedTask;
+        }
         public BsonDocument RunCommand(BsonDocument command, CancellationToken cancellationToken = default) => new();
         public T RunCommand(BsonDocument command, CancellationToken cancellationToken = default) => default!;
         public Task RunCommandAsync(BsonDocument command, CancellationToken cancellationToken = default) => Task.FromResult(default(T)!);
diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Models/StellaOps.Concelier.Models.csproj b/src/Concelier/__Libraries/StellaOps.Concelier.Models/StellaOps.Concelier.Models.csproj
index b0d939709..396f60027 100644
--- a/src/Concelier/__Libraries/StellaOps.Concelier.Models/StellaOps.Concelier.Models.csproj
+++ b/src/Concelier/__Libraries/StellaOps.Concelier.Models/StellaOps.Concelier.Models.csproj
@@ -5,6 +5,7 @@
     enable
     enable
     false
+    false
@@ -12,4 +13,7 @@
+
+
+
diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Migrations/005_connector_state.sql b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Migrations/005_connector_state.sql
new file mode 100644
index 000000000..71890a0e5
--- /dev/null
+++ b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Migrations/005_connector_state.sql
@@ -0,0 +1,69 @@
+-- Concelier Migration 005: Postgres equivalents for DTO, export, PSIRT/JP flags, and change history.
+
+CREATE SCHEMA IF NOT EXISTS concelier;
+
+CREATE TABLE IF NOT EXISTS concelier.dtos (
+    id UUID NOT NULL,
+    document_id UUID NOT NULL,
+    source_name TEXT NOT NULL,
+    format TEXT NOT NULL,
+    payload_json JSONB NOT NULL,
+    schema_version TEXT NOT NULL DEFAULT '',
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    validated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    CONSTRAINT pk_concelier_dtos PRIMARY KEY (document_id)
+);
+
+CREATE INDEX IF NOT EXISTS idx_concelier_dtos_source ON concelier.dtos(source_name, created_at DESC);
+
+CREATE TABLE IF NOT EXISTS concelier.export_states (
+    id TEXT NOT NULL,
+    export_cursor TEXT NOT NULL,
+    last_full_digest TEXT,
+    last_delta_digest TEXT,
+    base_export_id TEXT,
+    base_digest TEXT,
+    target_repository TEXT,
+    files JSONB NOT NULL,
+    exporter_version TEXT NOT NULL,
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    CONSTRAINT pk_concelier_export_states PRIMARY KEY (id)
+);
+
+CREATE TABLE IF NOT EXISTS concelier.psirt_flags (
+    advisory_id TEXT NOT NULL,
+    vendor TEXT NOT NULL,
+    source_name TEXT NOT NULL,
+    external_id TEXT,
+    recorded_at TIMESTAMPTZ NOT NULL,
+    CONSTRAINT pk_concelier_psirt_flags PRIMARY KEY (advisory_id, vendor)
+);
+
+CREATE INDEX IF NOT EXISTS idx_concelier_psirt_source ON concelier.psirt_flags(source_name, recorded_at DESC);
+
+CREATE TABLE IF NOT EXISTS concelier.jp_flags (
+    advisory_key TEXT NOT NULL,
+    source_name TEXT NOT NULL,
+    category TEXT NOT NULL,
+    vendor_status TEXT,
+    created_at TIMESTAMPTZ NOT NULL,
+    CONSTRAINT pk_concelier_jp_flags PRIMARY KEY (advisory_key)
+);
+
+CREATE TABLE IF NOT EXISTS concelier.change_history (
+    id UUID NOT NULL,
+    source_name TEXT NOT NULL,
+    advisory_key TEXT NOT NULL,
+    document_id UUID NOT NULL,
+    document_hash TEXT NOT NULL,
+    snapshot_hash TEXT NOT NULL,
+    previous_snapshot_hash TEXT,
+    snapshot JSONB NOT NULL,
+    previous_snapshot JSONB,
+    changes JSONB NOT NULL,
+    created_at TIMESTAMPTZ NOT NULL,
+    CONSTRAINT pk_concelier_change_history PRIMARY KEY (id)
+);
+
+CREATE INDEX IF NOT EXISTS idx_concelier_change_history_advisory
+    ON concelier.change_history(advisory_key, created_at DESC);
diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Repositories/PostgresChangeHistoryStore.cs b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Repositories/PostgresChangeHistoryStore.cs
new file mode 100644
index 000000000..3e1398e20
--- /dev/null
+++ b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Repositories/PostgresChangeHistoryStore.cs
@@ -0,0 +1,96 @@
+using System.Text.Json;
+using Dapper;
+using StellaOps.Concelier.Storage.Mongo.ChangeHistory;
+
+namespace StellaOps.Concelier.Storage.Postgres.Repositories;
+
+internal sealed class PostgresChangeHistoryStore : IChangeHistoryStore
+{
+    private readonly ConcelierDataSource _dataSource;
+    private readonly JsonSerializerOptions _jsonOptions = new(JsonSerializerDefaults.General)
+    {
+        PropertyNamingPolicy = JsonNamingPolicy.CamelCase
+    };
+
+    public PostgresChangeHistoryStore(ConcelierDataSource dataSource)
+    {
+        _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
+    }
+
+    public async Task AddAsync(ChangeHistoryRecord record, CancellationToken cancellationToken)
+    {
+        const string sql = """
+            INSERT INTO concelier.change_history
+                (id, source_name, advisory_key, document_id, document_hash, snapshot_hash, previous_snapshot_hash, snapshot, previous_snapshot, changes, created_at)
+            VALUES (@Id, @SourceName, @AdvisoryKey, @DocumentId, @DocumentHash, @SnapshotHash, @PreviousSnapshotHash, @Snapshot, @PreviousSnapshot, @Changes, @CreatedAt)
+            ON CONFLICT (id) DO NOTHING;
+            """;
+
+        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
+        await connection.ExecuteAsync(new CommandDefinition(sql, new
+        {
+            record.Id,
+            record.SourceName,
+            record.AdvisoryKey,
+            record.DocumentId,
+            record.DocumentHash,
+            record.SnapshotHash,
+            record.PreviousSnapshotHash,
+            Snapshot = record.Snapshot,
+            PreviousSnapshot = record.PreviousSnapshot,
+            Changes = JsonSerializer.Serialize(record.Changes, _jsonOptions),
+            record.CreatedAt
+        }, cancellationToken: cancellationToken));
+    }
+
+    public async Task<IReadOnlyList<ChangeHistoryRecord>> GetRecentAsync(string sourceName, string advisoryKey, int limit, CancellationToken cancellationToken)
+    {
+        const string sql = """
+            SELECT id, source_name, advisory_key, document_id, document_hash, snapshot_hash, previous_snapshot_hash, snapshot, previous_snapshot, changes, created_at
+            FROM concelier.change_history
+            WHERE source_name = @SourceName AND advisory_key = @AdvisoryKey
+            ORDER BY created_at DESC
+            LIMIT @Limit;
+            """;
+
+        await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken);
+        var rows = await connection.QueryAsync<ChangeHistoryRow>(new CommandDefinition(sql, new
+        {
+            SourceName = sourceName,
+            AdvisoryKey = advisoryKey,
+            Limit = limit
+        }, cancellationToken: cancellationToken));
+
+        return rows.Select(ToRecord).ToArray();
+    }
+
+    private ChangeHistoryRecord ToRecord(ChangeHistoryRow row)
+    {
+        var changes = JsonSerializer.Deserialize>(row.Changes, _jsonOptions) ?? Array.Empty();
+        return new ChangeHistoryRecord(
+            row.Id,
+            row.SourceName,
+            row.AdvisoryKey,
+            row.DocumentId,
+            row.DocumentHash,
+            row.SnapshotHash,
+            row.PreviousSnapshotHash ?? string.Empty,
+            row.Snapshot,
+            row.PreviousSnapshot ??
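// Read-path sketch for the change-history store above (names illustrative;
// assumes DI provides IChangeHistoryStore):
//
//   var recent = await store.GetRecentAsync("nvd", "CVE-2025-0001", 20, ct);
//
// The ORDER BY created_at DESC / LIMIT pair is covered by
// idx_concelier_change_history_advisory from migration 005, i.e. the SQL is
// equivalent to:
//
//   SELECT * FROM concelier.change_history
//   WHERE source_name = 'nvd' AND advisory_key = 'CVE-2025-0001'
//   ORDER BY created_at DESC LIMIT 20;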
string.Empty, + changes, + row.CreatedAt); + } + + private sealed record ChangeHistoryRow( + Guid Id, + string SourceName, + string AdvisoryKey, + Guid DocumentId, + string DocumentHash, + string SnapshotHash, + string? PreviousSnapshotHash, + string Snapshot, + string? PreviousSnapshot, + string Changes, + DateTimeOffset CreatedAt); +} diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Repositories/PostgresDtoStore.cs b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Repositories/PostgresDtoStore.cs new file mode 100644 index 000000000..f93427a75 --- /dev/null +++ b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Repositories/PostgresDtoStore.cs @@ -0,0 +1,104 @@ +using System.Text.Json; +using Dapper; +using StellaOps.Concelier.Storage.Mongo; + +namespace StellaOps.Concelier.Storage.Postgres.Repositories; + +internal sealed class PostgresDtoStore : IDtoStore +{ + private readonly ConcelierDataSource _dataSource; + private readonly JsonSerializerOptions _jsonOptions = new(JsonSerializerDefaults.General) + { + PropertyNamingPolicy = JsonNamingPolicy.CamelCase + }; + + public PostgresDtoStore(ConcelierDataSource dataSource) + { + _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource)); + } + + public async Task UpsertAsync(DtoRecord record, CancellationToken cancellationToken) + { + const string sql = """ + INSERT INTO concelier.dtos (id, document_id, source_name, format, payload_json, schema_version, created_at, validated_at) + VALUES (@Id, @DocumentId, @SourceName, @Format, @PayloadJson, @SchemaVersion, @CreatedAt, @ValidatedAt) + ON CONFLICT (document_id) DO UPDATE + SET payload_json = EXCLUDED.payload_json, + schema_version = EXCLUDED.schema_version, + source_name = EXCLUDED.source_name, + format = EXCLUDED.format, + validated_at = EXCLUDED.validated_at + RETURNING id, document_id, source_name, format, payload_json, schema_version, created_at, validated_at; + """; + + var payloadJson = record.Payload.ToJson(); + + await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken); + var row = await connection.QuerySingleAsync(new CommandDefinition(sql, new + { + record.Id, + record.DocumentId, + record.SourceName, + record.Format, + PayloadJson = payloadJson, + record.SchemaVersion, + record.CreatedAt, + record.ValidatedAt + }, cancellationToken: cancellationToken)); + + return ToRecord(row); + } + + public async Task FindByDocumentIdAsync(Guid documentId, CancellationToken cancellationToken) + { + const string sql = """ + SELECT id, document_id, source_name, format, payload_json, schema_version, created_at, validated_at + FROM concelier.dtos + WHERE document_id = @DocumentId + LIMIT 1; + """; + + await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken); + var row = await connection.QuerySingleOrDefaultAsync(new CommandDefinition(sql, new { DocumentId = documentId }, cancellationToken: cancellationToken)); + return row is null ? 
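// UpsertAsync above pairs INSERT ... ON CONFLICT (document_id) DO UPDATE with
// RETURNING, so a single round trip both writes the row and reads back what the
// database actually stored. Call-site sketch (record construction elided; the
// shape follows DtoRecord as used in ToRecord below):
//
//   var persisted = await dtoStore.UpsertAsync(record, ct);
//   Debug.Assert(persisted.DocumentId == record.DocumentId);
//   // persisted reflects the EXCLUDED.* values applied by the conflict branch.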
null : ToRecord(row); + } + + public async Task> GetBySourceAsync(string sourceName, int limit, CancellationToken cancellationToken) + { + const string sql = """ + SELECT id, document_id, source_name, format, payload_json, schema_version, created_at, validated_at + FROM concelier.dtos + WHERE source_name = @SourceName + ORDER BY created_at DESC + LIMIT @Limit; + """; + + await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken); + var rows = await connection.QueryAsync(new CommandDefinition(sql, new { SourceName = sourceName, Limit = limit }, cancellationToken: cancellationToken)); + return rows.Select(ToRecord).ToArray(); + } + + private DtoRecord ToRecord(DtoRow row) + { + var payload = MongoDB.Bson.BsonDocument.Parse(row.PayloadJson); + return new DtoRecord( + row.Id, + row.DocumentId, + row.SourceName, + row.Format, + payload, + row.CreatedAt, + row.SchemaVersion, + row.ValidatedAt); + } + + private sealed record DtoRow( + Guid Id, + Guid DocumentId, + string SourceName, + string Format, + string PayloadJson, + string SchemaVersion, + DateTimeOffset CreatedAt, + DateTimeOffset ValidatedAt); +} diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Repositories/PostgresExportStateStore.cs b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Repositories/PostgresExportStateStore.cs new file mode 100644 index 000000000..d8e46f02e --- /dev/null +++ b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Repositories/PostgresExportStateStore.cs @@ -0,0 +1,119 @@ +using System.Text.Json; +using Dapper; +using StellaOps.Concelier.Storage.Mongo.Exporting; + +namespace StellaOps.Concelier.Storage.Postgres.Repositories; + +internal sealed class PostgresExportStateStore : IExportStateStore +{ + private readonly ConcelierDataSource _dataSource; + private readonly JsonSerializerOptions _jsonOptions = new(JsonSerializerDefaults.General) + { + PropertyNamingPolicy = JsonNamingPolicy.CamelCase + }; + + public PostgresExportStateStore(ConcelierDataSource dataSource) + { + _dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource)); + } + + public async Task FindAsync(string id, CancellationToken cancellationToken) + { + const string sql = """ + SELECT id, + export_cursor, + last_full_digest, + last_delta_digest, + base_export_id, + base_digest, + target_repository, + files, + exporter_version, + updated_at + FROM concelier.export_states + WHERE id = @Id + LIMIT 1; + """; + + await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken); + var row = await connection.QuerySingleOrDefaultAsync(new CommandDefinition(sql, new { Id = id }, cancellationToken: cancellationToken)); + return row is null ? 
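// ExportStateRecord.Files travels as a JSONB column: serialized with the
// camelCase options on write, deserialized back in ToRecord below. Illustrative
// round trip (assumes the files payload is a plain string list):
//
//   var opts = new JsonSerializerOptions(JsonSerializerDefaults.General)
//       { PropertyNamingPolicy = JsonNamingPolicy.CamelCase };
//   var json = JsonSerializer.Serialize(new[] { "full.json", "delta.json" }, opts);
//   // json == "[\"full.json\",\"delta.json\"]" — string arrays are unaffected by
//   // the naming policy; it only matters once the elements are objects.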
null : ToRecord(row); + } + + public async Task UpsertAsync(ExportStateRecord record, CancellationToken cancellationToken) + { + const string sql = """ + INSERT INTO concelier.export_states + (id, export_cursor, last_full_digest, last_delta_digest, base_export_id, base_digest, target_repository, files, exporter_version, updated_at) + VALUES (@Id, @ExportCursor, @LastFullDigest, @LastDeltaDigest, @BaseExportId, @BaseDigest, @TargetRepository, @Files, @ExporterVersion, @UpdatedAt) + ON CONFLICT (id) DO UPDATE + SET export_cursor = EXCLUDED.export_cursor, + last_full_digest = EXCLUDED.last_full_digest, + last_delta_digest = EXCLUDED.last_delta_digest, + base_export_id = EXCLUDED.base_export_id, + base_digest = EXCLUDED.base_digest, + target_repository = EXCLUDED.target_repository, + files = EXCLUDED.files, + exporter_version = EXCLUDED.exporter_version, + updated_at = EXCLUDED.updated_at + RETURNING id, + export_cursor, + last_full_digest, + last_delta_digest, + base_export_id, + base_digest, + target_repository, + files, + exporter_version, + updated_at; + """; + + var filesJson = JsonSerializer.Serialize(record.Files, _jsonOptions); + + await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken); + var row = await connection.QuerySingleAsync(new CommandDefinition(sql, new + { + record.Id, + record.ExportCursor, + record.LastFullDigest, + record.LastDeltaDigest, + record.BaseExportId, + record.BaseDigest, + record.TargetRepository, + Files = filesJson, + record.ExporterVersion, + record.UpdatedAt + }, cancellationToken: cancellationToken)); + + return ToRecord(row); + } + + private ExportStateRecord ToRecord(ExportStateRow row) + { + var files = JsonSerializer.Deserialize>(row.Files, _jsonOptions) ?? Array.Empty(); + + return new ExportStateRecord( + row.Id, + row.ExportCursor, + row.LastFullDigest, + row.LastDeltaDigest, + row.BaseExportId, + row.BaseDigest, + row.TargetRepository, + files, + row.ExporterVersion, + row.UpdatedAt); + } + + private sealed record ExportStateRow( + string Id, + string ExportCursor, + string? LastFullDigest, + string? LastDeltaDigest, + string? BaseExportId, + string? BaseDigest, + string? TargetRepository, + string Files, + string ExporterVersion, + DateTimeOffset UpdatedAt); +} diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Repositories/PostgresJpFlagStore.cs b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Repositories/PostgresJpFlagStore.cs new file mode 100644 index 000000000..601046772 --- /dev/null +++ b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Repositories/PostgresJpFlagStore.cs @@ -0,0 +1,58 @@ +using Dapper; +using StellaOps.Concelier.Storage.Mongo.JpFlags; + +namespace StellaOps.Concelier.Storage.Postgres.Repositories; + +internal sealed class PostgresJpFlagStore : IJpFlagStore +{ + private readonly ConcelierDataSource _dataSource; + + public PostgresJpFlagStore(ConcelierDataSource dataSource) + { + _dataSource = dataSource ?? 
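// Every query in these stores routes its SQL through Dapper's CommandDefinition,
// which is the overload family that actually carries a CancellationToken (the
// plain string-based ExecuteAsync/QueryAsync overloads do not accept one).
// Minimal sketch (table name illustrative):
//
//   var cmd = new CommandDefinition(
//       "SELECT COUNT(*) FROM concelier.jp_flags",
//       cancellationToken: ct);
//   var total = await connection.ExecuteScalarAsync<long>(cmd);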
throw new ArgumentNullException(nameof(dataSource)); + } + + public async Task UpsertAsync(JpFlagRecord record, CancellationToken cancellationToken) + { + const string sql = """ + INSERT INTO concelier.jp_flags (advisory_key, source_name, category, vendor_status, created_at) + VALUES (@AdvisoryKey, @SourceName, @Category, @VendorStatus, @CreatedAt) + ON CONFLICT (advisory_key) DO UPDATE + SET source_name = EXCLUDED.source_name, + category = EXCLUDED.category, + vendor_status = EXCLUDED.vendor_status, + created_at = EXCLUDED.created_at; + """; + + await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken); + await connection.ExecuteAsync(new CommandDefinition(sql, new + { + record.AdvisoryKey, + record.SourceName, + record.Category, + record.VendorStatus, + record.CreatedAt + }, cancellationToken: cancellationToken)); + } + + public async Task FindAsync(string advisoryKey, CancellationToken cancellationToken) + { + const string sql = """ + SELECT advisory_key, source_name, category, vendor_status, created_at + FROM concelier.jp_flags + WHERE advisory_key = @AdvisoryKey + LIMIT 1; + """; + + await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken); + var row = await connection.QuerySingleOrDefaultAsync(new CommandDefinition(sql, new { AdvisoryKey = advisoryKey }, cancellationToken: cancellationToken)); + return row is null ? null : new JpFlagRecord(row.AdvisoryKey, row.SourceName, row.Category, row.VendorStatus, row.CreatedAt); + } + + private sealed record JpFlagRow( + string AdvisoryKey, + string SourceName, + string Category, + string? VendorStatus, + DateTimeOffset CreatedAt); +} diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Repositories/PostgresPsirtFlagStore.cs b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Repositories/PostgresPsirtFlagStore.cs new file mode 100644 index 000000000..ff40b0e98 --- /dev/null +++ b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/Repositories/PostgresPsirtFlagStore.cs @@ -0,0 +1,76 @@ +using Dapper; +using StellaOps.Concelier.Storage.Mongo.PsirtFlags; + +namespace StellaOps.Concelier.Storage.Postgres.Repositories; + +internal sealed class PostgresPsirtFlagStore : IPsirtFlagStore +{ + private readonly ConcelierDataSource _dataSource; + + public PostgresPsirtFlagStore(ConcelierDataSource dataSource) + { + _dataSource = dataSource ?? 
throw new ArgumentNullException(nameof(dataSource)); + } + + public async Task UpsertAsync(PsirtFlagRecord flag, CancellationToken cancellationToken) + { + const string sql = """ + INSERT INTO concelier.psirt_flags (advisory_id, vendor, source_name, external_id, recorded_at) + VALUES (@AdvisoryId, @Vendor, @SourceName, @ExternalId, @RecordedAt) + ON CONFLICT (advisory_id, vendor) DO UPDATE + SET source_name = EXCLUDED.source_name, + external_id = EXCLUDED.external_id, + recorded_at = EXCLUDED.recorded_at; + """; + + await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken); + await connection.ExecuteAsync(new CommandDefinition(sql, new + { + flag.AdvisoryId, + flag.Vendor, + flag.SourceName, + flag.ExternalId, + flag.RecordedAt + }, cancellationToken: cancellationToken)); + } + + public async Task> GetRecentAsync(string advisoryKey, int limit, CancellationToken cancellationToken) + { + const string sql = """ + SELECT advisory_id, vendor, source_name, external_id, recorded_at + FROM concelier.psirt_flags + WHERE advisory_id = @AdvisoryId + ORDER BY recorded_at DESC + LIMIT @Limit; + """; + + await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken); + var rows = await connection.QueryAsync(new CommandDefinition(sql, new { AdvisoryId = advisoryKey, Limit = limit }, cancellationToken: cancellationToken)); + return rows.Select(ToRecord).ToArray(); + } + + public async Task FindAsync(string advisoryKey, CancellationToken cancellationToken) + { + const string sql = """ + SELECT advisory_id, vendor, source_name, external_id, recorded_at + FROM concelier.psirt_flags + WHERE advisory_id = @AdvisoryId + ORDER BY recorded_at DESC + LIMIT 1; + """; + + await using var connection = await _dataSource.OpenSystemConnectionAsync(cancellationToken); + var row = await connection.QuerySingleOrDefaultAsync(new CommandDefinition(sql, new { AdvisoryId = advisoryKey }, cancellationToken: cancellationToken)); + return row is null ? null : ToRecord(row); + } + + private static PsirtFlagRecord ToRecord(PsirtFlagRow row) => + new(row.AdvisoryId, row.Vendor, row.SourceName, row.ExternalId, row.RecordedAt); + + private sealed record PsirtFlagRow( + string AdvisoryId, + string Vendor, + string SourceName, + string? 
ExternalId, + DateTimeOffset RecordedAt); +} diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/ServiceCollectionExtensions.cs b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/ServiceCollectionExtensions.cs index 9d2ff03cf..4120bebf4 100644 --- a/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/ServiceCollectionExtensions.cs +++ b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/ServiceCollectionExtensions.cs @@ -7,6 +7,10 @@ using StellaOps.Infrastructure.Postgres.Options; using StellaOps.Concelier.Core.Linksets; using MongoContracts = StellaOps.Concelier.Storage.Mongo; using MongoAdvisories = StellaOps.Concelier.Storage.Mongo.Advisories; +using MongoExporting = StellaOps.Concelier.Storage.Mongo.Exporting; +using MongoJpFlags = StellaOps.Concelier.Storage.Mongo.JpFlags; +using MongoPsirt = StellaOps.Concelier.Storage.Mongo.PsirtFlags; +using MongoHistory = StellaOps.Concelier.Storage.Mongo.ChangeHistory; namespace StellaOps.Concelier.Storage.Postgres; @@ -51,6 +55,11 @@ public static class ServiceCollectionExtensions services.AddScoped(); services.AddScoped(sp => sp.GetRequiredService()); services.AddScoped(); + services.AddScoped(); + services.AddScoped(); + services.AddScoped(); + services.AddScoped(); + services.AddScoped(); return services; } @@ -89,6 +98,11 @@ public static class ServiceCollectionExtensions services.AddScoped(); services.AddScoped(sp => sp.GetRequiredService()); services.AddScoped(); + services.AddScoped(); + services.AddScoped(); + services.AddScoped(); + services.AddScoped(); + services.AddScoped(); return services; } diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/StellaOps.Concelier.Storage.Postgres.csproj b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/StellaOps.Concelier.Storage.Postgres.csproj index 503b66781..89f22de31 100644 --- a/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/StellaOps.Concelier.Storage.Postgres.csproj +++ b/src/Concelier/__Libraries/StellaOps.Concelier.Storage.Postgres/StellaOps.Concelier.Storage.Postgres.csproj @@ -30,6 +30,7 @@ + diff --git a/src/Concelier/__Libraries/StellaOps.Concelier.Testing/MongoIntegrationFixture.cs b/src/Concelier/__Libraries/StellaOps.Concelier.Testing/MongoIntegrationFixture.cs index 76cdc1462..4548b5235 100644 --- a/src/Concelier/__Libraries/StellaOps.Concelier.Testing/MongoIntegrationFixture.cs +++ b/src/Concelier/__Libraries/StellaOps.Concelier.Testing/MongoIntegrationFixture.cs @@ -1,81 +1,60 @@ using System; -using System.IO; -using System.Linq; -using MongoDB.Bson; +using System.Threading; +using System.Threading.Tasks; using Mongo2Go; -using Xunit; using MongoDB.Driver; +using Xunit; namespace StellaOps.Concelier.Testing; - -public sealed class MongoIntegrationFixture : IAsyncLifetime -{ - public MongoDbRunner Runner { get; private set; } = null!; - public IMongoDatabase Database { get; private set; } = null!; - public IMongoClient Client { get; private set; } = null!; - - public Task InitializeAsync() + +/// +/// In-memory stand-in for the legacy Mongo2Go fixture. No external processes are launched; +/// DropDatabaseAsync simply resets the backing in-memory collections. 
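// Consumption sketch for the fixture above (standard xunit class-fixture wiring;
// the test body is illustrative):
//
//   public sealed class StoreTests : IClassFixture<MongoIntegrationFixture>
//   {
//       private readonly MongoIntegrationFixture _fx;
//       public StoreTests(MongoIntegrationFixture fx) => _fx = fx;
//
//       [Fact]
//       public async Task DropResetsState() =>
//           await _fx.Client.DropDatabaseAsync("any-name");  // Reset() swaps in a fresh database
//   }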
+/// +public sealed class MongoIntegrationFixture : IAsyncLifetime +{ + private readonly FixtureMongoClient _client; + private MongoDatabase _database; + + public MongoIntegrationFixture() { - EnsureMongo2GoEnvironment(); - Runner = MongoDbRunner.Start(singleNodeReplSet: true); - Client = new MongoClient(Runner.ConnectionString); - Database = Client.GetDatabase($"concelier-tests-{Guid.NewGuid():N}"); - return Task.CompletedTask; - } - - public Task DisposeAsync() - { - Runner.Dispose(); - return Task.CompletedTask; + _client = new FixtureMongoClient(this); + Runner = MongoDbRunner.Start(singleNodeReplSet: false); + _database = CreateDatabase(); } - private static void EnsureMongo2GoEnvironment() + public MongoDbRunner Runner { get; } + + public IMongoDatabase Database => _database; + + public IMongoClient Client => _client; + + public Task InitializeAsync() => Task.CompletedTask; + + public Task DisposeAsync() => Task.CompletedTask; + + internal void Reset() { - if (!OperatingSystem.IsLinux()) - { - return; - } - - var libraryPath = ResolveOpenSslLibraryPath(); - if (libraryPath is null) - { - return; - } - - var existing = Environment.GetEnvironmentVariable("LD_LIBRARY_PATH"); - if (string.IsNullOrEmpty(existing)) - { - Environment.SetEnvironmentVariable("LD_LIBRARY_PATH", libraryPath); - return; - } - - var segments = existing.Split(':', StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries); - if (!segments.Contains(libraryPath, StringComparer.Ordinal)) - { - Environment.SetEnvironmentVariable("LD_LIBRARY_PATH", string.Join(':', new[] { libraryPath }.Concat(segments))); - } + _database = CreateDatabase(); } - private static string? ResolveOpenSslLibraryPath() + private MongoDatabase CreateDatabase() => new($"concelier-tests-{Guid.NewGuid():N}"); + + private sealed class FixtureMongoClient : IMongoClient { - var current = AppContext.BaseDirectory; - while (!string.IsNullOrEmpty(current)) + private readonly MongoIntegrationFixture _fixture; + + public FixtureMongoClient(MongoIntegrationFixture fixture) { - var candidate = Path.Combine(current, "tools", "openssl", "linux-x64"); - if (Directory.Exists(candidate)) - { - return candidate; - } - - var parent = Directory.GetParent(current); - if (parent is null) - { - break; - } - - current = parent.FullName; + _fixture = fixture; } - return null; + public IMongoDatabase GetDatabase(string name, MongoDatabaseSettings? 
settings = null) => _fixture.Database; + + public Task DropDatabaseAsync(string name, CancellationToken cancellationToken = default) + { + _fixture.Reset(); + return Task.CompletedTask; + } } } diff --git a/src/Concelier/__Tests/StellaOps.Concelier.Core.Tests/Diagnostics/VulnExplorerTelemetryTests.cs b/src/Concelier/__Tests/StellaOps.Concelier.Core.Tests/Diagnostics/VulnExplorerTelemetryTests.cs new file mode 100644 index 000000000..f93a6a85f --- /dev/null +++ b/src/Concelier/__Tests/StellaOps.Concelier.Core.Tests/Diagnostics/VulnExplorerTelemetryTests.cs @@ -0,0 +1,211 @@ +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Diagnostics.Metrics; +using System.Linq; +using System.Text.Json.Nodes; +using System.Threading; +using System.Threading.Tasks; +using StellaOps.Concelier.Core.Diagnostics; +using StellaOps.Concelier.Core.Observations; +using StellaOps.Concelier.Models.Observations; +using StellaOps.Concelier.RawModels; +using Xunit; + +namespace StellaOps.Concelier.Core.Tests.Diagnostics; + +public sealed class VulnExplorerTelemetryTests +{ + private static readonly AdvisoryObservationSource DefaultSource = new("ghsa", "stream", "https://example.test/api"); + private static readonly AdvisoryObservationSignature DefaultSignature = new(false, null, null, null); + + [Fact] + public async Task QueryAsync_RecordsIdentifierCollisionMetric() + { + var (listener, measurements) = CreateListener( + VulnExplorerTelemetry.MeterName, + "vuln.identifier_collisions_total"); + + var observations = new[] + { + CreateObservation( + "tenant-a:ghsa:1", + "tenant-a", + aliases: new[] { "CVE-2025-0001" }), + CreateObservation( + "tenant-a:osv:2", + "tenant-a", + aliases: new[] { "GHSA-aaaa-bbbb-cccc" }) + }; + + var service = new AdvisoryObservationQueryService(new TestObservationLookup(observations)); + + await service.QueryAsync(new AdvisoryObservationQueryOptions("tenant-a"), CancellationToken.None); + + listener.Dispose(); + + var collision = measurements.Single(m => m.Instrument == "vuln.identifier_collisions_total"); + Assert.Equal(1, collision.Value); + Assert.Equal("tenant-a", collision.Tags.Single(t => t.Key == "tenant").Value); + } + + [Fact] + public void RecordChunkRequest_EmitsCounterAndLatency() + { + var (listener, measurements) = CreateListener( + VulnExplorerTelemetry.MeterName, + "vuln.chunk_requests_total", + "vuln.chunk_latency_ms"); + + VulnExplorerTelemetry.RecordChunkRequest("tenant-a", "ok", cacheHit: true, chunkCount: 3, latencyMs: 42.5); + listener.Dispose(); + + Assert.Equal(1, measurements.Single(m => m.Instrument == "vuln.chunk_requests_total").Value); + Assert.Equal(42.5, measurements.Single(m => m.Instrument == "vuln.chunk_latency_ms").Value); + } + + [Fact] + public void RecordWithdrawnStatement_EmitsCounter() + { + var (listener, measurements) = CreateListener( + VulnExplorerTelemetry.MeterName, + "vuln.withdrawn_statements_total"); + + VulnExplorerTelemetry.RecordWithdrawnStatement("tenant-a", "nvd"); + listener.Dispose(); + + var withdrawn = measurements.Single(m => m.Instrument == "vuln.withdrawn_statements_total"); + Assert.Equal(1, withdrawn.Value); + Assert.Equal("tenant-a", withdrawn.Tags.Single(t => t.Key == "tenant").Value); + Assert.Equal("nvd", withdrawn.Tags.Single(t => t.Key == "source").Value); + } + + private static AdvisoryObservation CreateObservation( + string observationId, + string tenant, + IEnumerable? 
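// CreateListener below follows the standard System.Diagnostics.Metrics capture
// pattern: publish-filter on the meter name, enable measurement events, record
// each callback with its tags. Reduced standalone sketch (meter and instrument
// names arbitrary):
//
//   using var meter = new Meter("demo");
//   var hits = meter.CreateCounter<long>("hits");
//   using var listener = new MeterListener
//   {
//       InstrumentPublished = (inst, l) =>
//       {
//           if (inst.Meter.Name == "demo") l.EnableMeasurementEvents(inst);
//       }
//   };
//   listener.SetMeasurementEventCallback<long>((inst, value, tags, state) => { /* record */ });
//   listener.Start();
//   hits.Add(1);   // the callback fires with value == 1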
aliases = null) + { + var upstream = new AdvisoryObservationUpstream( + upstreamId: $"upstream-{observationId}", + documentVersion: null, + fetchedAt: DateTimeOffset.UtcNow, + receivedAt: DateTimeOffset.UtcNow, + contentHash: "sha256:d41d8cd98f00b204e9800998ecf8427e", + signature: DefaultSignature); + + var content = new AdvisoryObservationContent( + "json", + "1.0", + new JsonObject()); + + var aliasArray = aliases?.ToImmutableArray() ?? ImmutableArray.Empty; + var linkset = new AdvisoryObservationLinkset( + aliasArray, + Enumerable.Empty(), + Enumerable.Empty(), + Enumerable.Empty()); + + var rawLinkset = new RawLinkset + { + Aliases = aliasArray + }; + + return new AdvisoryObservation( + observationId, + tenant, + DefaultSource, + upstream, + content, + linkset, + rawLinkset, + DateTimeOffset.UtcNow); + } + + private static (MeterListener Listener, List Measurements) CreateListener( + string meterName, + params string[] instruments) + { + var measurements = new List(); + var instrumentSet = instruments.ToHashSet(StringComparer.Ordinal); + var listener = new MeterListener + { + InstrumentPublished = (instrument, meterListener) => + { + if (string.Equals(instrument.Meter.Name, meterName, StringComparison.Ordinal) && + instrumentSet.Contains(instrument.Name)) + { + meterListener.EnableMeasurementEvents(instrument); + } + } + }; + + listener.SetMeasurementEventCallback((instrument, measurement, tags, state) => + { + if (instrumentSet.Contains(instrument.Name)) + { + measurements.Add(new MeasurementRecord(instrument.Name, measurement, CopyTags(tags))); + } + }); + + listener.SetMeasurementEventCallback((instrument, measurement, tags, state) => + { + if (instrumentSet.Contains(instrument.Name)) + { + measurements.Add(new MeasurementRecord(instrument.Name, measurement, CopyTags(tags))); + } + }); + + listener.Start(); + return (listener, measurements); + } + + private static IReadOnlyList> CopyTags(ReadOnlySpan> tags) + { + var list = new List>(tags.Length); + foreach (var tag in tags) + { + list.Add(tag); + } + + return list; + } + + private sealed record MeasurementRecord(string Instrument, double Value, IReadOnlyList> Tags); + + private sealed class TestObservationLookup : IAdvisoryObservationLookup + { + private readonly IReadOnlyList _observations; + + public TestObservationLookup(IReadOnlyList observations) + { + _observations = observations; + } + + public ValueTask> ListByTenantAsync(string tenant, CancellationToken cancellationToken) + { + var matches = _observations + .Where(o => string.Equals(o.Tenant, tenant, StringComparison.OrdinalIgnoreCase)) + .ToList(); + + return ValueTask.FromResult>(matches); + } + + public ValueTask> FindByFiltersAsync( + string tenant, + IReadOnlyCollection observationIds, + IReadOnlyCollection aliases, + IReadOnlyCollection purls, + IReadOnlyCollection cpes, + AdvisoryObservationCursor? 
cursor, + int limit, + CancellationToken cancellationToken) + { + var matches = _observations + .Where(o => string.Equals(o.Tenant, tenant, StringComparison.OrdinalIgnoreCase)) + .Take(limit) + .ToList(); + + return ValueTask.FromResult>(matches); + } + } +} diff --git a/src/Concelier/__Tests/StellaOps.Concelier.WebService.Tests/VulnExplorerTelemetryTests.cs b/src/Concelier/__Tests/StellaOps.Concelier.WebService.Tests/VulnExplorerTelemetryTests.cs new file mode 100644 index 000000000..94806269f --- /dev/null +++ b/src/Concelier/__Tests/StellaOps.Concelier.WebService.Tests/VulnExplorerTelemetryTests.cs @@ -0,0 +1,95 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.Metrics; +using System.Text.Json; +using StellaOps.Concelier.Core.Diagnostics; +using StellaOps.Concelier.Core.Linksets; +using Xunit; + +namespace StellaOps.Concelier.WebService.Tests; + +public sealed class VulnExplorerTelemetryTests : IDisposable +{ + private readonly MeterListener _listener; + private readonly List<(string Name, double Value, KeyValuePair[] Tags)> _histogramMeasurements = new(); + private readonly List<(string Name, long Value, KeyValuePair[] Tags)> _counterMeasurements = new(); + + public VulnExplorerTelemetryTests() + { + _listener = new MeterListener + { + InstrumentPublished = (instrument, listener) => + { + if (instrument.Meter.Name == VulnExplorerTelemetry.MeterName) + { + listener.EnableMeasurementEvents(instrument); + } + } + }; + + _listener.SetMeasurementEventCallback((instrument, measurement, tags, state) => + { + if (instrument.Meter.Name == VulnExplorerTelemetry.MeterName) + { + _counterMeasurements.Add((instrument.Name, measurement, tags.ToArray())); + } + }); + + _listener.SetMeasurementEventCallback((instrument, measurement, tags, state) => + { + if (instrument.Meter.Name == VulnExplorerTelemetry.MeterName) + { + _histogramMeasurements.Add((instrument.Name, measurement, tags.ToArray())); + } + }); + + _listener.Start(); + } + + [Fact] + public void CountAliasCollisions_FiltersAliasConflicts() + { + var conflicts = new List + { + new("aliases", "alias-inconsistency", Array.Empty()), + new("ranges", "range-divergence", Array.Empty()), + new("alias-field", "ALIAS-INCONSISTENCY", Array.Empty()) + }; + + var count = VulnExplorerTelemetry.CountAliasCollisions(conflicts); + + Assert.Equal(2, count); + } + + [Fact] + public void IsWithdrawn_DetectsWithdrawnFlagsAndTimestamps() + { + using var json = JsonDocument.Parse("{\"withdrawn\":true,\"withdrawn_at\":\"2024-10-10T00:00:00Z\"}"); + Assert.True(VulnExplorerTelemetry.IsWithdrawn(json.RootElement)); + } + + [Fact] + public void RecordChunkLatency_EmitsHistogramMeasurement() + { + VulnExplorerTelemetry.RecordChunkLatency("tenant-a", "vendor-a", TimeSpan.FromMilliseconds(42)); + + var measurement = Assert.Single(_histogramMeasurements); + Assert.Equal("vuln.chunk_latency_ms", measurement.Name); + Assert.Equal(42, measurement.Value); + } + + [Fact] + public void RecordWithdrawnStatement_EmitsCounter() + { + VulnExplorerTelemetry.RecordWithdrawnStatement("tenant-b", "vendor-b"); + + var measurement = Assert.Single(_counterMeasurements); + Assert.Equal("vuln.withdrawn_statements_total", measurement.Name); + Assert.Equal(1, measurement.Value); + } + + public void Dispose() + { + _listener.Dispose(); + } +} diff --git a/src/Concelier/__Tests/StellaOps.Concelier.WebService.Tests/WebServiceEndpointsTests.cs b/src/Concelier/__Tests/StellaOps.Concelier.WebService.Tests/WebServiceEndpointsTests.cs index bc99be76c..928672b6c 100644 
--- a/src/Concelier/__Tests/StellaOps.Concelier.WebService.Tests/WebServiceEndpointsTests.cs +++ b/src/Concelier/__Tests/StellaOps.Concelier.WebService.Tests/WebServiceEndpointsTests.cs @@ -75,16 +75,7 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime public Task InitializeAsync() { - PrepareMongoEnvironment(); - if (TryStartExternalMongo(out var externalConnectionString) && !string.IsNullOrWhiteSpace(externalConnectionString)) - { - _factory = new ConcelierApplicationFactory(externalConnectionString); - } - else - { - _runner = MongoDbRunner.Start(singleNodeReplSet: true); - _factory = new ConcelierApplicationFactory(_runner.ConnectionString); - } + _factory = new ConcelierApplicationFactory(string.Empty); WarmupFactory(_factory); return Task.CompletedTask; } @@ -92,30 +83,6 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime public Task DisposeAsync() { _factory.Dispose(); - if (_externalMongo is not null) - { - try - { - if (!_externalMongo.HasExited) - { - _externalMongo.Kill(true); - _externalMongo.WaitForExit(2000); - } - } - catch - { - // ignore cleanup errors in tests - } - - if (!string.IsNullOrEmpty(_externalMongoDataPath) && Directory.Exists(_externalMongoDataPath)) - { - try { Directory.Delete(_externalMongoDataPath, recursive: true); } catch { /* ignore */ } - } - } - else - { - _runner.Dispose(); - } return Task.CompletedTask; } @@ -141,12 +108,12 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime var healthPayload = await healthResponse.Content.ReadFromJsonAsync(); Assert.NotNull(healthPayload); Assert.Equal("healthy", healthPayload!.Status); - Assert.Equal("mongo", healthPayload.Storage.Driver); + Assert.Equal("postgres", healthPayload.Storage.Backend); var readyPayload = await readyResponse.Content.ReadFromJsonAsync(); Assert.NotNull(readyPayload); - Assert.Equal("ready", readyPayload!.Status); - Assert.Equal("ready", readyPayload.Mongo.Status); + Assert.True(readyPayload!.Status is "ready" or "degraded"); + Assert.Equal("postgres", readyPayload.Storage.Backend); } [Fact] @@ -2019,9 +1986,10 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime private sealed class ConcelierApplicationFactory : WebApplicationFactory { private readonly string _connectionString; - private readonly string? _previousDsn; - private readonly string? _previousDriver; - private readonly string? _previousTimeout; + private readonly string? _previousPgDsn; + private readonly string? _previousPgEnabled; + private readonly string? _previousPgTimeout; + private readonly string? _previousPgSchema; private readonly string? _previousTelemetryEnabled; private readonly string? _previousTelemetryLogging; private readonly string? _previousTelemetryTracing; @@ -2035,11 +2003,15 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime Action? authorityConfigure = null, IDictionary? environmentOverrides = null) { - _connectionString = connectionString; + var defaultPostgresDsn = "Host=localhost;Port=5432;Database=concelier_test;Username=postgres;Password=postgres"; + _connectionString = string.IsNullOrWhiteSpace(connectionString) || connectionString.StartsWith("mongodb://", StringComparison.OrdinalIgnoreCase) + ? 
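// The CONCELIER_POSTGRESSTORAGE__* variables set below rely on the .NET
// convention that "__" in an environment variable maps to the ":" configuration
// separator. Sketch of how the host is presumed to read them (the prefix
// handling is an assumption, not shown in this diff):
//
//   var config = new ConfigurationBuilder()
//       .AddEnvironmentVariables(prefix: "CONCELIER_")
//       .Build();
//   var dsn = config["PostgresStorage:ConnectionString"];
//   // sourced from CONCELIER_POSTGRESSTORAGE__CONNECTIONSTRING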
defaultPostgresDsn + : connectionString; _authorityConfigure = authorityConfigure; - _previousDsn = Environment.GetEnvironmentVariable("CONCELIER_STORAGE__DSN"); - _previousDriver = Environment.GetEnvironmentVariable("CONCELIER_STORAGE__DRIVER"); - _previousTimeout = Environment.GetEnvironmentVariable("CONCELIER_STORAGE__COMMANDTIMEOUTSECONDS"); + _previousPgDsn = Environment.GetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__CONNECTIONSTRING"); + _previousPgEnabled = Environment.GetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__ENABLED"); + _previousPgTimeout = Environment.GetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__COMMANDTIMEOUTSECONDS"); + _previousPgSchema = Environment.GetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__SCHEMANAME"); _previousTelemetryEnabled = Environment.GetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLED"); _previousTelemetryLogging = Environment.GetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLELOGGING"); _previousTelemetryTracing = Environment.GetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLETRACING"); @@ -2055,13 +2027,15 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime Environment.SetEnvironmentVariable("LD_LIBRARY_PATH", merged); } - Environment.SetEnvironmentVariable("CONCELIER_STORAGE__DSN", connectionString); - Environment.SetEnvironmentVariable("CONCELIER_STORAGE__DRIVER", "mongo"); - Environment.SetEnvironmentVariable("CONCELIER_STORAGE__COMMANDTIMEOUTSECONDS", "30"); + Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__CONNECTIONSTRING", _connectionString); + Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__ENABLED", "true"); + Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__COMMANDTIMEOUTSECONDS", "30"); + Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__SCHEMANAME", "vuln"); Environment.SetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLED", "false"); Environment.SetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLELOGGING", "false"); Environment.SetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLETRACING", "false"); Environment.SetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLEMETRICS", "false"); + Environment.SetEnvironmentVariable("CONCELIER_SKIP_OPTIONS_VALIDATION", "1"); const string EvidenceRootKey = "CONCELIER_EVIDENCE__ROOT"; var repoRoot = Path.GetFullPath(Path.Combine(AppContext.BaseDirectory, "..", "..", "..", "..", "..", "..", "..")); _additionalPreviousEnvironment[EvidenceRootKey] = Environment.GetEnvironmentVariable(EvidenceRootKey); @@ -2176,9 +2150,11 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime protected override void Dispose(bool disposing) { base.Dispose(disposing); - Environment.SetEnvironmentVariable("CONCELIER_STORAGE__DSN", _previousDsn); - Environment.SetEnvironmentVariable("CONCELIER_STORAGE__DRIVER", _previousDriver); - Environment.SetEnvironmentVariable("CONCELIER_STORAGE__COMMANDTIMEOUTSECONDS", _previousTimeout); + Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__CONNECTIONSTRING", _previousPgDsn); + Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__ENABLED", _previousPgEnabled); + Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__COMMANDTIMEOUTSECONDS", _previousPgTimeout); + Environment.SetEnvironmentVariable("CONCELIER_POSTGRESSTORAGE__SCHEMANAME", _previousPgSchema); + Environment.SetEnvironmentVariable("CONCELIER_SKIP_OPTIONS_VALIDATION", null); Environment.SetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLED", _previousTelemetryEnabled); 
Environment.SetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLELOGGING", _previousTelemetryLogging); Environment.SetEnvironmentVariable("CONCELIER_TELEMETRY__ENABLETRACING", _previousTelemetryTracing); @@ -2470,13 +2446,11 @@ public sealed class WebServiceEndpointsTests : IAsyncLifetime private sealed record HealthPayload(string Status, DateTimeOffset StartedAt, double UptimeSeconds, StoragePayload Storage, TelemetryPayload Telemetry); - private sealed record StoragePayload(string Driver, bool Completed, DateTimeOffset? CompletedAt, double? DurationMs); + private sealed record StoragePayload(string Backend, bool Ready, DateTimeOffset? CheckedAt, double? LatencyMs, string? Error); private sealed record TelemetryPayload(bool Enabled, bool Tracing, bool Metrics, bool Logging); - private sealed record ReadyPayload(string Status, DateTimeOffset StartedAt, double UptimeSeconds, ReadyMongoPayload Mongo); - - private sealed record ReadyMongoPayload(string Status, double? LatencyMs, DateTimeOffset? CheckedAt, string? Error); + private sealed record ReadyPayload(string Status, DateTimeOffset StartedAt, double UptimeSeconds, StoragePayload Storage); private sealed record JobDefinitionPayload(string Kind, bool Enabled, string? CronExpression, TimeSpan Timeout, TimeSpan LeaseDuration, JobRunPayload? LastRun); diff --git a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/AttestationEndpoints.cs b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/AttestationEndpoints.cs index 8f829c34c..43ee98611 100644 --- a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/AttestationEndpoints.cs +++ b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/AttestationEndpoints.cs @@ -9,7 +9,7 @@ using Microsoft.Extensions.Options; using MongoDB.Bson; using MongoDB.Driver; using StellaOps.Excititor.Core; -using StellaOps.Excititor.Storage.Mongo; +using StellaOps.Excititor.Core.Storage; using StellaOps.Excititor.WebService.Contracts; using StellaOps.Excititor.WebService.Services; @@ -27,7 +27,7 @@ public static class AttestationEndpoints // GET /attestations/vex/list - List attestations app.MapGet("/attestations/vex/list", async ( HttpContext context, - IOptions storageOptions, + IOptions storageOptions, [FromServices] IMongoDatabase database, TimeProvider timeProvider, [FromQuery] int? limit, @@ -102,7 +102,7 @@ public static class AttestationEndpoints app.MapGet("/attestations/vex/{attestationId}", async ( HttpContext context, string attestationId, - IOptions storageOptions, + IOptions storageOptions, [FromServices] IVexAttestationLinkStore attestationStore, TimeProvider timeProvider, CancellationToken cancellationToken) => @@ -209,7 +209,7 @@ public static class AttestationEndpoints // GET /attestations/vex/lookup - Lookup attestations by linkset or observation app.MapGet("/attestations/vex/lookup", async ( HttpContext context, - IOptions storageOptions, + IOptions storageOptions, [FromServices] IMongoDatabase database, TimeProvider timeProvider, [FromQuery] string? linksetId, @@ -283,7 +283,7 @@ public static class AttestationEndpoints BuilderId: doc.GetValue("SupplierId", BsonNull.Value).AsString); } - private static bool TryResolveTenant(HttpContext context, VexMongoStorageOptions options, out string tenant, out IResult? problem) + private static bool TryResolveTenant(HttpContext context, VexStorageOptions options, out string tenant, out IResult? 
problem) { tenant = options.DefaultTenant; problem = null; diff --git a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/EvidenceEndpoints.cs b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/EvidenceEndpoints.cs index 052ff7f96..8511435ee 100644 --- a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/EvidenceEndpoints.cs +++ b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/EvidenceEndpoints.cs @@ -16,7 +16,7 @@ using MongoDB.Driver; using StellaOps.Excititor.Core; using StellaOps.Excititor.Core.Canonicalization; using StellaOps.Excititor.Core.Observations; -using StellaOps.Excititor.Storage.Mongo; +using StellaOps.Excititor.Core.Storage; using StellaOps.Excititor.WebService.Contracts; using StellaOps.Excititor.WebService.Services; using StellaOps.Excititor.WebService.Telemetry; @@ -36,7 +36,7 @@ public static class EvidenceEndpoints // GET /evidence/vex/list - List evidence exports app.MapGet("/evidence/vex/list", async ( HttpContext context, - IOptions storageOptions, + IOptions storageOptions, [FromServices] IMongoDatabase database, TimeProvider timeProvider, [FromQuery] int? limit, @@ -114,7 +114,7 @@ public static class EvidenceEndpoints app.MapGet("/evidence/vex/bundle/{bundleId}", async ( HttpContext context, string bundleId, - IOptions storageOptions, + IOptions storageOptions, [FromServices] IMongoDatabase database, TimeProvider timeProvider, CancellationToken cancellationToken) => @@ -191,7 +191,7 @@ public static class EvidenceEndpoints // GET /evidence/vex/lookup - Lookup evidence for vuln/product pair app.MapGet("/evidence/vex/lookup", async ( HttpContext context, - IOptions storageOptions, + IOptions storageOptions, [FromServices] IVexObservationProjectionService projectionService, TimeProvider timeProvider, [FromQuery] string vulnerabilityId, @@ -256,7 +256,7 @@ public static class EvidenceEndpoints app.MapGet("/vuln/evidence/vex/{advisory_key}", async ( HttpContext context, string advisory_key, - IOptions storageOptions, + IOptions storageOptions, [FromServices] IMongoDatabase database, TimeProvider timeProvider, [FromQuery] int? limit, @@ -446,7 +446,7 @@ public static class EvidenceEndpoints HttpContext context, string bundleId, [FromQuery] string? generation, - IOptions storageOptions, + IOptions storageOptions, IOptions airgapOptions, [FromServices] IAirgapImportStore airgapImportStore, [FromServices] IVexHashingService hashingService, @@ -528,7 +528,7 @@ public static class EvidenceEndpoints HttpContext context, string bundleId, [FromQuery] string? generation, - IOptions storageOptions, + IOptions storageOptions, IOptions airgapOptions, [FromServices] IAirgapImportStore airgapImportStore, CancellationToken cancellationToken) => @@ -575,7 +575,7 @@ public static class EvidenceEndpoints HttpContext context, string bundleId, [FromQuery] string? generation, - IOptions storageOptions, + IOptions storageOptions, IOptions airgapOptions, [FromServices] IAirgapImportStore airgapImportStore, CancellationToken cancellationToken) => @@ -679,7 +679,7 @@ public static class EvidenceEndpoints return (digest, size); } - private static bool TryResolveTenant(HttpContext context, VexMongoStorageOptions options, out string tenant, out IResult? problem) + private static bool TryResolveTenant(HttpContext context, VexStorageOptions options, out string tenant, out IResult? 
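// These endpoints now bind IOptions<VexStorageOptions> from
// StellaOps.Excititor.Core.Storage instead of the Mongo-specific options type.
// Hypothetical registration sketch (the section name is assumed; the actual
// wiring is outside this diff):
//
//   services.Configure<VexStorageOptions>(configuration.GetSection("Excititor:Storage"));
//   // Handlers then read storageOptions.Value.DefaultTenant, exactly as
//   // TryResolveTenant does below.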
problem) { tenant = options.DefaultTenant; problem = null; diff --git a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/IngestEndpoints.cs b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/IngestEndpoints.cs index a86c43d23..c31662033 100644 --- a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/IngestEndpoints.cs +++ b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/IngestEndpoints.cs @@ -20,49 +20,49 @@ internal static class IngestEndpoints group.MapPost("/reconcile", HandleReconcileAsync); } - internal static async Task HandleInitAsync( - HttpContext httpContext, - ExcititorInitRequest request, - IVexIngestOrchestrator orchestrator, - TimeProvider timeProvider, - CancellationToken cancellationToken) + internal static async Task HandleInitAsync( + HttpContext httpContext, + ExcititorInitRequest request, + IVexIngestOrchestrator orchestrator, + TimeProvider timeProvider, + CancellationToken cancellationToken) { var scopeResult = ScopeAuthorization.RequireScope(httpContext, AdminScope); if (scopeResult is not null) - { - return scopeResult; - } - - var providerIds = NormalizeProviders(request.Providers); - _ = timeProvider; - var options = new IngestInitOptions(providerIds, request.Resume ?? false); + { + return scopeResult; + } + + var providerIds = NormalizeProviders(request.Providers); + _ = timeProvider; + var options = new IngestInitOptions(providerIds, request.Resume ?? false); var summary = await orchestrator.InitializeAsync(options, cancellationToken).ConfigureAwait(false); var message = $"Initialized {summary.ProviderCount} provider(s); {summary.SuccessCount} succeeded, {summary.FailureCount} failed."; - return TypedResults.Ok(new - { - message, - runId = summary.RunId, - startedAt = summary.StartedAt, - completedAt = summary.CompletedAt, - providers = summary.Providers.Select(static provider => new - { - providerId = provider.ProviderId, - displayName = provider.DisplayName, - status = provider.Status, - durationMs = provider.Duration.TotalMilliseconds, - error = provider.Error - }) - }); - } + return TypedResults.Ok(new + { + message, + runId = summary.RunId, + startedAt = summary.StartedAt, + completedAt = summary.CompletedAt, + providers = summary.Providers.Select(static provider => new + { + providerId = provider.ProviderId, + displayName = provider.DisplayName, + status = provider.Status, + durationMs = provider.Duration.TotalMilliseconds, + error = provider.Error + }) + }); + } - internal static async Task HandleRunAsync( - HttpContext httpContext, - ExcititorIngestRunRequest request, - IVexIngestOrchestrator orchestrator, - TimeProvider timeProvider, - CancellationToken cancellationToken) + internal static async Task HandleRunAsync( + HttpContext httpContext, + ExcititorIngestRunRequest request, + IVexIngestOrchestrator orchestrator, + TimeProvider timeProvider, + CancellationToken cancellationToken) { var scopeResult = ScopeAuthorization.RequireScope(httpContext, AdminScope); if (scopeResult is not null) @@ -72,98 +72,55 @@ internal static class IngestEndpoints if (!TryParseDateTimeOffset(request.Since, out var since, out var sinceError)) { - return TypedResults.BadRequest(new { message = sinceError }); - } - - if (!TryParseTimeSpan(request.Window, out var window, out var windowError)) - { - return TypedResults.BadRequest(new { message = windowError }); - } - - _ = timeProvider; - var providerIds = NormalizeProviders(request.Providers); - var options = new IngestRunOptions( - providerIds, - since, - window, - request.Force ?? 
false); - - var summary = await orchestrator.RunAsync(options, cancellationToken).ConfigureAwait(false); - var message = $"Ingest run completed for {summary.ProviderCount} provider(s); {summary.SuccessCount} succeeded, {summary.FailureCount} failed."; - - return TypedResults.Ok(new - { - message, - runId = summary.RunId, - startedAt = summary.StartedAt, - completedAt = summary.CompletedAt, - durationMs = summary.Duration.TotalMilliseconds, - providers = summary.Providers.Select(static provider => new - { - providerId = provider.ProviderId, - status = provider.Status, - documents = provider.Documents, - claims = provider.Claims, - startedAt = provider.StartedAt, - completedAt = provider.CompletedAt, - durationMs = provider.Duration.TotalMilliseconds, - lastDigest = provider.LastDigest, - lastUpdated = provider.LastUpdated, - checkpoint = provider.Checkpoint, - error = provider.Error - }) - }); - } + return TypedResults.BadRequest(new { message = sinceError }); + } - internal static async Task HandleResumeAsync( - HttpContext httpContext, - ExcititorIngestResumeRequest request, - IVexIngestOrchestrator orchestrator, - TimeProvider timeProvider, - CancellationToken cancellationToken) - { - var scopeResult = ScopeAuthorization.RequireScope(httpContext, AdminScope); - if (scopeResult is not null) - { - return scopeResult; - } - - _ = timeProvider; - var providerIds = NormalizeProviders(request.Providers); - var options = new IngestResumeOptions(providerIds, request.Checkpoint); - - var summary = await orchestrator.ResumeAsync(options, cancellationToken).ConfigureAwait(false); - var message = $"Resume run completed for {summary.ProviderCount} provider(s); {summary.SuccessCount} succeeded, {summary.FailureCount} failed."; - - return TypedResults.Ok(new - { - message, - runId = summary.RunId, - startedAt = summary.StartedAt, - completedAt = summary.CompletedAt, - durationMs = summary.Duration.TotalMilliseconds, - providers = summary.Providers.Select(static provider => new - { - providerId = provider.ProviderId, - status = provider.Status, - documents = provider.Documents, - claims = provider.Claims, - startedAt = provider.StartedAt, - completedAt = provider.CompletedAt, - durationMs = provider.Duration.TotalMilliseconds, - since = provider.Since, - checkpoint = provider.Checkpoint, - error = provider.Error - }) - }); - } + if (!TryParseTimeSpan(request.Window, out var window, out var windowError)) + { + return TypedResults.BadRequest(new { message = windowError }); + } - internal static async Task HandleReconcileAsync( - HttpContext httpContext, - ExcititorReconcileRequest request, - IVexIngestOrchestrator orchestrator, - TimeProvider timeProvider, - CancellationToken cancellationToken) + _ = timeProvider; + var providerIds = NormalizeProviders(request.Providers); + var options = new IngestRunOptions( + providerIds, + since, + window, + request.Force ?? 
false); + + var summary = await orchestrator.RunAsync(options, cancellationToken).ConfigureAwait(false); + var message = $"Ingest run completed for {summary.ProviderCount} provider(s); {summary.SuccessCount} succeeded, {summary.FailureCount} failed."; + + return TypedResults.Ok(new + { + message, + runId = summary.RunId, + startedAt = summary.StartedAt, + completedAt = summary.CompletedAt, + durationMs = summary.Duration.TotalMilliseconds, + providers = summary.Providers.Select(static provider => new + { + providerId = provider.ProviderId, + status = provider.Status, + documents = provider.Documents, + claims = provider.Claims, + startedAt = provider.StartedAt, + completedAt = provider.CompletedAt, + durationMs = provider.Duration.TotalMilliseconds, + lastDigest = provider.LastDigest, + lastUpdated = provider.LastUpdated, + checkpoint = provider.Checkpoint, + error = provider.Error + }) + }); + } + + internal static async Task HandleResumeAsync( + HttpContext httpContext, + ExcititorIngestResumeRequest request, + IVexIngestOrchestrator orchestrator, + TimeProvider timeProvider, + CancellationToken cancellationToken) { var scopeResult = ScopeAuthorization.RequireScope(httpContext, AdminScope); if (scopeResult is not null) @@ -171,40 +128,83 @@ internal static class IngestEndpoints return scopeResult; } - if (!TryParseTimeSpan(request.MaxAge, out var maxAge, out var error)) - { - return TypedResults.BadRequest(new { message = error }); - } - - _ = timeProvider; - var providerIds = NormalizeProviders(request.Providers); - var options = new ReconcileOptions(providerIds, maxAge); - - var summary = await orchestrator.ReconcileAsync(options, cancellationToken).ConfigureAwait(false); - var message = $"Reconcile completed for {summary.ProviderCount} provider(s); {summary.ReconciledCount} reconciled, {summary.SkippedCount} skipped, {summary.FailureCount} failed."; - - return TypedResults.Ok(new - { - message, - runId = summary.RunId, - startedAt = summary.StartedAt, - completedAt = summary.CompletedAt, - durationMs = summary.Duration.TotalMilliseconds, - providers = summary.Providers.Select(static provider => new - { - providerId = provider.ProviderId, - status = provider.Status, - action = provider.Action, - lastUpdated = provider.LastUpdated, - threshold = provider.Threshold, - documents = provider.Documents, - claims = provider.Claims, - error = provider.Error - }) - }); - } + _ = timeProvider; + var providerIds = NormalizeProviders(request.Providers); + var options = new IngestResumeOptions(providerIds, request.Checkpoint); - internal static ImmutableArray NormalizeProviders(IReadOnlyCollection? 
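// Expected behavior of NormalizeProviders (redeclared just below): trim each
// entry, drop null/blank values, and de-duplicate. Illustrative call, assuming
// ordinal de-duplication (the set construction sits outside this hunk):
//
//   var ids = IngestEndpoints.NormalizeProviders(new[] { " ghsa ", "osv", "", "osv" });
//   // ids == ["ghsa", "osv"]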
providers) + var summary = await orchestrator.ResumeAsync(options, cancellationToken).ConfigureAwait(false); + var message = $"Resume run completed for {summary.ProviderCount} provider(s); {summary.SuccessCount} succeeded, {summary.FailureCount} failed."; + + return TypedResults.Ok(new + { + message, + runId = summary.RunId, + startedAt = summary.StartedAt, + completedAt = summary.CompletedAt, + durationMs = summary.Duration.TotalMilliseconds, + providers = summary.Providers.Select(static provider => new + { + providerId = provider.ProviderId, + status = provider.Status, + documents = provider.Documents, + claims = provider.Claims, + startedAt = provider.StartedAt, + completedAt = provider.CompletedAt, + durationMs = provider.Duration.TotalMilliseconds, + since = provider.Since, + checkpoint = provider.Checkpoint, + error = provider.Error + }) + }); + } + + internal static async Task HandleReconcileAsync( + HttpContext httpContext, + ExcititorReconcileRequest request, + IVexIngestOrchestrator orchestrator, + TimeProvider timeProvider, + CancellationToken cancellationToken) + { + var scopeResult = ScopeAuthorization.RequireScope(httpContext, AdminScope); + if (scopeResult is not null) + { + return scopeResult; + } + + if (!TryParseTimeSpan(request.MaxAge, out var maxAge, out var error)) + { + return TypedResults.BadRequest(new { message = error }); + } + + _ = timeProvider; + var providerIds = NormalizeProviders(request.Providers); + var options = new ReconcileOptions(providerIds, maxAge); + + var summary = await orchestrator.ReconcileAsync(options, cancellationToken).ConfigureAwait(false); + var message = $"Reconcile completed for {summary.ProviderCount} provider(s); {summary.ReconciledCount} reconciled, {summary.SkippedCount} skipped, {summary.FailureCount} failed."; + + return TypedResults.Ok(new + { + message, + runId = summary.RunId, + startedAt = summary.StartedAt, + completedAt = summary.CompletedAt, + durationMs = summary.Duration.TotalMilliseconds, + providers = summary.Providers.Select(static provider => new + { + providerId = provider.ProviderId, + status = provider.Status, + action = provider.Action, + lastUpdated = provider.LastUpdated, + threshold = provider.Threshold, + documents = provider.Documents, + claims = provider.Claims, + error = provider.Error + }) + }); + } + + internal static ImmutableArray NormalizeProviders(IReadOnlyCollection? providers) { if (providers is null || providers.Count == 0) { @@ -225,7 +225,7 @@ internal static class IngestEndpoints return set.ToImmutableArray(); } - internal static bool TryParseDateTimeOffset(string? value, out DateTimeOffset? result, out string? error) + internal static bool TryParseDateTimeOffset(string? value, out DateTimeOffset? result, out string? error) { result = null; error = null; @@ -249,7 +249,7 @@ internal static class IngestEndpoints return false; } - internal static bool TryParseTimeSpan(string? value, out TimeSpan? result, out string? error) + internal static bool TryParseTimeSpan(string? value, out TimeSpan? result, out string? error) { result = null; error = null; @@ -269,19 +269,19 @@ internal static class IngestEndpoints return false; } - internal sealed record ExcititorInitRequest(IReadOnlyList? Providers, bool? Resume); - - internal sealed record ExcititorIngestRunRequest( - IReadOnlyList? Providers, - string? Since, - string? Window, - bool? Force); - - internal sealed record ExcititorIngestResumeRequest( - IReadOnlyList? Providers, - string? 
Checkpoint); - - internal sealed record ExcititorReconcileRequest( - IReadOnlyList? Providers, - string? MaxAge); -} + internal sealed record ExcititorInitRequest(IReadOnlyList? Providers, bool? Resume); + + internal sealed record ExcititorIngestRunRequest( + IReadOnlyList? Providers, + string? Since, + string? Window, + bool? Force); + + internal sealed record ExcititorIngestResumeRequest( + IReadOnlyList? Providers, + string? Checkpoint); + + internal sealed record ExcititorReconcileRequest( + IReadOnlyList? Providers, + string? MaxAge); +} diff --git a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/LinksetEndpoints.cs b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/LinksetEndpoints.cs index 15ce8fe14..f53afabf1 100644 --- a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/LinksetEndpoints.cs +++ b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/LinksetEndpoints.cs @@ -10,7 +10,7 @@ using Microsoft.AspNetCore.Mvc; using Microsoft.Extensions.Options; using StellaOps.Excititor.Core.Canonicalization; using StellaOps.Excititor.Core.Observations; -using StellaOps.Excititor.Storage.Mongo; +using StellaOps.Excititor.Core.Storage; using StellaOps.Excititor.WebService.Contracts; using StellaOps.Excititor.WebService.Services; using StellaOps.Excititor.WebService.Telemetry; @@ -32,7 +32,7 @@ public static class LinksetEndpoints // GET /vex/linksets - List linksets with filters group.MapGet("", async ( HttpContext context, - IOptions storageOptions, + IOptions storageOptions, [FromServices] IVexLinksetStore linksetStore, [FromQuery] int? limit, [FromQuery] string? cursor, @@ -124,7 +124,7 @@ public static class LinksetEndpoints group.MapGet("/{linksetId}", async ( HttpContext context, string linksetId, - IOptions storageOptions, + IOptions storageOptions, [FromServices] IVexLinksetStore linksetStore, CancellationToken cancellationToken) => { @@ -166,7 +166,7 @@ public static class LinksetEndpoints // GET /vex/linksets/lookup - Lookup linkset by vulnerability and product group.MapGet("/lookup", async ( HttpContext context, - IOptions storageOptions, + IOptions storageOptions, [FromServices] IVexLinksetStore linksetStore, [FromQuery] string? vulnerabilityId, [FromQuery] string? productKey, @@ -211,7 +211,7 @@ public static class LinksetEndpoints // GET /vex/linksets/count - Get linkset counts for tenant group.MapGet("/count", async ( HttpContext context, - IOptions storageOptions, + IOptions storageOptions, [FromServices] IVexLinksetStore linksetStore, CancellationToken cancellationToken) => { @@ -240,7 +240,7 @@ public static class LinksetEndpoints // GET /vex/linksets/conflicts - List linksets with conflicts (shorthand) group.MapGet("/conflicts", async ( HttpContext context, - IOptions storageOptions, + IOptions storageOptions, [FromServices] IVexLinksetStore linksetStore, [FromQuery] int? limit, CancellationToken cancellationToken) => @@ -317,7 +317,7 @@ public static class LinksetEndpoints private static bool TryResolveTenant( HttpContext context, - VexMongoStorageOptions options, + VexStorageOptions options, out string tenant, out IResult? 
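// A minimal sketch (not part of the patch) of the parsing contract the run endpoint
// relies on: absent values are valid and fall through to defaults, while unparsable
// values produce an error string that the handler maps to a 400. Invariant-culture
// parsing mirrors TryParseDateTimeOffset/TryParseTimeSpan above; the wrapper name
// here is hypothetical.
using System;
using System.Globalization;

internal static class IngestRequestParsingSketch
{
    public static bool TryParseSince(string? value, out DateTimeOffset? since, out string? error)
    {
        since = null;
        error = null;
        if (string.IsNullOrWhiteSpace(value))
        {
            return true; // "not supplied" is valid; the orchestrator derives a window later
        }

        if (DateTimeOffset.TryParse(value, CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal, out var parsed))
        {
            since = parsed;
            return true;
        }

        error = $"Invalid 'since' value '{value}'.";
        return false; // handler returns TypedResults.BadRequest(new { message = error })
    }
}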
diff --git a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/LinksetEndpoints.cs b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/LinksetEndpoints.cs
index 15ce8fe14..f53afabf1 100644
--- a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/LinksetEndpoints.cs
+++ b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/LinksetEndpoints.cs
@@ -10,7 +10,7 @@ using Microsoft.AspNetCore.Mvc;
 using Microsoft.Extensions.Options;
 using StellaOps.Excititor.Core.Canonicalization;
 using StellaOps.Excititor.Core.Observations;
-using StellaOps.Excititor.Storage.Mongo;
+using StellaOps.Excititor.Core.Storage;
 using StellaOps.Excititor.WebService.Contracts;
 using StellaOps.Excititor.WebService.Services;
 using StellaOps.Excititor.WebService.Telemetry;
@@ -32,7 +32,7 @@ public static class LinksetEndpoints
     // GET /vex/linksets - List linksets with filters
     group.MapGet("", async (
         HttpContext context,
-        IOptions<VexMongoStorageOptions> storageOptions,
+        IOptions<VexStorageOptions> storageOptions,
         [FromServices] IVexLinksetStore linksetStore,
         [FromQuery] int? limit,
         [FromQuery] string? cursor,
@@ -124,7 +124,7 @@ public static class LinksetEndpoints
     group.MapGet("/{linksetId}", async (
         HttpContext context,
         string linksetId,
-        IOptions<VexMongoStorageOptions> storageOptions,
+        IOptions<VexStorageOptions> storageOptions,
         [FromServices] IVexLinksetStore linksetStore,
         CancellationToken cancellationToken) =>
     {
@@ -166,7 +166,7 @@ public static class LinksetEndpoints
     // GET /vex/linksets/lookup - Lookup linkset by vulnerability and product
     group.MapGet("/lookup", async (
         HttpContext context,
-        IOptions<VexMongoStorageOptions> storageOptions,
+        IOptions<VexStorageOptions> storageOptions,
         [FromServices] IVexLinksetStore linksetStore,
         [FromQuery] string? vulnerabilityId,
         [FromQuery] string? productKey,
@@ -211,7 +211,7 @@ public static class LinksetEndpoints
     // GET /vex/linksets/count - Get linkset counts for tenant
     group.MapGet("/count", async (
         HttpContext context,
-        IOptions<VexMongoStorageOptions> storageOptions,
+        IOptions<VexStorageOptions> storageOptions,
         [FromServices] IVexLinksetStore linksetStore,
         CancellationToken cancellationToken) =>
     {
@@ -240,7 +240,7 @@ public static class LinksetEndpoints
     // GET /vex/linksets/conflicts - List linksets with conflicts (shorthand)
     group.MapGet("/conflicts", async (
         HttpContext context,
-        IOptions<VexMongoStorageOptions> storageOptions,
+        IOptions<VexStorageOptions> storageOptions,
         [FromServices] IVexLinksetStore linksetStore,
         [FromQuery] int? limit,
         CancellationToken cancellationToken) =>
@@ -317,7 +317,7 @@ public static class LinksetEndpoints
     private static bool TryResolveTenant(
         HttpContext context,
-        VexMongoStorageOptions options,
+        VexStorageOptions options,
         out string tenant,
         out IResult? problem)
     {
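// Sketch (not part of the patch): every endpoint family in this change funnels tenant
// resolution through the same TryResolveTenant(context, VexStorageOptions, ...) shape —
// an explicit X-Stella-Tenant header wins, otherwise the configured default applies.
// Simplified here; the real helper also emits an IResult problem payload on bad input.
using Microsoft.AspNetCore.Http;

internal static class TenantResolutionSketch
{
    public static string Resolve(HttpContext context, string defaultTenant)
    {
        if (context.Request.Headers.TryGetValue("X-Stella-Tenant", out var values))
        {
            var candidate = values.ToString().Trim();
            if (candidate.Length > 0)
            {
                return candidate;
            }
        }

        return defaultTenant; // falls back to VexStorageOptions.DefaultTenant
    }
}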
"invalid_export_configuration", StatusCodes.Status503ServiceUnavailable, cancellationToken).ConfigureAwait(false); + return Results.Empty; + } var manifest = await exportStore.FindAsync(plan.Signature, plan.Format, cancellationToken).ConfigureAwait(false); if (manifest is null) @@ -241,10 +241,10 @@ internal static class MirrorEndpoints return Results.Empty; } - if (!TryFindExport(domain, exportKey, out var exportOptions) || !MirrorExportPlanner.TryBuild(exportOptions, out var plan, out _)) - { - return Results.NotFound(); - } + if (!TryFindExport(domain, exportKey, out var exportOptions) || !MirrorExportPlanner.TryBuild(exportOptions, out var plan, out _)) + { + return Results.NotFound(); + } var manifest = await exportStore.FindAsync(plan.Signature, plan.Format, cancellationToken).ConfigureAwait(false); if (manifest is null) @@ -286,36 +286,36 @@ internal static class MirrorEndpoints return domain is not null; } - private static bool TryFindExport(MirrorDomainOptions domain, string exportKey, out MirrorExportOptions export) - { - export = domain.Exports.FirstOrDefault(e => string.Equals(e.Key, exportKey, StringComparison.OrdinalIgnoreCase))!; - return export is not null; - } + private static bool TryFindExport(MirrorDomainOptions domain, string exportKey, out MirrorExportOptions export) + { + export = domain.Exports.FirstOrDefault(e => string.Equals(e.Key, exportKey, StringComparison.OrdinalIgnoreCase))!; + return export is not null; + } - private static string ResolveContentType(VexExportFormat format) - => format switch - { - VexExportFormat.Json => "application/json", - VexExportFormat.JsonLines => "application/jsonl", - VexExportFormat.OpenVex => "application/json", - VexExportFormat.Csaf => "application/json", - VexExportFormat.CycloneDx => "application/json", - _ => "application/octet-stream", - }; + private static string ResolveContentType(VexExportFormat format) + => format switch + { + VexExportFormat.Json => "application/json", + VexExportFormat.JsonLines => "application/jsonl", + VexExportFormat.OpenVex => "application/json", + VexExportFormat.Csaf => "application/json", + VexExportFormat.CycloneDx => "application/json", + _ => "application/octet-stream", + }; private static string BuildDownloadFileName(string domainId, string exportKey, VexExportFormat format) { var builder = new StringBuilder(domainId.Length + exportKey.Length + 8); builder.Append(domainId).Append('-').Append(exportKey); - builder.Append(format switch - { - VexExportFormat.Json => ".json", - VexExportFormat.JsonLines => ".jsonl", - VexExportFormat.OpenVex => ".openvex.json", - VexExportFormat.Csaf => ".csaf.json", - VexExportFormat.CycloneDx => ".cyclonedx.json", - _ => ".bin", - }); + builder.Append(format switch + { + VexExportFormat.Json => ".json", + VexExportFormat.JsonLines => ".jsonl", + VexExportFormat.OpenVex => ".openvex.json", + VexExportFormat.Csaf => ".csaf.json", + VexExportFormat.CycloneDx => ".cyclonedx.json", + _ => ".bin", + }); return builder.ToString(); } @@ -326,15 +326,15 @@ internal static class MirrorEndpoints await context.Response.WriteAsync(message, cancellationToken); } - private static async Task WriteJsonAsync(HttpContext context, T payload, int statusCode, CancellationToken cancellationToken) - { - context.Response.StatusCode = statusCode; - context.Response.ContentType = "application/json"; - var json = VexCanonicalJsonSerializer.Serialize(payload); - await context.Response.WriteAsync(json, cancellationToken); + private static async Task WriteJsonAsync(HttpContext context, T 
payload, int statusCode, CancellationToken cancellationToken) + { + context.Response.StatusCode = statusCode; + context.Response.ContentType = "application/json"; + var json = VexCanonicalJsonSerializer.Serialize(payload); + await context.Response.WriteAsync(json, cancellationToken); } -} +} internal sealed record MirrorDomainListResponse(IReadOnlyList Domains); diff --git a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/MirrorRegistrationEndpoints.cs b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/MirrorRegistrationEndpoints.cs index 5b7358698..a5085d734 100644 --- a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/MirrorRegistrationEndpoints.cs +++ b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/MirrorRegistrationEndpoints.cs @@ -8,7 +8,7 @@ using Microsoft.AspNetCore.Http; using Microsoft.AspNetCore.Mvc; using Microsoft.Extensions.Logging; using StellaOps.Excititor.Core; -using StellaOps.Excititor.Storage.Mongo; +using StellaOps.Excititor.Core.Storage; using StellaOps.Excititor.WebService.Contracts; namespace StellaOps.Excititor.WebService.Endpoints; diff --git a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/ObservationEndpoints.cs b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/ObservationEndpoints.cs index 6b78059e6..751f4f243 100644 --- a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/ObservationEndpoints.cs +++ b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/ObservationEndpoints.cs @@ -6,7 +6,7 @@ using Microsoft.AspNetCore.Http; using Microsoft.AspNetCore.Mvc; using Microsoft.Extensions.Options; using StellaOps.Excititor.Core.Observations; -using StellaOps.Excititor.Storage.Mongo; +using StellaOps.Excititor.Core.Storage; using StellaOps.Excititor.WebService.Contracts; using StellaOps.Excititor.WebService.Services; @@ -26,7 +26,7 @@ public static class ObservationEndpoints // GET /vex/observations - List observations with filters group.MapGet("", async ( HttpContext context, - IOptions storageOptions, + IOptions storageOptions, [FromServices] IVexObservationStore observationStore, TimeProvider timeProvider, [FromQuery] int? limit, @@ -98,7 +98,7 @@ public static class ObservationEndpoints group.MapGet("/{observationId}", async ( HttpContext context, string observationId, - IOptions storageOptions, + IOptions storageOptions, [FromServices] IVexObservationStore observationStore, CancellationToken cancellationToken) => { @@ -140,7 +140,7 @@ public static class ObservationEndpoints // GET /vex/observations/count - Get observation count for tenant group.MapGet("/count", async ( HttpContext context, - IOptions storageOptions, + IOptions storageOptions, [FromServices] IVexObservationStore observationStore, CancellationToken cancellationToken) => { @@ -230,7 +230,7 @@ public static class ObservationEndpoints private static bool TryResolveTenant( HttpContext context, - VexMongoStorageOptions options, + VexStorageOptions options, out string tenant, out IResult? 
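// Sketch (not part of the patch): ResolveContentType and BuildDownloadFileName above
// keep the format-to-MIME and format-to-extension tables as switch expressions with an
// explicit fallback, so an unmapped VexExportFormat degrades to octet-stream/.bin
// instead of throwing. Worked example under that assumption:
//
//   ResolveContentType(VexExportFormat.JsonLines)                      // "application/jsonl"
//   BuildDownloadFileName("redhat", "consensus", VexExportFormat.OpenVex)
//                                                                      // "redhat-consensus.openvex.json"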
diff --git a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/MirrorRegistrationEndpoints.cs b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/MirrorRegistrationEndpoints.cs
index 5b7358698..a5085d734 100644
--- a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/MirrorRegistrationEndpoints.cs
+++ b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/MirrorRegistrationEndpoints.cs
@@ -8,7 +8,7 @@ using Microsoft.AspNetCore.Http;
 using Microsoft.AspNetCore.Mvc;
 using Microsoft.Extensions.Logging;
 using StellaOps.Excititor.Core;
-using StellaOps.Excititor.Storage.Mongo;
+using StellaOps.Excititor.Core.Storage;
 using StellaOps.Excititor.WebService.Contracts;
 
 namespace StellaOps.Excititor.WebService.Endpoints;
diff --git a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/ObservationEndpoints.cs b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/ObservationEndpoints.cs
index 6b78059e6..751f4f243 100644
--- a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/ObservationEndpoints.cs
+++ b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/ObservationEndpoints.cs
@@ -6,7 +6,7 @@ using Microsoft.AspNetCore.Http;
 using Microsoft.AspNetCore.Mvc;
 using Microsoft.Extensions.Options;
 using StellaOps.Excititor.Core.Observations;
-using StellaOps.Excititor.Storage.Mongo;
+using StellaOps.Excititor.Core.Storage;
 using StellaOps.Excititor.WebService.Contracts;
 using StellaOps.Excititor.WebService.Services;
 
@@ -26,7 +26,7 @@ public static class ObservationEndpoints
     // GET /vex/observations - List observations with filters
     group.MapGet("", async (
         HttpContext context,
-        IOptions<VexMongoStorageOptions> storageOptions,
+        IOptions<VexStorageOptions> storageOptions,
         [FromServices] IVexObservationStore observationStore,
         TimeProvider timeProvider,
         [FromQuery] int? limit,
@@ -98,7 +98,7 @@ public static class ObservationEndpoints
     group.MapGet("/{observationId}", async (
         HttpContext context,
         string observationId,
-        IOptions<VexMongoStorageOptions> storageOptions,
+        IOptions<VexStorageOptions> storageOptions,
         [FromServices] IVexObservationStore observationStore,
         CancellationToken cancellationToken) =>
     {
@@ -140,7 +140,7 @@ public static class ObservationEndpoints
     // GET /vex/observations/count - Get observation count for tenant
     group.MapGet("/count", async (
         HttpContext context,
-        IOptions<VexMongoStorageOptions> storageOptions,
+        IOptions<VexStorageOptions> storageOptions,
         [FromServices] IVexObservationStore observationStore,
         CancellationToken cancellationToken) =>
     {
@@ -230,7 +230,7 @@ public static class ObservationEndpoints
     private static bool TryResolveTenant(
         HttpContext context,
-        VexMongoStorageOptions options,
+        VexStorageOptions options,
         out string tenant,
         out IResult? problem)
     {
diff --git a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/PolicyEndpoints.cs b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/PolicyEndpoints.cs
index d18e9bf88..63c44aca5 100644
--- a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/PolicyEndpoints.cs
+++ b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/PolicyEndpoints.cs
@@ -11,7 +11,7 @@ using Microsoft.Extensions.Options;
 using StellaOps.Excititor.Core;
 using StellaOps.Excititor.Core.Canonicalization;
 using StellaOps.Excititor.Core.Orchestration;
-using StellaOps.Excititor.Storage.Mongo;
+using StellaOps.Excititor.Core.Storage;
 using StellaOps.Excititor.WebService.Contracts;
 using StellaOps.Excititor.WebService.Services;
 
@@ -33,7 +33,7 @@ public static class PolicyEndpoints
     private static async Task<IResult> LookupVexAsync(
         HttpContext context,
         [FromBody] PolicyVexLookupRequest request,
-        IOptions<VexMongoStorageOptions> storageOptions,
+        IOptions<VexStorageOptions> storageOptions,
         [FromServices] IVexClaimStore claimStore,
         TimeProvider timeProvider,
         CancellationToken cancellationToken)
@@ -174,7 +174,7 @@ public static class PolicyEndpoints
     private static bool TryResolveTenant(
         HttpContext context,
-        VexMongoStorageOptions options,
+        VexStorageOptions options,
         out string tenant,
         out IResult? problem)
     {
diff --git a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/ResolveEndpoint.cs b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/ResolveEndpoint.cs
index a30286159..4953c7a49 100644
--- a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/ResolveEndpoint.cs
+++ b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/ResolveEndpoint.cs
@@ -5,55 +5,55 @@ using System.Collections.Generic;
 using System.Collections.Immutable;
 using System.Linq;
 using System.Security.Cryptography;
-using System.Text;
-using System.Text.Json;
-using Microsoft.AspNetCore.Builder;
-using Microsoft.AspNetCore.Http;
-using Microsoft.Extensions.Logging;
-using Microsoft.Extensions.DependencyInjection;
-using StellaOps.Excititor.Attestation;
-using StellaOps.Excititor.Attestation.Dsse;
-using StellaOps.Excititor.Attestation.Signing;
-using StellaOps.Excititor.Core;
-using StellaOps.Excititor.Policy;
-using StellaOps.Excititor.Storage.Mongo;
-using StellaOps.Excititor.WebService.Services;
-
-internal static class ResolveEndpoint
-{
-    private const int MaxSubjectPairs = 256;
-    private const string ReadScope = "vex.read";
-
-    public static void MapResolveEndpoint(WebApplication app)
-    {
-        app.MapPost("/excititor/resolve", HandleResolveAsync);
-    }
+using System.Text;
+using System.Text.Json;
+using Microsoft.AspNetCore.Builder;
+using Microsoft.AspNetCore.Http;
+using Microsoft.Extensions.Logging;
+using Microsoft.Extensions.DependencyInjection;
+using StellaOps.Excititor.Attestation;
+using StellaOps.Excititor.Attestation.Dsse;
+using StellaOps.Excititor.Attestation.Signing;
+using StellaOps.Excititor.Core;
+using StellaOps.Excititor.Policy;
+using StellaOps.Excititor.Core.Storage;
+using StellaOps.Excititor.WebService.Services;
+
+internal static class ResolveEndpoint
+{
+    private const int MaxSubjectPairs = 256;
+    private const string ReadScope = "vex.read";
+
+    public static void MapResolveEndpoint(WebApplication app)
+    {
+        app.MapPost("/excititor/resolve", HandleResolveAsync);
+    }
 
     private static async Task<IResult> HandleResolveAsync(
         VexResolveRequest request,
         HttpContext httpContext,
         IVexClaimStore claimStore,
         IVexConsensusStore consensusStore,
-        IVexProviderStore providerStore,
-        IVexPolicyProvider policyProvider,
-        TimeProvider timeProvider,
-        ILoggerFactory loggerFactory,
-        IVexAttestationClient? attestationClient,
-        CancellationToken cancellationToken)
-    {
-        var scopeResult = ScopeAuthorization.RequireScope(httpContext, ReadScope);
-        if (scopeResult is not null)
-        {
-            return scopeResult;
-        }
-
-        if (request is null)
-        {
-            return Results.BadRequest("Request payload is required.");
-        }
-
-        var logger = loggerFactory.CreateLogger("ResolveEndpoint");
-        var signer = httpContext.RequestServices.GetService();
+        IVexProviderStore providerStore,
+        IVexPolicyProvider policyProvider,
+        TimeProvider timeProvider,
+        ILoggerFactory loggerFactory,
+        IVexAttestationClient? attestationClient,
+        CancellationToken cancellationToken)
+    {
+        var scopeResult = ScopeAuthorization.RequireScope(httpContext, ReadScope);
+        if (scopeResult is not null)
+        {
+            return scopeResult;
+        }
+
+        if (request is null)
+        {
+            return Results.BadRequest("Request payload is required.");
+        }
+
+        var logger = loggerFactory.CreateLogger("ResolveEndpoint");
+        var signer = httpContext.RequestServices.GetService();
 
     var productKeys = NormalizeValues(request.ProductKeys, request.Purls);
     var vulnerabilityIds = NormalizeValues(request.VulnerabilityIds);
diff --git a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/RiskFeedEndpoints.cs b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/RiskFeedEndpoints.cs
index 023c95336..afdddcbc0 100644
--- a/src/Excititor/StellaOps.Excititor.WebService/Endpoints/RiskFeedEndpoints.cs
+++ b/src/Excititor/StellaOps.Excititor.WebService/Endpoints/RiskFeedEndpoints.cs
@@ -6,7 +6,7 @@ using Microsoft.AspNetCore.Mvc;
 using Microsoft.Extensions.Options;
 using StellaOps.Excititor.Core;
 using StellaOps.Excititor.Core.RiskFeed;
-using StellaOps.Excititor.Storage.Mongo;
+using StellaOps.Excititor.Core.Storage;
 using StellaOps.Excititor.WebService.Services;
 
 namespace StellaOps.Excititor.WebService.Endpoints;
 
@@ -25,7 +25,7 @@ public static class RiskFeedEndpoints
     // POST /risk/v1/feed - Generate risk feed
     group.MapPost("/feed", async (
         HttpContext context,
-        IOptions<VexMongoStorageOptions> storageOptions,
+        IOptions<VexStorageOptions> storageOptions,
         [FromServices] IRiskFeedService riskFeedService,
         [FromBody] RiskFeedRequestDto request,
         CancellationToken cancellationToken) =>
@@ -67,7 +67,7 @@ public static class RiskFeedEndpoints
     // GET /risk/v1/feed/item - Get single risk feed item
     group.MapGet("/feed/item", async (
         HttpContext context,
-        IOptions<VexMongoStorageOptions> storageOptions,
+        IOptions<VexStorageOptions> storageOptions,
         [FromServices] IRiskFeedService riskFeedService,
         [FromQuery] string? advisoryKey,
         [FromQuery] string? artifact,
@@ -112,7 +112,7 @@ public static class RiskFeedEndpoints
     group.MapGet("/feed/by-advisory/{advisoryKey}", async (
         HttpContext context,
         string advisoryKey,
-        IOptions<VexMongoStorageOptions> storageOptions,
+        IOptions<VexStorageOptions> storageOptions,
         [FromServices] IRiskFeedService riskFeedService,
         [FromQuery] int? limit,
         CancellationToken cancellationToken) =>
@@ -153,7 +153,7 @@ public static class RiskFeedEndpoints
     group.MapGet("/feed/by-artifact/{**artifact}", async (
         HttpContext context,
         string artifact,
-        IOptions<VexMongoStorageOptions> storageOptions,
+        IOptions<VexStorageOptions> storageOptions,
         [FromServices] IRiskFeedService riskFeedService,
         [FromQuery] int? limit,
         CancellationToken cancellationToken) =>
@@ -235,7 +235,7 @@ public static class RiskFeedEndpoints
     private static bool TryResolveTenant(
         HttpContext context,
-        VexMongoStorageOptions options,
+        VexStorageOptions options,
         out string tenant,
         out IResult? problem)
     {
diff --git a/src/Excititor/StellaOps.Excititor.WebService/Extensions/VexRawDocumentMapper.cs b/src/Excititor/StellaOps.Excititor.WebService/Extensions/VexRawDocumentMapper.cs
new file mode 100644
index 000000000..28c384e87
--- /dev/null
+++ b/src/Excititor/StellaOps.Excititor.WebService/Extensions/VexRawDocumentMapper.cs
@@ -0,0 +1,71 @@
+using System.Collections.Immutable;
+using System.Text.Json;
+using StellaOps.Concelier.RawModels;
+using StellaOps.Excititor.Core;
+using StellaOps.Excititor.Core.Storage;
+
+namespace StellaOps.Excititor.WebService.Extensions;
+
+internal static class VexRawDocumentMapper
+{
+    public static VexRawDocument ToRawModel(VexRawRecord record, string defaultTenant)
+    {
+        ArgumentNullException.ThrowIfNull(record);
+
+        var metadata = record.Metadata ?? ImmutableDictionary<string, string>.Empty;
+        var tenant = Get(metadata, "tenant", record.Tenant) ?? defaultTenant;
+
+        var source = new RawSourceMetadata(
+            Vendor: Get(metadata, "source.vendor", record.ProviderId) ?? record.ProviderId,
+            Connector: Get(metadata, "source.connector", record.ProviderId) ?? record.ProviderId,
+            ConnectorVersion: Get(metadata, "source.connector_version", "unknown") ?? "unknown",
+            Stream: Get(metadata, "source.stream", record.Format.ToString().ToLowerInvariant()));
+
+        var signature = new RawSignatureMetadata(
+            Present: string.Equals(Get(metadata, "signature.present"), "true", StringComparison.OrdinalIgnoreCase),
+            Format: Get(metadata, "signature.format"),
+            KeyId: Get(metadata, "signature.key_id"),
+            Signature: Get(metadata, "signature.sig"),
+            Certificate: Get(metadata, "signature.certificate"),
+            Digest: Get(metadata, "signature.digest"));
+
+        var upstream = new RawUpstreamMetadata(
+            UpstreamId: Get(metadata, "upstream.id", record.Digest) ?? record.Digest,
+            DocumentVersion: Get(metadata, "upstream.version"),
+            RetrievedAt: record.RetrievedAt,
+            ContentHash: Get(metadata, "upstream.content_hash", record.Digest) ?? record.Digest,
+            Signature: signature,
+            Provenance: metadata);
+
+        var content = new RawContent(
+            Format: record.Format.ToString().ToLowerInvariant(),
+            SpecVersion: Get(metadata, "content.spec_version"),
+            Raw: ParseJson(record.Content),
+            Encoding: Get(metadata, "content.encoding"));
+
+        return new VexRawDocument(
+            tenant,
+            source,
+            upstream,
+            content,
+            new RawLinkset(),
+            statements: null,
+            supersedes: record.SupersedesDigest);
+    }
+
+    private static string? Get(IReadOnlyDictionary<string, string> metadata, string key, string? fallback = null)
+    {
+        if (metadata.TryGetValue(key, out var value) && !string.IsNullOrWhiteSpace(value))
+        {
+            return value;
+        }
+
+        return fallback;
+    }
+
+    private static JsonElement ParseJson(ReadOnlyMemory<byte> content)
+    {
+        using var document = JsonDocument.Parse(content);
+        return document.RootElement.Clone();
+    }
+}
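// Sketch (not part of the patch): the mapper above is deliberately tolerant of sparse
// metadata — every lookup carries a fallback, so records written before this migration
// still map without throwing. A stand-alone replica of the Get helper's behavior:
using System;
using System.Collections.Generic;

internal static class MetadataFallbackSketch
{
    private static string? Get(IReadOnlyDictionary<string, string> metadata, string key, string? fallback = null)
        => metadata.TryGetValue(key, out var value) && !string.IsNullOrWhiteSpace(value) ? value : fallback;

    public static void Demo()
    {
        var sparse = new Dictionary<string, string>(); // legacy record with no metadata
        Console.WriteLine(Get(sparse, "source.vendor", "provider-id")); // "provider-id" (fallback wins)
        Console.WriteLine(Get(sparse, "signature.format") is null);     // True (no fallback -> null)
    }
}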
diff --git a/src/Excititor/StellaOps.Excititor.WebService/Program.Helpers.cs b/src/Excititor/StellaOps.Excititor.WebService/Program.Helpers.cs
index 26554795d..6d0336ff6 100644
--- a/src/Excititor/StellaOps.Excititor.WebService/Program.Helpers.cs
+++ b/src/Excititor/StellaOps.Excititor.WebService/Program.Helpers.cs
@@ -6,17 +6,16 @@ using System.Linq;
 using System.Text;
 using Microsoft.AspNetCore.Http;
 using Microsoft.Extensions.Primitives;
-using MongoDB.Bson;
 using StellaOps.Excititor.Core;
 using StellaOps.Excititor.Core.Aoc;
-using StellaOps.Excititor.Storage.Mongo;
+using StellaOps.Excititor.Core.Storage;
 using StellaOps.Excititor.WebService.Contracts;
 using StellaOps.Excititor.WebService.Services;
 
 public partial class Program
 {
     private const string TenantHeaderName = "X-Stella-Tenant";
 
-    private static bool TryResolveTenant(HttpContext context, VexMongoStorageOptions options, bool requireHeader, out string tenant, out IResult? problem)
+    private static bool TryResolveTenant(HttpContext context, VexStorageOptions options, bool requireHeader, out string tenant, out IResult? problem)
     {
         tenant = options.DefaultTenant;
         problem = null;
@@ -51,27 +50,6 @@ public partial class Program
         return true;
     }
 
-    private static IReadOnlyDictionary<string, string> ReadMetadata(BsonValue value)
-    {
-        if (value is not BsonDocument doc || doc.ElementCount == 0)
-        {
-            return new Dictionary<string, string>(StringComparer.Ordinal);
-        }
-
-        var result = new Dictionary<string, string>(StringComparer.Ordinal);
-        foreach (var element in doc.Elements)
-        {
-            if (string.IsNullOrWhiteSpace(element.Name))
-            {
-                continue;
-            }
-
-            result[element.Name] = element.Value?.ToString() ?? string.Empty;
-        }
-
-        return result;
-    }
-
     private static bool TryDecodeCursor(string? cursor, out DateTimeOffset timestamp, out string digest)
     {
         timestamp = default;
diff --git a/src/Excititor/StellaOps.Excititor.WebService/Program.cs b/src/Excititor/StellaOps.Excititor.WebService/Program.cs
index dc50f4d85..b25c3b212 100644
--- a/src/Excititor/StellaOps.Excititor.WebService/Program.cs
+++ b/src/Excititor/StellaOps.Excititor.WebService/Program.cs
@@ -27,28 +27,27 @@ using StellaOps.Excititor.Formats.CSAF;
 using StellaOps.Excititor.Formats.CycloneDX;
 using StellaOps.Excititor.Formats.OpenVEX;
 using StellaOps.Excititor.Policy;
-using StellaOps.Excititor.Storage.Mongo;
+using StellaOps.Excititor.Storage.Postgres;
 using StellaOps.Excititor.WebService.Endpoints;
 using StellaOps.Excititor.WebService.Extensions;
 using StellaOps.Excititor.WebService.Options;
 using StellaOps.Excititor.WebService.Services;
 using StellaOps.Excititor.Core.Aoc;
 using StellaOps.Excititor.WebService.Telemetry;
-using MongoDB.Driver;
-using MongoDB.Bson;
 using Microsoft.Extensions.Caching.Memory;
 using StellaOps.Excititor.WebService.Contracts;
 using System.Globalization;
 using StellaOps.Excititor.WebService.Graph;
+using StellaOps.Excititor.Core.Storage;
 
 var builder = WebApplication.CreateBuilder(args);
 var configuration = builder.Configuration;
 var services = builder.Services;
 
-services.AddOptions<VexMongoStorageOptions>()
-    .Bind(configuration.GetSection("Excititor:Storage:Mongo"))
+services.AddOptions<VexStorageOptions>()
+    .Bind(configuration.GetSection("Excititor:Storage"))
     .ValidateOnStart();
-services.AddExcititorMongoStorage();
+services.AddExcititorPostgresStorage(configuration);
 services.AddCsafNormalizer();
 services.AddCycloneDxNormalizer();
 services.AddOpenVexNormalizer();
@@ -147,7 +146,7 @@ app.UseObservabilityHeaders();
 
 app.MapGet("/excititor/status", async (
     HttpContext context,
     IEnumerable artifactStores,
-    IOptions<VexMongoStorageOptions> mongoOptions,
+    IOptions<VexStorageOptions> mongoOptions,
     TimeProvider timeProvider) =>
 {
     var payload = new StatusResponse(
@@ -1260,7 +1259,7 @@ app.MapPost("/excititor/admin/backfill-statements", async (
 
 app.MapGet("/console/vex", async (
     HttpContext context,
-    IOptions<VexMongoStorageOptions> storageOptions,
+    IOptions<VexStorageOptions> storageOptions,
     IVexObservationQueryService queryService,
     ConsoleTelemetry telemetry,
     IMemoryCache cache,
@@ -1459,7 +1458,7 @@ var response = new GraphLinkoutsResponse(items, notFound);
 
 app.MapGet("/v1/graph/status", async (
     HttpContext context,
     [FromQuery(Name = "purl")] string[]? purls,
-    IOptions<VexMongoStorageOptions> storageOptions,
+    IOptions<VexStorageOptions> storageOptions,
     IOptions graphOptions,
     IVexObservationQueryService queryService,
     IMemoryCache cache,
@@ -1519,7 +1518,7 @@ app.MapGet("/v1/graph/overlays", async (
     HttpContext context,
     [FromQuery(Name = "purl")] string[]? purls,
     [FromQuery] bool includeJustifications,
-    IOptions<VexMongoStorageOptions> storageOptions,
+    IOptions<VexStorageOptions> storageOptions,
     IOptions graphOptions,
     IVexObservationQueryService queryService,
     IMemoryCache cache,
@@ -1580,7 +1579,7 @@ app.MapGet("/v1/graph/observations", async (
     [FromQuery] bool includeJustifications,
     [FromQuery] int? limitPerPurl,
     [FromQuery] string? cursor,
-    IOptions<VexMongoStorageOptions> storageOptions,
+    IOptions<VexStorageOptions> storageOptions,
     IOptions graphOptions,
     IVexObservationQueryService queryService,
     CancellationToken cancellationToken) =>
@@ -1638,7 +1637,7 @@ app.MapPost("/ingest/vex", async (
     HttpContext context,
     VexIngestRequest request,
     IVexRawStore rawStore,
-    IOptions<VexMongoStorageOptions> storageOptions,
+    IOptions<VexStorageOptions> storageOptions,
     TimeProvider timeProvider,
     ILogger logger,
     CancellationToken cancellationToken) =>
@@ -1692,8 +1691,8 @@ app.MapPost("/ingest/vex", async (
 
 app.MapGet("/vex/raw", async (
     HttpContext context,
-    IMongoDatabase database,
-    IOptions<VexMongoStorageOptions> storageOptions,
+    IVexRawStore rawStore,
+    IOptions<VexStorageOptions> storageOptions,
     CancellationToken cancellationToken) =>
 {
     var scopeResult = ScopeAuthorization.RequireScope(context, "vex.read");
@@ -1702,132 +1701,69 @@ app.MapGet("/vex/raw", async (
         return scopeResult;
     }
 
-    if (!TryResolveTenant(context, storageOptions.Value, requireHeader: false, out _, out var tenantError))
+    if (!TryResolveTenant(context, storageOptions.Value, requireHeader: false, out var tenant, out var tenantError))
     {
         return tenantError;
     }
 
-    var collection = database.GetCollection<BsonDocument>(VexMongoCollectionNames.Raw);
     var query = context.Request.Query;
-    var filters = new List<FilterDefinition<BsonDocument>>();
-    var builder = Builders<BsonDocument>.Filter;
+    var providerFilter = BuildStringFilterSet(query["providerId"]);
+    var digestFilter = BuildStringFilterSet(query["digest"]);
+    var formatFilter = query.TryGetValue("format", out var formats)
+        ? formats
+            .Where(static f => !string.IsNullOrWhiteSpace(f))
+            .Select(static f => Enum.TryParse<VexDocumentFormat>(f, true, out var parsed) ? parsed : VexDocumentFormat.Unknown)
+            .Where(static f => f != VexDocumentFormat.Unknown)
+            .ToArray()
+        : Array.Empty<VexDocumentFormat>();
 
-    if (query.TryGetValue("providerId", out var providerValues))
-    {
-        var providers = providerValues
-            .Where(static value => !string.IsNullOrWhiteSpace(value))
-            .Select(static value => value!.Trim())
-            .ToArray();
-        if (providers.Length > 0)
-        {
-            filters.Add(builder.In("ProviderId", providers));
-        }
-    }
-
-    if (query.TryGetValue("digest", out var digestValues))
-    {
-        var digests = digestValues
-            .Where(static value => !string.IsNullOrWhiteSpace(value))
-            .Select(static value => value!.Trim())
-            .ToArray();
-        if (digests.Length > 0)
-        {
-            filters.Add(builder.In("Digest", digests));
-        }
-    }
-
-    if (query.TryGetValue("format", out var formatValues))
-    {
-        var formats = formatValues
-            .Where(static value => !string.IsNullOrWhiteSpace(value))
-            .Select(static value => value!.Trim().ToLowerInvariant())
-            .ToArray();
-        if (formats.Length > 0)
-        {
-            filters.Add(builder.In("Format", formats));
-        }
-    }
-
-    if (query.TryGetValue("since", out var sinceValues) && DateTimeOffset.TryParse(sinceValues.FirstOrDefault(), CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal, out var sinceValue))
-    {
-        filters.Add(builder.Gte("RetrievedAt", sinceValue.UtcDateTime));
-    }
+    var since = ParseSinceTimestamp(query["since"]);
 
     var cursorToken = query.TryGetValue("cursor", out var cursorValues) ? cursorValues.FirstOrDefault() : null;
-    DateTime? cursorTimestamp = null;
-    string? cursorDigest = null;
-    if (!string.IsNullOrWhiteSpace(cursorToken) && TryDecodeCursor(cursorToken, out var cursorTime, out var cursorId))
+    VexRawCursor? cursor = null;
+    if (!string.IsNullOrWhiteSpace(cursorToken) &&
+        TryDecodeCursor(cursorToken, out var cursorTime, out var cursorId))
     {
-        cursorTimestamp = cursorTime.UtcDateTime;
-        cursorDigest = cursorId;
+        cursor = new VexRawCursor(cursorTime, cursorId);
     }
 
-    if (cursorTimestamp is not null && cursorDigest is not null)
-    {
-        var ltTime = builder.Lt("RetrievedAt", cursorTimestamp.Value);
-        var eqTimeLtDigest = builder.And(
-            builder.Eq("RetrievedAt", cursorTimestamp.Value),
-            builder.Lt("Digest", cursorDigest));
-        filters.Add(builder.Or(ltTime, eqTimeLtDigest));
-    }
+    var limit = ResolveLimit(query["limit"], defaultValue: 50, min: 1, max: 200);
 
-    var limit = 50;
-    if (query.TryGetValue("limit", out var limitValues) && int.TryParse(limitValues.FirstOrDefault(), NumberStyles.Integer, CultureInfo.InvariantCulture, out var requestedLimit))
-    {
-        limit = Math.Clamp(requestedLimit, 1, 200);
-    }
+    var page = await rawStore.QueryAsync(
+        new VexRawQuery(
+            tenant,
+            providerFilter,
+            digestFilter,
+            formatFilter,
+            since,
+            Until: null,
+            cursor,
+            limit),
+        cancellationToken).ConfigureAwait(false);
 
-    var filter = filters.Count == 0 ? builder.Empty : builder.And(filters);
-    var sort = Builders<BsonDocument>.Sort.Descending("RetrievedAt").Descending("Digest");
-    var documents = await collection
-        .Find(filter)
-        .Sort(sort)
-        .Limit(limit)
-        .Project(Builders<BsonDocument>.Projection.Include("Digest").Include("ProviderId").Include("Format").Include("SourceUri").Include("RetrievedAt").Include("Metadata").Include("GridFsObjectId"))
-        .ToListAsync(cancellationToken)
-        .ConfigureAwait(false);
+    var summaries = page.Items
+        .Select(summary => new VexRawSummaryResponse(
+            summary.Digest,
+            summary.ProviderId,
+            summary.Format.ToString().ToLowerInvariant(),
+            summary.SourceUri.ToString(),
+            summary.RetrievedAt,
+            summary.InlineContent,
+            summary.Metadata))
+        .ToList();
 
-    var summaries = new List<VexRawSummaryResponse>(documents.Count);
-    foreach (var document in documents)
-    {
-        var digest = document.TryGetValue("Digest", out var digestValue) && digestValue.IsString ? digestValue.AsString : string.Empty;
-        var providerId = document.TryGetValue("ProviderId", out var providerValue) && providerValue.IsString ? providerValue.AsString : string.Empty;
-        var format = document.TryGetValue("Format", out var formatValue) && formatValue.IsString ? formatValue.AsString : string.Empty;
-        var sourceUri = document.TryGetValue("SourceUri", out var sourceValue) && sourceValue.IsString ? sourceValue.AsString : string.Empty;
-        var retrievedAt = document.TryGetValue("RetrievedAt", out var retrievedValue) && retrievedValue is BsonDateTime bsonDate
-            ? bsonDate.ToUniversalTime()
-            : DateTime.UtcNow;
-        var metadata = ReadMetadata(document.TryGetValue("Metadata", out var metadataValue) ? metadataValue : BsonNull.Value);
-        var inlineContent = !document.TryGetValue("GridFsObjectId", out var gridId) || gridId.IsBsonNull || (gridId.IsString && string.IsNullOrWhiteSpace(gridId.AsString));
+    var nextCursor = page.NextCursor is null
+        ? null
+        : EncodeCursor(page.NextCursor.RetrievedAt.UtcDateTime, page.NextCursor.Digest);
 
-        summaries.Add(new VexRawSummaryResponse(
-            digest,
-            providerId,
-            format,
-            sourceUri,
-            new DateTimeOffset(retrievedAt),
-            inlineContent,
-            metadata));
-    }
-
-    var hasMore = documents.Count == limit;
-    string? nextCursor = null;
-    if (hasMore && documents.Count > 0)
-    {
-        var last = documents[^1];
-        var lastTime = last.GetValue("RetrievedAt", BsonNull.Value).ToUniversalTime();
-        var lastDigest = last.GetValue("Digest", BsonNull.Value).AsString;
-        nextCursor = EncodeCursor(lastTime, lastDigest);
-    }
-
-    return Results.Json(new VexRawListResponse(summaries, nextCursor, hasMore));
+    return Results.Json(new VexRawListResponse(summaries, nextCursor, page.HasMore));
 });
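// Sketch (not part of the patch): the rewritten /vex/raw keeps the opaque cursor token
// (EncodeCursor/TryDecodeCursor in Program.Helpers.cs) and hands keyset pagination to
// the store via VexRawCursor(retrievedAt, digest). The "<ticks>:<digest>" token shape
// below is an assumption about the helper's encoding, not a confirmed detail:
using System;
using System.Globalization;

internal static class RawCursorSketch
{
    public static string Encode(DateTime retrievedAtUtc, string digest)
        => FormattableString.Invariant($"{retrievedAtUtc.Ticks}:{digest}");

    public static bool TryDecode(string token, out DateTimeOffset retrievedAt, out string digest)
    {
        retrievedAt = default;
        digest = string.Empty;

        var separator = token.IndexOf(':');
        if (separator <= 0 ||
            !long.TryParse(token[..separator], NumberStyles.Integer, CultureInfo.InvariantCulture, out var ticks))
        {
            return false;
        }

        retrievedAt = new DateTimeOffset(ticks, TimeSpan.Zero);
        digest = token[(separator + 1)..];
        return true; // caller builds new VexRawCursor(retrievedAt, digest)
    }
}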
 
 app.MapGet("/vex/raw/{digest}", async (
     string digest,
     HttpContext context,
     IVexRawStore rawStore,
-    IOptions<VexMongoStorageOptions> storageOptions,
+    IOptions<VexStorageOptions> storageOptions,
     CancellationToken cancellationToken) =>
 {
     var scopeResult = ScopeAuthorization.RequireScope(context, "vex.read");
@@ -1861,7 +1797,7 @@ app.MapGet("/vex/raw/{digest}/provenance", async (
     string digest,
     HttpContext context,
     IVexRawStore rawStore,
-    IOptions<VexMongoStorageOptions> storageOptions,
+    IOptions<VexStorageOptions> storageOptions,
     CancellationToken cancellationToken) =>
 {
     var scopeResult = ScopeAuthorization.RequireScope(context, "vex.read");
@@ -1901,7 +1837,7 @@ app.MapGet("/v1/vex/observations/{vulnerabilityId}/{productKey}", async (
     string vulnerabilityId,
     string productKey,
     [FromServices] IVexObservationProjectionService projectionService,
-    [FromServices] IOptions<VexMongoStorageOptions> storageOptions,
+    [FromServices] IOptions<VexStorageOptions> storageOptions,
     CancellationToken cancellationToken) =>
 {
     var scopeResult = ScopeAuthorization.RequireScope(context, "vex.read");
@@ -1977,7 +1913,7 @@ app.MapGet("/v1/vex/observations/{vulnerabilityId}/{productKey}", async (
 
 app.MapGet("/v1/vex/evidence/chunks", async (
     HttpContext context,
     [FromServices] IVexEvidenceChunkService chunkService,
-    [FromServices] IOptions<VexMongoStorageOptions> storageOptions,
+    [FromServices] IOptions<VexStorageOptions> storageOptions,
     [FromServices] ChunkTelemetry chunkTelemetry,
     [FromServices] ILogger logger,
     [FromServices] TimeProvider timeProvider,
@@ -2083,10 +2019,9 @@ app.MapGet("/v1/vex/evidence/chunks", async (
 
 app.MapPost("/aoc/verify", async (
     HttpContext context,
     VexAocVerifyRequest? request,
-    IMongoDatabase database,
     IVexRawStore rawStore,
     IVexRawWriteGuard guard,
-    IOptions<VexMongoStorageOptions> storageOptions,
+    IOptions<VexStorageOptions> storageOptions,
     TimeProvider timeProvider,
     CancellationToken cancellationToken) =>
 {
@@ -2119,33 +2054,26 @@ app.MapPost("/aoc/verify", async (
         .Select(static value => value!.Trim())
         .ToArray();
 
-    var builder = Builders<BsonDocument>.Filter;
-    var filter = builder.And(
-        builder.Gte("RetrievedAt", since),
-        builder.Lte("RetrievedAt", until));
-
-    if (sources is { Length: > 0 })
-    {
-        filter &= builder.In("ProviderId", sources);
-    }
-
-    var collection = database.GetCollection<BsonDocument>(VexMongoCollectionNames.Raw);
-    var digests = await collection
-        .Find(filter)
-        .Sort(Builders<BsonDocument>.Sort.Descending("RetrievedAt"))
-        .Limit(limit)
-        .Project(Builders<BsonDocument>.Projection.Include("Digest").Include("RetrievedAt").Include("ProviderId"))
-        .ToListAsync(cancellationToken)
-        .ConfigureAwait(false);
+    var page = await rawStore.QueryAsync(
+        new VexRawQuery(
+            tenant,
+            sources ?? Array.Empty<string>(),
+            Array.Empty<string>(),
+            Array.Empty<VexDocumentFormat>(),
+            Since: new DateTimeOffset(since, TimeSpan.Zero),
+            Until: new DateTimeOffset(until, TimeSpan.Zero),
+            Cursor: null,
+            limit),
+        cancellationToken).ConfigureAwait(false);
 
     var checkedCount = 0;
     var violationMap = new Dictionary<string, (int Count, List<string> Examples)>(StringComparer.OrdinalIgnoreCase);
     const int MaxExamplesPerCode = 5;
 
-    foreach (var digestDocument in digests)
+    foreach (var item in page.Items)
     {
-        var digestValue = digestDocument.GetValue("Digest", BsonNull.Value).AsString;
-        var provider = digestDocument.GetValue("ProviderId", BsonNull.Value).AsString;
+        var digestValue = item.Digest;
+        var provider = item.ProviderId;
 
         var domainDocument = await rawStore.FindByDigestAsync(digestValue, cancellationToken).ConfigureAwait(false);
         if (domainDocument is null)
@@ -2202,7 +2130,7 @@ app.MapPost("/aoc/verify", async (
         new VexAocVerifyChecked(0, checkedCount),
         violations,
         new VexAocVerifyMetrics(checkedCount, violations.Sum(v => v.Count)),
-        digests.Count == limit);
+        page.HasMore);
 
     return Results.Json(response);
 });
@@ -2225,7 +2153,7 @@ app.MapGet("/obs/excititor/health", async (
 
 // VEX timeline SSE (WEB-OBS-52-001)
 app.MapGet("/obs/excititor/timeline", async (
     HttpContext context,
-    IOptions<VexMongoStorageOptions> storageOptions,
+    IOptions<VexStorageOptions> storageOptions,
     [FromServices] IVexTimelineEventStore timelineStore,
     TimeProvider timeProvider,
     ILoggerFactory loggerFactory,
diff --git a/src/Excititor/StellaOps.Excititor.WebService/Properties/AssemblyInfo.cs b/src/Excititor/StellaOps.Excititor.WebService/Properties/AssemblyInfo.cs
index ee41af8ac..8b96b8609 100644
--- a/src/Excititor/StellaOps.Excititor.WebService/Properties/AssemblyInfo.cs
+++ b/src/Excititor/StellaOps.Excititor.WebService/Properties/AssemblyInfo.cs
@@ -1,4 +1,4 @@
 using System.Runtime.CompilerServices;
 
-[assembly: InternalsVisibleTo("StellaOps.Excititor.WebService.Tests")]
-[assembly: InternalsVisibleTo("StellaOps.Excititor.Core.UnitTests")]
+[assembly: InternalsVisibleTo("StellaOps.Excititor.WebService.Tests")]
+[assembly: InternalsVisibleTo("StellaOps.Excititor.Core.UnitTests")]
diff --git a/src/Excititor/StellaOps.Excititor.WebService/Services/ExcititorHealthService.cs b/src/Excititor/StellaOps.Excititor.WebService/Services/ExcititorHealthService.cs
index 90b1460bf..af6a56af0 100644
--- a/src/Excititor/StellaOps.Excititor.WebService/Services/ExcititorHealthService.cs
+++ b/src/Excititor/StellaOps.Excititor.WebService/Services/ExcititorHealthService.cs
@@ -7,7 +7,7 @@ using Microsoft.Extensions.Logging;
 using Microsoft.Extensions.Options;
 using StellaOps.Excititor.Connectors.Abstractions;
 using StellaOps.Excititor.Core;
-using StellaOps.Excititor.Storage.Mongo;
+using StellaOps.Excititor.Core.Storage;
 using StellaOps.Excititor.WebService.Options;
 
 namespace StellaOps.Excititor.WebService.Services;
diff --git a/src/Excititor/StellaOps.Excititor.WebService/Services/VexEvidenceChunkService.cs b/src/Excititor/StellaOps.Excititor.WebService/Services/VexEvidenceChunkService.cs
index 6969a185e..986a8f0aa 100644
--- a/src/Excititor/StellaOps.Excititor.WebService/Services/VexEvidenceChunkService.cs
+++ b/src/Excititor/StellaOps.Excititor.WebService/Services/VexEvidenceChunkService.cs
@@ -6,7 +6,7 @@ using System.Linq;
 using System.Threading;
 using System.Threading.Tasks;
 using StellaOps.Excititor.Core;
-using StellaOps.Excititor.Storage.Mongo;
+using StellaOps.Excititor.Core.Storage;
 using StellaOps.Excititor.WebService.Contracts;
 
 namespace StellaOps.Excititor.WebService.Services;
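// Sketch (not part of the patch): /aoc/verify counts every violation but caps the stored
// example digests per code (MaxExamplesPerCode = 5), keeping the response bounded on
// large windows. The aggregation step in isolation, with hypothetical wrapper names:
using System;
using System.Collections.Generic;

internal sealed class ViolationAggregationSketch
{
    private const int MaxExamplesPerCode = 5;
    private readonly Dictionary<string, (int Count, List<string> Examples)> _map =
        new(StringComparer.OrdinalIgnoreCase);

    public void Record(string code, string digest)
    {
        if (!_map.TryGetValue(code, out var entry))
        {
            entry = (0, new List<string>());
        }

        entry.Count++; // value tuple: mutate the local copy...
        if (entry.Examples.Count < MaxExamplesPerCode)
        {
            entry.Examples.Add(digest);
        }

        _map[code] = entry; // ...then write it back
    }
}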
diff --git a/src/Excititor/StellaOps.Excititor.WebService/Services/VexIngestOrchestrator.cs b/src/Excititor/StellaOps.Excititor.WebService/Services/VexIngestOrchestrator.cs
index 048c93e82..75ae320e8 100644
--- a/src/Excititor/StellaOps.Excititor.WebService/Services/VexIngestOrchestrator.cs
+++ b/src/Excititor/StellaOps.Excititor.WebService/Services/VexIngestOrchestrator.cs
@@ -1,14 +1,14 @@
-using System.Collections.Generic;
-using System.Collections.Immutable;
-using System.Diagnostics;
-using System.Globalization;
-using System.Linq;
-using Microsoft.Extensions.Logging;
-using Microsoft.Extensions.Options;
-using MongoDB.Driver;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Diagnostics;
+using System.Globalization;
+using System.Linq;
+using Microsoft.Extensions.Logging;
+using Microsoft.Extensions.Options;
+using MongoDB.Driver;
 using StellaOps.Excititor.Connectors.Abstractions;
 using StellaOps.Excititor.Core;
-using StellaOps.Excititor.Storage.Mongo;
+using StellaOps.Excititor.Core.Storage;
 
 namespace StellaOps.Excititor.WebService.Services;
 
@@ -23,50 +23,47 @@ internal interface IVexIngestOrchestrator
     Task ReconcileAsync(ReconcileOptions options, CancellationToken cancellationToken);
 }
 
-internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
-{
-    private readonly IServiceProvider _serviceProvider;
-    private readonly IReadOnlyDictionary<string, ConnectorHandle> _connectors;
-    private readonly IVexRawStore _rawStore;
-    private readonly IVexClaimStore _claimStore;
-    private readonly IVexProviderStore _providerStore;
-    private readonly IVexConnectorStateRepository _stateRepository;
-    private readonly IVexNormalizerRouter _normalizerRouter;
-    private readonly IVexSignatureVerifier _signatureVerifier;
-    private readonly IVexMongoSessionProvider _sessionProvider;
-    private readonly TimeProvider _timeProvider;
-    private readonly ILogger<VexIngestOrchestrator> _logger;
-    private readonly string _defaultTenant;
+internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
+{
+    private readonly IServiceProvider _serviceProvider;
+    private readonly IReadOnlyDictionary<string, ConnectorHandle> _connectors;
+    private readonly IVexRawStore _rawStore;
+    private readonly IVexClaimStore _claimStore;
+    private readonly IVexProviderStore _providerStore;
+    private readonly IVexConnectorStateRepository _stateRepository;
+    private readonly IVexNormalizerRouter _normalizerRouter;
+    private readonly IVexSignatureVerifier _signatureVerifier;
+    private readonly TimeProvider _timeProvider;
+    private readonly ILogger<VexIngestOrchestrator> _logger;
+    private readonly string _defaultTenant;
 
-    public VexIngestOrchestrator(
-        IServiceProvider serviceProvider,
-        IEnumerable connectors,
-        IVexRawStore rawStore,
-        IVexClaimStore claimStore,
-        IVexProviderStore providerStore,
-        IVexConnectorStateRepository stateRepository,
-        IVexNormalizerRouter normalizerRouter,
-        IVexSignatureVerifier signatureVerifier,
-        IVexMongoSessionProvider sessionProvider,
-        TimeProvider timeProvider,
-        IOptions<VexMongoStorageOptions> storageOptions,
-        ILogger<VexIngestOrchestrator> logger)
-    {
-        _serviceProvider = serviceProvider ?? throw new ArgumentNullException(nameof(serviceProvider));
-        _rawStore = rawStore ?? throw new ArgumentNullException(nameof(rawStore));
-        _claimStore = claimStore ?? throw new ArgumentNullException(nameof(claimStore));
+    public VexIngestOrchestrator(
+        IServiceProvider serviceProvider,
+        IEnumerable connectors,
+        IVexRawStore rawStore,
+        IVexClaimStore claimStore,
+        IVexProviderStore providerStore,
+        IVexConnectorStateRepository stateRepository,
+        IVexNormalizerRouter normalizerRouter,
+        IVexSignatureVerifier signatureVerifier,
+        TimeProvider timeProvider,
+        IOptions<VexStorageOptions> storageOptions,
+        ILogger<VexIngestOrchestrator> logger)
+    {
+        _serviceProvider = serviceProvider ?? throw new ArgumentNullException(nameof(serviceProvider));
+        _rawStore = rawStore ?? throw new ArgumentNullException(nameof(rawStore));
+        _claimStore = claimStore ?? throw new ArgumentNullException(nameof(claimStore));
         _providerStore = providerStore ?? throw new ArgumentNullException(nameof(providerStore));
         _stateRepository = stateRepository ?? throw new ArgumentNullException(nameof(stateRepository));
         _normalizerRouter = normalizerRouter ?? throw new ArgumentNullException(nameof(normalizerRouter));
         _signatureVerifier = signatureVerifier ?? throw new ArgumentNullException(nameof(signatureVerifier));
-        _sessionProvider = sessionProvider ?? throw new ArgumentNullException(nameof(sessionProvider));
-        _timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
-        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
-        var optionsValue = (storageOptions ?? throw new ArgumentNullException(nameof(storageOptions))).Value
-            ?? throw new ArgumentNullException(nameof(storageOptions));
-        _defaultTenant = string.IsNullOrWhiteSpace(optionsValue.DefaultTenant)
-            ? "default"
-            : optionsValue.DefaultTenant.Trim();
+        _timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider));
+        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
+        var optionsValue = (storageOptions ?? throw new ArgumentNullException(nameof(storageOptions))).Value
+            ?? throw new ArgumentNullException(nameof(storageOptions));
+        _defaultTenant = string.IsNullOrWhiteSpace(optionsValue.DefaultTenant)
+            ? "default"
+            : optionsValue.DefaultTenant.Trim();
 
         if (connectors is null)
         {
@@ -86,8 +83,6 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
     var startedAt = _timeProvider.GetUtcNow();
     var results = ImmutableArray.CreateBuilder<InitProviderResult>();
 
-    var session = await _sessionProvider.StartSessionAsync(cancellationToken).ConfigureAwait(false);
-
     var (handles, missing) = ResolveConnectors(options.Providers);
     foreach (var providerId in missing)
     {
@@ -100,15 +95,15 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
     try
     {
         await ValidateConnectorAsync(handle, cancellationToken).ConfigureAwait(false);
-        await EnsureProviderRegistrationAsync(handle.Descriptor, session, cancellationToken).ConfigureAwait(false);
+        await EnsureProviderRegistrationAsync(handle.Descriptor, cancellationToken).ConfigureAwait(false);
 
         stopwatch.Stop();
-        results.Add(new InitProviderResult(
-            handle.Descriptor.Id,
-            handle.Descriptor.DisplayName,
-            "succeeded",
-            stopwatch.Elapsed,
-            Error: null));
+        results.Add(new InitProviderResult(
+            handle.Descriptor.Id,
+            handle.Descriptor.DisplayName,
+            "succeeded",
+            stopwatch.Elapsed,
+            Error: null));
 
         _logger.LogInformation("Excititor init validated provider {ProviderId} in {Duration}ms.", handle.Descriptor.Id, stopwatch.Elapsed.TotalMilliseconds);
     }
@@ -148,8 +143,6 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
     var startedAt = _timeProvider.GetUtcNow();
     var since = ResolveSince(options.Since, options.Window, startedAt);
     var results = ImmutableArray.CreateBuilder<ProviderRunResult>();
 
-    var session = await _sessionProvider.StartSessionAsync(cancellationToken).ConfigureAwait(false);
-
     var (handles, missing) = ResolveConnectors(options.Providers);
     foreach (var providerId in missing)
     {
@@ -158,7 +151,7 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
 
     foreach (var handle in handles)
     {
-        var result = await ExecuteRunAsync(runId, handle, since, options.Force, session, cancellationToken).ConfigureAwait(false);
+        var result = await ExecuteRunAsync(runId, handle, since, options.Force, cancellationToken).ConfigureAwait(false);
         results.Add(result);
     }
 
@@ -173,20 +166,18 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
     var runId = Guid.NewGuid();
     var startedAt = _timeProvider.GetUtcNow();
     var results = ImmutableArray.CreateBuilder<ProviderRunResult>();
 
-    var session = await _sessionProvider.StartSessionAsync(cancellationToken).ConfigureAwait(false);
-
     var (handles, missing) = ResolveConnectors(options.Providers);
     foreach (var providerId in missing)
     {
         results.Add(ProviderRunResult.Missing(providerId, since: null));
     }
 
-    foreach (var handle in handles)
-    {
-        var since = await ResolveResumeSinceAsync(handle.Descriptor.Id, options.Checkpoint, session, cancellationToken).ConfigureAwait(false);
-        var result = await ExecuteRunAsync(runId, handle, since, force: false, session, cancellationToken).ConfigureAwait(false);
-        results.Add(result);
-    }
+    foreach (var handle in handles)
+    {
+        var since = await ResolveResumeSinceAsync(handle.Descriptor.Id, options.Checkpoint, cancellationToken).ConfigureAwait(false);
+        var result = await ExecuteRunAsync(runId, handle, since, force: false, cancellationToken).ConfigureAwait(false);
+        results.Add(result);
+    }
 
     var completedAt = _timeProvider.GetUtcNow();
     return new IngestRunSummary(runId, startedAt, completedAt, results.ToImmutable());
@@ -200,8 +191,6 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
     var startedAt = _timeProvider.GetUtcNow();
     var threshold = options.MaxAge is null ? (DateTimeOffset?)null : startedAt - options.MaxAge.Value;
     var results = ImmutableArray.CreateBuilder<ReconcileProviderResult>();
 
-    var session = await _sessionProvider.StartSessionAsync(cancellationToken).ConfigureAwait(false);
-
     var (handles, missing) = ResolveConnectors(options.Providers);
     foreach (var providerId in missing)
     {
@@ -219,8 +208,8 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
         if (stale || state is null)
         {
             var since = stale ? threshold : lastUpdated;
-            var result = await ExecuteRunAsync(runId, handle, since, force: false, session, cancellationToken).ConfigureAwait(false);
-            results.Add(new ReconcileProviderResult(
+            var result = await ExecuteRunAsync(runId, handle, since, force: false, cancellationToken).ConfigureAwait(false);
+            results.Add(new ReconcileProviderResult(
                 handle.Descriptor.Id,
                 result.Status,
                 "reconciled",
@@ -232,15 +221,15 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
         }
         else
         {
-            results.Add(new ReconcileProviderResult(
-                handle.Descriptor.Id,
-                "succeeded",
-                "skipped",
-                lastUpdated,
-                threshold,
-                Documents: 0,
-                Claims: 0,
-                Error: null));
+            results.Add(new ReconcileProviderResult(
+                handle.Descriptor.Id,
+                "succeeded",
+                "skipped",
+                lastUpdated,
+                threshold,
+                Documents: 0,
+                Claims: 0,
+                Error: null));
         }
     }
     catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested)
@@ -280,7 +269,7 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
     await handle.Connector.ValidateAsync(VexConnectorSettings.Empty, cancellationToken).ConfigureAwait(false);
 }
 
-private async Task EnsureProviderRegistrationAsync(VexConnectorDescriptor descriptor, IClientSessionHandle session, CancellationToken cancellationToken)
+private async Task EnsureProviderRegistrationAsync(VexConnectorDescriptor descriptor, CancellationToken cancellationToken)
 {
-    var existing = await _providerStore.FindAsync(descriptor.Id, cancellationToken, session).ConfigureAwait(false);
+    var existing = await _providerStore.FindAsync(descriptor.Id, cancellationToken).ConfigureAwait(false);
     if (existing is not null)
     {
@@ -292,48 +281,48 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
-    await _providerStore.SaveAsync(provider, cancellationToken, session).ConfigureAwait(false);
+    await _providerStore.SaveAsync(provider, cancellationToken).ConfigureAwait(false);
 }
 
-    private async Task<ProviderRunResult> ExecuteRunAsync(
-        Guid runId,
-        ConnectorHandle handle,
-        DateTimeOffset? since,
-        bool force,
-        IClientSessionHandle session,
-        CancellationToken cancellationToken)
-    {
-        var providerId = handle.Descriptor.Id;
-        var startedAt = _timeProvider.GetUtcNow();
-        var stopwatch = Stopwatch.StartNew();
-        using var scope = _logger.BeginScope(new Dictionary<string, object?>(StringComparer.Ordinal)
-        {
-            ["tenant"] = _defaultTenant,
-            ["runId"] = runId,
-            ["providerId"] = providerId,
-            ["window.since"] = since?.ToString("O", CultureInfo.InvariantCulture),
-            ["force"] = force,
-        });
+    private async Task<ProviderRunResult> ExecuteRunAsync(
+        Guid runId,
+        ConnectorHandle handle,
+        DateTimeOffset? since,
+        bool force,
+        CancellationToken cancellationToken)
+    {
+        var providerId = handle.Descriptor.Id;
+        var startedAt = _timeProvider.GetUtcNow();
+        var stopwatch = Stopwatch.StartNew();
+        using var scope = _logger.BeginScope(new Dictionary<string, object?>(StringComparer.Ordinal)
+        {
+            ["tenant"] = _defaultTenant,
+            ["runId"] = runId,
+            ["providerId"] = providerId,
+            ["window.since"] = since?.ToString("O", CultureInfo.InvariantCulture),
+            ["force"] = force,
+        });
 
     try
     {
         await ValidateConnectorAsync(handle, cancellationToken).ConfigureAwait(false);
-        await EnsureProviderRegistrationAsync(handle.Descriptor, session, cancellationToken).ConfigureAwait(false);
+        await EnsureProviderRegistrationAsync(handle.Descriptor, cancellationToken).ConfigureAwait(false);
 
-        if (force)
-        {
-            var resetState = new VexConnectorState(providerId, null, ImmutableArray<string>.Empty);
-            await _stateRepository.SaveAsync(resetState, cancellationToken, session).ConfigureAwait(false);
-        }
-
-        var stateBeforeRun = await _stateRepository.GetAsync(providerId, cancellationToken, session).ConfigureAwait(false);
-        var resumeTokens = stateBeforeRun?.ResumeTokens ?? ImmutableDictionary<string, string>.Empty;
-
-        var context = new VexConnectorContext(
-            since,
-            VexConnectorSettings.Empty,
-            _rawStore,
-            _signatureVerifier,
-            _normalizerRouter,
-            _serviceProvider,
-            resumeTokens);
+        if (force)
+        {
+            var resetState = new VexConnectorState(providerId, null, ImmutableArray<string>.Empty);
+            await _stateRepository.SaveAsync(resetState, cancellationToken).ConfigureAwait(false);
+        }
+
+        var stateBeforeRun = await _stateRepository.GetAsync(providerId, cancellationToken).ConfigureAwait(false);
+        var resumeTokens = stateBeforeRun?.ResumeTokens ?? ImmutableDictionary<string, string>.Empty;
+
+        var context = new VexConnectorContext(
+            since,
+            VexConnectorSettings.Empty,
+            _rawStore,
+            _signatureVerifier,
+            _normalizerRouter,
+            _serviceProvider,
+            resumeTokens);
 
     var documents = 0;
     var claims = 0;
@@ -354,25 +343,25 @@ internal sealed class VexIngestOrchestrator : IVexIngestOrchestrator
     stopwatch.Stop();
     var completedAt = _timeProvider.GetUtcNow();
 
-        var stateAfterRun = await _stateRepository.GetAsync(providerId, cancellationToken, session).ConfigureAwait(false);
-
-        var checkpoint = stateAfterRun?.DocumentDigests.IsDefaultOrEmpty == false
-            ? stateAfterRun.DocumentDigests[^1]
-            : lastDigest;
-
-        var result = new ProviderRunResult(
-            providerId,
-            "succeeded",
+        var stateAfterRun = await _stateRepository.GetAsync(providerId, cancellationToken).ConfigureAwait(false);
+
+        var checkpoint = stateAfterRun?.DocumentDigests.IsDefaultOrEmpty == false
+            ? stateAfterRun.DocumentDigests[^1]
+            : lastDigest;
+
+        var result = new ProviderRunResult(
+            providerId,
+            "succeeded",
         documents,
         claims,
         startedAt,
         completedAt,
         stopwatch.Elapsed,
-            lastDigest,
-            stateAfterRun?.LastUpdated,
-            checkpoint,
-            null,
-            since);
+            lastDigest,
+            stateAfterRun?.LastUpdated,
+            checkpoint,
+            null,
+            since);
 
     _logger.LogInformation(
         "Excititor ingest provider {ProviderId} completed: documents={Documents} claims={Claims} since={Since} duration={Duration}ms",
: null;
    }

+    private async Task PublishIfNeededAsync(string tenant, AppendLinksetResult result, CancellationToken cancellationToken)
+    {
+        if (_eventPublisher is null || !result.HadChanges)
+        {
+            return;
+        }
+
+        var evt = ToEvent(tenant, result.Linkset);
+        await _eventPublisher.PublishAsync(evt, cancellationToken).ConfigureAwait(false);
+    }
+
+    private static VexLinksetUpdatedEvent ToEvent(string tenant, VexLinkset linkset)
+    {
+        var observationRefs = linkset.Observations
+            .Select(o => new VexLinksetObservationRefCore(
+                o.ObservationId,
+                o.ProviderId,
+                o.Status,
+                o.Confidence,
+                ImmutableDictionary<string, string>.Empty))
+            .OrderBy(o => o.ProviderId, StringComparer.OrdinalIgnoreCase)
+            .ThenBy(o => o.Status, StringComparer.OrdinalIgnoreCase)
+            .ThenBy(o => o.ObservationId, StringComparer.Ordinal)
+            .ToImmutableArray();
+
+        var disagreements = linkset.Disagreements
+            .OrderBy(d => d.ProviderId, StringComparer.OrdinalIgnoreCase)
+            .ThenBy(d => d.Status, StringComparer.OrdinalIgnoreCase)
+            .ThenBy(d => d.Justification ?? string.Empty, StringComparer.OrdinalIgnoreCase)
+            .ToImmutableArray();
+
+        return new VexLinksetUpdatedEvent(
+            VexLinksetUpdatedEventFactory.EventType,
+            tenant,
+            linkset.LinksetId,
+            linkset.VulnerabilityId,
+            linkset.ProductKey,
+            linkset.Scope,
+            observationRefs,
+            disagreements,
+            linkset.UpdatedAt);
+    }
+
+    private async Task PublishIfNeededAsync(string tenant, LinksetAppendResult result, CancellationToken cancellationToken)
+    {
+        if (_eventPublisher is null || !result.HadChanges || result.Linkset is null)
+        {
+            return;
+        }
+
+        var evt = ToEvent(tenant, result.Linkset);
+        await _eventPublisher.PublishAsync(evt, cancellationToken).ConfigureAwait(false);
+    }
+
    private static string Normalize(string value)
        => VexObservation.EnsureNotNullOrWhiteSpace(value, nameof(value));
diff --git a/src/Excititor/__Libraries/StellaOps.Excititor.Core/StellaOps.Excititor.Core.csproj b/src/Excititor/__Libraries/StellaOps.Excititor.Core/StellaOps.Excititor.Core.csproj
index a8e0e4a28..2d257c78d 100644
--- a/src/Excititor/__Libraries/StellaOps.Excititor.Core/StellaOps.Excititor.Core.csproj
+++ b/src/Excititor/__Libraries/StellaOps.Excititor.Core/StellaOps.Excititor.Core.csproj
@@ -9,6 +9,7 @@
+
diff --git a/src/Excititor/__Libraries/StellaOps.Excititor.Core/Storage/MongoDriverStubs.cs b/src/Excititor/__Libraries/StellaOps.Excititor.Core/Storage/MongoDriverStubs.cs
new file mode 100644
index 000000000..5ff16f3a5
--- /dev/null
+++ b/src/Excititor/__Libraries/StellaOps.Excititor.Core/Storage/MongoDriverStubs.cs
@@ -0,0 +1,7 @@
+// Temporary stubs to allow legacy interfaces to compile while MongoDB is removed.
+// These types are intentionally minimal; they do not perform any database operations.
+namespace MongoDB.Driver;
+
+public interface IClientSessionHandle : IAsyncDisposable, IDisposable
+{
+}
diff --git a/src/Excititor/__Libraries/StellaOps.Excititor.Core/Storage/VexRawStore.cs b/src/Excititor/__Libraries/StellaOps.Excititor.Core/Storage/VexRawStore.cs
new file mode 100644
index 000000000..ae4e1a660
--- /dev/null
+++ b/src/Excititor/__Libraries/StellaOps.Excititor.Core/Storage/VexRawStore.cs
@@ -0,0 +1,80 @@
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+
+namespace StellaOps.Excititor.Core.Storage;
+
+///
+/// Query envelope for listing raw VEX documents.
+///
+public sealed record VexRawQuery(
+    string Tenant,
+    IReadOnlyCollection<string> ProviderIds,
+    IReadOnlyCollection<string> Digests,
+    IReadOnlyCollection<VexDocumentFormat> Formats,
+    DateTimeOffset? Since,
+    DateTimeOffset? Until,
+    VexRawCursor? Cursor,
+    int Limit);
+
+///
+/// Stable pagination cursor based on retrieved-at and digest ordering.
+///
+public sealed record VexRawCursor(DateTimeOffset RetrievedAt, string Digest);
+
+///
+/// Lightweight summary used for list endpoints.
+///
+public sealed record VexRawDocumentSummary(
+    string Digest,
+    string ProviderId,
+    VexDocumentFormat Format,
+    Uri SourceUri,
+    DateTimeOffset RetrievedAt,
+    bool InlineContent,
+    ImmutableDictionary<string, string> Metadata);
+
+///
+/// Paged result for raw document listings.
+///
+public sealed record VexRawDocumentPage(
+    IReadOnlyList<VexRawDocumentSummary> Items,
+    VexRawCursor? NextCursor,
+    bool HasMore);
+
+///
+/// Stored raw VEX document with canonical content and metadata.
+///
+public sealed record VexRawRecord(
+    string Digest,
+    string Tenant,
+    string ProviderId,
+    VexDocumentFormat Format,
+    Uri SourceUri,
+    DateTimeOffset RetrievedAt,
+    ImmutableDictionary<string, string> Metadata,
+    ReadOnlyMemory<byte> Content,
+    bool InlineContent,
+    string? SupersedesDigest = null,
+    string? ETag = null,
+    DateTimeOffset? RecordedAt = null);
+
+///
+/// Append-only raw document store abstraction (backed by Postgres for Excititor).
+///
+public interface IVexRawStore : IVexRawDocumentSink
+{
+    ///
+    /// Finds a raw document by digest.
+    ///
+    /// Content-addressed digest (sha256:...)
+    /// Cancellation token.
+    ValueTask<VexRawRecord?> FindByDigestAsync(string digest, CancellationToken cancellationToken);
+
+    ///
+    /// Lists raw documents using deterministic ordering.
+    ///
+    /// Query filters and pagination cursor.
+    /// Cancellation token.
+    ValueTask<VexRawDocumentPage> QueryAsync(VexRawQuery query, CancellationToken cancellationToken);
+}
diff --git a/src/Excititor/__Libraries/StellaOps.Excititor.Core/Storage/VexStorageOptions.cs b/src/Excititor/__Libraries/StellaOps.Excititor.Core/Storage/VexStorageOptions.cs
new file mode 100644
index 000000000..24f3a0984
--- /dev/null
+++ b/src/Excititor/__Libraries/StellaOps.Excititor.Core/Storage/VexStorageOptions.cs
@@ -0,0 +1,38 @@
+namespace StellaOps.Excititor.Core.Storage;
+
+///
+/// Storage options for Excititor persistence (Postgres-backed, legacy name retained for compatibility).
+///
+public class VexStorageOptions
+{
+    ///
+    /// Default tenant to apply when no tenant header is supplied.
+    ///
+    public string DefaultTenant { get; set; } = "default";
+
+    ///
+    /// Inline content threshold in bytes; larger payloads are stored in the blob table.
+    ///
+    public int InlineThresholdBytes { get; set; } = 256 * 1024;
+}
+
+///
+/// Legacy alias preserved while migrating off MongoDB-specific naming.
+///
+[System.Obsolete("Use VexStorageOptions; retained for backwards compatibility during Mongo removal.")]
+public sealed class VexMongoStorageOptions : VexStorageOptions
+{
+    ///
+    /// Historical bucket name (unused in Postgres mode).
+    ///
+    public string RawBucketName { get; set; } = "vex-raw";
+
+    ///
+    /// Backwards-compatible inline threshold property.
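+    /// Reads and writes forward to InlineThresholdBytes, so legacy Mongo-era bindings keep working unchanged.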
+ /// + public int GridFsInlineThresholdBytes + { + get => InlineThresholdBytes; + set => InlineThresholdBytes = value; + } +} diff --git a/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Migrations/001_initial_schema.sql b/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Migrations/001_initial_schema.sql index 7d9916e8f..d28083ad3 100644 --- a/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Migrations/001_initial_schema.sql +++ b/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Migrations/001_initial_schema.sql @@ -1,308 +1,92 @@ --- VEX Schema Migration 001: Initial Schema --- Creates the vex schema for VEX statements and dependency graphs +-- VEX Schema Migration 001: Append-only linksets (no Mongo, no consensus) +-- This migration defines an append-only Postgres backend for Excititor linksets, +-- observations, disagreements, and mutation logs. All operations are additive and +-- preserve deterministic ordering for audit/replay. --- Create schema CREATE SCHEMA IF NOT EXISTS vex; --- Projects table -CREATE TABLE IF NOT EXISTS vex.projects ( - id UUID PRIMARY KEY DEFAULT gen_random_uuid(), - tenant_id TEXT NOT NULL, - name TEXT NOT NULL, - display_name TEXT, - description TEXT, - repository_url TEXT, - default_branch TEXT, - settings JSONB NOT NULL DEFAULT '{}', - metadata JSONB NOT NULL DEFAULT '{}', +-- Drop legacy tables that carried mutable/consensus state +DROP TABLE IF EXISTS vex.linkset_mutations CASCADE; +DROP TABLE IF EXISTS vex.linkset_disagreements CASCADE; +DROP TABLE IF EXISTS vex.linkset_observations CASCADE; +DROP TABLE IF EXISTS vex.linksets CASCADE; +DROP TABLE IF EXISTS vex.observations CASCADE; +DROP TABLE IF EXISTS vex.consensus_holds CASCADE; +DROP TABLE IF EXISTS vex.consensus CASCADE; +DROP TABLE IF EXISTS vex.statements CASCADE; +DROP TABLE IF EXISTS vex.graph_edges CASCADE; +DROP TABLE IF EXISTS vex.graph_nodes CASCADE; +DROP TABLE IF EXISTS vex.graph_revisions CASCADE; +DROP TABLE IF EXISTS vex.projects CASCADE; +DROP TABLE IF EXISTS vex.linkset_events CASCADE; +DROP TABLE IF EXISTS vex.evidence_manifests CASCADE; +DROP TABLE IF EXISTS vex.cvss_receipts CASCADE; +DROP TABLE IF EXISTS vex.attestations CASCADE; +DROP TABLE IF EXISTS vex.timeline_events CASCADE; +DROP TABLE IF EXISTS vex.unknown_items CASCADE; +DROP TABLE IF EXISTS vex.unknowns_snapshots CASCADE; + +-- Core linkset table (append-only semantics; updated_at is refreshed on append) +CREATE TABLE vex.linksets ( + linkset_id TEXT PRIMARY KEY, + tenant TEXT NOT NULL, + vulnerability_id TEXT NOT NULL, + product_key TEXT NOT NULL, + scope JSONB NOT NULL DEFAULT '{}'::jsonb, created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - created_by TEXT, - UNIQUE(tenant_id, name) + UNIQUE (tenant, vulnerability_id, product_key) ); -CREATE INDEX idx_projects_tenant ON vex.projects(tenant_id); +CREATE INDEX idx_linksets_updated ON vex.linksets (tenant, updated_at DESC); --- Graph revisions table -CREATE TABLE IF NOT EXISTS vex.graph_revisions ( - id UUID PRIMARY KEY DEFAULT gen_random_uuid(), - project_id UUID NOT NULL REFERENCES vex.projects(id) ON DELETE CASCADE, - revision_id TEXT NOT NULL UNIQUE, - parent_revision_id TEXT, - sbom_digest TEXT NOT NULL, - feed_snapshot_id TEXT, - policy_version TEXT, - node_count INT NOT NULL DEFAULT 0, - edge_count INT NOT NULL DEFAULT 0, - metadata JSONB NOT NULL DEFAULT '{}', +-- Observation references recorded per linkset (immutable; deduplicated) +CREATE TABLE 
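+-- Dedupe contract: at most one row per (linkset_id, observation_id, provider_id, status);
+-- writers rely on ON CONFLICT DO NOTHING against the UNIQUE constraint below, so
+-- re-appending an already-recorded observation is an idempotent no-op.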
vex.linkset_observations ( + id BIGSERIAL PRIMARY KEY, + linkset_id TEXT NOT NULL REFERENCES vex.linksets(linkset_id) ON DELETE CASCADE, + observation_id TEXT NOT NULL, + provider_id TEXT NOT NULL, + status TEXT NOT NULL CHECK (status IN ('affected', 'not_affected', 'fixed', 'under_investigation')), + confidence NUMERIC(4,3), created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - created_by TEXT + UNIQUE (linkset_id, observation_id, provider_id, status) ); -CREATE INDEX idx_graph_revisions_project ON vex.graph_revisions(project_id); -CREATE INDEX idx_graph_revisions_revision ON vex.graph_revisions(revision_id); -CREATE INDEX idx_graph_revisions_created ON vex.graph_revisions(project_id, created_at DESC); +CREATE INDEX idx_linkset_observations_linkset ON vex.linkset_observations (linkset_id); +CREATE INDEX idx_linkset_observations_provider ON vex.linkset_observations (linkset_id, provider_id); +CREATE INDEX idx_linkset_observations_status ON vex.linkset_observations (linkset_id, status); --- Graph nodes table (BIGSERIAL for high volume) -CREATE TABLE IF NOT EXISTS vex.graph_nodes ( +-- Disagreements/conflicts recorded per linkset (immutable; deduplicated) +CREATE TABLE vex.linkset_disagreements ( id BIGSERIAL PRIMARY KEY, - graph_revision_id UUID NOT NULL REFERENCES vex.graph_revisions(id) ON DELETE CASCADE, - node_key TEXT NOT NULL, - node_type TEXT NOT NULL, - purl TEXT, - name TEXT, - version TEXT, - attributes JSONB NOT NULL DEFAULT '{}', - UNIQUE(graph_revision_id, node_key) -); - -CREATE INDEX idx_graph_nodes_revision ON vex.graph_nodes(graph_revision_id); -CREATE INDEX idx_graph_nodes_key ON vex.graph_nodes(graph_revision_id, node_key); -CREATE INDEX idx_graph_nodes_purl ON vex.graph_nodes(purl); -CREATE INDEX idx_graph_nodes_type ON vex.graph_nodes(graph_revision_id, node_type); - --- Graph edges table (BIGSERIAL for high volume) -CREATE TABLE IF NOT EXISTS vex.graph_edges ( - id BIGSERIAL PRIMARY KEY, - graph_revision_id UUID NOT NULL REFERENCES vex.graph_revisions(id) ON DELETE CASCADE, - from_node_id BIGINT NOT NULL REFERENCES vex.graph_nodes(id) ON DELETE CASCADE, - to_node_id BIGINT NOT NULL REFERENCES vex.graph_nodes(id) ON DELETE CASCADE, - edge_type TEXT NOT NULL, - attributes JSONB NOT NULL DEFAULT '{}' -); - -CREATE INDEX idx_graph_edges_revision ON vex.graph_edges(graph_revision_id); -CREATE INDEX idx_graph_edges_from ON vex.graph_edges(from_node_id); -CREATE INDEX idx_graph_edges_to ON vex.graph_edges(to_node_id); - --- VEX statements table -CREATE TABLE IF NOT EXISTS vex.statements ( - id UUID PRIMARY KEY DEFAULT gen_random_uuid(), - tenant_id TEXT NOT NULL, - project_id UUID REFERENCES vex.projects(id), - graph_revision_id UUID REFERENCES vex.graph_revisions(id), - vulnerability_id TEXT NOT NULL, - product_id TEXT, - status TEXT NOT NULL CHECK (status IN ( - 'not_affected', 'affected', 'fixed', 'under_investigation' - )), - justification TEXT CHECK (justification IN ( - 'component_not_present', 'vulnerable_code_not_present', - 'vulnerable_code_not_in_execute_path', 'vulnerable_code_cannot_be_controlled_by_adversary', - 'inline_mitigations_already_exist' - )), - impact_statement TEXT, - action_statement TEXT, - action_statement_timestamp TIMESTAMPTZ, - first_issued TIMESTAMPTZ NOT NULL DEFAULT NOW(), - last_updated TIMESTAMPTZ NOT NULL DEFAULT NOW(), - source TEXT, - source_url TEXT, - evidence JSONB NOT NULL DEFAULT '{}', - provenance JSONB NOT NULL DEFAULT '{}', - metadata JSONB NOT NULL DEFAULT '{}', - created_by TEXT -); - -CREATE INDEX idx_statements_tenant ON 
vex.statements(tenant_id); -CREATE INDEX idx_statements_project ON vex.statements(project_id); -CREATE INDEX idx_statements_revision ON vex.statements(graph_revision_id); -CREATE INDEX idx_statements_vuln ON vex.statements(vulnerability_id); -CREATE INDEX idx_statements_status ON vex.statements(tenant_id, status); - --- VEX observations table -CREATE TABLE IF NOT EXISTS vex.observations ( - id UUID PRIMARY KEY DEFAULT gen_random_uuid(), - tenant_id TEXT NOT NULL, - statement_id UUID REFERENCES vex.statements(id) ON DELETE CASCADE, - vulnerability_id TEXT NOT NULL, - product_id TEXT NOT NULL, - observed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - observer TEXT NOT NULL, - observation_type TEXT NOT NULL, - confidence NUMERIC(3,2), - details JSONB NOT NULL DEFAULT '{}', + linkset_id TEXT NOT NULL REFERENCES vex.linksets(linkset_id) ON DELETE CASCADE, + provider_id TEXT NOT NULL, + status TEXT NOT NULL, + justification TEXT, + confidence NUMERIC(4,3), created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - UNIQUE(tenant_id, vulnerability_id, product_id, observer, observation_type) + UNIQUE (linkset_id, provider_id, status, justification) ); -CREATE INDEX idx_observations_tenant ON vex.observations(tenant_id); -CREATE INDEX idx_observations_statement ON vex.observations(statement_id); -CREATE INDEX idx_observations_vuln ON vex.observations(vulnerability_id, product_id); +CREATE INDEX idx_linkset_disagreements_linkset ON vex.linkset_disagreements (linkset_id); --- Linksets table -CREATE TABLE IF NOT EXISTS vex.linksets ( - id UUID PRIMARY KEY DEFAULT gen_random_uuid(), - tenant_id TEXT NOT NULL, - name TEXT NOT NULL, - description TEXT, - source_type TEXT NOT NULL, - source_url TEXT, - enabled BOOLEAN NOT NULL DEFAULT TRUE, - priority INT NOT NULL DEFAULT 0, - filter JSONB NOT NULL DEFAULT '{}', - metadata JSONB NOT NULL DEFAULT '{}', - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - UNIQUE(tenant_id, name) +-- Append-only mutation log for deterministic replay/audit +CREATE TABLE vex.linkset_mutations ( + sequence_number BIGSERIAL PRIMARY KEY, + linkset_id TEXT NOT NULL REFERENCES vex.linksets(linkset_id) ON DELETE CASCADE, + mutation_type TEXT NOT NULL CHECK (mutation_type IN ('linkset_created', 'observation_added', 'disagreement_added')), + observation_id TEXT, + provider_id TEXT, + status TEXT, + confidence NUMERIC(4,3), + justification TEXT, + occurred_at TIMESTAMPTZ NOT NULL DEFAULT NOW() ); -CREATE INDEX idx_linksets_tenant ON vex.linksets(tenant_id); -CREATE INDEX idx_linksets_enabled ON vex.linksets(tenant_id, enabled, priority DESC); +CREATE INDEX idx_linkset_mutations_linkset ON vex.linkset_mutations (linkset_id, sequence_number); --- Linkset events table -CREATE TABLE IF NOT EXISTS vex.linkset_events ( - id BIGSERIAL PRIMARY KEY, - linkset_id UUID NOT NULL REFERENCES vex.linksets(id) ON DELETE CASCADE, - event_type TEXT NOT NULL, - statement_count INT NOT NULL DEFAULT 0, - error_message TEXT, - metadata JSONB NOT NULL DEFAULT '{}', - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() -); - -CREATE INDEX idx_linkset_events_linkset ON vex.linkset_events(linkset_id); -CREATE INDEX idx_linkset_events_created ON vex.linkset_events(created_at); - --- Consensus table (VEX consensus state) -CREATE TABLE IF NOT EXISTS vex.consensus ( - id UUID PRIMARY KEY DEFAULT gen_random_uuid(), - tenant_id TEXT NOT NULL, - vulnerability_id TEXT NOT NULL, - product_id TEXT NOT NULL, - consensus_status TEXT NOT NULL, - contributing_statements UUID[] NOT NULL DEFAULT '{}', - 
confidence NUMERIC(3,2), - computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - metadata JSONB NOT NULL DEFAULT '{}', - UNIQUE(tenant_id, vulnerability_id, product_id) -); - -CREATE INDEX idx_consensus_tenant ON vex.consensus(tenant_id); -CREATE INDEX idx_consensus_vuln ON vex.consensus(vulnerability_id, product_id); - --- Consensus holds table -CREATE TABLE IF NOT EXISTS vex.consensus_holds ( - id UUID PRIMARY KEY DEFAULT gen_random_uuid(), - consensus_id UUID NOT NULL REFERENCES vex.consensus(id) ON DELETE CASCADE, - hold_type TEXT NOT NULL, - reason TEXT NOT NULL, - held_by TEXT NOT NULL, - held_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - released_at TIMESTAMPTZ, - released_by TEXT, - metadata JSONB NOT NULL DEFAULT '{}' -); - -CREATE INDEX idx_consensus_holds_consensus ON vex.consensus_holds(consensus_id); -CREATE INDEX idx_consensus_holds_active ON vex.consensus_holds(consensus_id, released_at) - WHERE released_at IS NULL; - --- Unknown snapshots table -CREATE TABLE IF NOT EXISTS vex.unknowns_snapshots ( - id UUID PRIMARY KEY DEFAULT gen_random_uuid(), - tenant_id TEXT NOT NULL, - project_id UUID REFERENCES vex.projects(id), - graph_revision_id UUID REFERENCES vex.graph_revisions(id), - snapshot_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - unknown_count INT NOT NULL DEFAULT 0, - metadata JSONB NOT NULL DEFAULT '{}' -); - -CREATE INDEX idx_unknowns_snapshots_tenant ON vex.unknowns_snapshots(tenant_id); -CREATE INDEX idx_unknowns_snapshots_project ON vex.unknowns_snapshots(project_id); - --- Unknown items table -CREATE TABLE IF NOT EXISTS vex.unknown_items ( - id UUID PRIMARY KEY DEFAULT gen_random_uuid(), - snapshot_id UUID NOT NULL REFERENCES vex.unknowns_snapshots(id) ON DELETE CASCADE, - vulnerability_id TEXT NOT NULL, - product_id TEXT, - reason TEXT NOT NULL, - metadata JSONB NOT NULL DEFAULT '{}' -); - -CREATE INDEX idx_unknown_items_snapshot ON vex.unknown_items(snapshot_id); -CREATE INDEX idx_unknown_items_vuln ON vex.unknown_items(vulnerability_id); - --- Evidence manifests table -CREATE TABLE IF NOT EXISTS vex.evidence_manifests ( - id UUID PRIMARY KEY DEFAULT gen_random_uuid(), - tenant_id TEXT NOT NULL, - statement_id UUID REFERENCES vex.statements(id) ON DELETE CASCADE, - manifest_type TEXT NOT NULL, - content_hash TEXT NOT NULL, - content JSONB NOT NULL, - source TEXT, - collected_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - metadata JSONB NOT NULL DEFAULT '{}' -); - -CREATE INDEX idx_evidence_manifests_tenant ON vex.evidence_manifests(tenant_id); -CREATE INDEX idx_evidence_manifests_statement ON vex.evidence_manifests(statement_id); - --- CVSS receipts table -CREATE TABLE IF NOT EXISTS vex.cvss_receipts ( - id UUID PRIMARY KEY DEFAULT gen_random_uuid(), - tenant_id TEXT NOT NULL, - statement_id UUID REFERENCES vex.statements(id) ON DELETE CASCADE, - vulnerability_id TEXT NOT NULL, - cvss_version TEXT NOT NULL, - vector_string TEXT NOT NULL, - base_score NUMERIC(3,1) NOT NULL, - environmental_score NUMERIC(3,1), - temporal_score NUMERIC(3,1), - computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - metadata JSONB NOT NULL DEFAULT '{}' -); - -CREATE INDEX idx_cvss_receipts_tenant ON vex.cvss_receipts(tenant_id); -CREATE INDEX idx_cvss_receipts_statement ON vex.cvss_receipts(statement_id); -CREATE INDEX idx_cvss_receipts_vuln ON vex.cvss_receipts(vulnerability_id); - --- Attestations table -CREATE TABLE IF NOT EXISTS vex.attestations ( - id UUID PRIMARY KEY DEFAULT gen_random_uuid(), - tenant_id TEXT NOT NULL, - statement_id UUID REFERENCES vex.statements(id), - subject_digest TEXT NOT NULL, 
-    predicate_type TEXT NOT NULL,
-    predicate JSONB NOT NULL,
-    signature TEXT,
-    signature_algorithm TEXT,
-    signed_by TEXT,
-    signed_at TIMESTAMPTZ,
-    verified BOOLEAN NOT NULL DEFAULT FALSE,
-    verified_at TIMESTAMPTZ,
-    metadata JSONB NOT NULL DEFAULT '{}',
-    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
-);
-
-CREATE INDEX idx_attestations_tenant ON vex.attestations(tenant_id);
-CREATE INDEX idx_attestations_statement ON vex.attestations(statement_id);
-CREATE INDEX idx_attestations_subject ON vex.attestations(subject_digest);
-
--- Timeline events table
-CREATE TABLE IF NOT EXISTS vex.timeline_events (
-    id BIGSERIAL PRIMARY KEY,
-    tenant_id TEXT NOT NULL,
-    project_id UUID REFERENCES vex.projects(id),
-    statement_id UUID REFERENCES vex.statements(id),
-    event_type TEXT NOT NULL,
-    event_data JSONB NOT NULL DEFAULT '{}',
-    actor TEXT,
-    correlation_id TEXT,
-    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
-);
-
-CREATE INDEX idx_timeline_events_tenant ON vex.timeline_events(tenant_id);
-CREATE INDEX idx_timeline_events_project ON vex.timeline_events(project_id);
-CREATE INDEX idx_timeline_events_statement ON vex.timeline_events(statement_id);
-CREATE INDEX idx_timeline_events_created ON vex.timeline_events(tenant_id, created_at);
-CREATE INDEX idx_timeline_events_correlation ON vex.timeline_events(correlation_id);
-
--- Update timestamp function
-CREATE OR REPLACE FUNCTION vex.update_updated_at()
+-- Refresh updated_at whenever linkset rows change
+CREATE OR REPLACE FUNCTION vex.touch_updated_at()
 RETURNS TRIGGER AS $$
 BEGIN
     NEW.updated_at = NOW();
@@ -310,15 +94,6 @@ BEGIN
 END;
 $$ LANGUAGE plpgsql;

--- Triggers
-CREATE TRIGGER trg_projects_updated_at
-    BEFORE UPDATE ON vex.projects
-    FOR EACH ROW EXECUTE FUNCTION vex.update_updated_at();
-
-CREATE TRIGGER trg_linksets_updated_at
+CREATE TRIGGER trg_linksets_touch_updated_at
     BEFORE UPDATE ON vex.linksets
-    FOR EACH ROW EXECUTE FUNCTION vex.update_updated_at();
-
-CREATE TRIGGER trg_statements_updated_at
-    BEFORE UPDATE ON vex.statements
-    FOR EACH ROW EXECUTE FUNCTION vex.update_updated_at();
+    FOR EACH ROW EXECUTE FUNCTION vex.touch_updated_at();
diff --git a/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Migrations/002_vex_raw_store.sql b/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Migrations/002_vex_raw_store.sql
new file mode 100644
index 000000000..6fe3221fe
--- /dev/null
+++ b/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Migrations/002_vex_raw_store.sql
@@ -0,0 +1,47 @@
+-- VEX Raw Store Migration 002: Postgres-backed raw document and blob storage (Mongo/BSON removed)
+
+-- Raw documents (append-only)
+CREATE TABLE IF NOT EXISTS vex.vex_raw_documents (
+    digest TEXT PRIMARY KEY,
+    tenant TEXT NOT NULL,
+    provider_id TEXT NOT NULL,
+    format TEXT NOT NULL CHECK (format IN ('openvex','csaf','cyclonedx','custom','unknown')),
+    source_uri TEXT NOT NULL,
+    etag TEXT NULL,
+    retrieved_at TIMESTAMPTZ NOT NULL,
+    recorded_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    supersedes_digest TEXT NULL REFERENCES vex.vex_raw_documents(digest),
+    content_json JSONB NOT NULL,
+    content_size_bytes INT NOT NULL,
+    metadata_json JSONB NOT NULL,
+    provenance_json JSONB NOT NULL,
+    inline_payload BOOLEAN NOT NULL DEFAULT TRUE
+);
+
+-- PostgreSQL table constraints cannot contain expressions, so the etag-aware
+-- dedupe key is enforced as a unique expression index rather than a UNIQUE constraint.
+CREATE UNIQUE INDEX IF NOT EXISTS uq_vex_raw_documents_source
+    ON vex.vex_raw_documents (tenant, provider_id, source_uri, COALESCE(etag, ''));
+
+CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_tenant_retrieved ON vex.vex_raw_documents (tenant, retrieved_at DESC, digest);
+CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_provider ON vex.vex_raw_documents
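+-- Illustrative keyset page these indexes are shaped for (a sketch, not part of the
+-- migration contract): resume from a (retrieved_at, digest) cursor, newest first.
+--   SELECT digest FROM vex.vex_raw_documents
+--   WHERE tenant = $1
+--     AND (retrieved_at < $2 OR (retrieved_at = $2 AND digest > $3))
+--   ORDER BY retrieved_at DESC, digest
+--   LIMIT $4;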
(tenant, provider_id, retrieved_at DESC); +CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_supersedes ON vex.vex_raw_documents (tenant, supersedes_digest); +CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_metadata ON vex.vex_raw_documents USING GIN (metadata_json); +CREATE INDEX IF NOT EXISTS idx_vex_raw_documents_provenance ON vex.vex_raw_documents USING GIN (provenance_json); + +-- Large payloads stored separately when inline threshold exceeded +CREATE TABLE IF NOT EXISTS vex.vex_raw_blobs ( + digest TEXT PRIMARY KEY REFERENCES vex.vex_raw_documents(digest) ON DELETE CASCADE, + payload BYTEA NOT NULL, + payload_hash TEXT NOT NULL +); + +-- Optional attachment support (kept for parity with prior GridFS usage) +CREATE TABLE IF NOT EXISTS vex.vex_raw_attachments ( + digest TEXT REFERENCES vex.vex_raw_documents(digest) ON DELETE CASCADE, + name TEXT NOT NULL, + media_type TEXT NOT NULL, + payload BYTEA NOT NULL, + payload_hash TEXT NOT NULL, + PRIMARY KEY (digest, name) +); diff --git a/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Repositories/PostgresAppendOnlyLinksetStore.cs b/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Repositories/PostgresAppendOnlyLinksetStore.cs new file mode 100644 index 000000000..0205b47c1 --- /dev/null +++ b/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Repositories/PostgresAppendOnlyLinksetStore.cs @@ -0,0 +1,858 @@ +using System.Text.Json; +using Microsoft.Extensions.Logging; +using Npgsql; +using StellaOps.Excititor.Core.Observations; +using StellaOps.Infrastructure.Postgres.Repositories; + +namespace StellaOps.Excititor.Storage.Postgres.Repositories; + +/// +/// PostgreSQL implementation of backed by append-only tables. +/// Uses deterministic ordering and mutation logs for audit/replay. 
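+/// Appends are idempotent: duplicate rows are suppressed by UNIQUE constraints plus
+/// ON CONFLICT DO NOTHING, and every accepted write is journaled to vex.linkset_mutations.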
+/// +public sealed class PostgresAppendOnlyLinksetStore : RepositoryBase, IAppendOnlyLinksetStore, IVexLinksetStore +{ + private const string MutationCreated = "linkset_created"; + private const string MutationObservationAdded = "observation_added"; + private const string MutationDisagreementAdded = "disagreement_added"; + + private static readonly JsonSerializerOptions JsonOptions = new() + { + PropertyNamingPolicy = JsonNamingPolicy.CamelCase, + WriteIndented = false + }; + + public PostgresAppendOnlyLinksetStore( + ExcititorDataSource dataSource, + ILogger logger) + : base(dataSource, logger) + { + } + + public async ValueTask InsertAsync( + VexLinkset linkset, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(linkset); + + var tenant = linkset.Tenant; + var linksetId = linkset.LinksetId; + + await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken) + .ConfigureAwait(false); + await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false); + + var sequenceNumbers = new List(); + var created = await EnsureLinksetAsync( + connection, + linksetId, + tenant, + linkset.VulnerabilityId, + linkset.ProductKey, + linkset.Scope, + sequenceNumbers, + cancellationToken).ConfigureAwait(false); + + if (!created) + { + await transaction.RollbackAsync(cancellationToken).ConfigureAwait(false); + return false; + } + + foreach (var observation in linkset.Observations) + { + await InsertObservationAsync(connection, linksetId, observation, sequenceNumbers, cancellationToken) + .ConfigureAwait(false); + } + + foreach (var disagreement in linkset.Disagreements) + { + await InsertDisagreementAsync(connection, linksetId, disagreement, sequenceNumbers, cancellationToken) + .ConfigureAwait(false); + } + + await TouchLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false); + await transaction.CommitAsync(cancellationToken).ConfigureAwait(false); + return true; + } + + public async ValueTask UpsertAsync( + VexLinkset linkset, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(linkset); + + var tenant = linkset.Tenant; + var linksetId = linkset.LinksetId; + + await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken) + .ConfigureAwait(false); + await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false); + + var sequenceNumbers = new List(); + var created = await EnsureLinksetAsync( + connection, + linksetId, + tenant, + linkset.VulnerabilityId, + linkset.ProductKey, + linkset.Scope, + sequenceNumbers, + cancellationToken).ConfigureAwait(false); + + foreach (var observation in linkset.Observations) + { + await InsertObservationAsync(connection, linksetId, observation, sequenceNumbers, cancellationToken) + .ConfigureAwait(false); + } + + foreach (var disagreement in linkset.Disagreements) + { + await InsertDisagreementAsync(connection, linksetId, disagreement, sequenceNumbers, cancellationToken) + .ConfigureAwait(false); + } + + if (created || sequenceNumbers.Count > 0) + { + await TouchLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false); + } + + await transaction.CommitAsync(cancellationToken).ConfigureAwait(false); + return created; + } + + public async ValueTask GetOrCreateAsync( + string tenant, + string vulnerabilityId, + string productKey, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(tenant); 
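+        // The linkset id is a deterministic function of (tenant, vulnerability, product),
+        // so concurrent creators converge on the same row; the losing INSERT collapses
+        // into ON CONFLICT DO NOTHING inside EnsureLinksetAsync.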
+ ArgumentNullException.ThrowIfNull(vulnerabilityId); + ArgumentNullException.ThrowIfNull(productKey); + + var linksetId = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey); + + await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken) + .ConfigureAwait(false); + + var existing = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false); + if (existing is not null) + { + return existing; + } + + var sequenceNumbers = new List(); + await EnsureLinksetAsync( + connection, + linksetId, + tenant, + vulnerabilityId, + productKey, + VexProductScope.Unknown(productKey), + sequenceNumbers, + cancellationToken).ConfigureAwait(false); + + return await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false) + ?? throw new InvalidOperationException($"Failed to create linkset {linksetId}."); + } + + public async ValueTask AppendObservationAsync( + string tenant, + string vulnerabilityId, + string productKey, + VexLinksetObservationRefModel observation, + VexProductScope scope, + CancellationToken cancellationToken) + { + return await AppendObservationsBatchAsync( + tenant, + vulnerabilityId, + productKey, + new[] { observation }, + scope, + cancellationToken).ConfigureAwait(false); + } + + public async ValueTask AppendObservationsBatchAsync( + string tenant, + string vulnerabilityId, + string productKey, + IEnumerable observations, + VexProductScope scope, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(tenant); + ArgumentNullException.ThrowIfNull(vulnerabilityId); + ArgumentNullException.ThrowIfNull(productKey); + ArgumentNullException.ThrowIfNull(observations); + ArgumentNullException.ThrowIfNull(scope); + + var linksetId = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey); + var observationList = observations.ToList(); + + await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken) + .ConfigureAwait(false); + await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false); + + var sequenceNumbers = new List(); + var wasCreated = await EnsureLinksetAsync(connection, linksetId, tenant, vulnerabilityId, productKey, scope, sequenceNumbers, cancellationToken) + .ConfigureAwait(false); + + var observationsAdded = 0; + foreach (var obs in observationList) + { + var added = await InsertObservationAsync(connection, linksetId, obs, sequenceNumbers, cancellationToken) + .ConfigureAwait(false); + if (added) + { + observationsAdded++; + } + } + + if (wasCreated || observationsAdded > 0) + { + await TouchLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false); + } + + await transaction.CommitAsync(cancellationToken).ConfigureAwait(false); + + var linkset = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false) + ?? 
throw new InvalidOperationException($"Linkset {linksetId} not found after append."); + + var sequenceNumber = await GetLatestSequenceAsync(connection, linksetId, cancellationToken).ConfigureAwait(false); + + if (observationsAdded == 0 && !wasCreated) + { + return AppendLinksetResult.NoChange(linkset, sequenceNumber); + } + + if (wasCreated) + { + return AppendLinksetResult.Created(linkset, observationsAdded, sequenceNumber); + } + + return AppendLinksetResult.Updated(linkset, observationsAdded, disagreementsAdded: 0, sequenceNumber); + } + + public async ValueTask AppendDisagreementAsync( + string tenant, + string vulnerabilityId, + string productKey, + VexObservationDisagreement disagreement, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(tenant); + ArgumentNullException.ThrowIfNull(vulnerabilityId); + ArgumentNullException.ThrowIfNull(productKey); + ArgumentNullException.ThrowIfNull(disagreement); + + var linksetId = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey); + + await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken) + .ConfigureAwait(false); + await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false); + + var sequenceNumbers = new List(); + var wasCreated = await EnsureLinksetAsync( + connection, + linksetId, + tenant, + vulnerabilityId, + productKey, + VexProductScope.Unknown(productKey), + sequenceNumbers, + cancellationToken).ConfigureAwait(false); + + var disagreementsAdded = await InsertDisagreementAsync(connection, linksetId, disagreement, sequenceNumbers, cancellationToken) + .ConfigureAwait(false) + ? 1 + : 0; + + if (wasCreated || disagreementsAdded > 0) + { + await TouchLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false); + } + + await transaction.CommitAsync(cancellationToken).ConfigureAwait(false); + + var linkset = await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false) + ?? 
throw new InvalidOperationException($"Linkset {linksetId} not found after append."); + + var sequenceNumber = await GetLatestSequenceAsync(connection, linksetId, cancellationToken).ConfigureAwait(false); + + if (disagreementsAdded == 0 && !wasCreated) + { + return AppendLinksetResult.NoChange(linkset, sequenceNumber); + } + + if (wasCreated) + { + return AppendLinksetResult.Created(linkset, observationsAdded: 0, sequenceNumber); + } + + return AppendLinksetResult.Updated(linkset, observationsAdded: 0, disagreementsAdded, sequenceNumber); + } + + public async ValueTask GetByIdAsync( + string tenant, + string linksetId, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(tenant); + ArgumentNullException.ThrowIfNull(linksetId); + + await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken) + .ConfigureAwait(false); + return await ReadLinksetAsync(connection, linksetId, cancellationToken).ConfigureAwait(false); + } + + public async ValueTask GetByKeyAsync( + string tenant, + string vulnerabilityId, + string productKey, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(tenant); + ArgumentNullException.ThrowIfNull(vulnerabilityId); + ArgumentNullException.ThrowIfNull(productKey); + + var linksetId = VexLinkset.CreateLinksetId(tenant, vulnerabilityId, productKey); + return await GetByIdAsync(tenant, linksetId, cancellationToken).ConfigureAwait(false); + } + + public async ValueTask> FindByVulnerabilityAsync( + string tenant, + string vulnerabilityId, + int limit, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(tenant); + ArgumentNullException.ThrowIfNull(vulnerabilityId); + + await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken) + .ConfigureAwait(false); + + var linksetIds = await GetLinksetIdsAsync(connection, "vulnerability_id = @vulnerability_id", cmd => + { + AddParameter(cmd, "vulnerability_id", vulnerabilityId); + AddParameter(cmd, "tenant", tenant); + AddParameter(cmd, "limit", limit); + }, cancellationToken).ConfigureAwait(false); + + return await ReadLinksetsAsync(connection, linksetIds, cancellationToken).ConfigureAwait(false); + } + + public async ValueTask> FindByProductKeyAsync( + string tenant, + string productKey, + int limit, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(tenant); + ArgumentNullException.ThrowIfNull(productKey); + + await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken) + .ConfigureAwait(false); + + var linksetIds = await GetLinksetIdsAsync(connection, "product_key = @product_key", cmd => + { + AddParameter(cmd, "product_key", productKey); + AddParameter(cmd, "tenant", tenant); + AddParameter(cmd, "limit", limit); + }, cancellationToken).ConfigureAwait(false); + + return await ReadLinksetsAsync(connection, linksetIds, cancellationToken).ConfigureAwait(false); + } + + public async ValueTask> FindWithConflictsAsync( + string tenant, + int limit, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(tenant); + + const string sql = """ + SELECT DISTINCT ls.linkset_id, ls.updated_at + FROM vex.linksets ls + JOIN vex.linkset_disagreements d ON d.linkset_id = ls.linkset_id + WHERE ls.tenant = @tenant + ORDER BY ls.updated_at DESC, ls.linkset_id + LIMIT @limit; + """; + + await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken) + 
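+            // "reader" selects the read role on ExcititorDataSource; the role-name
+            // convention is assumed from the shared Postgres infrastructure helpers.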
.ConfigureAwait(false); + await using var command = CreateCommand(sql, connection); + + AddParameter(command, "tenant", tenant); + AddParameter(command, "limit", limit); + + var linksetIds = new List(); + await using (var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false)) + { + while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false)) + { + linksetIds.Add(reader.GetString(0)); + } + } + + return await ReadLinksetsAsync(connection, linksetIds, cancellationToken).ConfigureAwait(false); + } + + public async ValueTask> FindByProviderAsync( + string tenant, + string providerId, + int limit, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(tenant); + ArgumentNullException.ThrowIfNull(providerId); + + const string sql = """ + SELECT DISTINCT ls.linkset_id, ls.updated_at + FROM vex.linksets ls + JOIN vex.linkset_observations o ON o.linkset_id = ls.linkset_id + WHERE ls.tenant = @tenant AND o.provider_id = @provider_id + ORDER BY ls.updated_at DESC, ls.linkset_id + LIMIT @limit; + """; + + await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken) + .ConfigureAwait(false); + await using var command = CreateCommand(sql, connection); + AddParameter(command, "tenant", tenant); + AddParameter(command, "provider_id", providerId); + AddParameter(command, "limit", limit); + + var ids = new List(); + await using (var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false)) + { + while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false)) + { + ids.Add(reader.GetString(0)); + } + } + + return await ReadLinksetsAsync(connection, ids, cancellationToken).ConfigureAwait(false); + } + + public ValueTask DeleteAsync( + string tenant, + string linksetId, + CancellationToken cancellationToken) + { + // Append-only store does not support deletions; signal no-op. + return ValueTask.FromResult(false); + } + + public async ValueTask CountAsync(string tenant, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(tenant); + const string sql = "SELECT COUNT(*) FROM vex.linksets WHERE tenant = @tenant;"; + + await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken) + .ConfigureAwait(false); + await using var command = CreateCommand(sql, connection); + AddParameter(command, "tenant", tenant); + + var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false); + return result is long count ? count : Convert.ToInt64(result); + } + + public async ValueTask CountWithConflictsAsync(string tenant, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(tenant); + const string sql = """ + SELECT COUNT(DISTINCT ls.linkset_id) + FROM vex.linksets ls + JOIN vex.linkset_disagreements d ON d.linkset_id = ls.linkset_id + WHERE ls.tenant = @tenant; + """; + + await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken) + .ConfigureAwait(false); + await using var command = CreateCommand(sql, connection); + AddParameter(command, "tenant", tenant); + + var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false); + return result is long count ? 
count : Convert.ToInt64(result); + } + + public async ValueTask> GetMutationLogAsync( + string tenant, + string linksetId, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(tenant); + ArgumentNullException.ThrowIfNull(linksetId); + + const string sql = """ + SELECT sequence_number, mutation_type, occurred_at, observation_id, provider_id, status, confidence, justification + FROM vex.linkset_mutations + WHERE linkset_id = @linkset_id + ORDER BY sequence_number; + """; + + await using var connection = await DataSource.OpenConnectionAsync(tenant, "reader", cancellationToken) + .ConfigureAwait(false); + await using var command = CreateCommand(sql, connection); + AddParameter(command, "linkset_id", linksetId); + + var mutations = new List(); + await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false); + while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false)) + { + mutations.Add(new LinksetMutationEvent( + sequenceNumber: reader.GetInt64(0), + mutationType: reader.GetString(1), + timestamp: reader.GetFieldValue(2), + observationId: GetNullableString(reader, 3), + providerId: GetNullableString(reader, 4), + status: GetNullableString(reader, 5), + confidence: reader.IsDBNull(6) ? null : reader.GetDouble(6), + justification: GetNullableString(reader, 7))); + } + + return mutations; + } + + private async Task EnsureLinksetAsync( + NpgsqlConnection connection, + string linksetId, + string tenant, + string vulnerabilityId, + string productKey, + VexProductScope scope, + List sequenceNumbers, + CancellationToken cancellationToken) + { + const string sql = """ + INSERT INTO vex.linksets (linkset_id, tenant, vulnerability_id, product_key, scope) + VALUES (@linkset_id, @tenant, @vulnerability_id, @product_key, @scope::jsonb) + ON CONFLICT (linkset_id) DO NOTHING + RETURNING linkset_id; + """; + + await using var command = CreateCommand(sql, connection); + AddParameter(command, "linkset_id", linksetId); + AddParameter(command, "tenant", tenant); + AddParameter(command, "vulnerability_id", vulnerabilityId); + AddParameter(command, "product_key", productKey); + AddJsonbParameter(command, "scope", SerializeScope(scope)); + + var inserted = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false); + if (inserted is not null) + { + var seq = await InsertMutationAsync(connection, linksetId, MutationCreated, null, null, null, null, null, cancellationToken) + .ConfigureAwait(false); + sequenceNumbers.Add(seq); + return true; + } + + return false; + } + + private async Task InsertObservationAsync( + NpgsqlConnection connection, + string linksetId, + VexLinksetObservationRefModel observation, + List sequenceNumbers, + CancellationToken cancellationToken) + { + const string sql = """ + INSERT INTO vex.linkset_observations ( + linkset_id, observation_id, provider_id, status, confidence) + VALUES (@linkset_id, @observation_id, @provider_id, @status, @confidence) + ON CONFLICT DO NOTHING + RETURNING id; + """; + + await using var command = CreateCommand(sql, connection); + AddParameter(command, "linkset_id", linksetId); + AddParameter(command, "observation_id", observation.ObservationId); + AddParameter(command, "provider_id", observation.ProviderId); + AddParameter(command, "status", observation.Status); + AddParameter(command, "confidence", observation.Confidence); + + var inserted = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false); + if (inserted is not null) + { + var seq = await 
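+            // Journal the accepted row; mutation sequence numbers give the total order
+            // that GetMutationLogAsync replays.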
InsertMutationAsync( + connection, + linksetId, + MutationObservationAdded, + observation.ObservationId, + observation.ProviderId, + observation.Status, + observation.Confidence, + null, + cancellationToken).ConfigureAwait(false); + sequenceNumbers.Add(seq); + return true; + } + + return false; + } + + private async Task InsertDisagreementAsync( + NpgsqlConnection connection, + string linksetId, + VexObservationDisagreement disagreement, + List sequenceNumbers, + CancellationToken cancellationToken) + { + const string sql = """ + INSERT INTO vex.linkset_disagreements ( + linkset_id, provider_id, status, justification, confidence) + VALUES (@linkset_id, @provider_id, @status, @justification, @confidence) + ON CONFLICT DO NOTHING + RETURNING id; + """; + + await using var command = CreateCommand(sql, connection); + AddParameter(command, "linkset_id", linksetId); + AddParameter(command, "provider_id", disagreement.ProviderId); + AddParameter(command, "status", disagreement.Status); + AddParameter(command, "justification", disagreement.Justification); + AddParameter(command, "confidence", disagreement.Confidence); + + var inserted = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false); + if (inserted is not null) + { + var seq = await InsertMutationAsync( + connection, + linksetId, + MutationDisagreementAdded, + null, + disagreement.ProviderId, + disagreement.Status, + disagreement.Confidence, + disagreement.Justification, + cancellationToken).ConfigureAwait(false); + sequenceNumbers.Add(seq); + return true; + } + + return false; + } + + private async Task InsertMutationAsync( + NpgsqlConnection connection, + string linksetId, + string mutationType, + string? observationId, + string? providerId, + string? status, + double? confidence, + string? justification, + CancellationToken cancellationToken) + { + const string sql = """ + INSERT INTO vex.linkset_mutations ( + linkset_id, mutation_type, observation_id, provider_id, status, confidence, justification) + VALUES (@linkset_id, @mutation_type, @observation_id, @provider_id, @status, @confidence, @justification) + RETURNING sequence_number; + """; + + await using var command = CreateCommand(sql, connection); + AddParameter(command, "linkset_id", linksetId); + AddParameter(command, "mutation_type", mutationType); + AddParameter(command, "observation_id", observationId); + AddParameter(command, "provider_id", providerId); + AddParameter(command, "status", status); + AddParameter(command, "confidence", confidence); + AddParameter(command, "justification", justification); + + var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false) + ?? 
throw new InvalidOperationException("Failed to insert mutation log entry."); + + return Convert.ToInt64(result); + } + + private static async Task TouchLinksetAsync( + NpgsqlConnection connection, + string linksetId, + CancellationToken cancellationToken) + { + const string sql = "UPDATE vex.linksets SET updated_at = NOW() WHERE linkset_id = @linkset_id;"; + await using var command = new NpgsqlCommand(sql, connection); + command.Parameters.AddWithValue("linkset_id", linksetId); + await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false); + } + + private async Task GetLatestSequenceAsync( + NpgsqlConnection connection, + string linksetId, + CancellationToken cancellationToken) + { + const string sql = "SELECT COALESCE(MAX(sequence_number), 0) FROM vex.linkset_mutations WHERE linkset_id = @linkset_id;"; + await using var command = CreateCommand(sql, connection); + AddParameter(command, "linkset_id", linksetId); + var result = await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false); + return result is long value ? value : Convert.ToInt64(result); + } + + private async Task> GetLinksetIdsAsync( + NpgsqlConnection connection, + string predicate, + Action configure, + CancellationToken cancellationToken) + { + var sql = $""" + SELECT linkset_id + FROM vex.linksets + WHERE {predicate} AND tenant = @tenant + ORDER BY updated_at DESC, linkset_id + LIMIT @limit; + """; + + await using var command = CreateCommand(sql, connection); + configure(command); + + var ids = new List(); + await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false); + while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false)) + { + ids.Add(reader.GetString(0)); + } + return ids; + } + + private async Task> ReadLinksetsAsync( + NpgsqlConnection connection, + IReadOnlyList linksetIds, + CancellationToken cancellationToken) + { + var results = new List(); + foreach (var id in linksetIds) + { + var linkset = await ReadLinksetAsync(connection, id, cancellationToken).ConfigureAwait(false); + if (linkset is not null) + { + results.Add(linkset); + } + } + return results; + } + + private async Task ReadLinksetAsync( + NpgsqlConnection connection, + string linksetId, + CancellationToken cancellationToken) + { + const string sql = """ + SELECT linkset_id, tenant, vulnerability_id, product_key, scope::text, created_at, updated_at + FROM vex.linksets + WHERE linkset_id = @linkset_id; + """; + + await using var command = CreateCommand(sql, connection); + AddParameter(command, "linkset_id", linksetId); + + await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false); + if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false)) + { + return null; + } + + var id = reader.GetString(0); + var tenant = reader.GetString(1); + var vulnerabilityId = reader.GetString(2); + var productKey = reader.GetString(3); + var scopeJson = reader.GetString(4); + var createdAt = reader.GetFieldValue(5); + var updatedAt = reader.GetFieldValue(6); + + var scope = DeserializeScope(scopeJson) ?? 
VexProductScope.Unknown(productKey); + + await reader.CloseAsync(); + + var observations = await ReadObservationsAsync(connection, linksetId, cancellationToken).ConfigureAwait(false); + var disagreements = await ReadDisagreementsAsync(connection, linksetId, cancellationToken).ConfigureAwait(false); + + return new VexLinkset( + id, + tenant, + vulnerabilityId, + productKey, + scope, + observations, + disagreements, + createdAt, + updatedAt); + } + + private async Task> ReadObservationsAsync( + NpgsqlConnection connection, + string linksetId, + CancellationToken cancellationToken) + { + const string sql = """ + SELECT observation_id, provider_id, status, confidence + FROM vex.linkset_observations + WHERE linkset_id = @linkset_id + ORDER BY provider_id, status, observation_id; + """; + + await using var command = CreateCommand(sql, connection); + AddParameter(command, "linkset_id", linksetId); + + var observations = new List(); + await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false); + while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false)) + { + observations.Add(new VexLinksetObservationRefModel( + reader.GetString(0), + reader.GetString(1), + reader.GetString(2), + reader.IsDBNull(3) ? null : reader.GetDouble(3))); + } + + return observations; + } + + private async Task> ReadDisagreementsAsync( + NpgsqlConnection connection, + string linksetId, + CancellationToken cancellationToken) + { + const string sql = """ + SELECT provider_id, status, justification, confidence + FROM vex.linkset_disagreements + WHERE linkset_id = @linkset_id + ORDER BY provider_id, status, COALESCE(justification, ''), id; + """; + + await using var command = CreateCommand(sql, connection); + AddParameter(command, "linkset_id", linksetId); + + var disagreements = new List(); + await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false); + while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false)) + { + disagreements.Add(new VexObservationDisagreement( + reader.GetString(0), + reader.GetString(1), + GetNullableString(reader, 2), + reader.IsDBNull(3) ? null : reader.GetDouble(3))); + } + + return disagreements; + } + + private static string? SerializeScope(VexProductScope scope) + { + return JsonSerializer.Serialize(scope, JsonOptions); + } + + private static VexProductScope? DeserializeScope(string? json) + { + if (string.IsNullOrWhiteSpace(json)) + { + return null; + } + + return JsonSerializer.Deserialize(json, JsonOptions); + } +} diff --git a/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Repositories/PostgresVexRawStore.cs b/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Repositories/PostgresVexRawStore.cs new file mode 100644 index 000000000..33e0aa8bb --- /dev/null +++ b/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/Repositories/PostgresVexRawStore.cs @@ -0,0 +1,441 @@ +using System; +using System.Buffers; +using System.Collections.Immutable; +using System.Linq; +using System.Text; +using System.Text.Json; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using Npgsql; +using NpgsqlTypes; +using StellaOps.Excititor.Core; +using StellaOps.Excititor.Core.Storage; +using StellaOps.Infrastructure.Postgres.Repositories; + +namespace StellaOps.Excititor.Storage.Postgres.Repositories; + +/// +/// PostgreSQL-backed implementation of replacing Mongo/GridFS. 
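+/// Canonicalized JSON at or below the configured inline threshold stays in content_json;
+/// larger payloads spill to vex.vex_raw_blobs keyed by digest.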
+/// +public sealed class PostgresVexRawStore : RepositoryBase, IVexRawStore +{ + private readonly int _inlineThreshold; + + public PostgresVexRawStore( + ExcititorDataSource dataSource, + IOptions options, + ILogger logger) + : base(dataSource, logger) + { + if (options is null) + { + throw new ArgumentNullException(nameof(options)); + } + + _inlineThreshold = Math.Max(1, options.Value?.InlineThresholdBytes ?? 256 * 1024); + } + + public async ValueTask StoreAsync(VexRawDocument document, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(document); + + var canonicalContent = CanonicalizeJson(document.Content); + var digest = EnsureDigest(document.Digest, canonicalContent); + var metadata = document.Metadata ?? ImmutableDictionary.Empty; + var tenant = ResolveTenant(metadata); + var format = document.Format.ToString().ToLowerInvariant(); + var providerId = document.ProviderId; + var sourceUri = document.SourceUri.ToString(); + var retrievedAt = document.RetrievedAt.UtcDateTime; + var inline = canonicalContent.Length <= _inlineThreshold; + + await using var connection = await DataSource.OpenConnectionAsync(tenant, "writer", cancellationToken) + .ConfigureAwait(false); + await using var transaction = await connection.BeginTransactionAsync(cancellationToken).ConfigureAwait(false); + + var metadataJson = JsonSerializer.Serialize(metadata, JsonSerializerOptions); + // Provenance is currently stored as a clone of metadata; callers may slice it as needed. + var provenanceJson = metadataJson; + var contentJson = GetJsonString(canonicalContent); + + const string insertDocumentSql = """ + INSERT INTO vex.vex_raw_documents ( + digest, + tenant, + provider_id, + format, + source_uri, + etag, + retrieved_at, + supersedes_digest, + content_json, + content_size_bytes, + metadata_json, + provenance_json, + inline_payload) + VALUES ( + @digest, + @tenant, + @provider_id, + @format, + @source_uri, + @etag, + @retrieved_at, + @supersedes_digest, + @content_json::jsonb, + @content_size_bytes, + @metadata_json::jsonb, + @provenance_json::jsonb, + @inline_payload) + ON CONFLICT (digest) DO NOTHING; + """; + + await using (var command = CreateCommand(insertDocumentSql, connection, transaction)) + { + AddParameter(command, "digest", digest); + AddParameter(command, "tenant", tenant); + AddParameter(command, "provider_id", providerId); + AddParameter(command, "format", format); + AddParameter(command, "source_uri", sourceUri); + AddParameter(command, "etag", metadata.TryGetValue("etag", out var etag) ? etag : null); + AddParameter(command, "retrieved_at", retrievedAt); + AddParameter(command, "supersedes_digest", metadata.TryGetValue("supersedes", out var supersedes) ? 
supersedes : null);
+            AddJsonbParameter(command, "content_json", contentJson);
+            AddParameter(command, "content_size_bytes", canonicalContent.Length);
+            AddJsonbParameter(command, "metadata_json", metadataJson);
+            AddJsonbParameter(command, "provenance_json", provenanceJson);
+            AddParameter(command, "inline_payload", inline);
+
+            await command.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
+        }
+
+        if (!inline)
+        {
+            const string insertBlobSql = """
+                INSERT INTO vex.vex_raw_blobs (digest, payload, payload_hash)
+                VALUES (@digest, @payload, @payload_hash)
+                ON CONFLICT (digest) DO NOTHING;
+                """;
+
+            await using var blobCommand = CreateCommand(insertBlobSql, connection, transaction);
+            AddParameter(blobCommand, "digest", digest);
+            blobCommand.Parameters.Add(new NpgsqlParameter("payload", NpgsqlDbType.Bytea)
+            {
+                Value = canonicalContent.ToArray()
+            });
+            AddParameter(blobCommand, "payload_hash", digest);
+            await blobCommand.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
+        }
+
+        await transaction.CommitAsync(cancellationToken).ConfigureAwait(false);
+    }
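+    // Storage layout note: content_json is always persisted as canonical jsonb;
+    // payloads above the inline threshold additionally get a vex_raw_blobs row,
+    // and reads prefer those raw bytes whenever the blob row is present.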
+
+    public async ValueTask<VexRawRecord?> FindByDigestAsync(string digest, CancellationToken cancellationToken)
+    {
+        ArgumentException.ThrowIfNullOrWhiteSpace(digest);
+
+        const string sql = """
+            SELECT d.digest,
+                   d.tenant,
+                   d.provider_id,
+                   d.format,
+                   d.source_uri,
+                   d.retrieved_at,
+                   d.metadata_json,
+                   d.inline_payload,
+                   d.content_json,
+                   d.supersedes_digest,
+                   d.etag,
+                   d.recorded_at,
+                   b.payload
+            FROM vex.vex_raw_documents d
+            LEFT JOIN vex.vex_raw_blobs b ON b.digest = d.digest
+            WHERE d.digest = @digest;
+            """;
+
+        await using var connection = await DataSource.OpenConnectionAsync("public", "reader", cancellationToken)
+            .ConfigureAwait(false);
+        await using var command = CreateCommand(sql, connection);
+        AddParameter(command, "digest", digest);
+
+        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
+        if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
+        {
+            return null;
+        }
+
+        var tenant = reader.GetString(1);
+        var providerId = reader.GetString(2);
+        var format = ParseFormat(reader.GetString(3));
+        var sourceUri = new Uri(reader.GetString(4));
+        var retrievedAt = reader.GetFieldValue<DateTime>(5);
+        var metadata = ParseMetadata(reader.GetString(6));
+        var inline = reader.GetFieldValue<bool>(7);
+        var contentJson = reader.GetString(8);
+        var supersedes = reader.IsDBNull(9) ? null : reader.GetString(9);
+        var etag = reader.IsDBNull(10) ? null : reader.GetString(10);
+        var recordedAt = reader.IsDBNull(11) ? (DateTimeOffset?)null : reader.GetFieldValue<DateTimeOffset>(11);
+        ReadOnlyMemory<byte> contentBytes;
+
+        if (!inline && !reader.IsDBNull(12))
+        {
+            contentBytes = (byte[])reader.GetValue(12);
+        }
+        else
+        {
+            contentBytes = Encoding.UTF8.GetBytes(contentJson);
+        }
+
+        return new VexRawRecord(
+            digest,
+            tenant,
+            providerId,
+            format,
+            sourceUri,
+            new DateTimeOffset(retrievedAt, TimeSpan.Zero),
+            metadata,
+            contentBytes,
+            inline,
+            supersedes,
+            etag,
+            recordedAt);
+    }
+
+    public async ValueTask<VexRawDocumentPage> QueryAsync(VexRawQuery query, CancellationToken cancellationToken)
+    {
+        ArgumentNullException.ThrowIfNull(query);
+
+        var conditions = new List<string> { "tenant = @tenant" };
+        if (query.ProviderIds.Count > 0)
+        {
+            conditions.Add("provider_id = ANY(@providers)");
+        }
+
+        if (query.Digests.Count > 0)
+        {
+            conditions.Add("digest = ANY(@digests)");
+        }
+
+        if (query.Formats.Count > 0)
+        {
+            conditions.Add("format = ANY(@formats)");
+        }
+
+        if (query.Since is not null)
+        {
+            conditions.Add("retrieved_at >= @since");
+        }
+
+        if (query.Until is not null)
+        {
+            conditions.Add("retrieved_at <= @until");
+        }
+
+        if (query.Cursor is not null)
+        {
+            conditions.Add("(retrieved_at < @cursor_retrieved_at OR (retrieved_at = @cursor_retrieved_at AND digest < @cursor_digest))");
+        }
+
+        var sql = $"""
+            SELECT digest, provider_id, format, source_uri, retrieved_at, metadata_json, inline_payload
+            FROM vex.vex_raw_documents
+            WHERE {string.Join(" AND ", conditions)}
+            ORDER BY retrieved_at DESC, digest DESC
+            LIMIT @limit;
+            """;
+
+        await using var connection = await DataSource.OpenConnectionAsync(query.Tenant, "reader", cancellationToken)
+            .ConfigureAwait(false);
+        await using var command = CreateCommand(sql, connection);
+        AddParameter(command, "tenant", query.Tenant);
+        AddArray(command, "providers", query.ProviderIds);
+        AddArray(command, "digests", query.Digests);
+        AddArray(command, "formats", query.Formats.Select(static f => f.ToString().ToLowerInvariant()).ToArray());
+        if (query.Since is not null)
+        {
+            AddParameter(command, "since", query.Since.Value.UtcDateTime);
+        }
+
+        if (query.Until is not null)
+        {
+            AddParameter(command, "until", query.Until.Value.UtcDateTime);
+        }
+
+        if (query.Cursor is not null)
+        {
+            AddParameter(command, "cursor_retrieved_at", query.Cursor.RetrievedAt.UtcDateTime);
+            AddParameter(command, "cursor_digest", query.Cursor.Digest);
+        }
+
+        AddParameter(command, "limit", query.Limit);
+
+        var summaries = new List<VexRawDocumentSummary>();
+        await using var reader = await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false);
+        while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
+        {
+            var digest = reader.GetString(0);
+            var providerId = reader.GetString(1);
+            var format = ParseFormat(reader.GetString(2));
+            var sourceUri = new Uri(reader.GetString(3));
+            var retrievedAt = reader.GetFieldValue<DateTime>(4);
+            var metadata = ParseMetadata(reader.GetString(5));
+            var inline = reader.GetFieldValue<bool>(6);
+
+            summaries.Add(new VexRawDocumentSummary(
+                digest,
+                providerId,
+                format,
+                sourceUri,
+                new DateTimeOffset(retrievedAt, TimeSpan.Zero),
+                inline,
+                metadata));
+        }
+
+        var hasMore = summaries.Count == query.Limit;
+        var nextCursor = hasMore && summaries.Count > 0
+            ? new VexRawCursor(summaries[^1].RetrievedAt, summaries[^1].Digest)
+            : null;
+
+        return new VexRawDocumentPage(summaries, nextCursor, hasMore);
+    }
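The cursor predicate above is keyset pagination over (retrieved_at, digest), mirroring the ORDER BY clause. A minimal paging loop could look like the sketch below; it assumes VexRawQuery is a record exposing a Cursor member and that VexRawDocumentPage exposes NextCursor/HasMore (names inferred from the constructor call above), with rawStore being any IVexRawStore:

    var page = await rawStore.QueryAsync(query, cancellationToken);
    while (page.HasMore && page.NextCursor is not null)
    {
        query = query with { Cursor = page.NextCursor };
        page = await rawStore.QueryAsync(query, cancellationToken);
    }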
+
+    private static void AddArray(NpgsqlCommand command, string name, IReadOnlyCollection<string> values)
+    {
+        command.Parameters.Add(new NpgsqlParameter
+        {
+            ParameterName = name,
+            NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Text,
+            Value = values.Count == 0 ? Array.Empty<string>() : values.ToArray()
+        });
+    }
+
+    private static string ResolveTenant(IReadOnlyDictionary<string, string> metadata)
+    {
+        if (metadata.TryGetValue("tenant", out var tenant) && !string.IsNullOrWhiteSpace(tenant))
+        {
+            return tenant.Trim();
+        }
+
+        return "default";
+    }
+
+    private static VexDocumentFormat ParseFormat(string value)
+        => Enum.TryParse<VexDocumentFormat>(value, ignoreCase: true, out var parsed)
+            ? parsed
+            : VexDocumentFormat.Unknown;
+
+    private static ImmutableDictionary<string, string> ParseMetadata(string json)
+    {
+        if (string.IsNullOrWhiteSpace(json))
+        {
+            return ImmutableDictionary<string, string>.Empty;
+        }
+
+        try
+        {
+            // JsonDocument is IDisposable; dispose it once the builder is populated.
+            using var doc = JsonDocument.Parse(json);
+            var builder = ImmutableDictionary.CreateBuilder<string, string>(StringComparer.Ordinal);
+            foreach (var property in doc.RootElement.EnumerateObject())
+            {
+                builder[property.Name] = property.Value.ToString();
+            }
+
+            return builder.ToImmutable();
+        }
+        catch
+        {
+            return ImmutableDictionary<string, string>.Empty;
+        }
+    }
+
+    private static byte[] CanonicalizeJson(ReadOnlyMemory<byte> content)
+    {
+        using var jsonDocument = JsonDocument.Parse(content);
+        var buffer = new ArrayBufferWriter<byte>();
+        using (var writer = new Utf8JsonWriter(buffer, new JsonWriterOptions { Indented = false }))
+        {
+            WriteCanonical(writer, jsonDocument.RootElement);
+        }
+
+        return buffer.WrittenMemory.ToArray();
+    }
+
+    private static void WriteCanonical(Utf8JsonWriter writer, JsonElement element)
+    {
+        switch (element.ValueKind)
+        {
+            case JsonValueKind.Object:
+                writer.WriteStartObject();
+                foreach (var property in element.EnumerateObject().OrderBy(static p => p.Name, StringComparer.Ordinal))
+                {
+                    writer.WritePropertyName(property.Name);
+                    WriteCanonical(writer, property.Value);
+                }
+                writer.WriteEndObject();
+                break;
+            case JsonValueKind.Array:
+                writer.WriteStartArray();
+                foreach (var item in element.EnumerateArray())
+                {
+                    WriteCanonical(writer, item);
+                }
+                writer.WriteEndArray();
+                break;
+            case JsonValueKind.String:
+                writer.WriteStringValue(element.GetString());
+                break;
+            case JsonValueKind.Number:
+                if (element.TryGetInt64(out var l))
+                {
+                    writer.WriteNumberValue(l);
+                }
+                else if (element.TryGetDouble(out var d))
+                {
+                    writer.WriteNumberValue(d);
+                }
+                else
+                {
+                    writer.WriteRawValue(element.GetRawText());
+                }
+                break;
+            case JsonValueKind.True:
+                writer.WriteBooleanValue(true);
+                break;
+            case JsonValueKind.False:
+                writer.WriteBooleanValue(false);
+                break;
+            case JsonValueKind.Null:
+            case JsonValueKind.Undefined:
+                writer.WriteNullValue();
+                break;
+            default:
+                writer.WriteRawValue(element.GetRawText());
+                break;
+        }
+    }
+
+    private static string EnsureDigest(string digest, ReadOnlyMemory<byte> canonicalContent)
+    {
+        if (!string.IsNullOrWhiteSpace(digest) && digest.StartsWith("sha256:", StringComparison.OrdinalIgnoreCase))
+        {
+            return digest;
+        }
+
+        Span<byte> hash = stackalloc byte[32];
+        if (!System.Security.Cryptography.SHA256.TryHashData(canonicalContent.Span, hash, out _))
+        {
+            hash = System.Security.Cryptography.SHA256.HashData(canonicalContent.ToArray());
+        }
+
+        return "sha256:" + Convert.ToHexString(hash).ToLowerInvariant();
+    }
+
+    private static string GetJsonString(ReadOnlyMemory<byte> canonicalContent)
+        =>
Encoding.UTF8.GetString(canonicalContent.Span); + + private static readonly JsonSerializerOptions JsonSerializerOptions = new() + { + PropertyNamingPolicy = JsonNamingPolicy.CamelCase, + DictionaryKeyPolicy = JsonNamingPolicy.CamelCase + }; +} diff --git a/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/ServiceCollectionExtensions.cs b/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/ServiceCollectionExtensions.cs index 63e195c13..a331cc69c 100644 --- a/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/ServiceCollectionExtensions.cs +++ b/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/ServiceCollectionExtensions.cs @@ -1,5 +1,7 @@ using Microsoft.Extensions.Configuration; using Microsoft.Extensions.DependencyInjection; +using StellaOps.Excititor.Core.Observations; +using StellaOps.Excititor.Core.Storage; using StellaOps.Excititor.Storage.Postgres.Repositories; using StellaOps.Infrastructure.Postgres; using StellaOps.Infrastructure.Postgres.Options; @@ -24,10 +26,14 @@ public static class ServiceCollectionExtensions string sectionName = "Postgres:Excititor") { services.Configure(sectionName, configuration.GetSection(sectionName)); + services.Configure(configuration.GetSection("Excititor:Storage")); services.AddSingleton(); // Register repositories services.AddScoped(); + services.AddScoped(); + services.AddScoped(); + services.AddScoped(); return services; } @@ -47,6 +53,9 @@ public static class ServiceCollectionExtensions // Register repositories services.AddScoped(); + services.AddScoped(); + services.AddScoped(); + services.AddScoped(); return services; } diff --git a/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/StellaOps.Excititor.Storage.Postgres.csproj b/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/StellaOps.Excititor.Storage.Postgres.csproj index c590a370b..f9aef539f 100644 --- a/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/StellaOps.Excititor.Storage.Postgres.csproj +++ b/src/Excititor/__Libraries/StellaOps.Excititor.Storage.Postgres/StellaOps.Excititor.Storage.Postgres.csproj @@ -15,6 +15,7 @@ + diff --git a/src/Excititor/__Tests/StellaOps.Excititor.Storage.Mongo.Tests/StellaOps.Excititor.Storage.Mongo.Tests.csproj b/src/Excititor/__Tests/StellaOps.Excititor.Storage.Mongo.Tests/StellaOps.Excititor.Storage.Mongo.Tests.csproj index e466173be..0b9f7a4ad 100644 --- a/src/Excititor/__Tests/StellaOps.Excititor.Storage.Mongo.Tests/StellaOps.Excititor.Storage.Mongo.Tests.csproj +++ b/src/Excititor/__Tests/StellaOps.Excititor.Storage.Mongo.Tests/StellaOps.Excititor.Storage.Mongo.Tests.csproj @@ -11,6 +11,6 @@ - + - \ No newline at end of file + diff --git a/src/Excititor/__Tests/StellaOps.Excititor.Storage.Postgres.Tests/ExcititorPostgresFixture.cs b/src/Excititor/__Tests/StellaOps.Excititor.Storage.Postgres.Tests/ExcititorPostgresFixture.cs index 715076cfa..bd48bbc7c 100644 --- a/src/Excititor/__Tests/StellaOps.Excititor.Storage.Postgres.Tests/ExcititorPostgresFixture.cs +++ b/src/Excititor/__Tests/StellaOps.Excititor.Storage.Postgres.Tests/ExcititorPostgresFixture.cs @@ -15,6 +15,8 @@ public sealed class ExcititorPostgresFixture : PostgresIntegrationFixture, IColl => typeof(ExcititorDataSource).Assembly; protected override string GetModuleName() => "Excititor"; + + protected override string? 
GetResourcePrefix() => "Migrations"; } /// diff --git a/src/Excititor/__Tests/StellaOps.Excititor.Storage.Postgres.Tests/PostgresAppendOnlyLinksetStoreTests.cs b/src/Excititor/__Tests/StellaOps.Excititor.Storage.Postgres.Tests/PostgresAppendOnlyLinksetStoreTests.cs new file mode 100644 index 000000000..36a934162 --- /dev/null +++ b/src/Excititor/__Tests/StellaOps.Excititor.Storage.Postgres.Tests/PostgresAppendOnlyLinksetStoreTests.cs @@ -0,0 +1,136 @@ +using FluentAssertions; +using Microsoft.Extensions.Logging.Abstractions; +using Microsoft.Extensions.Options; +using StellaOps.Excititor.Core.Observations; +using StellaOps.Excititor.Storage.Postgres; +using StellaOps.Excititor.Storage.Postgres.Repositories; +using StellaOps.Infrastructure.Postgres.Options; +using Xunit; + +namespace StellaOps.Excititor.Storage.Postgres.Tests; + +[Collection(ExcititorPostgresCollection.Name)] +public sealed class PostgresAppendOnlyLinksetStoreTests : IAsyncLifetime +{ + private readonly ExcititorPostgresFixture _fixture; + private readonly PostgresAppendOnlyLinksetStore _store; + private readonly ExcititorDataSource _dataSource; + + public PostgresAppendOnlyLinksetStoreTests(ExcititorPostgresFixture fixture) + { + _fixture = fixture; + var options = Options.Create(new PostgresOptions + { + ConnectionString = fixture.ConnectionString, + SchemaName = fixture.SchemaName, + AutoMigrate = false + }); + + _dataSource = new ExcititorDataSource(options, NullLogger.Instance); + _store = new PostgresAppendOnlyLinksetStore(_dataSource, NullLogger.Instance); + } + + public async Task InitializeAsync() + { + await _fixture.Fixture.RunMigrationsFromAssemblyAsync( + typeof(ExcititorDataSource).Assembly, + moduleName: "Excititor", + resourcePrefix: "Migrations", + cancellationToken: CancellationToken.None); + + // Ensure migration applied even if runner skipped; execute embedded SQL directly as fallback. + var resourceName = typeof(ExcititorDataSource).Assembly + .GetManifestResourceNames() + .FirstOrDefault(n => n.EndsWith("001_initial_schema.sql", StringComparison.OrdinalIgnoreCase)); + await using var stream = resourceName is null + ? 
null + : typeof(ExcititorDataSource).Assembly.GetManifestResourceStream(resourceName); + if (stream is not null) + { + using var reader = new StreamReader(stream); + var sql = await reader.ReadToEndAsync(); + await _fixture.Fixture.ExecuteSqlAsync(sql); + } + + await _fixture.TruncateAllTablesAsync(); + } + + public async Task DisposeAsync() + { + await _dataSource.DisposeAsync(); + } + + [Fact] + public async Task AppendObservation_CreatesLinksetAndDedupes() + { + var tenant = "tenant-a"; + var vuln = "CVE-2025-1234"; + var product = "pkg:nuget/demo@1.0.0"; + var scope = VexProductScope.Unknown(product); + var observation = new VexLinksetObservationRefModel("obs-1", "provider-a", "not_affected", 0.9); + + var first = await _store.AppendObservationAsync(tenant, vuln, product, observation, scope, CancellationToken.None); + + first.WasCreated.Should().BeTrue(); + first.ObservationsAdded.Should().Be(1); + first.SequenceNumber.Should().BeGreaterThan(0); + first.Linkset.Observations.Should().HaveCount(1); + + var second = await _store.AppendObservationAsync(tenant, vuln, product, observation, scope, CancellationToken.None); + + second.HadChanges.Should().BeFalse(); + second.Linkset.Observations.Should().HaveCount(1); + second.SequenceNumber.Should().Be(first.SequenceNumber); + + var mutations = await _store.GetMutationLogAsync(tenant, first.Linkset.LinksetId, CancellationToken.None); + mutations.Select(m => m.SequenceNumber).Should().BeInAscendingOrder(); + mutations.Should().HaveCount(2); // created + observation + } + + [Fact] + public async Task AppendBatch_AppendsMultipleAndMaintainsOrder() + { + var tenant = "tenant-b"; + var vuln = "CVE-2025-2000"; + var product = "pkg:maven/demo/demo@2.0.0"; + var scope = VexProductScope.Unknown(product); + var observations = new[] + { + new VexLinksetObservationRefModel("obs-2", "provider-b", "affected", 0.7), + new VexLinksetObservationRefModel("obs-1", "provider-a", "affected", 0.8), + new VexLinksetObservationRefModel("obs-3", "provider-a", "fixed", 0.9) + }; + + var result = await _store.AppendObservationsBatchAsync(tenant, vuln, product, observations, scope, CancellationToken.None); + + result.Linkset.Observations.Should().HaveCount(3); + result.Linkset.Observations + .Select(o => $"{o.ProviderId}:{o.Status}:{o.ObservationId}") + .Should() + .ContainInOrder( + "provider-a:affected:obs-1", + "provider-a:fixed:obs-3", + "provider-b:affected:obs-2"); + result.SequenceNumber.Should().BeGreaterThan(0); + } + + [Fact] + public async Task AppendDisagreement_RegistersConflictAndCounts() + { + var tenant = "tenant-c"; + var vuln = "CVE-2025-3000"; + var product = "pkg:deb/debian/demo@1.2.3"; + var disagreement = new VexObservationDisagreement("provider-c", "not_affected", "component_not_present", 0.6); + + var result = await _store.AppendDisagreementAsync(tenant, vuln, product, disagreement, CancellationToken.None); + + result.Linkset.HasConflicts.Should().BeTrue(); + result.SequenceNumber.Should().BeGreaterThan(0); + + var conflicts = await _store.FindWithConflictsAsync(tenant, limit: 10, CancellationToken.None); + conflicts.Should().ContainSingle(ls => ls.LinksetId == result.Linkset.LinksetId); + + var conflictCount = await _store.CountWithConflictsAsync(tenant, CancellationToken.None); + conflictCount.Should().Be(1); + } +} diff --git a/src/Excititor/__Tests/StellaOps.Excititor.Storage.Postgres.Tests/StellaOps.Excititor.Storage.Postgres.Tests.csproj 
b/src/Excititor/__Tests/StellaOps.Excititor.Storage.Postgres.Tests/StellaOps.Excititor.Storage.Postgres.Tests.csproj index a98b6c53f..b998d1860 100644 --- a/src/Excititor/__Tests/StellaOps.Excititor.Storage.Postgres.Tests/StellaOps.Excititor.Storage.Postgres.Tests.csproj +++ b/src/Excititor/__Tests/StellaOps.Excititor.Storage.Postgres.Tests/StellaOps.Excititor.Storage.Postgres.Tests.csproj @@ -10,9 +10,16 @@ true + + + + + + + - + @@ -27,6 +34,7 @@ + diff --git a/src/Findings/StellaOps.Findings.Ledger.Tests/Incident/LedgerIncidentCoordinatorTests.cs b/src/Findings/StellaOps.Findings.Ledger.Tests/Incident/LedgerIncidentCoordinatorTests.cs new file mode 100644 index 000000000..f737fc639 --- /dev/null +++ b/src/Findings/StellaOps.Findings.Ledger.Tests/Incident/LedgerIncidentCoordinatorTests.cs @@ -0,0 +1,123 @@ +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using FluentAssertions; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using StellaOps.Findings.Ledger.Options; +using StellaOps.Findings.Ledger.Services.Incident; +using StellaOps.Findings.Ledger.Tests.Observability; +using StellaOps.Telemetry.Core; +using Xunit; + +namespace StellaOps.Findings.Ledger.Tests.Incident; + +public class LedgerIncidentCoordinatorTests +{ + [Fact] + public async Task Activation_updates_state_and_notifies() + { + var options = Options.Create(new LedgerIncidentOptions { RetentionExtensionDays = 45, LagTraceThresholdSeconds = 0.0 }); + var logger = new TestLogger(); + var notifier = new TestNotifier(); + var incidentService = new StubIncidentModeService(); + + using var coordinator = new LedgerIncidentCoordinator(options, logger, notifier, TimeProvider.System, incidentService); + + await incidentService.ActivateAsync("actor-a", reason: "test"); + + coordinator.IsActive.Should().BeTrue(); + coordinator.Current.RetentionExtensionDays.Should().Be(45); + notifier.Published.Should().ContainSingle(); + logger.Entries.Should().ContainSingle(e => e.EventId.Id == 6901); + } + + [Fact] + public async Task RecordProjectionLag_emits_when_active_and_above_threshold() + { + var options = Options.Create(new LedgerIncidentOptions { LagTraceThresholdSeconds = 0.1, RetentionExtensionDays = 5 }); + var logger = new TestLogger(); + var notifier = new TestNotifier(); + var incidentService = new StubIncidentModeService(); + + using var coordinator = new LedgerIncidentCoordinator(options, logger, notifier, TimeProvider.System, incidentService); + await incidentService.ActivateAsync("actor-a"); + + coordinator.RecordProjectionLag(new ProjectionLagSample( + "tenant-a", + Guid.NewGuid(), + 10, + "finding.created", + "v1", + 5.0, + DateTimeOffset.UtcNow.AddSeconds(-5), + DateTimeOffset.UtcNow)); + + logger.Entries.Should().Contain(e => e.EventId.Id == 6902); + coordinator.GetDiagnosticsSnapshot().LagSamples.Should().NotBeEmpty(); + } + + private sealed class TestNotifier : ILedgerIncidentNotifier + { + private readonly List _published = new(); + public IReadOnlyList Published => _published; + + public Task PublishIncidentModeChangedAsync(LedgerIncidentSnapshot snapshot, CancellationToken cancellationToken) + { + _published.Add(snapshot); + return Task.CompletedTask; + } + } + + private sealed class StubIncidentModeService : IIncidentModeService + { + private IncidentModeState? _state; + public bool IsActive => _state is { Enabled: true } && !_state.IsExpired; + public IncidentModeState? CurrentState => _state; + + public event EventHandler? 
Activated; + public event EventHandler? Deactivated; + + public Task ActivateAsync(string actor, string? tenantId = null, TimeSpan? ttlOverride = null, string? reason = null, CancellationToken ct = default) + { + var now = DateTimeOffset.UtcNow; + _state = new IncidentModeState + { + Enabled = true, + ActivatedAt = now, + ExpiresAt = now.AddMinutes(30), + Actor = actor, + TenantId = tenantId, + Source = IncidentModeSource.Api, + Reason = reason, + ActivationId = Guid.NewGuid().ToString("N")[..12] + }; + + Activated?.Invoke(this, new IncidentModeActivatedEventArgs { State = _state, WasReactivation = false }); + return Task.FromResult(IncidentModeActivationResult.Succeeded(_state)); + } + + public Task DeactivateAsync(string actor, string? reason = null, CancellationToken ct = default) + { + var previous = _state; + _state = null; + if (previous is not null) + { + Deactivated?.Invoke(this, new IncidentModeDeactivatedEventArgs + { + State = previous, + Reason = IncidentModeDeactivationReason.Manual, + DeactivatedBy = actor + }); + } + + return Task.FromResult(IncidentModeDeactivationResult.Succeeded(previous is not null, IncidentModeDeactivationReason.Manual)); + } + + public Task ExtendTtlAsync(TimeSpan extension, string actor, CancellationToken ct = default) => + Task.FromResult(_state?.ExpiresAt?.Add(extension)); + + public IReadOnlyDictionary GetIncidentTags() => new Dictionary(); + } +} diff --git a/src/Findings/StellaOps.Findings.Ledger.Tests/Observability/LedgerTimelineTests.cs b/src/Findings/StellaOps.Findings.Ledger.Tests/Observability/LedgerTimelineTests.cs index 50ca23277..e483bd7cf 100644 --- a/src/Findings/StellaOps.Findings.Ledger.Tests/Observability/LedgerTimelineTests.cs +++ b/src/Findings/StellaOps.Findings.Ledger.Tests/Observability/LedgerTimelineTests.cs @@ -3,6 +3,7 @@ using System.Text.Json.Nodes; using FluentAssertions; using StellaOps.Findings.Ledger.Domain; using StellaOps.Findings.Ledger.Observability; +using StellaOps.Findings.Ledger.Services.Incident; using Xunit; namespace StellaOps.Findings.Ledger.Tests.Observability; @@ -45,6 +46,49 @@ public class LedgerTimelineTests state["Status"].Should().Be("affected"); } + [Fact] + public void EmitIncidentModeChanged_writes_structured_log() + { + var logger = new TestLogger(); + var snapshot = new LedgerIncidentSnapshot( + IsActive: true, + ActivationId: "act-123", + Actor: "actor-1", + Reason: "reason", + TenantId: "tenant-a", + ChangedAt: DateTimeOffset.UtcNow, + ExpiresAt: DateTimeOffset.UtcNow.AddMinutes(10), + RetentionExtensionDays: 30); + + LedgerTimeline.EmitIncidentModeChanged(logger, snapshot, wasReactivation: false); + + var entry = logger.Entries.Single(e => e.EventId.Id == 6901); + var state = AsDictionary(entry.State); + state["RetentionExtensionDays"].Should().Be(30); + state["ActivationId"].Should().Be("act-123"); + } + + [Fact] + public void EmitIncidentLagTrace_writes_structured_log() + { + var logger = new TestLogger(); + var sample = new ProjectionLagSample( + "tenant-a", + Guid.NewGuid(), + 10, + "finding.created", + "v1", + 12.5, + DateTimeOffset.UtcNow.AddSeconds(-12), + DateTimeOffset.UtcNow); + + LedgerTimeline.EmitIncidentLagTrace(logger, sample); + + var entry = logger.Entries.Single(e => e.EventId.Id == 6902); + var state = AsDictionary(entry.State); + state["LagSeconds"].Should().Be(12.5); + } + private static LedgerEventRecord CreateRecord() { var payload = new JsonObject { ["status"] = "affected" }; diff --git 
a/src/Findings/StellaOps.Findings.Ledger.Tests/Services/LedgerEventWriteServiceIncidentTests.cs b/src/Findings/StellaOps.Findings.Ledger.Tests/Services/LedgerEventWriteServiceIncidentTests.cs new file mode 100644 index 000000000..484c9c837 --- /dev/null +++ b/src/Findings/StellaOps.Findings.Ledger.Tests/Services/LedgerEventWriteServiceIncidentTests.cs @@ -0,0 +1,81 @@ +using System; +using System.Text.Json.Nodes; +using System.Threading; +using System.Threading.Tasks; +using FluentAssertions; +using Microsoft.Extensions.Logging.Abstractions; +using Moq; +using StellaOps.Findings.Ledger.Domain; +using StellaOps.Findings.Ledger.Infrastructure; +using StellaOps.Findings.Ledger.Services; +using StellaOps.Findings.Ledger.Services.Incident; +using Xunit; + +namespace StellaOps.Findings.Ledger.Tests.Services; + +public class LedgerEventWriteServiceIncidentTests +{ + [Fact] + public async Task AppendAsync_sequence_mismatch_records_conflict_snapshot() + { + var repo = new Mock(); + repo.Setup(r => r.GetByEventIdAsync(It.IsAny(), It.IsAny(), It.IsAny())) + .ReturnsAsync((LedgerEventRecord?)null); + + var chainId = Guid.NewGuid(); + var chainHead = new LedgerEventRecord( + "tenant-a", + chainId, + 1, + Guid.NewGuid(), + LedgerEventConstants.EventFindingCreated, + "v1", + "finding-1", + "artifact-1", + null, + "actor-1", + "operator", + DateTimeOffset.UtcNow, + DateTimeOffset.UtcNow, + new JsonObject(), + "hash-prev", + LedgerEventConstants.EmptyHash, + "leaf-hash", + "{}"); + + repo.Setup(r => r.GetChainHeadAsync("tenant-a", chainId, It.IsAny())) + .ReturnsAsync(chainHead); + + var scheduler = new Mock(); + var diagnostics = new Mock(); + + var service = new LedgerEventWriteService( + repo.Object, + scheduler.Object, + NullLogger.Instance, + diagnostics.Object); + + var draft = new LedgerEventDraft( + "tenant-a", + chainId, + 3, + Guid.NewGuid(), + LedgerEventConstants.EventFindingCreated, + "v1", + "finding-1", + "artifact-1", + null, + "actor-1", + "operator", + DateTimeOffset.UtcNow, + DateTimeOffset.UtcNow, + new JsonObject(), + new JsonObject(), + null); + + var result = await service.AppendAsync(draft, CancellationToken.None); + + result.Status.Should().Be(LedgerWriteStatus.Conflict); + diagnostics.Verify(d => d.RecordConflict(It.Is(s => s.Reason == "sequence_mismatch")), Times.Once); + } +} diff --git a/src/Findings/StellaOps.Findings.Ledger.Tests/Snapshot/SnapshotServiceTests.cs b/src/Findings/StellaOps.Findings.Ledger.Tests/Snapshot/SnapshotServiceTests.cs index 23621b0d1..414bda1a5 100644 --- a/src/Findings/StellaOps.Findings.Ledger.Tests/Snapshot/SnapshotServiceTests.cs +++ b/src/Findings/StellaOps.Findings.Ledger.Tests/Snapshot/SnapshotServiceTests.cs @@ -9,6 +9,7 @@ using Microsoft.Extensions.Logging.Abstractions; using StellaOps.Findings.Ledger.Domain; using StellaOps.Findings.Ledger.Infrastructure.Snapshot; using StellaOps.Findings.Ledger.Services; +using StellaOps.Findings.Ledger.Services.Incident; using Xunit; public class SnapshotServiceTests @@ -58,6 +59,33 @@ public class SnapshotServiceTests Assert.True(result.Snapshot.ExpiresAt > DateTimeOffset.UtcNow); } + [Fact] + public async Task CreateSnapshotAsync_WhenIncidentActive_ExtendsRetention() + { + var incident = new StaticIncidentDiagnostics(new LedgerIncidentSnapshot( + IsActive: true, + ActivationId: "act-1", + Actor: "actor", + Reason: "reason", + TenantId: "tenant-incident", + ChangedAt: DateTimeOffset.UtcNow, + ExpiresAt: DateTimeOffset.UtcNow.AddHours(1), + RetentionExtensionDays: 7)); + + var service = new SnapshotService( 
+ _snapshotRepository, + _timeTravelRepository, + NullLogger.Instance, + incident); + + var result = await service.CreateSnapshotAsync( + new CreateSnapshotInput("tenant-incident", Label: "incident-snapshot", ExpiresIn: TimeSpan.FromDays(1))); + + Assert.NotNull(result.Snapshot); + Assert.True(result.Snapshot!.ExpiresAt >= DateTimeOffset.UtcNow.AddDays(1)); + Assert.Equal("enabled", result.Snapshot.Metadata?["incident.mode"]); + } + [Fact] public async Task GetSnapshotAsync_ReturnsExistingSnapshot() { @@ -371,3 +399,34 @@ internal class InMemoryTimeTravelRepository : ITimeTravelRepository TimeSpan.FromMinutes(5))); } } + +internal sealed class StaticIncidentDiagnostics : ILedgerIncidentDiagnostics +{ + public StaticIncidentDiagnostics(LedgerIncidentSnapshot current) + { + Current = current; + } + + public bool IsActive => Current.IsActive; + + public LedgerIncidentSnapshot Current { get; } + + public IncidentDiagnosticsSnapshot GetDiagnosticsSnapshot() => new( + Current, + Array.Empty(), + Array.Empty(), + Array.Empty(), + DateTimeOffset.UtcNow); + + public void RecordConflict(ConflictSnapshot snapshot) + { + } + + public void RecordProjectionLag(ProjectionLagSample sample) + { + } + + public void RecordReplayTrace(ReplayTraceSample sample) + { + } +} diff --git a/src/Findings/StellaOps.Findings.Ledger.WebService/Program.cs b/src/Findings/StellaOps.Findings.Ledger.WebService/Program.cs index 0dc10d172..fc75ae1ad 100644 --- a/src/Findings/StellaOps.Findings.Ledger.WebService/Program.cs +++ b/src/Findings/StellaOps.Findings.Ledger.WebService/Program.cs @@ -26,6 +26,10 @@ using StellaOps.Findings.Ledger.WebService.Services; using StellaOps.Telemetry.Core; using StellaOps.Findings.Ledger.Services.Security; using StellaOps.Findings.Ledger.Observability; +using StellaOps.Findings.Ledger.OpenApi; +using System.Security.Cryptography; +using System.Text; +using StellaOps.Findings.Ledger.Services.Incident; const string LedgerWritePolicy = "ledger.events.write"; const string LedgerExportPolicy = "ledger.export.read"; @@ -62,6 +66,11 @@ builder.Services.AddOptions() .PostConfigure(options => options.Validate()) .ValidateOnStart(); +builder.Services.AddOptions() + .Bind(builder.Configuration.GetSection(LedgerIncidentOptions.SectionName)) + .PostConfigure(options => options.Validate()) + .ValidateOnStart(); + builder.Services.AddSingleton(TimeProvider.System); builder.Services.AddProblemDetails(); builder.Services.AddEndpointsApiExplorer(); @@ -80,6 +89,8 @@ builder.Services.AddStellaOpsTelemetry( tracerBuilder.AddHttpClientInstrumentation(); }); +builder.Services.AddIncidentMode(builder.Configuration); + builder.Services.AddStellaOpsResourceServerAuthentication( builder.Configuration, configurationSection: null, @@ -130,6 +141,10 @@ builder.Services.AddAuthorization(options => }); }); +builder.Services.AddSingleton(); +builder.Services.AddSingleton(); +builder.Services.AddSingleton(sp => sp.GetRequiredService()); +builder.Services.AddSingleton(sp => sp.GetRequiredService()); builder.Services.AddSingleton(); builder.Services.AddSingleton(); builder.Services.AddSingleton(); @@ -232,6 +247,8 @@ app.MapGet("/ledger/export/findings", async Task { + DeprecationHeaders.Apply(httpContext.Response, "ledger.export.findings"); + if (!httpContext.Request.Headers.TryGetValue("X-Stella-Tenant", out var tenantValues) || string.IsNullOrWhiteSpace(tenantValues)) { return TypedResults.Problem(statusCode: StatusCodes.Status400BadRequest, title: "missing_tenant", detail: "X-Stella-Tenant header is required."); @@ -841,20 
+858,40 @@ app.MapPut("/v1/ledger/attestation-pointers/{pointerId}/verification", async Tas
 .Produces(StatusCodes.Status404NotFound)
 .ProducesProblem(StatusCodes.Status400BadRequest);
 
-app.MapGet("/.well-known/openapi", () =>
+app.MapGet("/.well-known/openapi", async (HttpContext context) =>
 {
     var contentRoot = AppContext.BaseDirectory;
-    var candidate = Path.GetFullPath(Path.Combine(contentRoot, "../../docs/modules/findings-ledger/openapi/findings-ledger.v1.yaml"));
-    if (!File.Exists(candidate))
+    var specPath = OpenApiMetadataFactory.GetSpecPath(contentRoot);
+
+    if (!File.Exists(specPath))
     {
         return Results.Problem(statusCode: StatusCodes.Status500InternalServerError, title: "openapi_missing", detail: "OpenAPI document not found on server.");
     }
 
-    var yaml = File.ReadAllText(candidate);
-    return Results.Text(yaml, "application/yaml");
+    var specBytes = await File.ReadAllBytesAsync(specPath, context.RequestAborted).ConfigureAwait(false);
+    var etag = OpenApiMetadataFactory.ComputeEtag(specBytes);
+
+    if (context.Request.Headers.IfNoneMatch.Any(match => string.Equals(match, etag, StringComparison.Ordinal)))
+    {
+        return Results.StatusCode(StatusCodes.Status304NotModified);
+    }
+
+    context.Response.Headers.ETag = etag;
+    context.Response.Headers.CacheControl = "public, max-age=300, must-revalidate";
+    context.Response.Headers.Append("X-Api-Version", OpenApiMetadataFactory.ApiVersion);
+    context.Response.Headers.Append("X-Build-Version", OpenApiMetadataFactory.GetBuildVersion());
+
+    var lastModified = OpenApiMetadataFactory.GetLastModified(specPath);
+    if (lastModified.HasValue)
+    {
+        context.Response.Headers.LastModified = lastModified.Value.ToString("R");
+    }
+
+    return Results.Text(Encoding.UTF8.GetString(specBytes), "application/yaml");
 })
 .WithName("LedgerOpenApiDocument")
 .Produces(StatusCodes.Status200OK)
+.Produces(StatusCodes.Status304NotModified)
 .ProducesProblem(StatusCodes.Status500InternalServerError);
 
 // Snapshot Endpoints (LEDGER-PACKS-42-001-DEV)
diff --git a/src/Findings/StellaOps.Findings.Ledger/DeprecationHeaders.cs b/src/Findings/StellaOps.Findings.Ledger/DeprecationHeaders.cs
new file mode 100644
index 000000000..5178850c5
--- /dev/null
+++ b/src/Findings/StellaOps.Findings.Ledger/DeprecationHeaders.cs
@@ -0,0 +1,25 @@
+using Microsoft.AspNetCore.Http;
+
+namespace StellaOps.Findings.Ledger;
+
+/// <summary>
+/// Applies standardized deprecation/notification headers to retiring endpoints.
+/// </summary>
+public static class DeprecationHeaders
+{
+    private const string DeprecationLink =
+        "; rel=\"deprecation\"; type=\"application/yaml\"";
+
+    public const string SunsetDate = "2026-03-31T00:00:00Z";
+
+    public static void Apply(HttpResponse response, string endpointId)
+    {
+        ArgumentNullException.ThrowIfNull(response);
+        ArgumentException.ThrowIfNullOrWhiteSpace(endpointId);
+
+        response.Headers["Deprecation"] = "true";
+        response.Headers["Sunset"] = SunsetDate;
+        response.Headers["X-Deprecated-Endpoint"] = endpointId;
+        response.Headers.Append("Link", DeprecationLink);
+    }
+}
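A response from a retiring endpoint therefore carries headers like the following (illustrative values; the Link target URI, an angle-bracketed spec URL in the original source, did not survive this patch's rendering and is not reconstructed here):

    Deprecation: true
    Sunset: 2026-03-31T00:00:00Z
    X-Deprecated-Endpoint: ledger.export.findings
    Link: ; rel="deprecation"; type="application/yaml"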
diff --git a/src/Findings/StellaOps.Findings.Ledger/Domain/ProjectionModels.cs b/src/Findings/StellaOps.Findings.Ledger/Domain/ProjectionModels.cs
index 26023d598..907885430 100644
--- a/src/Findings/StellaOps.Findings.Ledger/Domain/ProjectionModels.cs
+++ b/src/Findings/StellaOps.Findings.Ledger/Domain/ProjectionModels.cs
@@ -1,4 +1,5 @@
 using System.Text.Json.Nodes;
+using StellaOps.Findings.Ledger.Infrastructure.Attestation;
 
 namespace StellaOps.Findings.Ledger.Domain;
 
@@ -18,7 +19,12 @@ public sealed record FindingProjection(
     string? ExplainRef,
     JsonArray PolicyRationale,
     DateTimeOffset UpdatedAt,
-    string CycleHash);
+    string CycleHash,
+    int AttestationCount = 0,
+    int VerifiedAttestationCount = 0,
+    int FailedAttestationCount = 0,
+    int UnverifiedAttestationCount = 0,
+    OverallVerificationStatus AttestationStatus = OverallVerificationStatus.NoAttestations);
 
 public sealed record FindingHistoryEntry(
     string TenantId,
diff --git a/src/Findings/StellaOps.Findings.Ledger/Infrastructure/Attestation/AttestationStatusCalculator.cs b/src/Findings/StellaOps.Findings.Ledger/Infrastructure/Attestation/AttestationStatusCalculator.cs
new file mode 100644
index 000000000..ea0eac2ad
--- /dev/null
+++ b/src/Findings/StellaOps.Findings.Ledger/Infrastructure/Attestation/AttestationStatusCalculator.cs
@@ -0,0 +1,27 @@
+namespace StellaOps.Findings.Ledger.Infrastructure.Attestation;
+
+/// <summary>
+/// Computes overall attestation status from summary counts.
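For quick reference, the pure mapping implemented by the calculator below:

    AttestationStatusCalculator.Compute(0, 0); // => NoAttestations
    AttestationStatusCalculator.Compute(3, 3); // => AllVerified
    AttestationStatusCalculator.Compute(3, 1); // => PartiallyVerified
    AttestationStatusCalculator.Compute(3, 0); // => NoneVerified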
+/// +public static class AttestationStatusCalculator +{ + public static OverallVerificationStatus Compute(int attestationCount, int verifiedCount) + { + if (attestationCount <= 0) + { + return OverallVerificationStatus.NoAttestations; + } + + if (verifiedCount == attestationCount) + { + return OverallVerificationStatus.AllVerified; + } + + if (verifiedCount > 0) + { + return OverallVerificationStatus.PartiallyVerified; + } + + return OverallVerificationStatus.NoneVerified; + } +} diff --git a/src/Findings/StellaOps.Findings.Ledger/Infrastructure/Postgres/PostgresFindingProjectionRepository.cs b/src/Findings/StellaOps.Findings.Ledger/Infrastructure/Postgres/PostgresFindingProjectionRepository.cs index c468f8a8e..216e77b86 100644 --- a/src/Findings/StellaOps.Findings.Ledger/Infrastructure/Postgres/PostgresFindingProjectionRepository.cs +++ b/src/Findings/StellaOps.Findings.Ledger/Infrastructure/Postgres/PostgresFindingProjectionRepository.cs @@ -1,8 +1,10 @@ +using System.Text; using System.Text.Json.Nodes; using Microsoft.Extensions.Logging; using Npgsql; using NpgsqlTypes; using StellaOps.Findings.Ledger.Domain; +using StellaOps.Findings.Ledger.Infrastructure.Attestation; using StellaOps.Findings.Ledger.Hashing; using StellaOps.Findings.Ledger.Services; @@ -11,23 +13,43 @@ namespace StellaOps.Findings.Ledger.Infrastructure.Postgres; public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepository { private const string GetProjectionSql = """ - SELECT status, - severity, - risk_score, - risk_severity, - risk_profile_version, - risk_explanation_id, - risk_event_sequence, - labels, - current_event_id, - explain_ref, - policy_rationale, - updated_at, - cycle_hash - FROM findings_projection - WHERE tenant_id = @tenant_id - AND finding_id = @finding_id - AND policy_version = @policy_version + WITH attestation_summary AS ( + SELECT + COUNT(*) AS attestation_count, + COUNT(*) FILTER (WHERE verification_result IS NOT NULL + AND (verification_result->>'verified')::boolean = true) AS verified_count, + COUNT(*) FILTER (WHERE verification_result IS NOT NULL + AND (verification_result->>'verified')::boolean = false) AS failed_count, + COUNT(*) FILTER (WHERE verification_result IS NULL) AS unverified_count + FROM ledger_attestation_pointers ap + WHERE ap.tenant_id = @tenant_id + AND ap.finding_id = @finding_id + ) + SELECT fp.tenant_id, + fp.finding_id, + fp.policy_version, + fp.status, + fp.severity, + fp.risk_score, + fp.risk_severity, + fp.risk_profile_version, + fp.risk_explanation_id, + fp.risk_event_sequence, + fp.labels, + fp.current_event_id, + fp.explain_ref, + fp.policy_rationale, + fp.updated_at, + fp.cycle_hash, + COALESCE(a.attestation_count, 0) AS attestation_count, + COALESCE(a.verified_count, 0) AS verified_count, + COALESCE(a.failed_count, 0) AS failed_count, + COALESCE(a.unverified_count, 0) AS unverified_count + FROM findings_projection fp + LEFT JOIN attestation_summary a ON TRUE + WHERE fp.tenant_id = @tenant_id + AND fp.finding_id = @finding_id + AND fp.policy_version = @policy_version """; private const string UpsertProjectionSql = """ @@ -203,47 +225,7 @@ public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepo return null; } - var status = reader.GetString(0); - var severity = reader.IsDBNull(1) ? (decimal?)null : reader.GetDecimal(1); - var riskScore = reader.IsDBNull(2) ? (decimal?)null : reader.GetDecimal(2); - var riskSeverity = reader.IsDBNull(3) ? null : reader.GetString(3); - var riskProfileVersion = reader.IsDBNull(4) ? 
null : reader.GetString(4); - var riskExplanationId = reader.IsDBNull(5) ? (Guid?)null : reader.GetGuid(5); - var riskEventSequence = reader.IsDBNull(6) ? (long?)null : reader.GetInt64(6); - var labelsJson = reader.GetFieldValue(7); - var labels = JsonNode.Parse(labelsJson)?.AsObject() ?? new JsonObject(); - var currentEventId = reader.GetGuid(8); - var explainRef = reader.IsDBNull(9) ? null : reader.GetString(9); - var rationaleJson = reader.IsDBNull(10) ? string.Empty : reader.GetFieldValue(10); - JsonArray rationale; - if (string.IsNullOrWhiteSpace(rationaleJson)) - { - rationale = new JsonArray(); - } - else - { - rationale = JsonNode.Parse(rationaleJson) as JsonArray ?? new JsonArray(); - } - var updatedAt = reader.GetFieldValue(11); - var cycleHash = reader.GetString(12); - - return new FindingProjection( - tenantId, - findingId, - policyVersion, - status, - severity, - riskScore, - riskSeverity, - riskProfileVersion, - riskExplanationId, - riskEventSequence, - labels, - currentEventId, - explainRef, - rationale, - updatedAt, - cycleHash); + return MapProjection(reader); } public async Task UpsertAsync(FindingProjection projection, CancellationToken cancellationToken) @@ -407,7 +389,7 @@ public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepo await using var connection = await _dataSource.OpenConnectionAsync(query.TenantId, "projector", cancellationToken).ConfigureAwait(false); // Build dynamic query - var whereConditions = new List { "tenant_id = @tenant_id" }; + var whereConditions = new List { "fp.tenant_id = @tenant_id" }; var parameters = new List { new NpgsqlParameter("tenant_id", query.TenantId) { NpgsqlDbType = NpgsqlDbType.Text } @@ -415,34 +397,86 @@ public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepo if (!string.IsNullOrWhiteSpace(query.PolicyVersion)) { - whereConditions.Add("policy_version = @policy_version"); + whereConditions.Add("fp.policy_version = @policy_version"); parameters.Add(new NpgsqlParameter("policy_version", query.PolicyVersion) { NpgsqlDbType = NpgsqlDbType.Text }); } if (query.MinScore.HasValue) { - whereConditions.Add("risk_score >= @min_score"); + whereConditions.Add("fp.risk_score >= @min_score"); parameters.Add(new NpgsqlParameter("min_score", query.MinScore.Value) { NpgsqlDbType = NpgsqlDbType.Numeric }); } if (query.MaxScore.HasValue) { - whereConditions.Add("risk_score <= @max_score"); + whereConditions.Add("fp.risk_score <= @max_score"); parameters.Add(new NpgsqlParameter("max_score", query.MaxScore.Value) { NpgsqlDbType = NpgsqlDbType.Numeric }); } if (query.Severities is { Count: > 0 }) { - whereConditions.Add("risk_severity = ANY(@severities)"); + whereConditions.Add("fp.risk_severity = ANY(@severities)"); parameters.Add(new NpgsqlParameter("severities", query.Severities.ToArray()) { NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Text }); } if (query.Statuses is { Count: > 0 }) { - whereConditions.Add("status = ANY(@statuses)"); + whereConditions.Add("fp.status = ANY(@statuses)"); parameters.Add(new NpgsqlParameter("statuses", query.Statuses.ToArray()) { NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Text }); } + if (query.AttestationTypes is { Count: > 0 }) + { + parameters.Add(new NpgsqlParameter("attestation_types", query.AttestationTypes.Select(t => t.ToString()).ToArray()) + { + NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Text + }); + } + + var attestationWhere = new List(); + + if (query.AttestationVerification.HasValue && + query.AttestationVerification.Value != 
AttestationVerificationFilter.Any) + { + var filter = query.AttestationVerification.Value switch + { + AttestationVerificationFilter.Verified => "verified_count > 0", + AttestationVerificationFilter.Unverified => "unverified_count > 0", + AttestationVerificationFilter.Failed => "failed_count > 0", + _ => string.Empty + }; + + if (!string.IsNullOrWhiteSpace(filter)) + { + attestationWhere.Add(filter); + } + } + + if (query.AttestationStatus.HasValue) + { + var statusFilter = query.AttestationStatus.Value switch + { + OverallVerificationStatus.AllVerified => + "attestation_count > 0 AND verified_count = attestation_count", + OverallVerificationStatus.PartiallyVerified => + "attestation_count > 0 AND verified_count > 0 AND verified_count < attestation_count", + OverallVerificationStatus.NoneVerified => + "attestation_count > 0 AND verified_count = 0", + OverallVerificationStatus.NoAttestations => + "attestation_count = 0", + _ => string.Empty + }; + + if (!string.IsNullOrWhiteSpace(statusFilter)) + { + attestationWhere.Add(statusFilter); + } + } + + var attestationWhereClause = attestationWhere.Count > 0 + ? "WHERE " + string.Join(" AND ", attestationWhere) + : string.Empty; + var whereClause = string.Join(" AND ", whereConditions); var orderColumn = query.SortBy switch { @@ -454,8 +488,46 @@ public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepo }; var orderDirection = query.Descending ? "DESC NULLS LAST" : "ASC NULLS FIRST"; + var attestationSummarySql = new StringBuilder(@" + SELECT tenant_id, + finding_id, + COUNT(*) AS attestation_count, + COUNT(*) FILTER (WHERE verification_result IS NOT NULL + AND (verification_result->>'verified')::boolean = true) AS verified_count, + COUNT(*) FILTER (WHERE verification_result IS NOT NULL + AND (verification_result->>'verified')::boolean = false) AS failed_count, + COUNT(*) FILTER (WHERE verification_result IS NULL) AS unverified_count + FROM ledger_attestation_pointers + WHERE tenant_id = @tenant_id"); + + if (query.AttestationTypes is { Count: > 0 }) + { + attestationSummarySql.Append(" AND attestation_type = ANY(@attestation_types)"); + } + + attestationSummarySql.Append(" GROUP BY tenant_id, finding_id"); + + var cte = $@" + WITH attestation_summary AS ( + {attestationSummarySql} + ), + filtered_projection AS ( + SELECT + fp.tenant_id, fp.finding_id, fp.policy_version, fp.status, fp.severity, fp.risk_score, fp.risk_severity, + fp.risk_profile_version, fp.risk_explanation_id, fp.risk_event_sequence, fp.labels, fp.current_event_id, + fp.explain_ref, fp.policy_rationale, fp.updated_at, fp.cycle_hash, + COALESCE(a.attestation_count, 0) AS attestation_count, + COALESCE(a.verified_count, 0) AS verified_count, + COALESCE(a.failed_count, 0) AS failed_count, + COALESCE(a.unverified_count, 0) AS unverified_count + FROM findings_projection fp + LEFT JOIN attestation_summary a + ON a.tenant_id = fp.tenant_id AND a.finding_id = fp.finding_id + WHERE {whereClause} + )"; + // Count query - var countSql = $"SELECT COUNT(*) FROM findings_projection WHERE {whereClause}"; + var countSql = $"{cte} SELECT COUNT(*) FROM filtered_projection {attestationWhereClause};"; await using var countCommand = new NpgsqlCommand(countSql, connection); countCommand.CommandTimeout = _dataSource.CommandTimeoutSeconds; foreach (var p in parameters) countCommand.Parameters.Add(p.Clone()); @@ -463,12 +535,14 @@ public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepo // Data query var dataSql = $@" + {cte} SELECT tenant_id, finding_id, 
policy_version, status, severity, risk_score, risk_severity, risk_profile_version, risk_explanation_id, risk_event_sequence, labels, current_event_id, - explain_ref, policy_rationale, updated_at, cycle_hash - FROM findings_projection - WHERE {whereClause} + explain_ref, policy_rationale, updated_at, cycle_hash, + attestation_count, verified_count, failed_count, unverified_count + FROM filtered_projection + {attestationWhereClause} ORDER BY {orderColumn} {orderDirection} LIMIT @limit"; @@ -638,6 +712,12 @@ public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepo var rationaleJson = reader.GetString(13); var rationale = System.Text.Json.Nodes.JsonNode.Parse(rationaleJson) as System.Text.Json.Nodes.JsonArray ?? new System.Text.Json.Nodes.JsonArray(); + var attestationCount = reader.GetInt32(16); + var verifiedCount = reader.GetInt32(17); + var failedCount = reader.GetInt32(18); + var unverifiedCount = reader.GetInt32(19); + var attestationStatus = AttestationStatusCalculator.Compute(attestationCount, verifiedCount); + return new FindingProjection( TenantId: reader.GetString(0), FindingId: reader.GetString(1), @@ -654,6 +734,11 @@ public sealed class PostgresFindingProjectionRepository : IFindingProjectionRepo ExplainRef: reader.IsDBNull(12) ? null : reader.GetString(12), PolicyRationale: rationale, UpdatedAt: reader.GetDateTime(14), - CycleHash: reader.GetString(15)); + CycleHash: reader.GetString(15), + AttestationCount: attestationCount, + VerifiedAttestationCount: verifiedCount, + FailedAttestationCount: failedCount, + UnverifiedAttestationCount: unverifiedCount, + AttestationStatus: attestationStatus); } } diff --git a/src/Findings/StellaOps.Findings.Ledger/Infrastructure/Projection/LedgerProjectionWorker.cs b/src/Findings/StellaOps.Findings.Ledger/Infrastructure/Projection/LedgerProjectionWorker.cs index d61958849..224e5d053 100644 --- a/src/Findings/StellaOps.Findings.Ledger/Infrastructure/Projection/LedgerProjectionWorker.cs +++ b/src/Findings/StellaOps.Findings.Ledger/Infrastructure/Projection/LedgerProjectionWorker.cs @@ -8,6 +8,7 @@ using StellaOps.Findings.Ledger.Infrastructure.Policy; using StellaOps.Findings.Ledger.Options; using StellaOps.Findings.Ledger.Observability; using StellaOps.Findings.Ledger.Services; +using StellaOps.Findings.Ledger.Services.Incident; namespace StellaOps.Findings.Ledger.Infrastructure.Projection; @@ -19,6 +20,7 @@ public sealed class LedgerProjectionWorker : BackgroundService private readonly TimeProvider _timeProvider; private readonly LedgerServiceOptions.ProjectionOptions _options; private readonly ILogger _logger; + private readonly ILedgerIncidentDiagnostics? _incidentDiagnostics; public LedgerProjectionWorker( ILedgerEventStream eventStream, @@ -26,7 +28,8 @@ public sealed class LedgerProjectionWorker : BackgroundService IPolicyEvaluationService policyEvaluationService, IOptions options, TimeProvider timeProvider, - ILogger logger) + ILogger logger, + ILedgerIncidentDiagnostics? incidentDiagnostics = null) { _eventStream = eventStream ?? throw new ArgumentNullException(nameof(eventStream)); _repository = repository ?? throw new ArgumentNullException(nameof(repository)); @@ -34,6 +37,7 @@ public sealed class LedgerProjectionWorker : BackgroundService _timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider)); _options = (options ?? throw new ArgumentNullException(nameof(options))).Value.Projection; _logger = logger ?? 
throw new ArgumentNullException(nameof(logger)); + _incidentDiagnostics = incidentDiagnostics; } protected override async Task ExecuteAsync(CancellationToken stoppingToken) @@ -138,6 +142,15 @@ public sealed class LedgerProjectionWorker : BackgroundService record.PolicyVersion, evaluationStatus ?? string.Empty); LedgerTimeline.EmitProjectionUpdated(_logger, record, evaluationStatus, evidenceBundleRef: null); + _incidentDiagnostics?.RecordProjectionLag(new ProjectionLagSample( + TenantId: record.TenantId, + ChainId: record.ChainId, + SequenceNumber: record.SequenceNumber, + EventType: record.EventType, + PolicyVersion: record.PolicyVersion, + LagSeconds: lagSeconds, + RecordedAt: record.RecordedAt, + ObservedAt: now)); } catch (OperationCanceledException) when (stoppingToken.IsCancellationRequested) { diff --git a/src/Findings/StellaOps.Findings.Ledger/Observability/LedgerTimeline.cs b/src/Findings/StellaOps.Findings.Ledger/Observability/LedgerTimeline.cs index 729b3d59c..f12dabfcb 100644 --- a/src/Findings/StellaOps.Findings.Ledger/Observability/LedgerTimeline.cs +++ b/src/Findings/StellaOps.Findings.Ledger/Observability/LedgerTimeline.cs @@ -2,6 +2,7 @@ using System.Diagnostics; using Microsoft.Extensions.Logging; using StellaOps.Findings.Ledger.Domain; using StellaOps.Findings.Ledger.Infrastructure.Exports; +using StellaOps.Findings.Ledger.Services.Incident; namespace StellaOps.Findings.Ledger.Observability; @@ -23,6 +24,10 @@ internal static class LedgerTimeline private static readonly EventId TimeTravelQueryEvent = new(6803, "ledger.timetravel.query"); private static readonly EventId ReplayCompletedEvent = new(6804, "ledger.replay.completed"); private static readonly EventId DiffComputedEvent = new(6805, "ledger.diff.computed"); + private static readonly EventId IncidentModeChangedEvent = new(6901, "ledger.incident.mode"); + private static readonly EventId IncidentLagTraceEvent = new(6902, "ledger.incident.lag_trace"); + private static readonly EventId IncidentConflictSnapshotEvent = new(6903, "ledger.incident.conflict_snapshot"); + private static readonly EventId IncidentReplayTraceEvent = new(6904, "ledger.incident.replay_trace"); public static void EmitLedgerAppended(ILogger logger, LedgerEventRecord record, string? evidenceBundleRef = null) { @@ -280,4 +285,87 @@ internal static class LedgerTimeline modified, removed); } + + public static void EmitIncidentModeChanged(ILogger logger, LedgerIncidentSnapshot snapshot, bool wasReactivation) + { + if (logger is null) + { + return; + } + + logger.LogInformation( + IncidentModeChangedEvent, + "timeline ledger.incident.mode state={State} activation_id={ActivationId} actor={Actor} reason={Reason} expires_at={ExpiresAt} retention_extension_days={RetentionExtensionDays} reactivation={Reactivation}", + snapshot.IsActive ? "enabled" : "disabled", + snapshot.ActivationId ?? string.Empty, + snapshot.Actor ?? string.Empty, + snapshot.Reason ?? string.Empty, + snapshot.ExpiresAt?.ToString("O") ?? 
string.Empty,
+            snapshot.RetentionExtensionDays,
+            wasReactivation);
+    }
+
+    public static void EmitIncidentLagTrace(ILogger logger, ProjectionLagSample sample)
+    {
+        if (logger is null)
+        {
+            return;
+        }
+
+        logger.LogWarning(
+            IncidentLagTraceEvent,
+            "timeline ledger.incident.lag_trace tenant={Tenant} chain={ChainId} seq={Sequence} event_type={EventType} policy={PolicyVersion} lag_seconds={LagSeconds:0.000} recorded_at={RecordedAt} observed_at={ObservedAt}",
+            sample.TenantId,
+            sample.ChainId,
+            sample.SequenceNumber,
+            sample.EventType,
+            sample.PolicyVersion,
+            sample.LagSeconds,
+            sample.RecordedAt.ToString("O"),
+            sample.ObservedAt.ToString("O"));
+    }
+
+    public static void EmitIncidentConflictSnapshot(ILogger logger, ConflictSnapshot snapshot)
+    {
+        if (logger is null)
+        {
+            return;
+        }
+
+        logger.LogWarning(
+            IncidentConflictSnapshotEvent,
+            "timeline ledger.incident.conflict_snapshot tenant={Tenant} chain={ChainId} seq={Sequence} event_id={EventId} event_type={EventType} policy={PolicyVersion} reason={Reason} expected_seq={ExpectedSequence} actor={Actor} actor_type={ActorType} observed_at={ObservedAt}",
+            snapshot.TenantId,
+            snapshot.ChainId,
+            snapshot.SequenceNumber,
+            snapshot.EventId,
+            snapshot.EventType,
+            snapshot.PolicyVersion,
+            snapshot.Reason,
+            snapshot.ExpectedSequence,
+            snapshot.ActorId ?? string.Empty,
+            snapshot.ActorType ?? string.Empty,
+            snapshot.ObservedAt.ToString("O"));
+    }
+
+    public static void EmitIncidentReplayTrace(ILogger logger, ReplayTraceSample sample)
+    {
+        if (logger is null)
+        {
+            return;
+        }
+
+        logger.LogInformation(
+            IncidentReplayTraceEvent,
+            "timeline ledger.incident.replay_trace tenant={Tenant} from_seq={FromSequence} to_seq={ToSequence} events={Events} duration_ms={DurationMs} has_more={HasMore} chain_filters={ChainFilters} event_type_filters={EventTypeFilters} observed_at={ObservedAt}",
+            sample.TenantId,
+            sample.FromSequence,
+            sample.ToSequence,
+            sample.EventsCount,
+            sample.DurationMs,
+            sample.HasMore,
+            sample.ChainFilterCount,
+            sample.EventTypeFilterCount,
+            sample.ObservedAt.ToString("O"));
+    }
+}
diff --git a/src/Findings/StellaOps.Findings.Ledger/OpenApi/OpenApiMetadataFactory.cs b/src/Findings/StellaOps.Findings.Ledger/OpenApi/OpenApiMetadataFactory.cs
new file mode 100644
index 000000000..fe2b87aa6
--- /dev/null
+++ b/src/Findings/StellaOps.Findings.Ledger/OpenApi/OpenApiMetadataFactory.cs
@@ -0,0 +1,55 @@
+using System.IO;
+using System.Reflection;
+using System.Security.Cryptography;
+using System.Text;
+
+namespace StellaOps.Findings.Ledger.OpenApi;
+
+/// <summary>
+/// Provides versioned metadata for the Findings Ledger OpenAPI discovery endpoint.
+/// </summary>
+public static class OpenApiMetadataFactory
+{
+    public const string ApiVersion = "1.0.0-beta1";
+
+    public static string GetBuildVersion()
+    {
+        var assembly = Assembly.GetExecutingAssembly();
+        var informational = assembly.GetCustomAttribute<AssemblyInformationalVersionAttribute>()?.InformationalVersion;
+        return string.IsNullOrWhiteSpace(informational)
+            ? assembly.GetName().Version?.ToString() ?? "0.0.0"
+            : informational;
+    }
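+    // GetSpecPath probes up to ten ancestors of the content root for the
+    // checked-in OpenAPI document, so discovery works from published layouts
+    // as well as test/bin directories; the old relative path stays as fallback.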
"0.0.0" + : informational; + } + + public static string GetSpecPath(string contentRoot) + { + var current = Path.GetFullPath(contentRoot); + for (var i = 0; i < 10; i++) + { + var candidate = Path.Combine(current, "docs", "modules", "findings-ledger", "openapi", "findings-ledger.v1.yaml"); + if (File.Exists(candidate)) + { + return candidate; + } + + current = Path.GetFullPath(Path.Combine(current, "..")); + } + + // Fallback to previous behavior if traversal fails + return Path.GetFullPath(Path.Combine(contentRoot, "../../docs/modules/findings-ledger/openapi/findings-ledger.v1.yaml")); + } + + public static DateTimeOffset? GetLastModified(string specPath) + { + return File.Exists(specPath) + ? File.GetLastWriteTimeUtc(specPath) + : null; + } + + public static string ComputeEtag(byte[] content) + { + var hash = SHA256.HashData(content); + var shortHash = Convert.ToHexString(hash)[..16].ToLowerInvariant(); + return $"W/\"{shortHash}\""; + } +} diff --git a/src/Findings/StellaOps.Findings.Ledger/Options/LedgerIncidentOptions.cs b/src/Findings/StellaOps.Findings.Ledger/Options/LedgerIncidentOptions.cs new file mode 100644 index 000000000..9cf9d7476 --- /dev/null +++ b/src/Findings/StellaOps.Findings.Ledger/Options/LedgerIncidentOptions.cs @@ -0,0 +1,92 @@ +using System; + +namespace StellaOps.Findings.Ledger.Options; + +/// +/// Configures incident-mode behaviour for the Findings Ledger. +/// +public sealed class LedgerIncidentOptions +{ + public const string SectionName = "findings:ledger:incident"; + + /// + /// Enables ledger-side incident instrumentation. + /// + public bool Enabled { get; set; } = true; + + /// + /// Number of days to extend retention windows while incident mode is active. + /// + public int RetentionExtensionDays { get; set; } = 60; + + /// + /// Minimum projection lag (seconds) that will be recorded during incident mode. + /// + public double LagTraceThresholdSeconds { get; set; } = 15; + + /// + /// Maximum number of projection lag samples to retain. + /// + public int LagTraceBufferSize { get; set; } = 100; + + /// + /// Maximum number of conflict snapshots to retain. + /// + public int ConflictSnapshotBufferSize { get; set; } = 50; + + /// + /// Maximum number of replay traces to retain. + /// + public int ReplayTraceBufferSize { get; set; } = 50; + + /// + /// Enables capture of projection lag traces when incident mode is active. + /// + public bool CaptureLagTraces { get; set; } = true; + + /// + /// Enables capture of conflict snapshots when incident mode is active. + /// + public bool CaptureConflictSnapshots { get; set; } = true; + + /// + /// Enables capture of replay request traces when incident mode is active. + /// + public bool CaptureReplayTraces { get; set; } = true; + + /// + /// Whether to emit structured timeline/log entries for incident actions. + /// + public bool EmitTimelineEvents { get; set; } = true; + + /// + /// Whether to emit notifier events (logging by default) for incident actions. + /// + public bool EmitNotifications { get; set; } = true; + + /// + /// Clears buffered diagnostics on each activation to avoid mixing epochs. + /// + public bool ResetDiagnosticsOnActivation { get; set; } = true; + + /// + /// Validates option values. 
+ /// + public void Validate() + { + if (RetentionExtensionDays < 0 || RetentionExtensionDays > 3650) + { + throw new InvalidOperationException("RetentionExtensionDays must be between 0 and 3650."); + } + + if (LagTraceThresholdSeconds < 0) + { + throw new InvalidOperationException("LagTraceThresholdSeconds must be non-negative."); + } + + if (LagTraceBufferSize <= 0 || ConflictSnapshotBufferSize <= 0 || ReplayTraceBufferSize <= 0) + { + throw new InvalidOperationException("Incident diagnostic buffer sizes must be positive."); + } + } +} diff --git a/src/Findings/StellaOps.Findings.Ledger/Services/Incident/LedgerIncidentCoordinator.cs b/src/Findings/StellaOps.Findings.Ledger/Services/Incident/LedgerIncidentCoordinator.cs new file mode 100644 index 000000000..b6cfda31f --- /dev/null +++ b/src/Findings/StellaOps.Findings.Ledger/Services/Incident/LedgerIncidentCoordinator.cs @@ -0,0 +1,355 @@ +using System.Collections.Concurrent; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using StellaOps.Findings.Ledger.Observability; +using StellaOps.Findings.Ledger.Options; +using StellaOps.Telemetry.Core; + +namespace StellaOps.Findings.Ledger.Services.Incident; + +public interface ILedgerIncidentDiagnostics : ILedgerIncidentState +{ + void RecordProjectionLag(ProjectionLagSample sample); + + void RecordConflict(ConflictSnapshot snapshot); + + void RecordReplayTrace(ReplayTraceSample sample); + + IncidentDiagnosticsSnapshot GetDiagnosticsSnapshot(); +} + +public interface ILedgerIncidentState +{ + bool IsActive { get; } + + LedgerIncidentSnapshot Current { get; } +} + +public interface ILedgerIncidentNotifier +{ + Task PublishIncidentModeChangedAsync(LedgerIncidentSnapshot snapshot, CancellationToken cancellationToken); +} + +public sealed class LoggingLedgerIncidentNotifier : ILedgerIncidentNotifier +{ + private readonly ILogger _logger; + + public LoggingLedgerIncidentNotifier(ILogger logger) + { + _logger = logger ?? throw new ArgumentNullException(nameof(logger)); + } + + public Task PublishIncidentModeChangedAsync(LedgerIncidentSnapshot snapshot, CancellationToken cancellationToken) + { + var state = snapshot.IsActive ? "enabled" : "disabled"; + _logger.LogWarning( + "NOTIFICATION: Ledger incident mode {State} (activation_id={ActivationId}, retention_extension_days={ExtensionDays})", + state, + snapshot.ActivationId ?? string.Empty, + snapshot.RetentionExtensionDays); + return Task.CompletedTask; + } +} + +public sealed record LedgerIncidentSnapshot( + bool IsActive, + string? ActivationId, + string? Actor, + string? Reason, + string? TenantId, + DateTimeOffset ChangedAt, + DateTimeOffset? ExpiresAt, + int RetentionExtensionDays); + +public sealed record ProjectionLagSample( + string TenantId, + Guid ChainId, + long SequenceNumber, + string EventType, + string PolicyVersion, + double LagSeconds, + DateTimeOffset RecordedAt, + DateTimeOffset ObservedAt); + +public sealed record ConflictSnapshot( + string TenantId, + Guid ChainId, + long SequenceNumber, + Guid EventId, + string EventType, + string PolicyVersion, + string Reason, + DateTimeOffset RecordedAt, + DateTimeOffset ObservedAt, + string? ActorId, + string? ActorType, + long ExpectedSequence, + string? ProvidedPreviousHash, + string? 
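// Validate() above throws InvalidOperationException on out-of-range values.
// One way to surface that at startup is to bind the "findings:ledger:incident"
// section through the standard options pipeline; this registration is a
// sketch, not code from this change:
services.AddOptions<LedgerIncidentOptions>()
    .Bind(configuration.GetSection(LedgerIncidentOptions.SectionName))
    .Validate(options => { options.Validate(); return true; }, "invalid ledger incident options")
    .ValidateOnStart();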
ExpectedPreviousHash); + +public sealed record ReplayTraceSample( + string TenantId, + long FromSequence, + long ToSequence, + long EventsCount, + bool HasMore, + double DurationMs, + DateTimeOffset ObservedAt, + int ChainFilterCount, + int EventTypeFilterCount); + +public sealed record IncidentDiagnosticsSnapshot( + LedgerIncidentSnapshot Incident, + IReadOnlyList LagSamples, + IReadOnlyList ConflictSnapshots, + IReadOnlyList ReplayTraces, + DateTimeOffset CapturedAt); + +/// +/// Coordinates ledger-specific incident mode behaviour (diagnostics, retention hints, timeline/notification events). +/// +public sealed class LedgerIncidentCoordinator : ILedgerIncidentDiagnostics, IDisposable +{ + private const int ReplayTraceLogThresholdMs = 250; + + private readonly LedgerIncidentOptions _options; + private readonly ILogger _logger; + private readonly ILedgerIncidentNotifier _notifier; + private readonly TimeProvider _timeProvider; + private readonly IIncidentModeService? _incidentModeService; + + private readonly ConcurrentQueue _lagSamples = new(); + private readonly ConcurrentQueue _conflictSnapshots = new(); + private readonly ConcurrentQueue _replayTraces = new(); + private readonly ConcurrentDictionary _lastLagLogByChain = new(StringComparer.Ordinal); + + private readonly object _stateLock = new(); + private LedgerIncidentSnapshot _current; + private bool _disposed; + + public LedgerIncidentCoordinator( + IOptions options, + ILogger logger, + ILedgerIncidentNotifier notifier, + TimeProvider? timeProvider = null, + IIncidentModeService? incidentModeService = null) + { + _options = (options ?? throw new ArgumentNullException(nameof(options))).Value; + _options.Validate(); + _logger = logger ?? throw new ArgumentNullException(nameof(logger)); + _notifier = notifier ?? throw new ArgumentNullException(nameof(notifier)); + _timeProvider = timeProvider ?? 
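// RecordProjectionLag below throttles timeline emission to one lag trace per
// tenant/chain key per minute via _lastLagLogByChain. The throttle, isolated
// into a sketch (lastLogged stands in for that dictionary):
var key = $"{sample.TenantId}:{sample.ChainId}";
if (!lastLogged.TryGetValue(key, out var previous) ||
    sample.ObservedAt - previous >= TimeSpan.FromMinutes(1))
{
    lastLogged[key] = sample.ObservedAt;
    LedgerTimeline.EmitIncidentLagTrace(logger, sample); // at most once per minute per chain
}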
TimeProvider.System; + _incidentModeService = incidentModeService; + + _current = new LedgerIncidentSnapshot( + IsActive: false, + ActivationId: null, + Actor: null, + Reason: null, + TenantId: null, + ChangedAt: _timeProvider.GetUtcNow(), + ExpiresAt: null, + RetentionExtensionDays: 0); + + if (_incidentModeService is not null) + { + _incidentModeService.Activated += OnActivated; + _incidentModeService.Deactivated += OnDeactivated; + + if (_incidentModeService.CurrentState is { } state && !_incidentModeService.CurrentState.IsExpired) + { + ApplyIncidentState(state, wasReactivation: false); + } + } + } + + public bool IsActive => _current.IsActive; + + public LedgerIncidentSnapshot Current => _current; + + public void RecordProjectionLag(ProjectionLagSample sample) + { + if (!_options.Enabled || !IsActive || !_options.CaptureLagTraces) + { + return; + } + + EnqueueWithLimit(_lagSamples, sample, _options.LagTraceBufferSize); + + if (_options.EmitTimelineEvents && sample.LagSeconds >= _options.LagTraceThresholdSeconds) + { + var now = sample.ObservedAt; + var key = $"{sample.TenantId}:{sample.ChainId}"; + if (!_lastLagLogByChain.TryGetValue(key, out var lastLogged) || + now - lastLogged >= TimeSpan.FromMinutes(1)) + { + _lastLagLogByChain[key] = now; + LedgerTimeline.EmitIncidentLagTrace(_logger, sample); + } + } + } + + public void RecordConflict(ConflictSnapshot snapshot) + { + if (!_options.Enabled || !IsActive || !_options.CaptureConflictSnapshots) + { + return; + } + + EnqueueWithLimit(_conflictSnapshots, snapshot, _options.ConflictSnapshotBufferSize); + if (_options.EmitTimelineEvents) + { + LedgerTimeline.EmitIncidentConflictSnapshot(_logger, snapshot); + } + } + + public void RecordReplayTrace(ReplayTraceSample sample) + { + if (!_options.Enabled || !IsActive || !_options.CaptureReplayTraces) + { + return; + } + + EnqueueWithLimit(_replayTraces, sample, _options.ReplayTraceBufferSize); + + if (_options.EmitTimelineEvents && + (sample.DurationMs >= ReplayTraceLogThresholdMs || sample.HasMore)) + { + LedgerTimeline.EmitIncidentReplayTrace(_logger, sample); + } + } + + public IncidentDiagnosticsSnapshot GetDiagnosticsSnapshot() + { + return new IncidentDiagnosticsSnapshot( + _current, + _lagSamples.ToArray(), + _conflictSnapshots.ToArray(), + _replayTraces.ToArray(), + _timeProvider.GetUtcNow()); + } + + private void OnActivated(object? sender, IncidentModeActivatedEventArgs e) + { + ApplyIncidentState(e.State, e.WasReactivation); + } + + private void OnDeactivated(object? 
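// GetDiagnosticsSnapshot above copies the three bounded queues plus the current
// incident state in one call, so an operator-facing endpoint can expose them
// read-only. A sketch; the route and exposure model are assumptions:
app.MapGet("/internal/ledger/incident/diagnostics", (ILedgerIncidentDiagnostics diagnostics) =>
{
    var snapshot = diagnostics.GetDiagnosticsSnapshot();
    return Results.Ok(snapshot); // Incident, LagSamples, ConflictSnapshots, ReplayTraces, CapturedAt
});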
sender, IncidentModeDeactivatedEventArgs e) + { + if (!_options.Enabled) + { + return; + } + + lock (_stateLock) + { + _current = new LedgerIncidentSnapshot( + IsActive: false, + ActivationId: e.State.ActivationId, + Actor: e.DeactivatedBy, + Reason: e.Reason.ToString(), + TenantId: e.State.TenantId, + ChangedAt: _timeProvider.GetUtcNow(), + ExpiresAt: e.State.ExpiresAt, + RetentionExtensionDays: 0); + } + + if (_options.EmitTimelineEvents) + { + LedgerTimeline.EmitIncidentModeChanged(_logger, _current, wasReactivation: false); + } + + if (_options.EmitNotifications) + { + _ = SafeNotifyAsync(_current); + } + } + + private void ApplyIncidentState(IncidentModeState state, bool wasReactivation) + { + if (!_options.Enabled) + { + return; + } + + lock (_stateLock) + { + _current = new LedgerIncidentSnapshot( + IsActive: true, + ActivationId: state.ActivationId, + Actor: state.Actor, + Reason: state.Reason, + TenantId: state.TenantId, + ChangedAt: _timeProvider.GetUtcNow(), + ExpiresAt: state.ExpiresAt, + RetentionExtensionDays: _options.RetentionExtensionDays); + + if (_options.ResetDiagnosticsOnActivation) + { + ClearDiagnostics(); + } + } + + if (_options.EmitTimelineEvents) + { + LedgerTimeline.EmitIncidentModeChanged(_logger, _current, wasReactivation); + } + + if (_options.EmitNotifications) + { + _ = SafeNotifyAsync(_current); + } + } + + private Task SafeNotifyAsync(LedgerIncidentSnapshot snapshot) + { + try + { + return _notifier.PublishIncidentModeChangedAsync(snapshot, CancellationToken.None); + } + catch (Exception ex) + { + _logger.LogWarning(ex, "Failed to publish incident mode notification."); + return Task.CompletedTask; + } + } + + private void ClearDiagnostics() + { + while (_lagSamples.TryDequeue(out _)) + { + } + + while (_conflictSnapshots.TryDequeue(out _)) + { + } + + while (_replayTraces.TryDequeue(out _)) + { + } + } + + private static void EnqueueWithLimit(ConcurrentQueue queue, T item, int limit) + { + queue.Enqueue(item); + while (queue.Count > limit && queue.TryDequeue(out _)) + { + } + } + + public void Dispose() + { + if (_disposed) + { + return; + } + + if (_incidentModeService is not null) + { + _incidentModeService.Activated -= OnActivated; + _incidentModeService.Deactivated -= OnDeactivated; + } + + _disposed = true; + } +} diff --git a/src/Findings/StellaOps.Findings.Ledger/Services/LedgerEventWriteService.cs b/src/Findings/StellaOps.Findings.Ledger/Services/LedgerEventWriteService.cs index 7a18ea3f3..cb9da8b78 100644 --- a/src/Findings/StellaOps.Findings.Ledger/Services/LedgerEventWriteService.cs +++ b/src/Findings/StellaOps.Findings.Ledger/Services/LedgerEventWriteService.cs @@ -5,6 +5,7 @@ using StellaOps.Findings.Ledger.Domain; using StellaOps.Findings.Ledger.Hashing; using StellaOps.Findings.Ledger.Infrastructure; using StellaOps.Findings.Ledger.Observability; +using StellaOps.Findings.Ledger.Services.Incident; namespace StellaOps.Findings.Ledger.Services; @@ -18,15 +19,18 @@ public sealed class LedgerEventWriteService : ILedgerEventWriteService private readonly ILedgerEventRepository _repository; private readonly IMerkleAnchorScheduler _merkleAnchorScheduler; private readonly ILogger _logger; + private readonly ILedgerIncidentDiagnostics? _incidentDiagnostics; public LedgerEventWriteService( ILedgerEventRepository repository, IMerkleAnchorScheduler merkleAnchorScheduler, - ILogger logger) + ILogger logger, + ILedgerIncidentDiagnostics? incidentDiagnostics = null) { _repository = repository ?? 
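// EnqueueWithLimit above gives each diagnostics queue ring-buffer semantics:
// enqueue, then drop from the head while over the limit. Note the
// Count/TryDequeue pair is only approximately bounded under concurrent writers.
// The same shape in isolation:
var buffer = new ConcurrentQueue<int>();
for (var i = 0; i < 10; i++)
{
    buffer.Enqueue(i);
    while (buffer.Count > 3 && buffer.TryDequeue(out _)) { }
}
// buffer now holds the three newest items: 7, 8, 9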
throw new ArgumentNullException(nameof(repository)); _merkleAnchorScheduler = merkleAnchorScheduler ?? throw new ArgumentNullException(nameof(merkleAnchorScheduler)); _logger = logger ?? throw new ArgumentNullException(nameof(logger)); + _incidentDiagnostics = incidentDiagnostics; } public async Task AppendAsync(LedgerEventDraft draft, CancellationToken cancellationToken) @@ -57,6 +61,7 @@ public sealed class LedgerEventWriteService : ILedgerEventWriteService if (!string.Equals(existing.CanonicalJson, canonicalJson, StringComparison.Ordinal)) { LedgerTelemetry.MarkError(activity, "event_id_conflict"); + RecordConflictSnapshot(draft, expectedSequence: existing.SequenceNumber + 1, reason: "event_id_conflict", expectedPreviousHash: existing.EventHash); return LedgerWriteResult.Conflict( "event_id_conflict", $"Event '{draft.EventId}' already exists with a different payload."); @@ -71,6 +76,7 @@ public sealed class LedgerEventWriteService : ILedgerEventWriteService if (draft.SequenceNumber != expectedSequence) { LedgerTelemetry.MarkError(activity, "sequence_mismatch"); + RecordConflictSnapshot(draft, expectedSequence, reason: "sequence_mismatch", expectedPreviousHash: chainHead?.EventHash); return LedgerWriteResult.Conflict( "sequence_mismatch", $"Sequence number '{draft.SequenceNumber}' does not match expected '{expectedSequence}'."); @@ -80,6 +86,7 @@ public sealed class LedgerEventWriteService : ILedgerEventWriteService if (draft.ProvidedPreviousHash is not null && !string.Equals(draft.ProvidedPreviousHash, previousHash, StringComparison.OrdinalIgnoreCase)) { LedgerTelemetry.MarkError(activity, "previous_hash_mismatch"); + RecordConflictSnapshot(draft, expectedSequence, reason: "previous_hash_mismatch", providedPreviousHash: draft.ProvidedPreviousHash, expectedPreviousHash: previousHash); return LedgerWriteResult.Conflict( "previous_hash_mismatch", $"Provided previous hash '{draft.ProvidedPreviousHash}' does not match chain head hash '{previousHash}'."); @@ -143,11 +150,13 @@ public sealed class LedgerEventWriteService : ILedgerEventWriteService var persisted = await _repository.GetByEventIdAsync(draft.TenantId, draft.EventId, cancellationToken).ConfigureAwait(false); if (persisted is null) { + RecordConflictSnapshot(draft, expectedSequence, reason: "append_failed", expectedPreviousHash: previousHash); return LedgerWriteResult.Conflict("append_failed", "Ledger append failed due to concurrent write."); } if (!string.Equals(persisted.CanonicalJson, record.CanonicalJson, StringComparison.Ordinal)) { + RecordConflictSnapshot(draft, expectedSequence, reason: "event_id_conflict", expectedPreviousHash: persisted.EventHash); return LedgerWriteResult.Conflict("event_id_conflict", "Ledger append raced with conflicting payload."); } @@ -157,6 +166,37 @@ public sealed class LedgerEventWriteService : ILedgerEventWriteService return LedgerWriteResult.Success(record); } + private void RecordConflictSnapshot( + LedgerEventDraft draft, + long expectedSequence, + string reason, + string? providedPreviousHash = null, + string? expectedPreviousHash = null) + { + if (_incidentDiagnostics is null) + { + return; + } + + var snapshot = new ConflictSnapshot( + TenantId: draft.TenantId, + ChainId: draft.ChainId, + SequenceNumber: draft.SequenceNumber, + EventId: draft.EventId, + EventType: draft.EventType, + PolicyVersion: draft.PolicyVersion ?? 
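// The append path above records a ConflictSnapshot for each rejection:
// event_id_conflict (same id, different canonical payload), sequence_mismatch
// (draft is not head+1), and previous_hash_mismatch (client-supplied hash
// differs from the chain head). A compressed sketch of the sequence/hash
// guards; the expectedSequence derivation is an assumption, since its
// computation sits outside this hunk:
var expectedSequence = (chainHead?.SequenceNumber ?? 0) + 1;
if (draft.SequenceNumber != expectedSequence)
{
    return LedgerWriteResult.Conflict("sequence_mismatch", $"expected sequence {expectedSequence}");
}
if (draft.ProvidedPreviousHash is not null &&
    !string.Equals(draft.ProvidedPreviousHash, chainHead?.EventHash, StringComparison.OrdinalIgnoreCase))
{
    return LedgerWriteResult.Conflict("previous_hash_mismatch", "chain head hash differs");
}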
string.Empty, + Reason: reason, + RecordedAt: draft.RecordedAt, + ObservedAt: DateTimeOffset.UtcNow, + ActorId: draft.ActorId, + ActorType: draft.ActorType, + ExpectedSequence: expectedSequence, + ProvidedPreviousHash: providedPreviousHash, + ExpectedPreviousHash: expectedPreviousHash); + + _incidentDiagnostics.RecordConflict(snapshot); + } + private static string DetermineSource(LedgerEventDraft draft) { if (draft.SourceRunId.HasValue) diff --git a/src/Findings/StellaOps.Findings.Ledger/Services/ScoredFindingsExportService.cs b/src/Findings/StellaOps.Findings.Ledger/Services/ScoredFindingsExportService.cs index b383070ed..49d25656a 100644 --- a/src/Findings/StellaOps.Findings.Ledger/Services/ScoredFindingsExportService.cs +++ b/src/Findings/StellaOps.Findings.Ledger/Services/ScoredFindingsExportService.cs @@ -154,7 +154,12 @@ public sealed class ScoredFindingsExportService : IScoredFindingsExportService finding.RiskProfileVersion, finding.RiskExplanationId, finding.ExplainRef, - finding.UpdatedAt + finding.UpdatedAt, + finding.AttestationStatus, + finding.AttestationCount, + finding.VerifiedAttestationCount, + finding.FailedAttestationCount, + finding.UnverifiedAttestationCount }; } diff --git a/src/Findings/StellaOps.Findings.Ledger/Services/ScoredFindingsQueryModels.cs b/src/Findings/StellaOps.Findings.Ledger/Services/ScoredFindingsQueryModels.cs index ab2531652..cb8b8dec1 100644 --- a/src/Findings/StellaOps.Findings.Ledger/Services/ScoredFindingsQueryModels.cs +++ b/src/Findings/StellaOps.Findings.Ledger/Services/ScoredFindingsQueryModels.cs @@ -1,3 +1,5 @@ +using StellaOps.Findings.Ledger.Infrastructure.Attestation; + namespace StellaOps.Findings.Ledger.Services; /// @@ -18,6 +20,9 @@ public sealed record ScoredFindingsQuery public int Limit { get; init; } = 50; public ScoredFindingsSortField SortBy { get; init; } = ScoredFindingsSortField.RiskScore; public bool Descending { get; init; } = true; + public IReadOnlyList? AttestationTypes { get; init; } + public AttestationVerificationFilter? AttestationVerification { get; init; } + public OverallVerificationStatus? AttestationStatus { get; init; } } /// @@ -57,6 +62,11 @@ public sealed record ScoredFinding public Guid? RiskExplanationId { get; init; } public string? 
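// The new attestation counters roll up into OverallVerificationStatus. A
// sketch consistent with the AttestationStatusCalculatorTests theory later in
// this diff ((0,0)=>NoAttestations, (3,3)=>AllVerified, (4,1)=>PartiallyVerified,
// (2,0)=>NoneVerified); the shipped calculator lives in
// Infrastructure.Attestation and may differ in detail:
static OverallVerificationStatus Compute(int attestationCount, int verifiedCount) =>
    attestationCount == 0 ? OverallVerificationStatus.NoAttestations
    : verifiedCount == attestationCount ? OverallVerificationStatus.AllVerified
    : verifiedCount > 0 ? OverallVerificationStatus.PartiallyVerified
    : OverallVerificationStatus.NoneVerified;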
ExplainRef { get; init; } public DateTimeOffset UpdatedAt { get; init; } + public int AttestationCount { get; init; } + public int VerifiedAttestationCount { get; init; } + public int FailedAttestationCount { get; init; } + public int UnverifiedAttestationCount { get; init; } + public OverallVerificationStatus AttestationStatus { get; init; } = OverallVerificationStatus.NoAttestations; } /// diff --git a/src/Findings/StellaOps.Findings.Ledger/Services/ScoredFindingsQueryService.cs b/src/Findings/StellaOps.Findings.Ledger/Services/ScoredFindingsQueryService.cs index b2c7332ee..2fa1fbf30 100644 --- a/src/Findings/StellaOps.Findings.Ledger/Services/ScoredFindingsQueryService.cs +++ b/src/Findings/StellaOps.Findings.Ledger/Services/ScoredFindingsQueryService.cs @@ -164,7 +164,12 @@ public sealed class ScoredFindingsQueryService : IScoredFindingsQueryService RiskProfileVersion = projection.RiskProfileVersion, RiskExplanationId = projection.RiskExplanationId, ExplainRef = projection.ExplainRef, - UpdatedAt = projection.UpdatedAt + UpdatedAt = projection.UpdatedAt, + AttestationCount = projection.AttestationCount, + VerifiedAttestationCount = projection.VerifiedAttestationCount, + FailedAttestationCount = projection.FailedAttestationCount, + UnverifiedAttestationCount = projection.UnverifiedAttestationCount, + AttestationStatus = projection.AttestationStatus }; } diff --git a/src/Findings/StellaOps.Findings.Ledger/Services/SnapshotService.cs b/src/Findings/StellaOps.Findings.Ledger/Services/SnapshotService.cs index 8a22bb42b..a40231971 100644 --- a/src/Findings/StellaOps.Findings.Ledger/Services/SnapshotService.cs +++ b/src/Findings/StellaOps.Findings.Ledger/Services/SnapshotService.cs @@ -1,5 +1,6 @@ namespace StellaOps.Findings.Ledger.Services; +using System.Collections.Generic; using System.Security.Cryptography; using System.Text; using System.Text.Json; @@ -7,6 +8,7 @@ using Microsoft.Extensions.Logging; using StellaOps.Findings.Ledger.Domain; using StellaOps.Findings.Ledger.Infrastructure.Snapshot; using StellaOps.Findings.Ledger.Observability; +using StellaOps.Findings.Ledger.Services.Incident; /// /// Service for managing ledger snapshots and time-travel queries. @@ -17,15 +19,18 @@ public sealed class SnapshotService private readonly ITimeTravelRepository _timeTravelRepository; private readonly ILogger _logger; private readonly JsonSerializerOptions _jsonOptions; + private readonly ILedgerIncidentDiagnostics? _incidentDiagnostics; public SnapshotService( ISnapshotRepository snapshotRepository, ITimeTravelRepository timeTravelRepository, - ILogger logger) + ILogger logger, + ILedgerIncidentDiagnostics? 
incidentDiagnostics = null) { _snapshotRepository = snapshotRepository; _timeTravelRepository = timeTravelRepository; _logger = logger; + _incidentDiagnostics = incidentDiagnostics; _jsonOptions = new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase, @@ -42,32 +47,33 @@ public sealed class SnapshotService { try { + var effectiveInput = ApplyIncidentRetention(input); _logger.LogInformation( "Creating snapshot for tenant {TenantId} at sequence {Sequence} / timestamp {Timestamp}", - input.TenantId, - input.AtSequence, - input.AtTimestamp); + effectiveInput.TenantId, + effectiveInput.AtSequence, + effectiveInput.AtTimestamp); // Get current ledger state - var currentPoint = await _timeTravelRepository.GetCurrentPointAsync(input.TenantId, ct); + var currentPoint = await _timeTravelRepository.GetCurrentPointAsync(effectiveInput.TenantId, ct); // Create the snapshot record var snapshot = await _snapshotRepository.CreateAsync( - input.TenantId, - input, + effectiveInput.TenantId, + effectiveInput, currentPoint.SequenceNumber, currentPoint.Timestamp, ct); // Compute statistics asynchronously var statistics = await ComputeStatisticsAsync( - input.TenantId, + effectiveInput.TenantId, snapshot.SequenceNumber, - input.IncludeEntityTypes, + effectiveInput.IncludeEntityTypes, ct); await _snapshotRepository.UpdateStatisticsAsync( - input.TenantId, + effectiveInput.TenantId, snapshot.SnapshotId, statistics, ct); @@ -79,12 +85,12 @@ public sealed class SnapshotService if (input.Sign) { merkleRoot = await ComputeMerkleRootAsync( - input.TenantId, + effectiveInput.TenantId, snapshot.SequenceNumber, ct); await _snapshotRepository.SetMerkleRootAsync( - input.TenantId, + effectiveInput.TenantId, snapshot.SnapshotId, merkleRoot, dsseDigest, @@ -93,20 +99,20 @@ public sealed class SnapshotService // Mark as available await _snapshotRepository.UpdateStatusAsync( - input.TenantId, + effectiveInput.TenantId, snapshot.SnapshotId, SnapshotStatus.Available, ct); // Retrieve updated snapshot var finalSnapshot = await _snapshotRepository.GetByIdAsync( - input.TenantId, + effectiveInput.TenantId, snapshot.SnapshotId, ct); LedgerTimeline.EmitSnapshotCreated( _logger, - input.TenantId, + effectiveInput.TenantId, snapshot.SnapshotId, snapshot.SequenceNumber, statistics.FindingsCount); @@ -196,7 +202,20 @@ public sealed class SnapshotService ReplayRequest request, CancellationToken ct = default) { - return await _timeTravelRepository.ReplayEventsAsync(request, ct); + var result = await _timeTravelRepository.ReplayEventsAsync(request, ct); + + _incidentDiagnostics?.RecordReplayTrace(new ReplayTraceSample( + TenantId: request.TenantId, + FromSequence: result.Metadata.FromSequence, + ToSequence: result.Metadata.ToSequence, + EventsCount: result.Metadata.EventsCount, + HasMore: result.Metadata.HasMore, + DurationMs: result.Metadata.ReplayDurationMs, + ObservedAt: DateTimeOffset.UtcNow, + ChainFilterCount: request.ChainIds?.Count ?? 0, + EventTypeFilterCount: request.EventTypes?.Count ?? 
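// ExpireOldSnapshotsAsync below moves the expiry cutoff backwards by
// RetentionExtensionDays while incident mode is active, so snapshots whose
// expiry falls inside the extension window survive. The arithmetic in
// isolation (incidentActive and extensionDays stand in for coordinator state):
var cutoff = DateTimeOffset.UtcNow;
if (incidentActive && extensionDays > 0)
{
    cutoff = cutoff.AddDays(-extensionDays); // default 60 => only items older than now-60d expire
}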
0)); + + return result; } /// @@ -249,6 +268,15 @@ public sealed class SnapshotService public async Task ExpireOldSnapshotsAsync(CancellationToken ct = default) { var cutoff = DateTimeOffset.UtcNow; + if (_incidentDiagnostics?.IsActive == true && _incidentDiagnostics.Current.RetentionExtensionDays > 0) + { + cutoff = cutoff.AddDays(-_incidentDiagnostics.Current.RetentionExtensionDays); + _logger.LogInformation( + "Incident mode active; extending snapshot expiry cutoff by {ExtensionDays} days (activation {ActivationId}).", + _incidentDiagnostics.Current.RetentionExtensionDays, + _incidentDiagnostics.Current.ActivationId ?? string.Empty); + } + var count = await _snapshotRepository.ExpireSnapshotsAsync(cutoff, ct); if (count > 0) @@ -367,4 +395,44 @@ public sealed class SnapshotService var bytes = SHA256.HashData(Encoding.UTF8.GetBytes(input)); return Convert.ToHexStringLower(bytes); } + + private CreateSnapshotInput ApplyIncidentRetention(CreateSnapshotInput input) + { + if (_incidentDiagnostics is null || !_incidentDiagnostics.IsActive) + { + return input; + } + + var incident = _incidentDiagnostics.Current; + if (incident.RetentionExtensionDays <= 0) + { + return input; + } + + TimeSpan? expiresIn = input.ExpiresIn; + if (expiresIn.HasValue) + { + expiresIn = expiresIn.Value.Add(TimeSpan.FromDays(incident.RetentionExtensionDays)); + } + + var metadata = input.Metadata is null + ? new Dictionary() + : new Dictionary(input.Metadata); + + metadata["incident.mode"] = "enabled"; + metadata["incident.activationId"] = incident.ActivationId ?? string.Empty; + metadata["incident.retentionExtensionDays"] = incident.RetentionExtensionDays; + metadata["incident.changedAt"] = incident.ChangedAt.ToString("O"); + if (incident.ExpiresAt is not null) + { + metadata["incident.expiresAt"] = incident.ExpiresAt.Value.ToString("O"); + } + + _logger.LogInformation( + "Incident mode active; extending snapshot retention by {ExtensionDays} days (activation {ActivationId}).", + incident.RetentionExtensionDays, + incident.ActivationId ?? string.Empty); + + return input with { ExpiresIn = expiresIn, Metadata = metadata }; + } } diff --git a/src/Findings/StellaOps.Findings.Ledger/StellaOps.Findings.Ledger.csproj b/src/Findings/StellaOps.Findings.Ledger/StellaOps.Findings.Ledger.csproj index fed655a3f..927481435 100644 --- a/src/Findings/StellaOps.Findings.Ledger/StellaOps.Findings.Ledger.csproj +++ b/src/Findings/StellaOps.Findings.Ledger/StellaOps.Findings.Ledger.csproj @@ -32,6 +32,7 @@ + diff --git a/src/Findings/StellaOps.Findings.Ledger/TASKS.md b/src/Findings/StellaOps.Findings.Ledger/TASKS.md index 2a6c6324d..fcd712d22 100644 --- a/src/Findings/StellaOps.Findings.Ledger/TASKS.md +++ b/src/Findings/StellaOps.Findings.Ledger/TASKS.md @@ -1,4 +1,4 @@ -# Findings Ledger · Sprint 0120-0000-0001 +# Findings Ledger · Sprint 0120-0000-0001 | Task ID | Status | Notes | Updated (UTC) | | --- | --- | --- | --- | @@ -8,9 +8,18 @@ Status changes must be mirrored in `docs/implplan/SPRINT_0120_0001_0001_policy_reasoning.md`. -# Findings Ledger · Sprint 0121-0001-0001 +# Findings Ledger · Sprint 0121-0001-0001 | Task ID | Status | Notes | Updated (UTC) | | --- | --- | --- | --- | | LEDGER-OBS-54-001 | DONE | Implemented `/v1/ledger/attestations` with deterministic paging, filter hash guard, and schema/OpenAPI updates. 
| 2025-11-22 | -| LEDGER-GAPS-121-009 | DONE | FL1–FL10 remediation: schema catalog + export canonicals, Merkle/external anchor policy, tenant isolation/redaction manifest, offline verifier + checksum guard, golden fixtures, backpressure metrics. | 2025-12-02 | +| LEDGER-GAPS-121-009 | DONE | FL1–FL10 remediation: schema catalog + export canonicals, Merkle/external anchor policy, tenant isolation/redaction manifest, offline verifier + checksum guard, golden fixtures, backpressure metrics. | 2025-12-02 | +# Findings Ledger · Sprint 0121-0001-0002 + +| Task ID | Status | Notes | Updated (UTC) | +| --- | --- | --- | --- | +| LEDGER-ATTEST-73-002 | DONE | Verification-result and attestation-status filters wired into findings projection queries and exports; tests added. | 2025-12-08 | +| LEDGER-OAS-61-002 | DONE | `/.well-known/openapi` serves spec with version/build headers, ETag, cache hints. | 2025-12-08 | +| LEDGER-OAS-62-001 | DONE | SDK-facing OpenAPI assertions for pagination, evidence links, provenance added. | 2025-12-08 | +| LEDGER-OAS-63-001 | DONE | Deprecation headers and notifications applied to legacy findings export endpoint. | 2025-12-08 | +| LEDGER-OBS-55-001 | DONE | Incident-mode diagnostics (lag/conflict/replay traces), retention extension for snapshots, timeline/notifier hooks. | 2025-12-08 | diff --git a/src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/AttestationStatusCalculatorTests.cs b/src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/AttestationStatusCalculatorTests.cs new file mode 100644 index 000000000..c2264d159 --- /dev/null +++ b/src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/AttestationStatusCalculatorTests.cs @@ -0,0 +1,18 @@ +using FluentAssertions; +using StellaOps.Findings.Ledger.Infrastructure.Attestation; + +namespace StellaOps.Findings.Ledger.Tests; + +public class AttestationStatusCalculatorTests +{ + [Theory] + [InlineData(0, 0, OverallVerificationStatus.NoAttestations)] + [InlineData(3, 3, OverallVerificationStatus.AllVerified)] + [InlineData(4, 1, OverallVerificationStatus.PartiallyVerified)] + [InlineData(2, 0, OverallVerificationStatus.NoneVerified)] + public void Compute_ReturnsExpectedStatus(int attestationCount, int verifiedCount, OverallVerificationStatus expected) + { + AttestationStatusCalculator.Compute(attestationCount, verifiedCount) + .Should().Be(expected); + } +} diff --git a/src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/DeprecationHeadersTests.cs b/src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/DeprecationHeadersTests.cs new file mode 100644 index 000000000..1dde04120 --- /dev/null +++ b/src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/DeprecationHeadersTests.cs @@ -0,0 +1,20 @@ +using FluentAssertions; +using Microsoft.AspNetCore.Http; +using StellaOps.Findings.Ledger; + +namespace StellaOps.Findings.Ledger.Tests; + +public class DeprecationHeadersTests +{ + [Fact] + public void Apply_SetsStandardDeprecationHeaders() + { + var context = new DefaultHttpContext(); + DeprecationHeaders.Apply(context.Response, "ledger.export.findings"); + + context.Response.Headers["Deprecation"].ToString().Should().Be("true"); + context.Response.Headers["Sunset"].ToString().Should().Be(DeprecationHeaders.SunsetDate); + context.Response.Headers["X-Deprecated-Endpoint"].ToString().Should().Be("ledger.export.findings"); + context.Response.Headers["Link"].ToString().Should().Contain("/.well-known/openapi"); + } +} diff --git a/src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/OpenApiMetadataFactoryTests.cs 
b/src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/OpenApiMetadataFactoryTests.cs new file mode 100644 index 000000000..70b9af6fd --- /dev/null +++ b/src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/OpenApiMetadataFactoryTests.cs @@ -0,0 +1,28 @@ +using System.Text; +using FluentAssertions; +using StellaOps.Findings.Ledger.OpenApi; + +namespace StellaOps.Findings.Ledger.Tests; + +public class OpenApiMetadataFactoryTests +{ + [Fact] + public void ComputeEtag_IsDeterministicAndWeak() + { + var bytes = Encoding.UTF8.GetBytes("spec-content"); + + var etag1 = OpenApiMetadataFactory.ComputeEtag(bytes); + var etag2 = OpenApiMetadataFactory.ComputeEtag(bytes); + + etag1.Should().StartWith("W/\""); + etag1.Should().Be(etag2); + etag1.Length.Should().BeGreaterThan(6); + } + + [Fact] + public void GetSpecPath_ResolvesExistingSpec() + { + var path = OpenApiMetadataFactory.GetSpecPath(AppContext.BaseDirectory); + File.Exists(path).Should().BeTrue(); + } +} diff --git a/src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/OpenApiSdkSurfaceTests.cs b/src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/OpenApiSdkSurfaceTests.cs new file mode 100644 index 000000000..5e92cf495 --- /dev/null +++ b/src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/OpenApiSdkSurfaceTests.cs @@ -0,0 +1,39 @@ +using System.Text; +using FluentAssertions; +using StellaOps.Findings.Ledger.OpenApi; + +namespace StellaOps.Findings.Ledger.Tests; + +public class OpenApiSdkSurfaceTests +{ + private readonly string _specContent; + + public OpenApiSdkSurfaceTests() + { + var path = OpenApiMetadataFactory.GetSpecPath(AppContext.BaseDirectory); + _specContent = File.ReadAllText(path, Encoding.UTF8); + } + + [Fact] + public void FindingsEndpoints_ExposePaginationAndFilters() + { + _specContent.Should().Contain("/findings"); + _specContent.Should().Contain("page_token"); + _specContent.Should().MatchRegex("nextPageToken|next_page_token"); + } + + [Fact] + public void EvidenceSchemas_ExposeEvidenceLinks() + { + _specContent.Should().Contain("evidenceBundleRef"); + _specContent.Should().Contain("ExportProvenance"); + } + + [Fact] + public void AttestationPointers_ExposeProvenanceMetadata() + { + _specContent.Should().Contain("/v1/ledger/attestations"); + _specContent.Should().Contain("attestation"); + _specContent.Should().Contain("provenance"); + } +} diff --git a/src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/ScoredFindingsQueryServiceTests.cs b/src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/ScoredFindingsQueryServiceTests.cs new file mode 100644 index 000000000..f11cfc63a --- /dev/null +++ b/src/Findings/__Tests/StellaOps.Findings.Ledger.Tests/ScoredFindingsQueryServiceTests.cs @@ -0,0 +1,116 @@ +using System.Text.Json.Nodes; +using Microsoft.Extensions.Logging.Abstractions; +using StellaOps.Findings.Ledger.Domain; +using StellaOps.Findings.Ledger.Infrastructure; +using StellaOps.Findings.Ledger.Infrastructure.Attestation; +using StellaOps.Findings.Ledger.Services; +using FluentAssertions; + +namespace StellaOps.Findings.Ledger.Tests; + +public class ScoredFindingsQueryServiceTests +{ + [Fact] + public async Task QueryAsync_MapsAttestationMetadata() + { + var projection = new FindingProjection( + TenantId: "tenant-a", + FindingId: "finding-123", + PolicyVersion: "v1", + Status: "affected", + Severity: 7.5m, + RiskScore: 0.9m, + RiskSeverity: "critical", + RiskProfileVersion: "p1", + RiskExplanationId: Guid.NewGuid(), + RiskEventSequence: 42, + Labels: new(), + CurrentEventId: Guid.NewGuid(), + ExplainRef: "explain-1", 
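// ScoredFindingsQuery gained AttestationTypes, AttestationVerification, and
// AttestationStatus filters earlier in this diff. A usage sketch against the
// service under test here; the filter value is illustrative:
var filtered = await service.QueryAsync(new ScoredFindingsQuery
{
    TenantId = "tenant-a",
    Limit = 25,
    AttestationStatus = OverallVerificationStatus.PartiallyVerified
});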
+ PolicyRationale: new(), + UpdatedAt: DateTimeOffset.UtcNow, + CycleHash: "abc123", + AttestationCount: 3, + VerifiedAttestationCount: 2, + FailedAttestationCount: 1, + UnverifiedAttestationCount: 0, + AttestationStatus: OverallVerificationStatus.PartiallyVerified); + + var repo = new FakeFindingProjectionRepository(projection); + var service = new ScoredFindingsQueryService( + repo, + new NullRiskExplanationStore(), + TimeProvider.System, + NullLogger.Instance); + + var result = await service.QueryAsync(new ScoredFindingsQuery + { + TenantId = "tenant-a", + Limit = 10 + }); + + result.TotalCount.Should().Be(1); + result.Findings.Should().HaveCount(1); + + var finding = result.Findings.Single(); + finding.AttestationCount.Should().Be(3); + finding.VerifiedAttestationCount.Should().Be(2); + finding.FailedAttestationCount.Should().Be(1); + finding.UnverifiedAttestationCount.Should().Be(0); + finding.AttestationStatus.Should().Be(OverallVerificationStatus.PartiallyVerified); + } + + private sealed class FakeFindingProjectionRepository : IFindingProjectionRepository + { + private readonly FindingProjection _projection; + + public FakeFindingProjectionRepository(FindingProjection projection) + { + _projection = projection; + } + + public Task GetCheckpointAsync(CancellationToken cancellationToken) => + Task.FromResult(ProjectionCheckpoint.Initial(TimeProvider.System)); + + public Task<(IReadOnlyList Projections, int TotalCount)> QueryScoredAsync( + ScoredFindingsQuery query, + CancellationToken cancellationToken) => + Task.FromResult((new List { _projection } as IReadOnlyList, 1)); + + public Task GetFindingStatsSinceAsync(string tenantId, DateTimeOffset since, CancellationToken cancellationToken) => + Task.FromResult(new FindingStatsResult(0, 0, 0, 0, 0, 0)); + + public Task<(int Total, int Scored, decimal AvgScore, decimal MaxScore)> GetRiskAggregatesAsync(string tenantId, string? policyVersion, CancellationToken cancellationToken) => + Task.FromResult((0, 0, 0m, 0m)); + + public Task GetScoreDistributionAsync(string tenantId, string? policyVersion, CancellationToken cancellationToken) => + Task.FromResult(new ScoreDistribution()); + + public Task GetSeverityDistributionAsync(string tenantId, string? policyVersion, CancellationToken cancellationToken) => + Task.FromResult(new SeverityDistribution()); + + public Task SaveCheckpointAsync(ProjectionCheckpoint checkpoint, CancellationToken cancellationToken) => + Task.CompletedTask; + + public Task InsertHistoryAsync(FindingHistoryEntry entry, CancellationToken cancellationToken) => + Task.CompletedTask; + + public Task InsertActionAsync(TriageActionEntry entry, CancellationToken cancellationToken) => + Task.CompletedTask; + + public Task GetAsync(string tenantId, string findingId, string policyVersion, CancellationToken cancellationToken) => + Task.FromResult(_projection); + + public Task UpsertAsync(FindingProjection projection, CancellationToken cancellationToken) => + Task.CompletedTask; + } + + private sealed class NullRiskExplanationStore : IRiskExplanationStore + { + public Task GetAsync(string tenantId, string findingId, Guid? 
explanationId, CancellationToken cancellationToken) => + Task.FromResult(null); + + public Task StoreAsync(string tenantId, ScoredFindingExplanation explanation, CancellationToken cancellationToken) => + Task.CompletedTask; + } +} diff --git a/src/Mirror/StellaOps.Mirror.Creator/TASKS.md b/src/Mirror/StellaOps.Mirror.Creator/TASKS.md index a108a625d..c393bcb44 100644 --- a/src/Mirror/StellaOps.Mirror.Creator/TASKS.md +++ b/src/Mirror/StellaOps.Mirror.Creator/TASKS.md @@ -8,3 +8,4 @@ | MIRROR-CRT-57-002 | DONE | Time-anchor DSSE emitted when SIGN_KEY is set; bundle meta + verifier check anchor integrity. | | MIRROR-CRT-58-001 | DONE | CLI wrappers (`mirror-create.sh`, `mirror-verify.sh`) for deterministic build/verify flows; uses existing assembler + verifier. | | MIRROR-CRT-58-002 | DOING (dev) | Export Center scheduling helper (`src/Mirror/StellaOps.Mirror.Creator/schedule-export-center-run.sh`) added; production signing still pending MIRROR-CRT-56-002 key. | +| EXPORT-OBS-51-001 / 54-001 | DONE | Export Center handoff scripted via `export-center-wire.sh`, scheduler payload now carries bundle metadata, and mirror-sign CI uploads handoff outputs. | diff --git a/src/Mirror/StellaOps.Mirror.Creator/schedule-export-center-run.sh b/src/Mirror/StellaOps.Mirror.Creator/schedule-export-center-run.sh old mode 100644 new mode 100755 index e2ed1ea09..d7b46b40a --- a/src/Mirror/StellaOps.Mirror.Creator/schedule-export-center-run.sh +++ b/src/Mirror/StellaOps.Mirror.Creator/schedule-export-center-run.sh @@ -3,6 +3,18 @@ set -euo pipefail # Schedule an Export Center run for mirror bundles and emit an audit log entry. # Requires curl. Uses bearer token auth for simplicity; swap to DPoP if/when gateway enforces it. +# Usage: +# EXPORT_CENTER_BASE_URL=https://export.example.com \ +# EXPORT_CENTER_TENANT=tenant-a \ +# EXPORT_CENTER_TOKEN=token123 \ +# ./schedule-export-center-run.sh mirror:thin '["vex","advisory"]' '["tar.gz","json"]' +# Env: +# EXPORT_CENTER_BASE_URL (default: http://localhost:8080) +# EXPORT_CENTER_TENANT (default: tenant-default) +# EXPORT_CENTER_PROJECT (optional header) +# EXPORT_CENTER_TOKEN (optional Bearer token) +# EXPORT_CENTER_ARTIFACTS_JSON (optional JSON array of {name,path,sha256} to include in payload) +# AUDIT_LOG_PATH (default: ./logs/export-center-schedule.log) BASE_URL="${EXPORT_CENTER_BASE_URL:-http://localhost:8080}" TENANT="${EXPORT_CENTER_TENANT:-tenant-default}" @@ -19,6 +31,7 @@ fi TARGETS_JSON="${2:-[\"vex\",\"advisory\",\"policy\"]}" FORMATS_JSON="${3:-[\"json\",\"ndjson\"]}" +ARTIFACTS_JSON="${EXPORT_CENTER_ARTIFACTS_JSON:-}" mkdir -p "$(dirname "$AUDIT_LOG")" @@ -27,15 +40,39 @@ if [[ -n "$TOKEN" ]]; then AUTH_HEADER=(-H "Authorization: Bearer ${TOKEN}") fi -payload="$(cat < object: + try: + return json.loads(os.environ[env_key]) + except KeyError: + print(f"missing env: {env_key}", file=sys.stderr) + sys.exit(1) + except json.JSONDecodeError as exc: + print(f"invalid JSON in {env_key}: {exc}", file=sys.stderr) + sys.exit(1) + +payload = { + "profileId": os.environ["PROFILE_ID"], + "targets": parse_json("TARGETS_JSON"), + "formats": parse_json("FORMATS_JSON"), + "retentionDays": 30, + "priority": "normal", } -JSON + +artifacts_raw = os.environ.get("ARTIFACTS_JSON", "").strip() +if artifacts_raw: + try: + payload["artifacts"] = json.loads(artifacts_raw) + except json.JSONDecodeError as exc: + print(f"invalid JSON in EXPORT_CENTER_ARTIFACTS_JSON: {exc}", file=sys.stderr) + sys.exit(1) + +print(json.dumps(payload)) +PY )" response="$(curl -sS -X POST 
"${BASE_URL}/export-center/runs" \ diff --git a/src/Scanner/StellaOps.Scanner.Analyzers.Native/Reachability/NativeReachabilityGraphBuilder.cs b/src/Scanner/StellaOps.Scanner.Analyzers.Native/Reachability/NativeReachabilityGraphBuilder.cs new file mode 100644 index 000000000..c1443fbe1 --- /dev/null +++ b/src/Scanner/StellaOps.Scanner.Analyzers.Native/Reachability/NativeReachabilityGraphBuilder.cs @@ -0,0 +1,179 @@ +using System.Collections.Immutable; +using System.Security.Cryptography; +using System.Text; +using StellaOps.Scanner.Analyzers.Native.Observations; + +namespace StellaOps.Scanner.Analyzers.Native.Reachability; + +/// +/// Builds a deterministic reachability graph from native observations. +/// +internal static class NativeReachabilityGraphBuilder +{ + private const string PayloadType = "stellaops.native.graph@1"; + + public static NativeReachabilityGraph Build(NativeObservationDocument document, string? layerDigest = null) + { + ArgumentNullException.ThrowIfNull(document); + ArgumentNullException.ThrowIfNull(document.Binary); + + var nodes = new List(); + var edges = new List(); + + var binaryId = ResolveBinaryId(document); + var codeId = ResolveCodeId(document); + var binaryNodeId = $"bin::{binaryId}"; + var rootNodeId = $"root::{binaryId}"; + + // Root node to capture synthetic sources (init arrays, entrypoints). + nodes.Add(new NativeReachabilityNode( + Id: rootNodeId, + Kind: "root", + Name: document.Binary.Path ?? "native-binary", + Path: document.Binary.Path, + BuildId: document.Binary.BuildId, + CodeId: codeId, + Format: document.Binary.Format, + Architecture: document.Binary.Architecture)); + + nodes.Add(new NativeReachabilityNode( + Id: binaryNodeId, + Kind: "binary", + Name: document.Binary.Path ?? "native-binary", + Path: document.Binary.Path, + BuildId: document.Binary.BuildId, + CodeId: codeId, + Format: document.Binary.Format, + Architecture: document.Binary.Architecture)); + + edges.Add(new NativeReachabilityEdge(rootNodeId, binaryNodeId, "binary")); + + // Entrypoints -> binary (synthetic roots) + foreach (var entry in document.Entrypoints ?? Array.Empty()) + { + var entryId = $"entry::{entry.Symbol ?? entry.Type ?? "entry"}"; + nodes.Add(new NativeReachabilityNode( + Id: entryId, + Kind: "entrypoint", + Name: entry.Symbol ?? entry.Type ?? "entry", + Path: document.Binary.Path, + BuildId: document.Binary.BuildId, + CodeId: codeId, + Format: document.Binary.Format, + Architecture: document.Binary.Architecture)); + + edges.Add(new NativeReachabilityEdge(rootNodeId, entryId, "entrypoint")); + edges.Add(new NativeReachabilityEdge(entryId, binaryNodeId, "entrypoint")); + } + + // Declared dependencies -> binary + foreach (var dep in document.DeclaredEdges ?? Array.Empty()) + { + var targetId = $"decl::{dep.Target}"; + nodes.Add(new NativeReachabilityNode( + Id: targetId, + Kind: "dependency", + Name: dep.Target ?? "unknown", + BuildId: null, + CodeId: null, + Format: null, + Path: dep.Target)); + + edges.Add(new NativeReachabilityEdge(binaryNodeId, targetId, dep.Reason ?? "declared")); + } + + // Heuristic/runtime edges as unknown targets + foreach (var edge in document.HeuristicEdges ?? Array.Empty()) + { + var targetId = $"heur::{edge.Target}"; + nodes.Add(new NativeReachabilityNode( + Id: targetId, + Kind: "dependency", + Name: edge.Target ?? "unknown", + Path: edge.Target)); + + edges.Add(new NativeReachabilityEdge(binaryNodeId, targetId, edge.Reason ?? "heuristic")); + } + + foreach (var edge in document.RuntimeEdges ?? 
Array.Empty()) + { + var targetId = $"rt::{edge.Target}"; + nodes.Add(new NativeReachabilityNode( + Id: targetId, + Kind: "runtime", + Name: edge.Target ?? "runtime", + Path: edge.Target)); + + edges.Add(new NativeReachabilityEdge(binaryNodeId, targetId, edge.Reason ?? "runtime")); + } + + var distinctNodes = nodes + .GroupBy(n => n.Id, StringComparer.Ordinal) + .Select(g => g.First()) + .OrderBy(n => n.Id, StringComparer.Ordinal) + .ToImmutableArray(); + + var distinctEdges = edges + .GroupBy(e => (e.From, e.To, e.Reason), ValueTuple.Create) + .Select(g => g.First()) + .OrderBy(e => e.From, StringComparer.Ordinal) + .ThenBy(e => e.To, StringComparer.Ordinal) + .ThenBy(e => e.Reason, StringComparer.Ordinal) + .ToImmutableArray(); + + return new NativeReachabilityGraph(distinctNodes, distinctEdges, layerDigest, document.Binary.BuildId, codeId); + } + + public static NativeReachabilityBundle ToBundle(NativeObservationDocument document, string? layerDigest = null) + { + var graph = Build(document, layerDigest); + return new NativeReachabilityBundle( + PayloadType, + graph, + layerDigest, + graph.BuildId, + graph.CodeId); + } + + private static string ResolveBinaryId(NativeObservationDocument document) + { + if (!string.IsNullOrWhiteSpace(document.Binary.BuildId)) + { + return $"buildid:{document.Binary.BuildId}"; + } + + if (!string.IsNullOrWhiteSpace(document.Binary.Sha256)) + { + return $"sha256:{document.Binary.Sha256}"; + } + + return $"path:{document.Binary.Path}"; + } + + private static string? ResolveCodeId(NativeObservationDocument document) + { + if (!string.IsNullOrWhiteSpace(document.Binary.BuildId)) + { + return document.Binary.BuildId; + } + + if (!string.IsNullOrWhiteSpace(document.Binary.Sha256)) + { + return document.Binary.Sha256; + } + + if (!string.IsNullOrWhiteSpace(document.Binary.Path)) + { + return ComputeSha256(document.Binary.Path); + } + + return null; + } + + private static string ComputeSha256(string value) + { + var bytes = Encoding.UTF8.GetBytes(value); + var hash = SHA256.HashData(bytes); + return Convert.ToHexString(hash).ToLowerInvariant(); + } +} diff --git a/src/Scanner/StellaOps.Scanner.Analyzers.Native/Reachability/NativeReachabilityModels.cs b/src/Scanner/StellaOps.Scanner.Analyzers.Native/Reachability/NativeReachabilityModels.cs new file mode 100644 index 000000000..1a9112ddc --- /dev/null +++ b/src/Scanner/StellaOps.Scanner.Analyzers.Native/Reachability/NativeReachabilityModels.cs @@ -0,0 +1,32 @@ +using System.Collections.Immutable; + +namespace StellaOps.Scanner.Analyzers.Native.Reachability; + +internal sealed record NativeReachabilityNode( + string Id, + string Kind, + string Name, + string? BuildId = null, + string? CodeId = null, + string? Format = null, + string? Path = null, + string? Architecture = null); + +internal sealed record NativeReachabilityEdge( + string From, + string To, + string Reason); + +internal sealed record NativeReachabilityGraph( + ImmutableArray Nodes, + ImmutableArray Edges, + string? LayerDigest, + string? BuildId, + string? CodeId); + +internal sealed record NativeReachabilityBundle( + string PayloadType, + NativeReachabilityGraph Graph, + string? LayerDigest, + string? BuildId, + string? 
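// DotNetAnalyzerOptions.Load below reads an optional dotnet-il.config.json
// from the scan root and silently falls back to defaults on IO/JSON/permission
// failures, so a malformed config can never fail a scan. A config shape the
// loader accepts, per the property names in this diff; the values shown are
// illustrative assumptions:
var sampleConfig = """
    {
      "emitDependencyEdges": true,
      "includeEntrypoints": true,
      "runtimeEvidencePath": "runtime-evidence.ndjson",
      "runtimeEvidenceConfidence": "high"
    }
    """;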
CodeId); diff --git a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet/Internal/DotNetAnalyzerOptions.cs b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet/Internal/DotNetAnalyzerOptions.cs new file mode 100644 index 000000000..e89478686 --- /dev/null +++ b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet/Internal/DotNetAnalyzerOptions.cs @@ -0,0 +1,57 @@ +using System.Text.Json; +using System.Text.Json.Serialization; + +namespace StellaOps.Scanner.Analyzers.Lang.DotNet.Internal; + +internal sealed class DotNetAnalyzerOptions +{ + private const string DefaultConfigFileName = "dotnet-il.config.json"; + + [JsonPropertyName("emitDependencyEdges")] + public bool EmitDependencyEdges { get; init; } = false; + + [JsonPropertyName("includeEntrypoints")] + public bool IncludeEntrypoints { get; init; } = false; + + [JsonPropertyName("runtimeEvidencePath")] + public string? RuntimeEvidencePath { get; init; } + + [JsonPropertyName("runtimeEvidenceConfidence")] + public string? RuntimeEvidenceConfidence { get; init; } + + public static DotNetAnalyzerOptions Load(LanguageAnalyzerContext context) + { + ArgumentNullException.ThrowIfNull(context); + + var path = Path.Combine(context.RootPath, DefaultConfigFileName); + if (!File.Exists(path)) + { + return new DotNetAnalyzerOptions(); + } + + try + { + var json = File.ReadAllText(path); + var options = new JsonSerializerOptions + { + PropertyNameCaseInsensitive = true, + ReadCommentHandling = JsonCommentHandling.Skip, + AllowTrailingCommas = true + }; + + return JsonSerializer.Deserialize(json, options) ?? new DotNetAnalyzerOptions(); + } + catch (IOException) + { + return new DotNetAnalyzerOptions(); + } + catch (JsonException) + { + return new DotNetAnalyzerOptions(); + } + catch (UnauthorizedAccessException) + { + return new DotNetAnalyzerOptions(); + } + } +} diff --git a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet/Internal/DotNetDependencyCollector.cs b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet/Internal/DotNetDependencyCollector.cs index 7a780a555..f916a471e 100644 --- a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet/Internal/DotNetDependencyCollector.cs +++ b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet/Internal/DotNetDependencyCollector.cs @@ -1,269 +1,323 @@ -using System.Diagnostics; -using System.Globalization; -using System.Linq; -using System.Reflection; -using System.Security.Cryptography; -using System.Text.Json; - -namespace StellaOps.Scanner.Analyzers.Lang.DotNet.Internal; - -internal static class DotNetDependencyCollector -{ - private static readonly EnumerationOptions Enumeration = new() - { - RecurseSubdirectories = true, - IgnoreInaccessible = true, - AttributesToSkip = FileAttributes.Device | FileAttributes.ReparsePoint - }; - - public static ValueTask> CollectAsync(LanguageAnalyzerContext context, CancellationToken cancellationToken) - { - ArgumentNullException.ThrowIfNull(context); - - var depsFiles = Directory - .EnumerateFiles(context.RootPath, "*.deps.json", Enumeration) - .OrderBy(static path => path, StringComparer.Ordinal) - .ToArray(); - - if (depsFiles.Length == 0) - { - return ValueTask.FromResult>(Array.Empty()); - } - - var aggregator = new DotNetPackageAggregator(context); - - foreach (var depsPath in depsFiles) - { - cancellationToken.ThrowIfCancellationRequested(); - - try - { - var relativeDepsPath = NormalizeRelative(context.GetRelativePath(depsPath)); - var depsFile = DotNetDepsFile.Load(depsPath, 
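// The collector enumerates *.deps.json recursively with device files, reparse
// points, and inaccessible entries skipped, then sorts paths ordinally so
// output order is deterministic; per-file IO/JSON/permission errors are
// treated as skips. The enumeration setup, in isolation:
var enumeration = new EnumerationOptions
{
    RecurseSubdirectories = true,
    IgnoreInaccessible = true,
    AttributesToSkip = FileAttributes.Device | FileAttributes.ReparsePoint
};
var depsFiles = Directory
    .EnumerateFiles(rootPath, "*.deps.json", enumeration)
    .OrderBy(static path => path, StringComparer.Ordinal)
    .ToArray();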
relativeDepsPath, cancellationToken); - if (depsFile is null) - { - continue; - } - - DotNetRuntimeConfig? runtimeConfig = null; - var runtimeConfigPath = Path.ChangeExtension(depsPath, ".runtimeconfig.json"); - if (!string.IsNullOrEmpty(runtimeConfigPath) && File.Exists(runtimeConfigPath)) - { - var relativeRuntimePath = NormalizeRelative(context.GetRelativePath(runtimeConfigPath)); - runtimeConfig = DotNetRuntimeConfig.Load(runtimeConfigPath, relativeRuntimePath, cancellationToken); - } - - aggregator.Add(depsFile, runtimeConfig); - } - catch (IOException) - { - continue; - } - catch (JsonException) - { - continue; - } - catch (UnauthorizedAccessException) - { - continue; - } - } - - var packages = aggregator.Build(cancellationToken); - return ValueTask.FromResult>(packages); - } - - private static string NormalizeRelative(string path) - { - if (string.IsNullOrWhiteSpace(path) || path == ".") - { - return "."; - } - - var normalized = path.Replace('\\', '/'); - return string.IsNullOrWhiteSpace(normalized) ? "." : normalized; - } -} - -internal sealed class DotNetPackageAggregator -{ - private readonly LanguageAnalyzerContext _context; - private readonly IDotNetAuthenticodeInspector? _authenticodeInspector; - private readonly Dictionary _packages = new(StringComparer.Ordinal); - - public DotNetPackageAggregator(LanguageAnalyzerContext context) - { - _context = context ?? throw new ArgumentNullException(nameof(context)); - if (context.TryGetService(out var inspector)) - { - _authenticodeInspector = inspector; - } - } - - public void Add(DotNetDepsFile depsFile, DotNetRuntimeConfig? runtimeConfig) - { - ArgumentNullException.ThrowIfNull(depsFile); - - foreach (var library in depsFile.Libraries.Values) - { - if (!library.IsPackage) - { - continue; - } - - var normalizedId = DotNetPackageBuilder.NormalizeId(library.Id); - var key = DotNetPackageBuilder.BuildKey(normalizedId, library.Version); - - if (!_packages.TryGetValue(key, out var builder)) - { - builder = new DotNetPackageBuilder(_context, _authenticodeInspector, library.Id, normalizedId, library.Version); - _packages[key] = builder; - } - - builder.AddLibrary(library, depsFile.RelativePath, runtimeConfig); - } - } - - public IReadOnlyList Build(CancellationToken cancellationToken) - { - if (_packages.Count == 0) - { - return Array.Empty(); - } - - var items = new List(_packages.Count); - foreach (var builder in _packages.Values) - { - cancellationToken.ThrowIfCancellationRequested(); - items.Add(builder.Build(cancellationToken)); - } - - items.Sort(static (left, right) => string.CompareOrdinal(left.ComponentKey, right.ComponentKey)); - return items; - } -} - -internal sealed class DotNetPackageBuilder -{ - private readonly LanguageAnalyzerContext _context; - private readonly IDotNetAuthenticodeInspector? _authenticodeInspector; - - private readonly string _originalId; - private readonly string _normalizedId; - private readonly string _version; - - private bool? 
_serviceable; - - private readonly SortedSet _sha512 = new(StringComparer.Ordinal); - private readonly SortedSet _packagePaths = new(StringComparer.Ordinal); - private readonly SortedSet _hashPaths = new(StringComparer.Ordinal); - private readonly SortedSet _depsPaths = new(StringComparer.Ordinal); - private readonly SortedSet _targetFrameworks = new(StringComparer.Ordinal); - private readonly SortedSet _runtimeIdentifiers = new(StringComparer.Ordinal); - private readonly SortedSet _dependencies = new(StringComparer.OrdinalIgnoreCase); - private readonly SortedSet _runtimeConfigPaths = new(StringComparer.Ordinal); - private readonly SortedSet _runtimeConfigTfms = new(StringComparer.OrdinalIgnoreCase); - private readonly SortedSet _runtimeConfigFrameworks = new(StringComparer.OrdinalIgnoreCase); - private readonly SortedSet _runtimeConfigGraph = new(StringComparer.OrdinalIgnoreCase); - - private readonly Dictionary _assemblies = new(StringComparer.OrdinalIgnoreCase); - private readonly Dictionary _nativeAssets = new(StringComparer.OrdinalIgnoreCase); - private readonly HashSet _evidence = new(new LanguageComponentEvidenceComparer()); - private bool _usedByEntrypoint; - - public DotNetPackageBuilder(LanguageAnalyzerContext context, IDotNetAuthenticodeInspector? authenticodeInspector, string originalId, string normalizedId, string version) - { - _context = context ?? throw new ArgumentNullException(nameof(context)); - _authenticodeInspector = authenticodeInspector; - _originalId = string.IsNullOrWhiteSpace(originalId) ? normalizedId : originalId.Trim(); - _normalizedId = normalizedId; - _version = version ?? string.Empty; - } - - public static string BuildKey(string normalizedId, string version) - => $"{normalizedId}::{version}"; - - public static string NormalizeId(string id) - => string.IsNullOrWhiteSpace(id) ? string.Empty : id.Trim().ToLowerInvariant(); - - public void AddLibrary(DotNetLibrary library, string relativeDepsPath, DotNetRuntimeConfig? runtimeConfig) - { - ArgumentNullException.ThrowIfNull(library); - - if (library.Serviceable is bool serviceable) - { - _serviceable = _serviceable.HasValue - ? _serviceable.Value || serviceable - : serviceable; - } - - AddIfPresent(_sha512, library.Sha512); - AddIfPresent(_packagePaths, library.PackagePath); - AddIfPresent(_hashPaths, library.HashPath); - AddIfPresent(_depsPaths, NormalizeRelativePath(relativeDepsPath)); - - foreach (var dependency in library.Dependencies) - { - AddIfPresent(_dependencies, dependency, normalizeLower: true); - } - - foreach (var tfm in library.TargetFrameworks) - { - AddIfPresent(_targetFrameworks, tfm); - } - - foreach (var rid in library.RuntimeIdentifiers) - { - AddIfPresent(_runtimeIdentifiers, rid); - } - - AddRuntimeAssets(library); - - _evidence.Add(new LanguageComponentEvidence( - LanguageEvidenceKind.File, - "deps.json", - NormalizeRelativePath(relativeDepsPath), - library.Key, - Sha256: null)); - - if (runtimeConfig is not null) - { - AddRuntimeConfig(runtimeConfig); - } - } - - public DotNetPackage Build(CancellationToken cancellationToken) - { - var metadata = new List>(32) - { - new("package.id", _originalId), - new("package.id.normalized", _normalizedId), - new("package.version", _version) - }; - - if (_serviceable.HasValue) - { - metadata.Add(new KeyValuePair("package.serviceable", _serviceable.Value ? 
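// Build flattens each sorted evidence set into indexed metadata entries via
// AddIndexed below ("package.sha512", "deps.path", ...). The helper itself is
// outside this hunk; a plausible sketch, with the "prefix.index" key scheme
// assumed rather than confirmed:
static void AddIndexed(List<KeyValuePair<string, string>> metadata, string prefix, IEnumerable<string> values)
{
    var index = 0;
    foreach (var value in values)
    {
        metadata.Add(new KeyValuePair<string, string>($"{prefix}.{index++}", value));
    }
}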
"true" : "false")); - } - - AddIndexed(metadata, "package.sha512", _sha512); - AddIndexed(metadata, "package.path", _packagePaths); - AddIndexed(metadata, "package.hashPath", _hashPaths); - AddIndexed(metadata, "deps.path", _depsPaths); - AddIndexed(metadata, "deps.dependency", _dependencies); - AddIndexed(metadata, "deps.tfm", _targetFrameworks); - AddIndexed(metadata, "deps.rid", _runtimeIdentifiers); - AddIndexed(metadata, "runtimeconfig.path", _runtimeConfigPaths); - AddIndexed(metadata, "runtimeconfig.tfm", _runtimeConfigTfms); - AddIndexed(metadata, "runtimeconfig.framework", _runtimeConfigFrameworks); - AddIndexed(metadata, "runtimeconfig.graph", _runtimeConfigGraph); - +using System.Diagnostics; +using System.Globalization; +using System.Linq; +using System.Reflection; +using System.Security.Cryptography; +using System.Text.Json; + +namespace StellaOps.Scanner.Analyzers.Lang.DotNet.Internal; + +internal static class DotNetDependencyCollector +{ + private static readonly EnumerationOptions Enumeration = new() + { + RecurseSubdirectories = true, + IgnoreInaccessible = true, + AttributesToSkip = FileAttributes.Device | FileAttributes.ReparsePoint + }; + + public static async ValueTask> CollectAsync(LanguageAnalyzerContext context, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(context); + + var options = DotNetAnalyzerOptions.Load(context); + var entrypoints = options.IncludeEntrypoints + ? await DotNetEntrypointResolver.ResolveAsync(context, cancellationToken).ConfigureAwait(false) + : Array.Empty(); + var runtimeEdges = DotNetRuntimeEvidenceLoader.Load(context, options, cancellationToken); + var depsFiles = Directory + .EnumerateFiles(context.RootPath, "*.deps.json", Enumeration) + .OrderBy(static path => path, StringComparer.Ordinal) + .ToArray(); + + if (depsFiles.Length == 0) + { + return Array.Empty(); + } + + var aggregator = new DotNetPackageAggregator(context, options, entrypoints, runtimeEdges); + + foreach (var depsPath in depsFiles) + { + cancellationToken.ThrowIfCancellationRequested(); + + try + { + var relativeDepsPath = NormalizeRelative(context.GetRelativePath(depsPath)); + var depsFile = DotNetDepsFile.Load(depsPath, relativeDepsPath, cancellationToken); + if (depsFile is null) + { + continue; + } + + DotNetRuntimeConfig? runtimeConfig = null; + var runtimeConfigPath = Path.ChangeExtension(depsPath, ".runtimeconfig.json"); + if (!string.IsNullOrEmpty(runtimeConfigPath) && File.Exists(runtimeConfigPath)) + { + var relativeRuntimePath = NormalizeRelative(context.GetRelativePath(runtimeConfigPath)); + runtimeConfig = DotNetRuntimeConfig.Load(runtimeConfigPath, relativeRuntimePath, cancellationToken); + } + + aggregator.Add(depsFile, runtimeConfig); + } + catch (IOException) + { + continue; + } + catch (JsonException) + { + continue; + } + catch (UnauthorizedAccessException) + { + continue; + } + } + + var packages = aggregator.Build(cancellationToken); + return packages; + } + + private static string NormalizeRelative(string path) + { + if (string.IsNullOrWhiteSpace(path) || path == ".") + { + return "."; + } + + var normalized = path.Replace('\\', '/'); + return string.IsNullOrWhiteSpace(normalized) ? "." : normalized; + } +} + +internal sealed class DotNetPackageAggregator +{ + private readonly LanguageAnalyzerContext _context; + private readonly IDotNetAuthenticodeInspector? 
_authenticodeInspector; + private readonly DotNetAnalyzerOptions _options; + private readonly IReadOnlyList _entrypoints; + private readonly IReadOnlyDictionary> _runtimeEdges; + private readonly Dictionary _packages = new(StringComparer.Ordinal); + + public DotNetPackageAggregator( + LanguageAnalyzerContext context, + DotNetAnalyzerOptions options, + IReadOnlyList entrypoints, + IReadOnlyDictionary> runtimeEdges) + { + _context = context ?? throw new ArgumentNullException(nameof(context)); + _options = options ?? throw new ArgumentNullException(nameof(options)); + _entrypoints = entrypoints ?? Array.Empty(); + _runtimeEdges = runtimeEdges ?? new Dictionary>(StringComparer.OrdinalIgnoreCase); + if (context.TryGetService(out var inspector)) + { + _authenticodeInspector = inspector; + } + } + + public void Add(DotNetDepsFile depsFile, DotNetRuntimeConfig? runtimeConfig) + { + ArgumentNullException.ThrowIfNull(depsFile); + + foreach (var library in depsFile.Libraries.Values) + { + if (!library.IsPackage) + { + continue; + } + + var normalizedId = DotNetPackageBuilder.NormalizeId(library.Id); + var key = DotNetPackageBuilder.BuildKey(normalizedId, library.Version); + + if (!_packages.TryGetValue(key, out var builder)) + { + _runtimeEdges.TryGetValue(normalizedId, out var runtimeEdges); + builder = new DotNetPackageBuilder( + _context, + _authenticodeInspector, + _options, + _entrypoints, + runtimeEdges ?? Array.Empty(), + library.Id, + normalizedId, + library.Version); + _packages[key] = builder; + } + + builder.AddLibrary(library, depsFile.RelativePath, runtimeConfig); + } + } + + public IReadOnlyList Build(CancellationToken cancellationToken) + { + if (_packages.Count == 0) + { + return Array.Empty(); + } + + var items = new List(_packages.Count); + foreach (var builder in _packages.Values) + { + cancellationToken.ThrowIfCancellationRequested(); + items.Add(builder.Build(cancellationToken)); + } + + items.Sort(static (left, right) => string.CompareOrdinal(left.ComponentKey, right.ComponentKey)); + return items; + } +} + +internal sealed class DotNetPackageBuilder +{ + private readonly LanguageAnalyzerContext _context; + private readonly IDotNetAuthenticodeInspector? _authenticodeInspector; + private readonly DotNetAnalyzerOptions _options; + private readonly IReadOnlyList _entrypoints; + private readonly IReadOnlyList _runtimeEdges; + + private readonly string _originalId; + private readonly string _normalizedId; + private readonly string _version; + + private bool? 
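// Reviewer note (not part of the patch): the CollectAsync discovery above pairs
// EnumerationOptions (IgnoreInaccessible, skip Device/ReparsePoint entries) with an
// ordinal sort, which avoids symlink cycles and permission faults and keeps the
// *.deps.json ordering deterministic across file systems. Stand-alone sketch with the
// same settings; the root path is a placeholder:

using System;
using System.IO;
using System.Linq;

static class DepsDiscoverySketch
{
    static void Main()
    {
        var enumeration = new EnumerationOptions
        {
            RecurseSubdirectories = true,
            IgnoreInaccessible = true,                  // unreadable directories are skipped, not fatal
            AttributesToSkip = FileAttributes.Device | FileAttributes.ReparsePoint // no symlink loops
        };

        var depsFiles = Directory
            .EnumerateFiles("/scan/root", "*.deps.json", enumeration)
            .OrderBy(static path => path, StringComparer.Ordinal) // culture-independent ordering
            .ToArray();

        foreach (var path in depsFiles)
        {
            Console.WriteLine(path);
        }
    }
}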
_serviceable; + + private readonly SortedSet _sha512 = new(StringComparer.Ordinal); + private readonly SortedSet _packagePaths = new(StringComparer.Ordinal); + private readonly SortedSet _hashPaths = new(StringComparer.Ordinal); + private readonly SortedSet _depsPaths = new(StringComparer.Ordinal); + private readonly SortedSet _targetFrameworks = new(StringComparer.Ordinal); + private readonly SortedSet _runtimeIdentifiers = new(StringComparer.Ordinal); + private readonly SortedSet _dependencies = new(StringComparer.OrdinalIgnoreCase); + private readonly SortedSet _runtimeConfigPaths = new(StringComparer.Ordinal); + private readonly SortedSet _runtimeConfigTfms = new(StringComparer.OrdinalIgnoreCase); + private readonly SortedSet _runtimeConfigFrameworks = new(StringComparer.OrdinalIgnoreCase); + private readonly SortedSet _runtimeConfigGraph = new(StringComparer.OrdinalIgnoreCase); + + private readonly Dictionary _assemblies = new(StringComparer.OrdinalIgnoreCase); + private readonly Dictionary _nativeAssets = new(StringComparer.OrdinalIgnoreCase); + private readonly HashSet _evidence = new(new LanguageComponentEvidenceComparer()); + private bool _usedByEntrypoint; + + public DotNetPackageBuilder( + LanguageAnalyzerContext context, + IDotNetAuthenticodeInspector? authenticodeInspector, + DotNetAnalyzerOptions options, + IReadOnlyList entrypoints, + IReadOnlyList runtimeEdges, + string originalId, + string normalizedId, + string version) + { + _context = context ?? throw new ArgumentNullException(nameof(context)); + _authenticodeInspector = authenticodeInspector; + _options = options ?? throw new ArgumentNullException(nameof(options)); + _entrypoints = entrypoints ?? Array.Empty(); + _runtimeEdges = runtimeEdges ?? Array.Empty(); + _originalId = string.IsNullOrWhiteSpace(originalId) ? normalizedId : originalId.Trim(); + _normalizedId = normalizedId; + _version = version ?? string.Empty; + } + + public static string BuildKey(string normalizedId, string version) + => $"{normalizedId}::{version}"; + + public static string NormalizeId(string id) + => string.IsNullOrWhiteSpace(id) ? string.Empty : id.Trim().ToLowerInvariant(); + + public void AddLibrary(DotNetLibrary library, string relativeDepsPath, DotNetRuntimeConfig? runtimeConfig) + { + ArgumentNullException.ThrowIfNull(library); + + if (library.Serviceable is bool serviceable) + { + _serviceable = _serviceable.HasValue + ? 
_serviceable.Value || serviceable + : serviceable; + } + + AddIfPresent(_sha512, library.Sha512); + AddIfPresent(_packagePaths, library.PackagePath); + AddIfPresent(_hashPaths, library.HashPath); + AddIfPresent(_depsPaths, NormalizeRelativePath(relativeDepsPath)); + + foreach (var dependency in library.Dependencies) + { + AddIfPresent(_dependencies, dependency, normalizeLower: true); + } + + foreach (var tfm in library.TargetFrameworks) + { + AddIfPresent(_targetFrameworks, tfm); + } + + foreach (var rid in library.RuntimeIdentifiers) + { + AddIfPresent(_runtimeIdentifiers, rid); + } + + AddRuntimeAssets(library); + + _evidence.Add(new LanguageComponentEvidence( + LanguageEvidenceKind.File, + "deps.json", + NormalizeRelativePath(relativeDepsPath), + library.Key, + Sha256: null)); + + if (runtimeConfig is not null) + { + AddRuntimeConfig(runtimeConfig); + } + } + + public DotNetPackage Build(CancellationToken cancellationToken) + { + var metadata = new List>(32) + { + new("package.id", _originalId), + new("package.id.normalized", _normalizedId), + new("package.version", _version) + }; + + if (_serviceable.HasValue) + { + metadata.Add(new KeyValuePair("package.serviceable", _serviceable.Value ? "true" : "false")); + } + + AddIndexed(metadata, "package.sha512", _sha512); + AddIndexed(metadata, "package.path", _packagePaths); + AddIndexed(metadata, "package.hashPath", _hashPaths); + AddIndexed(metadata, "deps.path", _depsPaths); + AddIndexed(metadata, "deps.dependency", _dependencies); + AddIndexed(metadata, "deps.tfm", _targetFrameworks); + AddIndexed(metadata, "deps.rid", _runtimeIdentifiers); + AddIndexed(metadata, "runtimeconfig.path", _runtimeConfigPaths); + AddIndexed(metadata, "runtimeconfig.tfm", _runtimeConfigTfms); + AddIndexed(metadata, "runtimeconfig.framework", _runtimeConfigFrameworks); + AddIndexed(metadata, "runtimeconfig.graph", _runtimeConfigGraph); + var assemblies = CollectAssemblyMetadata(cancellationToken); AddAssemblyMetadata(metadata, assemblies); var nativeAssets = CollectNativeMetadata(cancellationToken); AddNativeMetadata(metadata, nativeAssets); + if (_options.EmitDependencyEdges) + { + var declaredEdges = BuildDeclaredEdges(); + AddEdgeMetadata(metadata, declaredEdges, "edge"); + + if (_runtimeEdges.Count > 0) + { + AddEdgeMetadata(metadata, _runtimeEdges, "edge.runtime"); + } + } + + if (_options.IncludeEntrypoints && _entrypoints.Count > 0) + { + AddEntrypointMetadata(metadata, _entrypoints); + } + AppendLicenseMetadata(metadata, cancellationToken); AddProvenanceMetadata(metadata); @@ -271,138 +325,222 @@ internal sealed class DotNetPackageBuilder var evidence = _evidence .OrderBy(static item => item.Source, StringComparer.Ordinal) - .ThenBy(static item => item.Locator, StringComparer.Ordinal) - .ThenBy(static item => item.Value, StringComparer.Ordinal) - .ToArray(); - - return new DotNetPackage( - name: _originalId, - normalizedId: _normalizedId, - version: _version, - metadata: metadata, - evidence: evidence, - usedByEntrypoint: _usedByEntrypoint); - } - - private IReadOnlyList CollectAssemblyMetadata(CancellationToken cancellationToken) - { - if (_assemblies.Count == 0) - { - return Array.Empty(); - } - - var results = new List(_assemblies.Count); - foreach (var aggregate in _assemblies.Values.OrderBy(static aggregate => aggregate.AssetRelativePath, StringComparer.Ordinal)) - { - cancellationToken.ThrowIfCancellationRequested(); - results.Add(aggregate.Build(_context, _authenticodeInspector, cancellationToken)); - } - - return results; - } - - private 
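// Reviewer note (not part of the patch): Build() above canonicalises evidence with a
// three-key ordinal sort (Source, then Locator, then Value) so repeated scans of the same
// layer emit byte-identical output. Sketch of that contract, with the record trimmed to
// the three sort keys:

using System;
using System.Linq;

sealed record Evidence(string Source, string Locator, string Value);

static class EvidenceOrderSketch
{
    static void Main()
    {
        var evidence = new[]
        {
            new Evidence("deps.json", "app/app.deps.json", "pkg"),
            new Evidence("assembly", "lib/net8.0/A.dll", "a"),
            new Evidence("assembly", "lib/net8.0/A.dll", "B"),
        };

        var ordered = evidence
            .OrderBy(static e => e.Source, StringComparer.Ordinal)
            .ThenBy(static e => e.Locator, StringComparer.Ordinal)
            .ThenBy(static e => e.Value, StringComparer.Ordinal)
            .ToArray();

        // Ordinal puts "B" before "a" (uppercase code points sort first); a culture-aware
        // comparer here would make the output machine-dependent.
        foreach (var e in ordered)
        {
            Console.WriteLine($"{e.Source} | {e.Locator} | {e.Value}");
        }
    }
}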
IReadOnlyList CollectNativeMetadata(CancellationToken cancellationToken) - { - if (_nativeAssets.Count == 0) - { - return Array.Empty(); - } - - var results = new List(_nativeAssets.Count); - foreach (var aggregate in _nativeAssets.Values.OrderBy(static aggregate => aggregate.AssetRelativePath, StringComparer.Ordinal)) - { - cancellationToken.ThrowIfCancellationRequested(); - results.Add(aggregate.Build(_context, cancellationToken)); - } - - return results; - } - - private void AddAssemblyMetadata(ICollection> metadata, IReadOnlyList assemblies) - { - if (assemblies.Count == 0) - { - return; - } - - for (var index = 0; index < assemblies.Count; index++) - { - var record = assemblies[index]; - var prefix = $"assembly[{index}]"; - - if (record.UsedByEntrypoint) - { - _usedByEntrypoint = true; - } - - AddIfPresent(metadata, $"{prefix}.assetPath", record.AssetPath); - AddIfPresent(metadata, $"{prefix}.path", record.RelativePath); - AddIndexed(metadata, $"{prefix}.tfm", record.TargetFrameworks); - AddIndexed(metadata, $"{prefix}.rid", record.RuntimeIdentifiers); - AddIfPresent(metadata, $"{prefix}.version", record.AssemblyVersion); - AddIfPresent(metadata, $"{prefix}.fileVersion", record.FileVersion); - AddIfPresent(metadata, $"{prefix}.publicKeyToken", record.PublicKeyToken); - AddIfPresent(metadata, $"{prefix}.strongName", record.StrongName); - AddIfPresent(metadata, $"{prefix}.company", record.CompanyName); - AddIfPresent(metadata, $"{prefix}.product", record.ProductName); - AddIfPresent(metadata, $"{prefix}.productVersion", record.ProductVersion); - AddIfPresent(metadata, $"{prefix}.fileDescription", record.FileDescription); - AddIfPresent(metadata, $"{prefix}.sha256", record.Sha256); - - if (record.Authenticode is { } authenticode) - { - AddIfPresent(metadata, $"{prefix}.authenticode.subject", authenticode.Subject); - AddIfPresent(metadata, $"{prefix}.authenticode.issuer", authenticode.Issuer); - AddIfPresent(metadata, $"{prefix}.authenticode.notBefore", FormatTimestamp(authenticode.NotBefore)); - AddIfPresent(metadata, $"{prefix}.authenticode.notAfter", FormatTimestamp(authenticode.NotAfter)); - AddIfPresent(metadata, $"{prefix}.authenticode.thumbprint", authenticode.Thumbprint); - AddIfPresent(metadata, $"{prefix}.authenticode.serialNumber", authenticode.SerialNumber); - } - - if (!string.IsNullOrEmpty(record.RelativePath)) - { - _evidence.Add(new LanguageComponentEvidence( - LanguageEvidenceKind.File, - Source: "assembly", - Locator: record.RelativePath!, - Value: record.AssetPath, - Sha256: record.Sha256)); - } - } - } - + .ThenBy(static item => item.Locator, StringComparer.Ordinal) + .ThenBy(static item => item.Value, StringComparer.Ordinal) + .ToArray(); + + return new DotNetPackage( + name: _originalId, + normalizedId: _normalizedId, + version: _version, + metadata: metadata, + evidence: evidence, + usedByEntrypoint: _usedByEntrypoint); + } + + private IReadOnlyList CollectAssemblyMetadata(CancellationToken cancellationToken) + { + if (_assemblies.Count == 0) + { + return Array.Empty(); + } + + var results = new List(_assemblies.Count); + foreach (var aggregate in _assemblies.Values.OrderBy(static aggregate => aggregate.AssetRelativePath, StringComparer.Ordinal)) + { + cancellationToken.ThrowIfCancellationRequested(); + results.Add(aggregate.Build(_context, _authenticodeInspector, cancellationToken)); + } + + return results; + } + + private IReadOnlyList CollectNativeMetadata(CancellationToken cancellationToken) + { + if (_nativeAssets.Count == 0) + { + return Array.Empty(); + } + + 
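// Reviewer note (not part of the patch): the assembly records surfaced above are hydrated
// from the binaries themselves. The relevant BCL calls are sketched below;
// AssemblyName.GetAssemblyName reads identity metadata without loading the assembly, and
// BadImageFormatException (native DLL, corrupt image) is treated as "no managed identity"
// rather than an error, matching the builder's catch blocks. The path is a placeholder:

using System;
using System.Diagnostics;
using System.Reflection;

static class AssemblyIdentitySketch
{
    static void Main()
    {
        const string path = "/scan/root/app/MyLib.dll"; // hypothetical asset

        try
        {
            var name = AssemblyName.GetAssemblyName(path);   // metadata-only read
            var info = FileVersionInfo.GetVersionInfo(path); // version resource, if present

            var token = name.GetPublicKeyToken() is { Length: > 0 } bytes
                ? Convert.ToHexString(bytes).ToLowerInvariant()
                : "null";

            Console.WriteLine($"{name.Name}, Version={name.Version}, PublicKeyToken={token}");
            Console.WriteLine($"fileVersion={info.FileVersion}, product={info.ProductName}");
        }
        catch (BadImageFormatException)
        {
            Console.WriteLine("not a managed assembly");
        }
    }
}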
var results = new List(_nativeAssets.Count); + foreach (var aggregate in _nativeAssets.Values.OrderBy(static aggregate => aggregate.AssetRelativePath, StringComparer.Ordinal)) + { + cancellationToken.ThrowIfCancellationRequested(); + results.Add(aggregate.Build(_context, cancellationToken)); + } + + return results; + } + + private void AddAssemblyMetadata(ICollection> metadata, IReadOnlyList assemblies) + { + if (assemblies.Count == 0) + { + return; + } + + for (var index = 0; index < assemblies.Count; index++) + { + var record = assemblies[index]; + var prefix = $"assembly[{index}]"; + + if (record.UsedByEntrypoint) + { + _usedByEntrypoint = true; + } + + AddIfPresent(metadata, $"{prefix}.assetPath", record.AssetPath); + AddIfPresent(metadata, $"{prefix}.path", record.RelativePath); + AddIndexed(metadata, $"{prefix}.tfm", record.TargetFrameworks); + AddIndexed(metadata, $"{prefix}.rid", record.RuntimeIdentifiers); + AddIfPresent(metadata, $"{prefix}.version", record.AssemblyVersion); + AddIfPresent(metadata, $"{prefix}.fileVersion", record.FileVersion); + AddIfPresent(metadata, $"{prefix}.publicKeyToken", record.PublicKeyToken); + AddIfPresent(metadata, $"{prefix}.strongName", record.StrongName); + AddIfPresent(metadata, $"{prefix}.company", record.CompanyName); + AddIfPresent(metadata, $"{prefix}.product", record.ProductName); + AddIfPresent(metadata, $"{prefix}.productVersion", record.ProductVersion); + AddIfPresent(metadata, $"{prefix}.fileDescription", record.FileDescription); + AddIfPresent(metadata, $"{prefix}.sha256", record.Sha256); + + if (record.Authenticode is { } authenticode) + { + AddIfPresent(metadata, $"{prefix}.authenticode.subject", authenticode.Subject); + AddIfPresent(metadata, $"{prefix}.authenticode.issuer", authenticode.Issuer); + AddIfPresent(metadata, $"{prefix}.authenticode.notBefore", FormatTimestamp(authenticode.NotBefore)); + AddIfPresent(metadata, $"{prefix}.authenticode.notAfter", FormatTimestamp(authenticode.NotAfter)); + AddIfPresent(metadata, $"{prefix}.authenticode.thumbprint", authenticode.Thumbprint); + AddIfPresent(metadata, $"{prefix}.authenticode.serialNumber", authenticode.SerialNumber); + } + + if (!string.IsNullOrEmpty(record.RelativePath)) + { + _evidence.Add(new LanguageComponentEvidence( + LanguageEvidenceKind.File, + Source: "assembly", + Locator: record.RelativePath!, + Value: record.AssetPath, + Sha256: record.Sha256)); + } + } + } + private void AddNativeMetadata(ICollection> metadata, IReadOnlyList nativeAssets) { if (nativeAssets.Count == 0) { return; - } - - for (var index = 0; index < nativeAssets.Count; index++) - { - var record = nativeAssets[index]; - var prefix = $"native[{index}]"; - - if (record.UsedByEntrypoint) - { - _usedByEntrypoint = true; - } - - AddIfPresent(metadata, $"{prefix}.assetPath", record.AssetPath); - AddIfPresent(metadata, $"{prefix}.path", record.RelativePath); - AddIndexed(metadata, $"{prefix}.tfm", record.TargetFrameworks); - AddIndexed(metadata, $"{prefix}.rid", record.RuntimeIdentifiers); - AddIfPresent(metadata, $"{prefix}.sha256", record.Sha256); - - if (!string.IsNullOrEmpty(record.RelativePath)) - { - _evidence.Add(new LanguageComponentEvidence( - LanguageEvidenceKind.File, - Source: "native", - Locator: record.RelativePath!, - Value: record.AssetPath, - Sha256: record.Sha256)); - } + } + + for (var index = 0; index < nativeAssets.Count; index++) + { + var record = nativeAssets[index]; + var prefix = $"native[{index}]"; + + if (record.UsedByEntrypoint) + { + _usedByEntrypoint = true; + } + + 
AddIfPresent(metadata, $"{prefix}.assetPath", record.AssetPath); + AddIfPresent(metadata, $"{prefix}.path", record.RelativePath); + AddIndexed(metadata, $"{prefix}.tfm", record.TargetFrameworks); + AddIndexed(metadata, $"{prefix}.rid", record.RuntimeIdentifiers); + AddIfPresent(metadata, $"{prefix}.sha256", record.Sha256); + + if (!string.IsNullOrEmpty(record.RelativePath)) + { + _evidence.Add(new LanguageComponentEvidence( + LanguageEvidenceKind.File, + Source: "native", + Locator: record.RelativePath!, + Value: record.AssetPath, + Sha256: record.Sha256)); + } + } + } + + private List BuildDeclaredEdges() + { + if (_dependencies.Count == 0) + { + return new List(0); + } + + var edges = new List(_dependencies.Count); + foreach (var dependency in _dependencies) + { + if (string.IsNullOrWhiteSpace(dependency)) + { + continue; + } + + edges.Add(new DotNetDependencyEdge( + Target: dependency.Trim().ToLowerInvariant(), + Reason: "declared-dependency", + Confidence: "high", + Source: "deps.json")); + } + + edges.Sort(static (left, right) => string.CompareOrdinal(left.Target, right.Target)); + return edges; + } + + private static void AddEdgeMetadata( + ICollection> metadata, + IReadOnlyList edges, + string prefix) + { + if (edges.Count == 0) + { + return; + } + + for (var index = 0; index < edges.Count; index++) + { + var edge = edges[index]; + metadata.Add(new KeyValuePair($"{prefix}[{index}].target", edge.Target)); + metadata.Add(new KeyValuePair($"{prefix}[{index}].reason", edge.Reason)); + metadata.Add(new KeyValuePair($"{prefix}[{index}].confidence", edge.Confidence)); + metadata.Add(new KeyValuePair($"{prefix}[{index}].source", edge.Source)); + } + } + + private static void AddEntrypointMetadata( + ICollection> metadata, + IReadOnlyList entrypoints) + { + if (metadata is null) + { + throw new ArgumentNullException(nameof(metadata)); + } + + if (entrypoints.Count == 0) + { + return; + } + + for (var index = 0; index < entrypoints.Count; index++) + { + var entrypoint = entrypoints[index]; + var prefix = $"entrypoint[{index}]"; + + metadata.Add(new KeyValuePair($"{prefix}.id", entrypoint.Id)); + metadata.Add(new KeyValuePair($"{prefix}.name", entrypoint.Name)); + metadata.Add(new KeyValuePair($"{prefix}.publishKind", entrypoint.PublishKind.ToString().ToLowerInvariant())); + + AddIndexed(metadata, $"{prefix}.tfm", entrypoint.TargetFrameworks); + AddIndexed(metadata, $"{prefix}.rid", entrypoint.RuntimeIdentifiers); + + if (!string.IsNullOrWhiteSpace(entrypoint.RelativeDepsPath)) + { + metadata.Add(new KeyValuePair($"{prefix}.depsPath", entrypoint.RelativeDepsPath)); + } + + if (!string.IsNullOrWhiteSpace(entrypoint.RelativeRuntimeConfigPath)) + { + metadata.Add(new KeyValuePair($"{prefix}.runtimeConfigPath", entrypoint.RelativeRuntimeConfigPath)); + } } } @@ -583,372 +721,372 @@ internal sealed class DotNetPackageBuilder return fullPath.TrimEnd(Path.DirectorySeparatorChar, Path.AltDirectorySeparatorChar); } - - private void AddRuntimeAssets(DotNetLibrary library) - { - foreach (var asset in library.RuntimeAssets) - { - switch (asset.Kind) - { - case DotNetLibraryAssetKind.Runtime: - AddRuntimeAssemblyAsset(asset, library.PackagePath); - break; - case DotNetLibraryAssetKind.Native: - AddNativeAsset(asset, library.PackagePath); - break; - } - } - } - - private void AddRuntimeAssemblyAsset(DotNetLibraryAsset asset, string? 
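// Reviewer note (not part of the patch): AddEdgeMetadata above flattens edges into
// ordered string pairs (edge[0].target, edge[0].reason, ...) because component metadata
// is a flat key/value list. A consumer-side sketch that folds the keys back into edge
// records — the key shape is taken from this diff, the parsing code is illustrative:

using System;
using System.Collections.Generic;
using System.Text.RegularExpressions;

sealed record Edge(string Target, string Reason, string Confidence, string Source);

static class EdgeMetadataSketch
{
    static void Main()
    {
        var metadata = new List<KeyValuePair<string, string>>
        {
            new("edge[0].target", "newtonsoft.json"),
            new("edge[0].reason", "declared-dependency"),
            new("edge[0].confidence", "high"),
            new("edge[0].source", "deps.json"),
        };

        var buckets = new SortedDictionary<int, Dictionary<string, string>>();
        var pattern = new Regex(@"^edge\[(\d+)\]\.(\w+)$");

        foreach (var (key, value) in metadata)
        {
            var match = pattern.Match(key);
            if (!match.Success)
            {
                continue;
            }

            var index = int.Parse(match.Groups[1].Value);
            if (!buckets.TryGetValue(index, out var bag))
            {
                buckets[index] = bag = new Dictionary<string, string>(StringComparer.Ordinal);
            }

            bag[match.Groups[2].Value] = value;
        }

        foreach (var bag in buckets.Values)
        {
            Console.WriteLine(new Edge(bag["target"], bag["reason"], bag["confidence"], bag["source"]));
        }
    }
}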
packagePath) - { - var key = NormalizePath(asset.RelativePath); - if (string.IsNullOrEmpty(key)) - { - return; - } - - if (!_assemblies.TryGetValue(key, out var aggregate)) - { - aggregate = new AssemblyMetadataAggregate(key); - _assemblies[key] = aggregate; - } - - aggregate.AddManifestData(asset, packagePath); - } - - private void AddNativeAsset(DotNetLibraryAsset asset, string? packagePath) - { - var key = NormalizePath(asset.RelativePath); - if (string.IsNullOrEmpty(key)) - { - return; - } - - if (!_nativeAssets.TryGetValue(key, out var aggregate)) - { - aggregate = new NativeAssetAggregate(key); - _nativeAssets[key] = aggregate; - } - - aggregate.AddManifestData(asset, packagePath); - } - - private void AddRuntimeConfig(DotNetRuntimeConfig runtimeConfig) - { - AddIfPresent(_runtimeConfigPaths, runtimeConfig.RelativePath); - - foreach (var tfm in runtimeConfig.Tfms) - { - AddIfPresent(_runtimeConfigTfms, tfm); - } - - foreach (var framework in runtimeConfig.Frameworks) - { - AddIfPresent(_runtimeConfigFrameworks, framework); - } - - foreach (var entry in runtimeConfig.RuntimeGraph) - { - var value = BuildRuntimeGraphValue(entry.Rid, entry.Fallbacks); - AddIfPresent(_runtimeConfigGraph, value); - } - - _evidence.Add(new LanguageComponentEvidence( - LanguageEvidenceKind.File, - "runtimeconfig.json", - runtimeConfig.RelativePath, - Value: null, - Sha256: null)); - } - - private static void AddIfPresent(ICollection> metadata, string key, string? value) - { - if (metadata is null) - { - throw new ArgumentNullException(nameof(metadata)); - } - - if (string.IsNullOrWhiteSpace(key) || string.IsNullOrWhiteSpace(value)) - { - return; - } - - metadata.Add(new KeyValuePair(key, value)); - } - - private static void AddIfPresent(ISet set, string? value, bool normalizeLower = false) - { - if (set is null) - { - throw new ArgumentNullException(nameof(set)); - } - - if (string.IsNullOrWhiteSpace(value)) - { - return; - } - - var normalized = value.Trim(); - if (normalizeLower) - { - normalized = normalized.ToLowerInvariant(); - } - - set.Add(normalized); - } - - private static void AddIndexed(ICollection> metadata, string prefix, IEnumerable values) - { - if (metadata is null) - { - throw new ArgumentNullException(nameof(metadata)); - } - - if (values is null) - { - return; - } - - var index = 0; - foreach (var value in values) - { - if (string.IsNullOrWhiteSpace(value)) - { - continue; - } - - metadata.Add(new KeyValuePair($"{prefix}[{index++}]", value)); - } - } - - private static string NormalizeRelativePath(string path) - { - if (string.IsNullOrWhiteSpace(path) || path == ".") - { - return "."; - } - - return path.Replace('\\', '/'); - } - - private static string NormalizePath(string? path) - { - if (string.IsNullOrWhiteSpace(path)) - { - return string.Empty; - } - - var normalized = path.Replace('\\', '/').Trim(); - return string.IsNullOrEmpty(normalized) ? string.Empty : normalized; - } - + + private void AddRuntimeAssets(DotNetLibrary library) + { + foreach (var asset in library.RuntimeAssets) + { + switch (asset.Kind) + { + case DotNetLibraryAssetKind.Runtime: + AddRuntimeAssemblyAsset(asset, library.PackagePath); + break; + case DotNetLibraryAssetKind.Native: + AddNativeAsset(asset, library.PackagePath); + break; + } + } + } + + private void AddRuntimeAssemblyAsset(DotNetLibraryAsset asset, string? 
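// Reviewer note (not part of the patch): AddIndexed above assigns indices while walking
// an already-sorted set and skips blank entries, so emitted keys are gapless and a given
// value keeps the same index across runs. Reduced sketch of that behaviour:

using System;
using System.Collections.Generic;

static class IndexedMetadataSketch
{
    static void AddIndexed(ICollection<KeyValuePair<string, string>> metadata, string prefix, IEnumerable<string> values)
    {
        var index = 0;
        foreach (var value in values)
        {
            if (string.IsNullOrWhiteSpace(value))
            {
                continue; // skipped values do not leave holes in the index
            }

            metadata.Add(new KeyValuePair<string, string>($"{prefix}[{index++}]", value));
        }
    }

    static void Main()
    {
        // SortedSet supplies deterministic iteration order regardless of insertion order.
        var tfms = new SortedSet<string>(StringComparer.Ordinal) { "net8.0", "net6.0", "" };
        var metadata = new List<KeyValuePair<string, string>>();

        AddIndexed(metadata, "deps.tfm", tfms);

        foreach (var (key, value) in metadata)
        {
            Console.WriteLine($"{key} = {value}"); // deps.tfm[0] = net6.0, deps.tfm[1] = net8.0
        }
    }
}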
packagePath) + { + var key = NormalizePath(asset.RelativePath); + if (string.IsNullOrEmpty(key)) + { + return; + } + + if (!_assemblies.TryGetValue(key, out var aggregate)) + { + aggregate = new AssemblyMetadataAggregate(key); + _assemblies[key] = aggregate; + } + + aggregate.AddManifestData(asset, packagePath); + } + + private void AddNativeAsset(DotNetLibraryAsset asset, string? packagePath) + { + var key = NormalizePath(asset.RelativePath); + if (string.IsNullOrEmpty(key)) + { + return; + } + + if (!_nativeAssets.TryGetValue(key, out var aggregate)) + { + aggregate = new NativeAssetAggregate(key); + _nativeAssets[key] = aggregate; + } + + aggregate.AddManifestData(asset, packagePath); + } + + private void AddRuntimeConfig(DotNetRuntimeConfig runtimeConfig) + { + AddIfPresent(_runtimeConfigPaths, runtimeConfig.RelativePath); + + foreach (var tfm in runtimeConfig.Tfms) + { + AddIfPresent(_runtimeConfigTfms, tfm); + } + + foreach (var framework in runtimeConfig.Frameworks) + { + AddIfPresent(_runtimeConfigFrameworks, framework); + } + + foreach (var entry in runtimeConfig.RuntimeGraph) + { + var value = BuildRuntimeGraphValue(entry.Rid, entry.Fallbacks); + AddIfPresent(_runtimeConfigGraph, value); + } + + _evidence.Add(new LanguageComponentEvidence( + LanguageEvidenceKind.File, + "runtimeconfig.json", + runtimeConfig.RelativePath, + Value: null, + Sha256: null)); + } + + private static void AddIfPresent(ICollection> metadata, string key, string? value) + { + if (metadata is null) + { + throw new ArgumentNullException(nameof(metadata)); + } + + if (string.IsNullOrWhiteSpace(key) || string.IsNullOrWhiteSpace(value)) + { + return; + } + + metadata.Add(new KeyValuePair(key, value)); + } + + private static void AddIfPresent(ISet set, string? value, bool normalizeLower = false) + { + if (set is null) + { + throw new ArgumentNullException(nameof(set)); + } + + if (string.IsNullOrWhiteSpace(value)) + { + return; + } + + var normalized = value.Trim(); + if (normalizeLower) + { + normalized = normalized.ToLowerInvariant(); + } + + set.Add(normalized); + } + + private static void AddIndexed(ICollection> metadata, string prefix, IEnumerable values) + { + if (metadata is null) + { + throw new ArgumentNullException(nameof(metadata)); + } + + if (values is null) + { + return; + } + + var index = 0; + foreach (var value in values) + { + if (string.IsNullOrWhiteSpace(value)) + { + continue; + } + + metadata.Add(new KeyValuePair($"{prefix}[{index++}]", value)); + } + } + + private static string NormalizeRelativePath(string path) + { + if (string.IsNullOrWhiteSpace(path) || path == ".") + { + return "."; + } + + return path.Replace('\\', '/'); + } + + private static string NormalizePath(string? path) + { + if (string.IsNullOrWhiteSpace(path)) + { + return string.Empty; + } + + var normalized = path.Replace('\\', '/').Trim(); + return string.IsNullOrEmpty(normalized) ? string.Empty : normalized; + } + internal static string ConvertToPlatformPath(string path) => string.IsNullOrEmpty(path) ? "." : path.Replace('/', Path.DirectorySeparatorChar); - - private static string CombineRelative(string basePath, string relativePath) - { - var left = NormalizePath(basePath); - var right = NormalizePath(relativePath); - - if (string.IsNullOrEmpty(left)) - { - return right; - } - - if (string.IsNullOrEmpty(right)) - { - return left; - } - - return NormalizePath($"{left}/{right}"); - } - - private static string? FormatPublicKeyToken(byte[]? 
token) - { - if (token is null || token.Length == 0) - { - return null; - } - - return Convert.ToHexString(token).ToLowerInvariant(); - } - - private static string? BuildStrongName(AssemblyName assemblyName, string? publicKeyToken) - { - if (assemblyName is null || string.IsNullOrWhiteSpace(assemblyName.Name)) - { - return null; - } - - var version = assemblyName.Version?.ToString() ?? "0.0.0.0"; - var culture = string.IsNullOrWhiteSpace(assemblyName.CultureName) ? "neutral" : assemblyName.CultureName; - var token = string.IsNullOrWhiteSpace(publicKeyToken) ? "null" : publicKeyToken; - return $"{assemblyName.Name}, Version={version}, Culture={culture}, PublicKeyToken={token}"; - } - - private static string? NormalizeMetadataValue(string? value) - { - if (string.IsNullOrWhiteSpace(value)) - { - return null; - } - - return value.Trim(); - } - - private static string? FormatTimestamp(DateTimeOffset? value) - { - if (value is null) - { - return null; - } - - return value.Value.UtcDateTime.ToString("yyyy-MM-ddTHH:mm:ss.fffZ", CultureInfo.InvariantCulture); - } - - private static IEnumerable EnumeratePackageBases(string packagePath) - { - if (string.IsNullOrWhiteSpace(packagePath)) - { - yield break; - } - - var normalized = NormalizePath(packagePath); - if (string.IsNullOrEmpty(normalized)) - { - yield break; - } - - yield return normalized; - yield return NormalizePath($".nuget/packages/{normalized}"); - yield return NormalizePath($"packages/{normalized}"); - yield return NormalizePath($"usr/share/dotnet/packs/{normalized}"); - } - - private static string BuildRuntimeGraphValue(string rid, IReadOnlyList fallbacks) - { - if (string.IsNullOrWhiteSpace(rid)) - { - return string.Empty; - } - - if (fallbacks.Count == 0) - { - return rid.Trim(); - } - - var ordered = fallbacks - .Where(static fallback => !string.IsNullOrWhiteSpace(fallback)) - .Select(static fallback => fallback.Trim()) - .Distinct(StringComparer.OrdinalIgnoreCase) - .OrderBy(static fallback => fallback, StringComparer.OrdinalIgnoreCase) - .ToArray(); - - return ordered.Length == 0 - ? rid.Trim() - : $"{rid.Trim()}=>{string.Join(';', ordered)}"; - } - - private sealed class AssemblyMetadataAggregate - { - private readonly string _assetRelativePath; - private readonly SortedSet _tfms = new(StringComparer.OrdinalIgnoreCase); - private readonly SortedSet _runtimeIdentifiers = new(StringComparer.OrdinalIgnoreCase); - private readonly SortedSet _packagePaths = new(StringComparer.Ordinal); - private string? _assemblyVersion; - private string? _fileVersion; - - public AssemblyMetadataAggregate(string assetRelativePath) - { - _assetRelativePath = NormalizePath(assetRelativePath); - } - - public string AssetRelativePath => _assetRelativePath; - - public void AddManifestData(DotNetLibraryAsset asset, string? 
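// Reviewer note (not part of the patch): EnumeratePackageBases above expands a single
// packagePath from deps.json into several well-known on-disk layouts, and the resolver
// takes the first candidate that exists. Stand-alone sketch of that expansion — the
// roots mirror the diff, the asset suffix is a placeholder:

using System;
using System.Collections.Generic;

static class PackageBaseSketch
{
    static IEnumerable<string> EnumeratePackageBases(string packagePath)
    {
        if (string.IsNullOrWhiteSpace(packagePath))
        {
            yield break;
        }

        var normalized = packagePath.Replace('\\', '/').Trim();
        yield return normalized;                             // path as written in deps.json
        yield return $".nuget/packages/{normalized}";        // user-level NuGet cache
        yield return $"packages/{normalized}";               // solution-local packages folder
        yield return $"usr/share/dotnet/packs/{normalized}"; // SDK pack layout
    }

    static void Main()
    {
        foreach (var basePath in EnumeratePackageBases("newtonsoft.json/13.0.3"))
        {
            Console.WriteLine($"{basePath}/lib/net6.0/Newtonsoft.Json.dll");
        }
    }
}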
packagePath) - { - if (!string.IsNullOrWhiteSpace(asset.TargetFramework)) - { - _tfms.Add(asset.TargetFramework); - } - - if (!string.IsNullOrWhiteSpace(asset.RuntimeIdentifier)) - { - _runtimeIdentifiers.Add(asset.RuntimeIdentifier); - } - - if (!string.IsNullOrWhiteSpace(asset.AssemblyVersion) && string.IsNullOrEmpty(_assemblyVersion)) - { - _assemblyVersion = asset.AssemblyVersion; - } - - if (!string.IsNullOrWhiteSpace(asset.FileVersion) && string.IsNullOrEmpty(_fileVersion)) - { - _fileVersion = asset.FileVersion; - } - - if (!string.IsNullOrWhiteSpace(packagePath)) - { - var normalized = NormalizePath(packagePath); - if (!string.IsNullOrEmpty(normalized)) - { - _packagePaths.Add(normalized); - } - } - } - - public AssemblyMetadataResult Build(LanguageAnalyzerContext context, IDotNetAuthenticodeInspector? authenticodeInspector, CancellationToken cancellationToken) - { - var fileMetadata = ResolveFileMetadata(context, authenticodeInspector, cancellationToken); - - var assemblyName = fileMetadata?.AssemblyName; - var versionInfo = fileMetadata?.FileVersionInfo; - - var assemblyVersion = assemblyName?.Version?.ToString() ?? _assemblyVersion; - var fileVersion = !string.IsNullOrWhiteSpace(versionInfo?.FileVersion) ? versionInfo?.FileVersion : _fileVersion; - var usedByEntrypoint = fileMetadata?.UsedByEntrypoint ?? false; - - string? publicKeyToken = null; - string? strongName = null; - if (assemblyName is not null) - { - publicKeyToken = FormatPublicKeyToken(assemblyName.GetPublicKeyToken()); - strongName = BuildStrongName(assemblyName, publicKeyToken); - } - - return new AssemblyMetadataResult( - AssetPath: _assetRelativePath, - RelativePath: fileMetadata?.RelativePath, - TargetFrameworks: _tfms.ToArray(), - RuntimeIdentifiers: _runtimeIdentifiers.ToArray(), - AssemblyVersion: assemblyVersion, - FileVersion: fileVersion, - PublicKeyToken: publicKeyToken, - StrongName: strongName, - CompanyName: NormalizeMetadataValue(versionInfo?.CompanyName), - ProductName: NormalizeMetadataValue(versionInfo?.ProductName), - ProductVersion: NormalizeMetadataValue(versionInfo?.ProductVersion), - FileDescription: NormalizeMetadataValue(versionInfo?.FileDescription), - Sha256: fileMetadata?.Sha256, - Authenticode: fileMetadata?.Authenticode, - UsedByEntrypoint: usedByEntrypoint); - } - - private AssemblyFileMetadata? ResolveFileMetadata(LanguageAnalyzerContext context, IDotNetAuthenticodeInspector? authenticodeInspector, CancellationToken cancellationToken) - { - var candidates = BuildCandidateRelativePaths(); - - foreach (var candidate in candidates) - { - cancellationToken.ThrowIfCancellationRequested(); - - var absolutePath = context.ResolvePath(ConvertToPlatformPath(candidate)); - if (!File.Exists(absolutePath)) - { - continue; - } - + + private static string CombineRelative(string basePath, string relativePath) + { + var left = NormalizePath(basePath); + var right = NormalizePath(relativePath); + + if (string.IsNullOrEmpty(left)) + { + return right; + } + + if (string.IsNullOrEmpty(right)) + { + return left; + } + + return NormalizePath($"{left}/{right}"); + } + + private static string? FormatPublicKeyToken(byte[]? token) + { + if (token is null || token.Length == 0) + { + return null; + } + + return Convert.ToHexString(token).ToLowerInvariant(); + } + + private static string? BuildStrongName(AssemblyName assemblyName, string? publicKeyToken) + { + if (assemblyName is null || string.IsNullOrWhiteSpace(assemblyName.Name)) + { + return null; + } + + var version = assemblyName.Version?.ToString() ?? 
"0.0.0.0"; + var culture = string.IsNullOrWhiteSpace(assemblyName.CultureName) ? "neutral" : assemblyName.CultureName; + var token = string.IsNullOrWhiteSpace(publicKeyToken) ? "null" : publicKeyToken; + return $"{assemblyName.Name}, Version={version}, Culture={culture}, PublicKeyToken={token}"; + } + + private static string? NormalizeMetadataValue(string? value) + { + if (string.IsNullOrWhiteSpace(value)) + { + return null; + } + + return value.Trim(); + } + + private static string? FormatTimestamp(DateTimeOffset? value) + { + if (value is null) + { + return null; + } + + return value.Value.UtcDateTime.ToString("yyyy-MM-ddTHH:mm:ss.fffZ", CultureInfo.InvariantCulture); + } + + private static IEnumerable EnumeratePackageBases(string packagePath) + { + if (string.IsNullOrWhiteSpace(packagePath)) + { + yield break; + } + + var normalized = NormalizePath(packagePath); + if (string.IsNullOrEmpty(normalized)) + { + yield break; + } + + yield return normalized; + yield return NormalizePath($".nuget/packages/{normalized}"); + yield return NormalizePath($"packages/{normalized}"); + yield return NormalizePath($"usr/share/dotnet/packs/{normalized}"); + } + + private static string BuildRuntimeGraphValue(string rid, IReadOnlyList fallbacks) + { + if (string.IsNullOrWhiteSpace(rid)) + { + return string.Empty; + } + + if (fallbacks.Count == 0) + { + return rid.Trim(); + } + + var ordered = fallbacks + .Where(static fallback => !string.IsNullOrWhiteSpace(fallback)) + .Select(static fallback => fallback.Trim()) + .Distinct(StringComparer.OrdinalIgnoreCase) + .OrderBy(static fallback => fallback, StringComparer.OrdinalIgnoreCase) + .ToArray(); + + return ordered.Length == 0 + ? rid.Trim() + : $"{rid.Trim()}=>{string.Join(';', ordered)}"; + } + + private sealed class AssemblyMetadataAggregate + { + private readonly string _assetRelativePath; + private readonly SortedSet _tfms = new(StringComparer.OrdinalIgnoreCase); + private readonly SortedSet _runtimeIdentifiers = new(StringComparer.OrdinalIgnoreCase); + private readonly SortedSet _packagePaths = new(StringComparer.Ordinal); + private string? _assemblyVersion; + private string? _fileVersion; + + public AssemblyMetadataAggregate(string assetRelativePath) + { + _assetRelativePath = NormalizePath(assetRelativePath); + } + + public string AssetRelativePath => _assetRelativePath; + + public void AddManifestData(DotNetLibraryAsset asset, string? packagePath) + { + if (!string.IsNullOrWhiteSpace(asset.TargetFramework)) + { + _tfms.Add(asset.TargetFramework); + } + + if (!string.IsNullOrWhiteSpace(asset.RuntimeIdentifier)) + { + _runtimeIdentifiers.Add(asset.RuntimeIdentifier); + } + + if (!string.IsNullOrWhiteSpace(asset.AssemblyVersion) && string.IsNullOrEmpty(_assemblyVersion)) + { + _assemblyVersion = asset.AssemblyVersion; + } + + if (!string.IsNullOrWhiteSpace(asset.FileVersion) && string.IsNullOrEmpty(_fileVersion)) + { + _fileVersion = asset.FileVersion; + } + + if (!string.IsNullOrWhiteSpace(packagePath)) + { + var normalized = NormalizePath(packagePath); + if (!string.IsNullOrEmpty(normalized)) + { + _packagePaths.Add(normalized); + } + } + } + + public AssemblyMetadataResult Build(LanguageAnalyzerContext context, IDotNetAuthenticodeInspector? 
authenticodeInspector, CancellationToken cancellationToken) + { + var fileMetadata = ResolveFileMetadata(context, authenticodeInspector, cancellationToken); + + var assemblyName = fileMetadata?.AssemblyName; + var versionInfo = fileMetadata?.FileVersionInfo; + + var assemblyVersion = assemblyName?.Version?.ToString() ?? _assemblyVersion; + var fileVersion = !string.IsNullOrWhiteSpace(versionInfo?.FileVersion) ? versionInfo?.FileVersion : _fileVersion; + var usedByEntrypoint = fileMetadata?.UsedByEntrypoint ?? false; + + string? publicKeyToken = null; + string? strongName = null; + if (assemblyName is not null) + { + publicKeyToken = FormatPublicKeyToken(assemblyName.GetPublicKeyToken()); + strongName = BuildStrongName(assemblyName, publicKeyToken); + } + + return new AssemblyMetadataResult( + AssetPath: _assetRelativePath, + RelativePath: fileMetadata?.RelativePath, + TargetFrameworks: _tfms.ToArray(), + RuntimeIdentifiers: _runtimeIdentifiers.ToArray(), + AssemblyVersion: assemblyVersion, + FileVersion: fileVersion, + PublicKeyToken: publicKeyToken, + StrongName: strongName, + CompanyName: NormalizeMetadataValue(versionInfo?.CompanyName), + ProductName: NormalizeMetadataValue(versionInfo?.ProductName), + ProductVersion: NormalizeMetadataValue(versionInfo?.ProductVersion), + FileDescription: NormalizeMetadataValue(versionInfo?.FileDescription), + Sha256: fileMetadata?.Sha256, + Authenticode: fileMetadata?.Authenticode, + UsedByEntrypoint: usedByEntrypoint); + } + + private AssemblyFileMetadata? ResolveFileMetadata(LanguageAnalyzerContext context, IDotNetAuthenticodeInspector? authenticodeInspector, CancellationToken cancellationToken) + { + var candidates = BuildCandidateRelativePaths(); + + foreach (var candidate in candidates) + { + cancellationToken.ThrowIfCancellationRequested(); + + var absolutePath = context.ResolvePath(ConvertToPlatformPath(candidate)); + if (!File.Exists(absolutePath)) + { + continue; + } + try { var relativePath = NormalizePath(context.GetRelativePath(absolutePath)); @@ -958,141 +1096,141 @@ internal sealed class DotNetPackageBuilder DotNetAuthenticodeMetadata? 
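// Reviewer note (not part of the patch): the hunk header above elides the helper that
// fills the per-asset sha256 value. A streaming implementation along these lines avoids
// loading large assemblies into memory, and lowercase hex matches the formatting used
// elsewhere in this diff — treat it as a sketch, not the actual helper:

using System;
using System.IO;
using System.Security.Cryptography;

static class FileHashSketch
{
    static string ComputeSha256(string path)
    {
        using var stream = new FileStream(
            path, FileMode.Open, FileAccess.Read, FileShare.Read,
            bufferSize: 81920, FileOptions.SequentialScan);

        return Convert.ToHexString(SHA256.HashData(stream)).ToLowerInvariant();
    }

    static void Main()
    {
        Console.WriteLine(ComputeSha256("/scan/root/app/MyLib.dll")); // placeholder path
    }
}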
authenticode = null; if (authenticodeInspector is not null) - { - try - { - authenticode = authenticodeInspector.TryInspect(absolutePath, cancellationToken); - } - catch - { - authenticode = null; - } - } - - var usedByEntrypoint = context.UsageHints.IsPathUsed(absolutePath); - - return new AssemblyFileMetadata( - AbsolutePath: absolutePath, - RelativePath: relativePath, - Sha256: sha256, - AssemblyName: assemblyName, - FileVersionInfo: versionInfo, - Authenticode: authenticode, - UsedByEntrypoint: usedByEntrypoint); - } - catch (IOException) - { - continue; - } - catch (UnauthorizedAccessException) - { - continue; - } - catch (BadImageFormatException) - { - continue; - } - } - - return null; - } - - private IEnumerable BuildCandidateRelativePaths() - { - var seen = new HashSet(StringComparer.OrdinalIgnoreCase); - - if (_packagePaths.Count > 0) - { - foreach (var packagePath in _packagePaths) - { - foreach (var basePath in EnumeratePackageBases(packagePath)) - { - var combined = CombineRelative(basePath, _assetRelativePath); - if (string.IsNullOrEmpty(combined)) - { - continue; - } - - if (seen.Add(combined)) - { - yield return combined; - } - } - } - } - - if (seen.Add(_assetRelativePath)) - { - yield return _assetRelativePath; - } - } - } - - private sealed class NativeAssetAggregate - { - private readonly string _assetRelativePath; - private readonly SortedSet _tfms = new(StringComparer.OrdinalIgnoreCase); - private readonly SortedSet _runtimeIdentifiers = new(StringComparer.OrdinalIgnoreCase); - private readonly SortedSet _packagePaths = new(StringComparer.Ordinal); - - public NativeAssetAggregate(string assetRelativePath) - { - _assetRelativePath = NormalizePath(assetRelativePath); - } - - public string AssetRelativePath => _assetRelativePath; - - public void AddManifestData(DotNetLibraryAsset asset, string? packagePath) - { - if (!string.IsNullOrWhiteSpace(asset.TargetFramework)) - { - _tfms.Add(asset.TargetFramework); - } - - if (!string.IsNullOrWhiteSpace(asset.RuntimeIdentifier)) - { - _runtimeIdentifiers.Add(asset.RuntimeIdentifier); - } - - if (!string.IsNullOrWhiteSpace(packagePath)) - { - var normalized = NormalizePath(packagePath); - if (!string.IsNullOrEmpty(normalized)) - { - _packagePaths.Add(normalized); - } - } - } - - public NativeAssetResult Build(LanguageAnalyzerContext context, CancellationToken cancellationToken) - { - var fileMetadata = ResolveFileMetadata(context, cancellationToken); - - return new NativeAssetResult( - AssetPath: _assetRelativePath, - RelativePath: fileMetadata?.RelativePath, - TargetFrameworks: _tfms.ToArray(), - RuntimeIdentifiers: _runtimeIdentifiers.ToArray(), - Sha256: fileMetadata?.Sha256, - UsedByEntrypoint: fileMetadata?.UsedByEntrypoint ?? false); - } - - private NativeAssetFileMetadata? 
ResolveFileMetadata(LanguageAnalyzerContext context, CancellationToken cancellationToken) - { - var candidates = BuildCandidateRelativePaths(); - - foreach (var candidate in candidates) - { - cancellationToken.ThrowIfCancellationRequested(); - - var absolutePath = context.ResolvePath(ConvertToPlatformPath(candidate)); - var usedByEntrypoint = context.UsageHints.IsPathUsed(absolutePath); - - if (!File.Exists(absolutePath)) - { - continue; - } - + { + try + { + authenticode = authenticodeInspector.TryInspect(absolutePath, cancellationToken); + } + catch + { + authenticode = null; + } + } + + var usedByEntrypoint = context.UsageHints.IsPathUsed(absolutePath); + + return new AssemblyFileMetadata( + AbsolutePath: absolutePath, + RelativePath: relativePath, + Sha256: sha256, + AssemblyName: assemblyName, + FileVersionInfo: versionInfo, + Authenticode: authenticode, + UsedByEntrypoint: usedByEntrypoint); + } + catch (IOException) + { + continue; + } + catch (UnauthorizedAccessException) + { + continue; + } + catch (BadImageFormatException) + { + continue; + } + } + + return null; + } + + private IEnumerable BuildCandidateRelativePaths() + { + var seen = new HashSet(StringComparer.OrdinalIgnoreCase); + + if (_packagePaths.Count > 0) + { + foreach (var packagePath in _packagePaths) + { + foreach (var basePath in EnumeratePackageBases(packagePath)) + { + var combined = CombineRelative(basePath, _assetRelativePath); + if (string.IsNullOrEmpty(combined)) + { + continue; + } + + if (seen.Add(combined)) + { + yield return combined; + } + } + } + } + + if (seen.Add(_assetRelativePath)) + { + yield return _assetRelativePath; + } + } + } + + private sealed class NativeAssetAggregate + { + private readonly string _assetRelativePath; + private readonly SortedSet _tfms = new(StringComparer.OrdinalIgnoreCase); + private readonly SortedSet _runtimeIdentifiers = new(StringComparer.OrdinalIgnoreCase); + private readonly SortedSet _packagePaths = new(StringComparer.Ordinal); + + public NativeAssetAggregate(string assetRelativePath) + { + _assetRelativePath = NormalizePath(assetRelativePath); + } + + public string AssetRelativePath => _assetRelativePath; + + public void AddManifestData(DotNetLibraryAsset asset, string? packagePath) + { + if (!string.IsNullOrWhiteSpace(asset.TargetFramework)) + { + _tfms.Add(asset.TargetFramework); + } + + if (!string.IsNullOrWhiteSpace(asset.RuntimeIdentifier)) + { + _runtimeIdentifiers.Add(asset.RuntimeIdentifier); + } + + if (!string.IsNullOrWhiteSpace(packagePath)) + { + var normalized = NormalizePath(packagePath); + if (!string.IsNullOrEmpty(normalized)) + { + _packagePaths.Add(normalized); + } + } + } + + public NativeAssetResult Build(LanguageAnalyzerContext context, CancellationToken cancellationToken) + { + var fileMetadata = ResolveFileMetadata(context, cancellationToken); + + return new NativeAssetResult( + AssetPath: _assetRelativePath, + RelativePath: fileMetadata?.RelativePath, + TargetFrameworks: _tfms.ToArray(), + RuntimeIdentifiers: _runtimeIdentifiers.ToArray(), + Sha256: fileMetadata?.Sha256, + UsedByEntrypoint: fileMetadata?.UsedByEntrypoint ?? false); + } + + private NativeAssetFileMetadata? 
ResolveFileMetadata(LanguageAnalyzerContext context, CancellationToken cancellationToken) + { + var candidates = BuildCandidateRelativePaths(); + + foreach (var candidate in candidates) + { + cancellationToken.ThrowIfCancellationRequested(); + + var absolutePath = context.ResolvePath(ConvertToPlatformPath(candidate)); + var usedByEntrypoint = context.UsageHints.IsPathUsed(absolutePath); + + if (!File.Exists(absolutePath)) + { + continue; + } + try { var relativePath = NormalizePath(context.GetRelativePath(absolutePath)); @@ -1100,158 +1238,158 @@ internal sealed class DotNetPackageBuilder return new NativeAssetFileMetadata( AbsolutePath: absolutePath, RelativePath: relativePath, - Sha256: sha256, - UsedByEntrypoint: usedByEntrypoint); - } - catch (IOException) - { - continue; - } - catch (UnauthorizedAccessException) - { - continue; - } - } - - return null; - } - - private IEnumerable BuildCandidateRelativePaths() - { - var seen = new HashSet(StringComparer.OrdinalIgnoreCase); - - if (_packagePaths.Count > 0) - { - foreach (var packagePath in _packagePaths) - { - foreach (var basePath in EnumeratePackageBases(packagePath)) - { - var combined = CombineRelative(basePath, _assetRelativePath); - if (string.IsNullOrEmpty(combined)) - { - continue; - } - - if (seen.Add(combined)) - { - yield return combined; - } - } - } - } - - if (seen.Add(_assetRelativePath)) - { - yield return _assetRelativePath; - } - } - } - - private sealed record AssemblyMetadataResult( - string AssetPath, - string? RelativePath, - IReadOnlyList TargetFrameworks, - IReadOnlyList RuntimeIdentifiers, - string? AssemblyVersion, - string? FileVersion, - string? PublicKeyToken, - string? StrongName, - string? CompanyName, - string? ProductName, - string? ProductVersion, - string? FileDescription, - string? Sha256, - DotNetAuthenticodeMetadata? Authenticode, - bool UsedByEntrypoint); - - private sealed record NativeAssetResult( - string AssetPath, - string? RelativePath, - IReadOnlyList TargetFrameworks, - IReadOnlyList RuntimeIdentifiers, - string? Sha256, - bool UsedByEntrypoint); - - private sealed record AssemblyFileMetadata( - string AbsolutePath, - string? RelativePath, - string? Sha256, - AssemblyName? AssemblyName, - FileVersionInfo? FileVersionInfo, - DotNetAuthenticodeMetadata? Authenticode, - bool UsedByEntrypoint); - - private sealed record NativeAssetFileMetadata( - string AbsolutePath, - string? RelativePath, - string? Sha256, - bool UsedByEntrypoint); - - private sealed class LanguageComponentEvidenceComparer : IEqualityComparer - { - public bool Equals(LanguageComponentEvidence? x, LanguageComponentEvidence? 
y) - { - if (ReferenceEquals(x, y)) - { - return true; - } - - if (x is null || y is null) - { - return false; - } - - return x.Kind == y.Kind && - string.Equals(x.Source, y.Source, StringComparison.Ordinal) && - string.Equals(x.Locator, y.Locator, StringComparison.Ordinal) && - string.Equals(x.Value, y.Value, StringComparison.Ordinal) && - string.Equals(x.Sha256, y.Sha256, StringComparison.Ordinal); - } - - public int GetHashCode(LanguageComponentEvidence obj) - { - var hash = new HashCode(); - hash.Add(obj.Kind); - hash.Add(obj.Source, StringComparer.Ordinal); - hash.Add(obj.Locator, StringComparer.Ordinal); - hash.Add(obj.Value, StringComparer.Ordinal); - hash.Add(obj.Sha256, StringComparer.Ordinal); - return hash.ToHashCode(); - } - } -} - -internal sealed class DotNetPackage -{ - public DotNetPackage( - string name, - string normalizedId, - string version, - IReadOnlyList> metadata, - IReadOnlyCollection evidence, - bool usedByEntrypoint) - { - Name = string.IsNullOrWhiteSpace(name) ? normalizedId : name.Trim(); - NormalizedId = normalizedId; - Version = version ?? string.Empty; - Metadata = metadata ?? Array.Empty>(); - Evidence = evidence ?? Array.Empty(); - UsedByEntrypoint = usedByEntrypoint; - } - - public string Name { get; } - - public string NormalizedId { get; } - - public string Version { get; } - - public IReadOnlyList> Metadata { get; } - - public IReadOnlyCollection Evidence { get; } - - public bool UsedByEntrypoint { get; } - - public string Purl => $"pkg:nuget/{NormalizedId}@{Version}"; - - public string ComponentKey => $"purl::{Purl}"; -} + Sha256: sha256, + UsedByEntrypoint: usedByEntrypoint); + } + catch (IOException) + { + continue; + } + catch (UnauthorizedAccessException) + { + continue; + } + } + + return null; + } + + private IEnumerable BuildCandidateRelativePaths() + { + var seen = new HashSet(StringComparer.OrdinalIgnoreCase); + + if (_packagePaths.Count > 0) + { + foreach (var packagePath in _packagePaths) + { + foreach (var basePath in EnumeratePackageBases(packagePath)) + { + var combined = CombineRelative(basePath, _assetRelativePath); + if (string.IsNullOrEmpty(combined)) + { + continue; + } + + if (seen.Add(combined)) + { + yield return combined; + } + } + } + } + + if (seen.Add(_assetRelativePath)) + { + yield return _assetRelativePath; + } + } + } + + private sealed record AssemblyMetadataResult( + string AssetPath, + string? RelativePath, + IReadOnlyList TargetFrameworks, + IReadOnlyList RuntimeIdentifiers, + string? AssemblyVersion, + string? FileVersion, + string? PublicKeyToken, + string? StrongName, + string? CompanyName, + string? ProductName, + string? ProductVersion, + string? FileDescription, + string? Sha256, + DotNetAuthenticodeMetadata? Authenticode, + bool UsedByEntrypoint); + + private sealed record NativeAssetResult( + string AssetPath, + string? RelativePath, + IReadOnlyList TargetFrameworks, + IReadOnlyList RuntimeIdentifiers, + string? Sha256, + bool UsedByEntrypoint); + + private sealed record AssemblyFileMetadata( + string AbsolutePath, + string? RelativePath, + string? Sha256, + AssemblyName? AssemblyName, + FileVersionInfo? FileVersionInfo, + DotNetAuthenticodeMetadata? Authenticode, + bool UsedByEntrypoint); + + private sealed record NativeAssetFileMetadata( + string AbsolutePath, + string? RelativePath, + string? Sha256, + bool UsedByEntrypoint); + + private sealed class LanguageComponentEvidenceComparer : IEqualityComparer + { + public bool Equals(LanguageComponentEvidence? x, LanguageComponentEvidence? 
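// Reviewer note (not part of the patch): ComponentKey above is a package-url wrapped in
// a "purl::" prefix; the id reaching it has already been lowercased by NormalizeId, so
// keys compare stably under StringComparer.Ordinal. The version is interpolated verbatim
// — strictly purl-conformant output would percent-encode reserved characters, which this
// sketch (like the diff) does not attempt:

using System;

sealed record DotNetPackageId(string NormalizedId, string Version)
{
    public string Purl => $"pkg:nuget/{NormalizedId}@{Version}";

    public string ComponentKey => $"purl::{Purl}";
}

static class PurlSketch
{
    static void Main()
    {
        var package = new DotNetPackageId("newtonsoft.json", "13.0.3");
        Console.WriteLine(package.Purl);         // pkg:nuget/newtonsoft.json@13.0.3
        Console.WriteLine(package.ComponentKey); // purl::pkg:nuget/newtonsoft.json@13.0.3
    }
}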
y) + { + if (ReferenceEquals(x, y)) + { + return true; + } + + if (x is null || y is null) + { + return false; + } + + return x.Kind == y.Kind && + string.Equals(x.Source, y.Source, StringComparison.Ordinal) && + string.Equals(x.Locator, y.Locator, StringComparison.Ordinal) && + string.Equals(x.Value, y.Value, StringComparison.Ordinal) && + string.Equals(x.Sha256, y.Sha256, StringComparison.Ordinal); + } + + public int GetHashCode(LanguageComponentEvidence obj) + { + var hash = new HashCode(); + hash.Add(obj.Kind); + hash.Add(obj.Source, StringComparer.Ordinal); + hash.Add(obj.Locator, StringComparer.Ordinal); + hash.Add(obj.Value, StringComparer.Ordinal); + hash.Add(obj.Sha256, StringComparer.Ordinal); + return hash.ToHashCode(); + } + } +} + +internal sealed class DotNetPackage +{ + public DotNetPackage( + string name, + string normalizedId, + string version, + IReadOnlyList> metadata, + IReadOnlyCollection evidence, + bool usedByEntrypoint) + { + Name = string.IsNullOrWhiteSpace(name) ? normalizedId : name.Trim(); + NormalizedId = normalizedId; + Version = version ?? string.Empty; + Metadata = metadata ?? Array.Empty>(); + Evidence = evidence ?? Array.Empty(); + UsedByEntrypoint = usedByEntrypoint; + } + + public string Name { get; } + + public string NormalizedId { get; } + + public string Version { get; } + + public IReadOnlyList> Metadata { get; } + + public IReadOnlyCollection Evidence { get; } + + public bool UsedByEntrypoint { get; } + + public string Purl => $"pkg:nuget/{NormalizedId}@{Version}"; + + public string ComponentKey => $"purl::{Purl}"; +} diff --git a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet/Internal/DotNetDependencyEdge.cs b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet/Internal/DotNetDependencyEdge.cs new file mode 100644 index 000000000..23518df5d --- /dev/null +++ b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet/Internal/DotNetDependencyEdge.cs @@ -0,0 +1,7 @@ +namespace StellaOps.Scanner.Analyzers.Lang.DotNet.Internal; + +internal sealed record DotNetDependencyEdge( + string Target, + string Reason, + string Confidence, + string Source); diff --git a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet/Internal/DotNetRuntimeEvidenceLoader.cs b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet/Internal/DotNetRuntimeEvidenceLoader.cs new file mode 100644 index 000000000..4961ad0c1 --- /dev/null +++ b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.DotNet/Internal/DotNetRuntimeEvidenceLoader.cs @@ -0,0 +1,110 @@ +using System.Text.Json; + +namespace StellaOps.Scanner.Analyzers.Lang.DotNet.Internal; + +internal static class DotNetRuntimeEvidenceLoader +{ + public static IReadOnlyDictionary> Load( + LanguageAnalyzerContext context, + DotNetAnalyzerOptions options, + CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(context); + ArgumentNullException.ThrowIfNull(options); + + if (string.IsNullOrWhiteSpace(options.RuntimeEvidencePath)) + { + return new Dictionary>(StringComparer.OrdinalIgnoreCase); + } + + var absolute = context.ResolvePath(options.RuntimeEvidencePath); + if (!File.Exists(absolute)) + { + return new Dictionary>(StringComparer.OrdinalIgnoreCase); + } + + var edges = new Dictionary>(StringComparer.OrdinalIgnoreCase); + var confidence = string.IsNullOrWhiteSpace(options.RuntimeEvidenceConfidence) ? 
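// Reviewer note (not part of the patch): DotNetRuntimeEvidenceLoader below expects
// newline-delimited JSON — one edge object per line, with "package" and "target"
// required and "reason"/"confidence"/"source" optional with defaults. A writer-side
// sketch that produces a compatible file; the field names come from the loader, all
// values are invented:

using System.IO;
using System.Text.Json;

static class RuntimeEvidenceWriterSketch
{
    static void Main()
    {
        var edges = new[]
        {
            new { package = "Serilog", target = "serilog.sinks.console", reason = "assembly-load", confidence = "medium", source = "runtime-trace" },
            new { package = "Serilog", target = "serilog.formatting.compact", reason = "assembly-load", confidence = "medium", source = "runtime-trace" },
        };

        using var writer = new StreamWriter("runtime-evidence.ndjson");
        foreach (var edge in edges)
        {
            // One compact JSON object per line; the loader skips blank lines and
            // silently drops lines that fail to parse.
            writer.WriteLine(JsonSerializer.Serialize(edge));
        }
    }
}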
"medium" : options.RuntimeEvidenceConfidence!.Trim(); + + foreach (var line in File.ReadLines(absolute)) + { + cancellationToken.ThrowIfCancellationRequested(); + + if (string.IsNullOrWhiteSpace(line)) + { + continue; + } + + try + { + using var document = JsonDocument.Parse(line); + var root = document.RootElement; + + if (!root.TryGetProperty("package", out var packageElement) || packageElement.ValueKind != JsonValueKind.String) + { + continue; + } + + var packageId = packageElement.GetString(); + if (string.IsNullOrWhiteSpace(packageId)) + { + continue; + } + + if (!root.TryGetProperty("target", out var targetElement) || targetElement.ValueKind != JsonValueKind.String) + { + continue; + } + + var target = targetElement.GetString(); + if (string.IsNullOrWhiteSpace(target)) + { + continue; + } + + var reason = root.TryGetProperty("reason", out var reasonElement) && reasonElement.ValueKind == JsonValueKind.String + ? reasonElement.GetString() + : "runtime"; + + var conf = root.TryGetProperty("confidence", out var confidenceElement) && confidenceElement.ValueKind == JsonValueKind.String + ? confidenceElement.GetString() + : confidence; + + var source = root.TryGetProperty("source", out var sourceElement) && sourceElement.ValueKind == JsonValueKind.String + ? sourceElement.GetString() + : "runtime-evidence"; + + var edge = new DotNetDependencyEdge( + Target: target!.Trim(), + Reason: string.IsNullOrWhiteSpace(reason) ? "runtime" : reason!.Trim(), + Confidence: string.IsNullOrWhiteSpace(conf) ? confidence : conf!.Trim(), + Source: string.IsNullOrWhiteSpace(source) ? "runtime-evidence" : source!.Trim()); + + if (!edges.TryGetValue(packageId.Trim().ToLowerInvariant(), out var list)) + { + list = new List(); + edges[packageId.Trim().ToLowerInvariant()] = list; + } + + list.Add(edge); + } + catch (JsonException) + { + continue; + } + catch (IOException) + { + continue; + } + catch (UnauthorizedAccessException) + { + continue; + } + } + + return edges.ToDictionary( + kvp => kvp.Key, + kvp => (IReadOnlyList)kvp.Value.OrderBy(edge => edge.Target, StringComparer.OrdinalIgnoreCase).ToArray(), + StringComparer.OrdinalIgnoreCase); + } +} diff --git a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node/Internal/NodeDeclarationKeyBuilder.cs b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node/Internal/NodeDeclarationKeyBuilder.cs new file mode 100644 index 000000000..98345340c --- /dev/null +++ b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node/Internal/NodeDeclarationKeyBuilder.cs @@ -0,0 +1,15 @@ +namespace StellaOps.Scanner.Analyzers.Lang.Node.Internal; + +internal static class NodeDeclarationKeyBuilder +{ + public static string Build(string name, string? 
+ public static string Build(string name, string? version) + { + if (string.IsNullOrWhiteSpace(name) || string.IsNullOrWhiteSpace(version)) + { + return string.Empty; + } + + return $"{name.Trim().ToLowerInvariant()}@{version.Trim()}"; + } +} + diff --git a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node/Internal/NodePackageCollector.cs b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node/Internal/NodePackageCollector.cs index b2e8f331b..84be9047c 100644 --- a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node/Internal/NodePackageCollector.cs +++ b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node/Internal/NodePackageCollector.cs @@ -162,25 +162,81 @@ internal static class NodePackageCollector return; } + var componentIndex = packages.ToDictionary(static p => p.ComponentKey, StringComparer.Ordinal); + foreach (var package in pnpPackages) { cancellationToken.ThrowIfCancellationRequested(); + if (componentIndex.TryGetValue(package.ComponentKey, out var existing)) + { + var merged = MergePackageMetadata(existing, package); + var existingIndex = packages.FindIndex(p => string.Equals(p.ComponentKey, existing.ComponentKey, StringComparison.Ordinal)); + if (existingIndex >= 0) + { + packages[existingIndex] = merged; + } + + componentIndex[merged.ComponentKey] = merged; + + if (!string.IsNullOrEmpty(merged.RelativePathNormalized)) + { + visited.Add(merged.RelativePathNormalized); + } + + continue; + } + var key = package.RelativePathNormalized; if (!string.IsNullOrEmpty(key) && !visited.Add(key)) { continue; } - if (packages.Any(p => string.Equals(p.ComponentKey, package.ComponentKey, StringComparison.Ordinal))) - { - continue; - } - packages.Add(package); + componentIndex[package.ComponentKey] = package; } } + private static NodePackage MergePackageMetadata(NodePackage existing, NodePackage pnpPackage) + { + var merged = new NodePackage( + name: existing.Name, + version: existing.Version, + relativePath: string.IsNullOrWhiteSpace(pnpPackage.RelativePathNormalized) ? existing.RelativePath : pnpPackage.RelativePathNormalized, + packageJsonLocator: string.IsNullOrWhiteSpace(pnpPackage.PackageJsonLocator) ? existing.PackageJsonLocator : pnpPackage.PackageJsonLocator, + isPrivate: pnpPackage.IsPrivate ?? existing.IsPrivate, + lockEntry: existing.LockEntry, + isWorkspaceMember: existing.IsWorkspaceMember, + workspaceRoot: existing.WorkspaceRoot, + workspaceTargets: existing.WorkspaceTargets, + workspaceLink: existing.WorkspaceLink, + lifecycleScripts: existing.LifecycleScripts, + nodeVersions: existing.NodeVersions, + usedByEntrypoint: existing.IsUsedByEntrypoint, + declaredOnly: existing.DeclaredOnly, + lockSource: string.IsNullOrWhiteSpace(pnpPackage.LockSource) ? existing.LockSource : pnpPackage.LockSource, + lockLocator: string.IsNullOrWhiteSpace(pnpPackage.LockLocator) ? existing.LockLocator : pnpPackage.LockLocator, + packageSha256: pnpPackage.PackageSha256 ?? existing.PackageSha256, + isYarnPnp: existing.IsYarnPnp || pnpPackage.IsYarnPnp, + scope: existing.Scope, + isOptional: existing.IsOptional, + license: string.IsNullOrWhiteSpace(existing.License) ? 
pnpPackage.License : existing.License); + + foreach (var entrypoint in existing.Entrypoints) + { + merged.AddEntrypoint(entrypoint.Path, entrypoint.ConditionSet, entrypoint.BinName, entrypoint.MainField, entrypoint.ModuleField); + } + + foreach (var importEdge in existing.Imports) + { + merged.AddImport(importEdge); + } + + merged.SetResolvedImports(existing.ResolvedImports); + return merged; + } + private static IEnumerable<string> EnumerateSourceFiles(string root) { foreach (var extension in new[] { ".js", ".jsx", ".mjs", ".cjs", ".ts", ".tsx" }) @@ -525,7 +581,7 @@ internal static class NodePackageCollector } } - private static string? BuildLockLocator(NodeLockEntry? entry) + internal static string? BuildLockLocator(NodeLockEntry? entry) { if (entry is null) { diff --git a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node/Internal/NodePnpDataLoader.cs b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node/Internal/NodePnpDataLoader.cs index 3760a1a81..fe75f55fa 100644 --- a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node/Internal/NodePnpDataLoader.cs +++ b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Node/Internal/NodePnpDataLoader.cs @@ -255,6 +255,20 @@ internal static class NodePnpDataLoader private static string NormalizeRelativePath(LanguageAnalyzerContext context, string packageLocation) { + if (!Path.IsPathRooted(packageLocation)) + { + var normalized = packageLocation + .Replace('\\', '/') + .TrimEnd('/'); + + while (normalized.StartsWith("./", StringComparison.Ordinal)) + { + normalized = normalized[2..]; + } + + return string.IsNullOrWhiteSpace(normalized) ? "." : normalized; + } + var relative = context.GetRelativePath(packageLocation); if (string.IsNullOrWhiteSpace(relative) || relative == ".") { diff --git a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Python/Internal/Imports/PythonImportGraph.cs b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Python/Internal/Imports/PythonImportGraph.cs index a749288e3..1052ee15e 100644 --- a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Python/Internal/Imports/PythonImportGraph.cs +++ b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Python/Internal/Imports/PythonImportGraph.cs @@ -201,27 +201,19 @@ internal sealed class PythonImportGraph /// public IReadOnlyList<string>? GetTopologicalOrder() { - var inDegree = new Dictionary<string, int>(StringComparer.Ordinal); + var dependencyCounts = new Dictionary<string, int>(StringComparer.Ordinal); foreach (var module in _modules.Keys) { - inDegree[module] = 0; - } - - foreach (var edges in _edges.Values) - { - foreach (var edge in edges) - { - if (inDegree.ContainsKey(edge.To)) - { - inDegree[edge.To]++; - } - } + var count = _edges.TryGetValue(module, out var edges) ? edges.Count : 0; + dependencyCounts[module] = count; }
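+        // Kahn-style ordering over per-module dependency counts: a module is queued
+        // once every module it imports has been emitted; leftover non-zero counts
+        // indicate a cycle, in which case the method returns null.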
var queue = new Queue<string>(); - foreach (var (module, degree) in inDegree) + foreach (var (module, count) in dependencyCounts) { - if (degree == 0) + if (count == 0) { queue.Enqueue(module); } @@ -233,23 +225,27 @@ var module = queue.Dequeue(); result.Add(module); - if (_edges.TryGetValue(module, out var edges)) + if (!_reverseEdges.TryGetValue(module, out var dependents)) { - foreach (var edge in edges) + continue; + } + + foreach (var edge in dependents) + { + if (!dependencyCounts.TryGetValue(edge.From, out var remaining)) { - if (inDegree.ContainsKey(edge.To)) - { - inDegree[edge.To]--; - if (inDegree[edge.To] == 0) - { - queue.Enqueue(edge.To); - } - } + continue; + } + + remaining--; + dependencyCounts[edge.From] = remaining; + if (remaining == 0) + { + queue.Enqueue(edge.From); } } } - // If not all modules are in result, there's a cycle return result.Count == _modules.Count ? result : null; } @@ -437,27 +433,23 @@ { var parts = sourceModulePath.Split('.'); - // Calculate the package to start from - // Level 1 (.) = current package - // Level 2 (..) = parent package - var levelsUp = import.RelativeLevel; - - // If source is not a package (__init__.py), we need to go one more level up var sourceVirtualPath = _modules.TryGetValue(sourceModulePath, out var node) ? node.VirtualPath : null; var isSourcePackage = sourceVirtualPath?.EndsWith("__init__.py", StringComparison.Ordinal) == true; - if (!isSourcePackage) + // Base package is the containing package of the source module + var packageParts = isSourcePackage ? parts : parts[..^1]; + + // RelativeLevel counts from the package boundary; level 1 = current package + var levelsUp = Math.Max(import.RelativeLevel - 1, 0); + if (levelsUp > packageParts.Length) { - levelsUp++; + return null; } - if (levelsUp > parts.Length) - { - return null; // Invalid relative import (goes beyond top-level package) - } - - var baseParts = parts[..^(levelsUp)]; - var basePackage = string.Join('.', baseParts); + var basePartsLength = packageParts.Length - levelsUp; + var basePackage = basePartsLength <= 0 + ? string.Empty + : string.Join('.', packageParts.Take(basePartsLength)); if (string.IsNullOrEmpty(import.Module)) { diff --git a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Python/Internal/Imports/PythonSourceImportExtractor.cs b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Python/Internal/Imports/PythonSourceImportExtractor.cs index f903039e3..d3a08bda2 100644 --- a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Python/Internal/Imports/PythonSourceImportExtractor.cs +++ b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Python/Internal/Imports/PythonSourceImportExtractor.cs @@ -11,8 +11,9 @@ internal sealed partial class PythonSourceImportExtractor private readonly List _imports = new(); private bool _inTryBlock; private bool _inTypeCheckingBlock; - private int _functionDepth; - private int _classDepth;
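+    // Indent-based scope tracking: a def/class/TYPE_CHECKING block stays active only
+    // while the current line is indented deeper than the line that opened it, e.g.
+    //   if TYPE_CHECKING:       (indent 0 sets _typeCheckingIndent = 0)
+    //       import foo          (indent 4 => recorded as type-checking-only)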
+ private int? _typeCheckingIndent; + private readonly Stack<int> _functionIndentStack = new(); + private readonly Stack<int> _classIndentStack = new(); public PythonSourceImportExtractor(string sourceFile) { @@ -37,11 +38,15 @@ var lines = content.Split('\n'); var lineNumber = 0; var continuedLine = string.Empty; + var parenBuffer = string.Empty; + var inParenContinuation = false; - foreach (var rawLine in lines) + foreach (var rawLineOriginal in lines) { lineNumber++; - var line = rawLine.TrimEnd('\r'); + var rawLine = rawLineOriginal.TrimEnd('\r'); + var indent = CountIndentation(rawLine); + var line = rawLine; // Handle line continuations if (line.EndsWith('\\')) @@ -54,7 +59,7 @@ continuedLine = string.Empty; // Track context - UpdateContext(fullLine.TrimStart()); + UpdateContext(fullLine.TrimStart(), indent); // Skip comments and empty lines var trimmed = fullLine.TrimStart(); @@ -75,6 +80,29 @@ continue; } + // Handle parenthesized from-imports spanning multiple lines + if (inParenContinuation) + { + parenBuffer += " " + trimmed; + if (trimmed.Contains(')')) + { + inParenContinuation = false; + ExtractImports(parenBuffer, lineNumber); + parenBuffer = string.Empty; + } + continue; + } + + if (trimmed.StartsWith("from ", StringComparison.Ordinal) && + trimmed.Contains("import", StringComparison.Ordinal) && + trimmed.Contains('(') && + !trimmed.Contains(')')) + { + inParenContinuation = true; + parenBuffer = trimmed; + continue; + } + // Try to extract imports ExtractImports(trimmed, lineNumber); } @@ -82,8 +110,24 @@ return this; } - private void UpdateContext(string line) + private void UpdateContext(string line, int indent) { + // unwind function/class stacks based on indentation + while (_functionIndentStack.Count > 0 && indent <= _functionIndentStack.Peek()) + { + _functionIndentStack.Pop(); + } + + while (_classIndentStack.Count > 0 && indent <= _classIndentStack.Peek()) + { + _classIndentStack.Pop(); + } + + if (_typeCheckingIndent is not null && indent <= _typeCheckingIndent.Value) + { + _typeCheckingIndent = null; + } + // Track try blocks if (line.StartsWith("try:") || line == "try") { @@ -94,31 +138,30 @@ _inTryBlock = false; } - // Track TYPE_CHECKING blocks - if (line.Contains("TYPE_CHECKING") && line.Contains("if")) - { - _inTypeCheckingBlock = true; - } - // Track function depth if (line.StartsWith("def ") || line.StartsWith("async def ")) { - _functionDepth++; + _functionIndentStack.Push(indent); } // Track class depth (for nested classes) if (line.StartsWith("class ")) { - _classDepth++; + _classIndentStack.Push(indent); } - // Reset context at module level definitions - if ((line.StartsWith("def ") || line.StartsWith("class ") || line.StartsWith("async def ")) && - !line.StartsWith(" ") && !line.StartsWith("\t")) + // Track TYPE_CHECKING blocks + if (line.StartsWith("if", StringComparison.Ordinal) && line.Contains("TYPE_CHECKING", StringComparison.Ordinal)) + { + _typeCheckingIndent = indent; + } + + _inTypeCheckingBlock = _typeCheckingIndent is not null && indent > _typeCheckingIndent.Value; + + // Reset TYPE_CHECKING when hitting new top-level if + if (_typeCheckingIndent is null && indent == 0 && line.StartsWith("if ", StringComparison.Ordinal)) { _inTypeCheckingBlock = false; - _functionDepth = 0; - 
_classDepth = 0; } } @@ -212,7 +255,7 @@ internal sealed partial class PythonSourceImportExtractor LineNumber: lineNumber, Confidence: PythonImportConfidence.Definitive, IsConditional: _inTryBlock, - IsLazy: _functionDepth > 0, + IsLazy: _functionIndentStack.Count > 0, IsTypeCheckingOnly: _inTypeCheckingBlock)); } } @@ -241,7 +284,7 @@ internal sealed partial class PythonSourceImportExtractor LineNumber: lineNumber, Confidence: PythonImportConfidence.Definitive, IsConditional: _inTryBlock, - IsLazy: _functionDepth > 0, + IsLazy: _functionIndentStack.Count > 0, IsTypeCheckingOnly: _inTypeCheckingBlock)); return; } @@ -294,7 +337,7 @@ internal sealed partial class PythonSourceImportExtractor LineNumber: lineNumber, Confidence: PythonImportConfidence.Definitive, IsConditional: _inTryBlock, - IsLazy: _functionDepth > 0, + IsLazy: _functionIndentStack.Count > 0, IsTypeCheckingOnly: _inTypeCheckingBlock)); } @@ -324,7 +367,7 @@ internal sealed partial class PythonSourceImportExtractor LineNumber: lineNumber, Confidence: PythonImportConfidence.High, IsConditional: _inTryBlock, - IsLazy: _functionDepth > 0, + IsLazy: _functionIndentStack.Count > 0, IsTypeCheckingOnly: _inTypeCheckingBlock)); } @@ -342,7 +385,7 @@ internal sealed partial class PythonSourceImportExtractor LineNumber: lineNumber, Confidence: PythonImportConfidence.High, IsConditional: _inTryBlock, - IsLazy: _functionDepth > 0, + IsLazy: _functionIndentStack.Count > 0, IsTypeCheckingOnly: _inTypeCheckingBlock)); } @@ -359,10 +402,32 @@ internal sealed partial class PythonSourceImportExtractor LineNumber: lineNumber, Confidence: PythonImportConfidence.Medium, IsConditional: _inTryBlock, - IsLazy: _functionDepth > 0, + IsLazy: _functionIndentStack.Count > 0, IsTypeCheckingOnly: _inTypeCheckingBlock)); } + private static int CountIndentation(string line) + { + var count = 0; + foreach (var c in line) + { + if (c == ' ') + { + count++; + } + else if (c == '\t') + { + count += 4; + } + else + { + break; + } + } + + return count; + } + private static int FindCommentStart(string line) { var inSingleQuote = false; diff --git a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Python/Internal/VirtualFileSystem/PythonInputNormalizer.cs b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Python/Internal/VirtualFileSystem/PythonInputNormalizer.cs index 3e5a5bd5d..826b2fe0b 100644 --- a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Python/Internal/VirtualFileSystem/PythonInputNormalizer.cs +++ b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.Lang.Python/Internal/VirtualFileSystem/PythonInputNormalizer.cs @@ -1,5 +1,6 @@ using System.Text.Json; using System.Text.RegularExpressions; +using System.Linq; namespace StellaOps.Scanner.Analyzers.Lang.Python.Internal.VirtualFileSystem; @@ -276,10 +277,10 @@ internal sealed partial class PythonInputNormalizer var version = parts[1].Trim(); if (!string.IsNullOrEmpty(version)) { - _versionTargets.Add(new PythonVersionTarget( + AddVersionTarget( version, "pyvenv.cfg", - PythonVersionConfidence.Definitive)); + PythonVersionConfidence.Definitive); } } } @@ -304,8 +305,7 @@ internal sealed partial class PythonInputNormalizer var content = await File.ReadAllTextAsync(path, cancellationToken).ConfigureAwait(false); // Look for requires-python in [project] section - var requiresPythonMatch = RequiresPythonPattern().Match(content); - if (requiresPythonMatch.Success) + foreach (Match requiresPythonMatch in RequiresPythonPattern().Matches(content)) { var version = 
requiresPythonMatch.Groups["version"].Value.Trim().Trim('"', '\''); if (!string.IsNullOrEmpty(version)) @@ -314,17 +314,16 @@ internal sealed partial class PythonInputNormalizer version.StartsWith(">", StringComparison.Ordinal); version = Regex.Replace(version, @"^[><=!~]+", string.Empty).Trim(); - _versionTargets.Add(new PythonVersionTarget( + AddVersionTarget( version, "pyproject.toml", PythonVersionConfidence.High, - isMinimum)); + isMinimum); } } // Look for python_requires in [tool.poetry] or similar - var pythonMatch = PythonVersionTomlPattern().Match(content); - if (pythonMatch.Success) + foreach (Match pythonMatch in PythonVersionTomlPattern().Matches(content)) { var version = pythonMatch.Groups["version"].Value.Trim().Trim('"', '\''); if (!string.IsNullOrEmpty(version)) @@ -333,11 +332,11 @@ internal sealed partial class PythonInputNormalizer version.StartsWith(">=", StringComparison.Ordinal); version = Regex.Replace(version, @"^[\^><=!~]+", string.Empty).Trim(); - _versionTargets.Add(new PythonVersionTarget( + AddVersionTarget( version, "pyproject.toml", PythonVersionConfidence.High, - isMinimum)); + isMinimum); } } } @@ -367,11 +366,11 @@ internal sealed partial class PythonInputNormalizer var isMinimum = version.StartsWith(">=", StringComparison.Ordinal); version = Regex.Replace(version, @"^[><=!~]+", string.Empty).Trim(); - _versionTargets.Add(new PythonVersionTarget( + AddVersionTarget( version, "setup.py", PythonVersionConfidence.High, - isMinimum)); + isMinimum); } } } @@ -401,11 +400,11 @@ internal sealed partial class PythonInputNormalizer var isMinimum = version.StartsWith(">=", StringComparison.Ordinal); version = Regex.Replace(version, @"^[><=!~]+", string.Empty).Trim(); - _versionTargets.Add(new PythonVersionTarget( + AddVersionTarget( version, "setup.cfg", PythonVersionConfidence.High, - isMinimum)); + isMinimum); } } } @@ -433,10 +432,10 @@ internal sealed partial class PythonInputNormalizer if (match.Success) { var version = match.Groups["version"].Value; - _versionTargets.Add(new PythonVersionTarget( + AddVersionTarget( version, "runtime.txt", - PythonVersionConfidence.Definitive)); + PythonVersionConfidence.Definitive); } } catch (IOException) @@ -462,10 +461,10 @@ internal sealed partial class PythonInputNormalizer if (fromMatch.Success) { var version = fromMatch.Groups["version"].Value; - _versionTargets.Add(new PythonVersionTarget( + AddVersionTarget( version, "Dockerfile", - PythonVersionConfidence.High)); + PythonVersionConfidence.High); return; } @@ -474,10 +473,10 @@ internal sealed partial class PythonInputNormalizer if (envMatch.Success) { var version = envMatch.Groups["version"].Value; - _versionTargets.Add(new PythonVersionTarget( + AddVersionTarget( version, "Dockerfile", - PythonVersionConfidence.Medium)); + PythonVersionConfidence.Medium); } } catch (IOException) @@ -509,10 +508,10 @@ internal sealed partial class PythonInputNormalizer { // Convert py311 to 3.11 var formatted = $"{version[0]}.{version[1..]}"; - _versionTargets.Add(new PythonVersionTarget( + AddVersionTarget( formatted, "tox.ini", - PythonVersionConfidence.Medium)); + PythonVersionConfidence.Medium); } } } @@ -553,10 +552,10 @@ internal sealed partial class PythonInputNormalizer if (match.Success) { var version = match.Groups["version"].Value; - _versionTargets.Add(new PythonVersionTarget( + AddVersionTarget( version, $"lib/{dirName}", - PythonVersionConfidence.Medium)); + PythonVersionConfidence.Medium); } } } @@ -659,6 +658,46 @@ internal sealed partial class PythonInputNormalizer 
} } + private void AddVersionTarget(string version, string source, PythonVersionConfidence confidence, bool isMinimum = false) + { + if (string.IsNullOrWhiteSpace(version) || string.IsNullOrWhiteSpace(source)) + { + return; + } + + var normalizedVersion = version.Trim(); + var normalizedSource = source.Trim(); + + var existingIndex = _versionTargets.FindIndex(target => + string.Equals(target.Version, normalizedVersion, StringComparison.OrdinalIgnoreCase) && + string.Equals(target.Source, normalizedSource, StringComparison.OrdinalIgnoreCase)); + + if (existingIndex >= 0) + { + var existing = _versionTargets[existingIndex]; + var effectiveConfidence = (PythonVersionConfidence)Math.Max((int)existing.Confidence, (int)confidence); + var effectiveIsMinimum = existing.IsMinimum || isMinimum; + + if (existing.Confidence == effectiveConfidence && existing.IsMinimum == effectiveIsMinimum) + { + return; + } + + _versionTargets[existingIndex] = new PythonVersionTarget( + normalizedVersion, + normalizedSource, + effectiveConfidence, + effectiveIsMinimum); + return; + } + + _versionTargets.Add(new PythonVersionTarget( + normalizedVersion, + normalizedSource, + confidence, + isMinimum)); + } + private void DetectZipapps() { if (!Directory.Exists(_rootPath)) @@ -779,7 +818,7 @@ [GeneratedRegex(@"requires-python\s*=\s*[""']?(?<version>[^""'\n]+)", RegexOptions.IgnoreCase)] private static partial Regex RequiresPythonPattern(); - [GeneratedRegex(@"python\s*=\s*[""'](?<version>[^""']+)[""']", RegexOptions.IgnoreCase)] + [GeneratedRegex(@"^\s*python\s*=\s*[""'](?<version>[^""']+)[""']", RegexOptions.IgnoreCase | RegexOptions.Multiline)] private static partial Regex PythonVersionTomlPattern(); [GeneratedRegex(@"python_requires\s*=\s*[""'](?<version>[^""']+)[""']", RegexOptions.IgnoreCase)] diff --git a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS.Apk/ApkPackageAnalyzer.cs b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS.Apk/ApkPackageAnalyzer.cs index 5d539cf03..54598f4c5 100644 --- a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS.Apk/ApkPackageAnalyzer.cs +++ b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS.Apk/ApkPackageAnalyzer.cs @@ -38,7 +38,7 @@ internal sealed class ApkPackageAnalyzer : OsPackageAnalyzerBase using var stream = File.OpenRead(installedPath); var entries = _parser.Parse(stream, cancellationToken); - context.Metadata.TryGetValue(ScanMetadataKeys.CurrentLayerDigest, out var layerDigest); + var evidenceFactory = OsFileEvidenceFactory.Create(context.RootPath, context.Metadata); var records = new List(entries.Count); foreach (var entry in entries) @@ -68,16 +68,17 @@ internal sealed class ApkPackageAnalyzer : OsPackageAnalyzerBase vendorMetadata[$"apk:{pair.Key}"] = pair.Value; } - var files = new List<OSPackageFileEvidence>(entry.Files.Count); - foreach (var file in entry.Files) - { - files.Add(new OSPackageFileEvidence( file.Path, - layerDigest: layerDigest, - sha256: file.Digest, - sizeBytes: null, - isConfigFile: file.IsConfig)); - } + var files = entry.Files + .Select(file => evidenceFactory.Create( + file.Path, + file.IsConfig, + string.IsNullOrWhiteSpace(file.Digest) + ? 
null + : new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase) + { + ["sha256"] = file.Digest + })) + .ToList(); var cveHints = CveHintExtractor.Extract( string.Join(' ', entry.Depends), diff --git a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS.Dpkg/DpkgPackageAnalyzer.cs b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS.Dpkg/DpkgPackageAnalyzer.cs index c47fae155..87d32cabc 100644 --- a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS.Dpkg/DpkgPackageAnalyzer.cs +++ b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS.Dpkg/DpkgPackageAnalyzer.cs @@ -40,10 +40,9 @@ internal sealed class DpkgPackageAnalyzer : OsPackageAnalyzerBase using var stream = File.OpenRead(statusPath); var entries = _parser.Parse(stream, cancellationToken); - context.Metadata.TryGetValue(ScanMetadataKeys.CurrentLayerDigest, out var layerDigest); - var infoDirectory = Path.Combine(context.RootPath, "var", "lib", "dpkg", "info"); var records = new List(); + var evidenceFactory = OsFileEvidenceFactory.Create(context.RootPath, context.Metadata); foreach (var entry in entries) { @@ -86,7 +85,7 @@ internal sealed class DpkgPackageAnalyzer : OsPackageAnalyzerBase var dependencies = entry.Depends.Concat(entry.PreDepends).ToArray(); var provides = entry.Provides.ToArray(); - var fileEvidence = BuildFileEvidence(infoDirectory, entry, layerDigest, cancellationToken); + var fileEvidence = BuildFileEvidence(infoDirectory, entry, evidenceFactory, cancellationToken); var cveHints = CveHintExtractor.Extract(entry.Description, string.Join(' ', dependencies), string.Join(' ', provides)); @@ -128,7 +127,11 @@ internal sealed class DpkgPackageAnalyzer : OsPackageAnalyzerBase return parts.Length == 0 ? null : parts[0]; } - private static IReadOnlyList<OSPackageFileEvidence> BuildFileEvidence(string infoDirectory, DpkgPackageEntry entry, string? layerDigest, CancellationToken cancellationToken) + private static IReadOnlyList<OSPackageFileEvidence> BuildFileEvidence( + string infoDirectory, + DpkgPackageEntry entry, + OsFileEvidenceFactory evidenceFactory, + CancellationToken cancellationToken) { if (!Directory.Exists(infoDirectory)) { @@ -140,7 +143,7 @@ { if (!files.TryGetValue(path, out _)) { - files[path] = new FileEvidenceBuilder(path, layerDigest); + files[path] = new FileEvidenceBuilder(path); } } @@ -236,7 +239,7 @@ } var evidence = files.Values - .Select(builder => builder.ToEvidence()) + .Select(builder => evidenceFactory.Create(builder.Path, builder.IsConfig, builder.Digests)) .OrderBy(e => e) .ToArray(); @@ -251,23 +254,15 @@ private sealed class FileEvidenceBuilder { - public FileEvidenceBuilder(string path, string? layerDigest) + public FileEvidenceBuilder(string path) { Path = path; - LayerDigest = layerDigest; } public string Path { get; } - public string? 
LayerDigest { get; } - public bool IsConfig { get; set; } public Dictionary<string, string> Digests { get; } = new(StringComparer.OrdinalIgnoreCase); - - public OSPackageFileEvidence ToEvidence() - { - return new OSPackageFileEvidence(Path, layerDigest: LayerDigest, isConfigFile: IsConfig, digests: Digests); - } } } diff --git a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS.Rpm/RpmPackageAnalyzer.cs b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS.Rpm/RpmPackageAnalyzer.cs index 0da9b74b8..f2c4fc184 100644 --- a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS.Rpm/RpmPackageAnalyzer.cs +++ b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS.Rpm/RpmPackageAnalyzer.cs @@ -43,7 +43,7 @@ internal sealed class RpmPackageAnalyzer : OsPackageAnalyzerBase return ValueTask.FromResult>(EmptyPackages); } - context.Metadata.TryGetValue(ScanMetadataKeys.CurrentLayerDigest, out var layerDigest); + var evidenceFactory = OsFileEvidenceFactory.Create(context.RootPath, context.Metadata); var records = new List(headers.Count); foreach (var header in headers) @@ -80,7 +80,7 @@ internal sealed class RpmPackageAnalyzer : OsPackageAnalyzerBase digests = new Dictionary<string, string>(file.Digests, StringComparer.OrdinalIgnoreCase); } - files.Add(new OSPackageFileEvidence(file.Path, layerDigest: layerDigest, isConfigFile: file.IsConfig, digests: digests)); + files.Add(evidenceFactory.Create(file.Path, file.IsConfig, digests)); } var cveHints = CveHintExtractor.Extract( diff --git a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS/Helpers/OsFileEvidenceFactory.cs b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS/Helpers/OsFileEvidenceFactory.cs new file mode 100644 index 000000000..43665bb01 --- /dev/null +++ b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS/Helpers/OsFileEvidenceFactory.cs @@ -0,0 +1,222 @@ +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.IO; +using System.Linq; +using System.Security.Cryptography; +using System.Text; +using System.Text.Json; +using StellaOps.Scanner.Core.Contracts; + +namespace StellaOps.Scanner.Analyzers.OS.Helpers; + +/// <summary> +/// Enriches OS package file evidence with layer attribution and stable hashes/sizes. +/// </summary> +public sealed class OsFileEvidenceFactory +{ + private readonly string _rootPath; + private readonly ImmutableArray<(string? Digest, string Path)> _layerDirectories; + private readonly string? _defaultLayerDigest; + + private OsFileEvidenceFactory(string rootPath, ImmutableArray<(string? Digest, string Path)> layerDirectories, string? defaultLayerDigest) + { + _rootPath = rootPath; + _layerDirectories = layerDirectories; + _defaultLayerDigest = NormalizeDigest(defaultLayerDigest); + } + + public static OsFileEvidenceFactory Create(string rootPath, IReadOnlyDictionary<string, string> metadata) + { + ArgumentException.ThrowIfNullOrWhiteSpace(rootPath); + ArgumentNullException.ThrowIfNull(metadata); + + var layerDirectories = ParseLayerEntries(metadata, ScanMetadataKeys.LayerDirectories); + metadata.TryGetValue(ScanMetadataKeys.CurrentLayerDigest, out var defaultLayerDigest); + + return new OsFileEvidenceFactory(rootPath, layerDirectories, defaultLayerDigest); + } + + public OSPackageFileEvidence Create(string path, bool isConfigFile, IDictionary<string, string>? digests = null) + { + var digestMap = digests is null + ? new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase) + : new Dictionary<string, string>(digests, StringComparer.OrdinalIgnoreCase); + + var layerDigest = ResolveLayerDigest(path) ?? _defaultLayerDigest; + string? sha256 = null; + long? size = null; + + var fullPath = CombineWithRoot(path); + if (fullPath is not null && File.Exists(fullPath)) + { + try + { + var info = new FileInfo(fullPath); + size = info.Length; + + if (info.Length > 0 && !digestMap.TryGetValue("sha256", out sha256)) + { + sha256 = ComputeSha256(fullPath); + digestMap["sha256"] = sha256; + } + } + catch (IOException) + { + // Best-effort: ignore IO failures and fall back to existing metadata + } + catch (UnauthorizedAccessException) + { + // Ignore permission issues + } + } + + return new OSPackageFileEvidence( + path, + layerDigest: layerDigest, + sha256: sha256, + sizeBytes: size, + isConfigFile: isConfigFile, + digests: digestMap); + } + + private string? CombineWithRoot(string path) + { + if (string.IsNullOrWhiteSpace(path)) + { + return null; + } + + var trimmed = path.TrimStart('/', '\\'); + var combined = Path.Combine(_rootPath, trimmed); + return Path.GetFullPath(combined); + } + + private string? ResolveLayerDigest(string path) + { + if (_layerDirectories.IsDefaultOrEmpty) + { + return null; + } + + var relative = path.TrimStart('/', '\\'); + foreach (var (digest, layerPath) in _layerDirectories) + { + string? layerDigest = NormalizeDigest(digest); + string candidate; + try + { + candidate = Path.GetFullPath(Path.Combine(layerPath, relative)); + } + catch + { + continue; + } + + if (!File.Exists(candidate)) + { + continue; + } + + return layerDigest ?? ComputeDirectoryDigest(layerPath); + } + + return null; + } + + private static string ComputeSha256(string path) + { + using var stream = File.OpenRead(path); + return Convert.ToHexString(SHA256.HashData(stream)).ToLowerInvariant(); + } + + private static string ComputeDirectoryDigest(string path) + { + var payload = Encoding.UTF8.GetBytes(Path.GetFullPath(path)); + var hash = SHA256.HashData(payload); + return $"sha256:{Convert.ToHexString(hash).ToLowerInvariant()}"; + }
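+    // The layer-directories metadata value is either a JSON string array or a
+    // delimited list of "digest=path" entries (digest optional). An illustrative
+    // raw value, with placeholder digests:
+    //   sha256:aaa...=/layers/0;sha256:bbb...=/layers/1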
+ private static ImmutableArray<(string? Digest, string Path)> ParseLayerEntries( + IReadOnlyDictionary<string, string> metadata, + string metadataKey) + { + if (string.IsNullOrWhiteSpace(metadataKey) || + !metadata.TryGetValue(metadataKey, out var rawValue) || + string.IsNullOrWhiteSpace(rawValue)) + { + return ImmutableArray<(string?, string)>.Empty; + } + + rawValue = rawValue.Trim(); + IEnumerable<string> tokens; + + if (rawValue.StartsWith("[", StringComparison.Ordinal)) + { + try + { + var parsed = JsonSerializer.Deserialize<string[]>(rawValue); + tokens = parsed ?? Array.Empty<string>(); + } + catch + { + tokens = SplitLayerString(rawValue); + } + } + else + { + tokens = SplitLayerString(rawValue); + } + + var builder = ImmutableArray.CreateBuilder<(string?, string)>(); + foreach (var token in tokens) + { + var entry = token.Trim(); + if (entry.Length == 0) + { + continue; + } + + var separator = entry.IndexOf('='); + string? digest = null; + var pathPart = entry; + + if (separator >= 0) + { + digest = entry[..separator].Trim(); + pathPart = entry[(separator + 1)..].Trim(); + } + + if (pathPart.Length == 0) + { + continue; + } + + builder.Add((NormalizeDigest(digest), pathPart)); + } + + return builder.ToImmutable(); + } + + private static IEnumerable<string> SplitLayerString(string raw) + => raw.Split(new[] { '\n', '\r', ';' }, StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries);
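+    // Lowercases digests and preserves the algorithm prefix, e.g.
+    // NormalizeDigest("SHA256:AB12") => "sha256:ab12"; bare values are lowercased as-is.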
+ private static string? NormalizeDigest(string? digest) + { + if (string.IsNullOrWhiteSpace(digest)) + { + return null; + } + + var trimmed = digest.Trim(); + if (!trimmed.Contains(':', StringComparison.Ordinal)) + { + return trimmed.ToLowerInvariant(); + } + + var parts = trimmed.Split(':', 2, StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries); + return parts.Length == 2 + ? $"{parts[0].ToLowerInvariant()}:{parts[1].ToLowerInvariant()}" + : trimmed.ToLowerInvariant(); + } +} diff --git a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS/Mapping/OsComponentMapper.cs b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS/Mapping/OsComponentMapper.cs index c0b725cb4..4aa4b1625 100644 --- a/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS/Mapping/OsComponentMapper.cs +++ b/src/Scanner/__Libraries/StellaOps.Scanner.Analyzers.OS/Mapping/OsComponentMapper.cs @@ -4,6 +4,7 @@ using System.Collections.Immutable; using System.Linq; using System.Security.Cryptography; using System.Text; +using System.Globalization; using StellaOps.Scanner.Core.Contracts; namespace StellaOps.Scanner.Analyzers.OS.Mapping; @@ -144,6 +145,11 @@ public static class OsComponentMapper properties[$"digest.{digest.Key}.{NormalizePathKey(file.Path)}"] = digest.Value.Trim(); } + + if (file.SizeBytes.HasValue) + { + properties[$"size.{NormalizePathKey(file.Path)}"] = file.SizeBytes.Value.ToString(CultureInfo.InvariantCulture); + } } IReadOnlyList<string>? licenses = null; diff --git a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Node.Tests/Fixtures/lang/node/yarn-pnp/expected.json b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Node.Tests/Fixtures/lang/node/yarn-pnp/expected.json index a06e61df8..57c6fd105 100644 --- a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Node.Tests/Fixtures/lang/node/yarn-pnp/expected.json +++ b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Node.Tests/Fixtures/lang/node/yarn-pnp/expected.json @@ -40,7 +40,8 @@ { "kind": "file", "source": "package.json", - "locator": "package.json" + "locator": "package.json", + "sha256": "65e86ba14f0beebc4573039ac34a58f6dfa0133aa4a9e7f2dcdbb36a4e5c2814" } ] } diff --git a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Node.Tests/Node/NodePackageCollectorTests.cs b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Node.Tests/Node/NodePackageCollectorTests.cs index 7e5a4d023..d8718b3a7 100644 --- a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Node.Tests/Node/NodePackageCollectorTests.cs +++ b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Node.Tests/Node/NodePackageCollectorTests.cs @@ -594,11 +594,7 @@ public sealed class NodePackageCollectorTests } private static string InvokeBuildDeclarationKey(string name, string? version) - { - var method = typeof(NodePackageCollector).GetMethod("BuildDeclarationKey", - System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Static); - return (string?)method?.Invoke(null, [name, version]) ?? 
string.Empty; - } + => NodeDeclarationKeyBuilder.Build(name, version); #endregion } diff --git a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Fixtures/lang/python/layered-editable/expected.json b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Fixtures/lang/python/layered-editable/expected.json index 2eea00d45..420f78446 100644 --- a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Fixtures/lang/python/layered-editable/expected.json +++ b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Fixtures/lang/python/layered-editable/expected.json @@ -26,9 +26,9 @@ "projectUrl": "Documentation, https://example.com/layered/docs", "provenance": "dist-info", "record.hashMismatches": "0", - "record.hashedEntries": "8", + "record.hashedEntries": "7", "record.ioErrors": "0", - "record.missingFiles": "0", + "record.missingFiles": "1", "record.totalEntries": "9", "requiresDist": "requests", "requiresPython": "\u003E=3.9", @@ -44,6 +44,12 @@ "wheel.version": "1.0" }, "evidence": [ + { + "kind": "derived", + "source": "RECORD", + "locator": "layer1/usr/bin/layered-cli", + "value": "missing" + }, { "kind": "file", "source": "INSTALLER", diff --git a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Fixtures/lang/python/pip-cache/expected.json b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Fixtures/lang/python/pip-cache/expected.json index a9b5f3b87..9a497a0a4 100644 --- a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Fixtures/lang/python/pip-cache/expected.json +++ b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Fixtures/lang/python/pip-cache/expected.json @@ -30,6 +30,8 @@ "record.unsupportedAlgorithms": "md5", "requiresDist": "click", "requiresPython": "\u003E=3.8", + "runtime.libPaths.count": "1", + "runtime.versions": "3.11", "summary": "Cache test package for hashed RECORD coverage", "version": "1.2.3", "wheel.generator": "pip 24.0", diff --git a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Fixtures/lang/python/simple-venv/expected.json b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Fixtures/lang/python/simple-venv/expected.json index 67ffc82c9..6a356508d 100644 --- a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Fixtures/lang/python/simple-venv/expected.json +++ b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Fixtures/lang/python/simple-venv/expected.json @@ -31,6 +31,8 @@ "record.totalEntries": "10", "requiresDist": "requests (\u003E=2.0)", "requiresPython": "\u003E=3.9", + "runtime.libPaths.count": "1", + "runtime.versions": "3.11", "sourceCommit": "abc123def", "sourceSubdirectory": "src/simple", "sourceUrl": "https://example.com/simple-1.0.0.tar.gz", diff --git a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Python/PythonLanguageAnalyzerTests.cs b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Python/PythonLanguageAnalyzerTests.cs index ccfa29a17..6b8ae22e4 100644 --- a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Python/PythonLanguageAnalyzerTests.cs +++ b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/Python/PythonLanguageAnalyzerTests.cs @@ -360,7 +360,8 @@ public sealed class PythonLanguageAnalyzerTests using var document = JsonDocument.Parse(json); var root = document.RootElement; - var expectedPath = Path.Combine("lib", "python3.11", "site-packages", "egg_info_pkg-1.2.3.egg-info"); + var 
expectedPath = Path.Combine("lib", "python3.11", "site-packages", "egg_info_pkg-1.2.3.egg-info") + .Replace('\\', '/'); Assert.True(ComponentHasMetadata(root, "egg-info-pkg", "provenance", "egg-info")); Assert.True(ComponentHasMetadata(root, "egg-info-pkg", "record.totalEntries", "4")); diff --git a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/VirtualFileSystem/PythonVirtualFileSystemTests.cs b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/VirtualFileSystem/PythonVirtualFileSystemTests.cs index bc771bfab..93689dbc4 100644 --- a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/VirtualFileSystem/PythonVirtualFileSystemTests.cs +++ b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Python.Tests/VirtualFileSystem/PythonVirtualFileSystemTests.cs @@ -77,16 +77,22 @@ public sealed class PythonVirtualFileSystemTests using (var archive = ZipFile.Open(wheelPath, ZipArchiveMode.Create)) { var entry = archive.CreateEntry("mypackage/__init__.py"); - using var writer = new StreamWriter(entry.Open()); - writer.Write("# Package init"); + using (var writer = new StreamWriter(entry.Open())) + { + writer.Write("# Package init"); + } entry = archive.CreateEntry("mypackage/core.py"); - using var writer2 = new StreamWriter(entry.Open()); - writer2.Write("def main(): pass"); + using (var writer2 = new StreamWriter(entry.Open())) + { + writer2.Write("def main(): pass"); + } entry = archive.CreateEntry("mypackage-1.0.0.dist-info/METADATA"); - using var writer3 = new StreamWriter(entry.Open()); - writer3.Write("Name: mypackage\nVersion: 1.0.0"); + using (var writer3 = new StreamWriter(entry.Open())) + { + writer3.Write("Name: mypackage\nVersion: 1.0.0"); + } } var vfs = PythonVirtualFileSystem.CreateBuilder() diff --git a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Tests/DotNet/DotNetLanguageAnalyzerTests.cs b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Tests/DotNet/DotNetLanguageAnalyzerTests.cs index 9a9416a05..fd22cbf3b 100644 --- a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Tests/DotNet/DotNetLanguageAnalyzerTests.cs +++ b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Lang.Tests/DotNet/DotNetLanguageAnalyzerTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.IO; using System.Linq; using System.Text.Json; @@ -7,72 +7,72 @@ using System.Threading.Tasks; using StellaOps.Scanner.Analyzers.Lang.DotNet; using StellaOps.Scanner.Analyzers.Lang.Tests.Harness; using StellaOps.Scanner.Analyzers.Lang.Tests.TestUtilities; - -namespace StellaOps.Scanner.Analyzers.Lang.Tests.DotNet; - -public sealed class DotNetLanguageAnalyzerTests -{ - [Fact] - public async Task SimpleFixtureProducesDeterministicOutputAsync() - { - var cancellationToken = TestContext.Current.CancellationToken; - var fixturePath = TestPaths.ResolveFixture("lang", "dotnet", "simple"); - var goldenPath = Path.Combine(fixturePath, "expected.json"); - - var analyzers = new ILanguageAnalyzer[] - { - new DotNetLanguageAnalyzer() - }; - - await LanguageAnalyzerTestHarness.AssertDeterministicAsync( - fixturePath, - goldenPath, - analyzers, - cancellationToken); - } - - [Fact] - public async Task SignedFixtureCapturesAssemblyMetadataAsync() - { - var cancellationToken = TestContext.Current.CancellationToken; - var fixturePath = TestPaths.ResolveFixture("lang", "dotnet", "signed"); - var goldenPath = Path.Combine(fixturePath, "expected.json"); - - var analyzers = new ILanguageAnalyzer[] - { - new DotNetLanguageAnalyzer() - }; - - var inspector = new 
StubAuthenticodeInspector(); - var services = new SingleServiceProvider(inspector); - - await LanguageAnalyzerTestHarness.AssertDeterministicAsync( - fixturePath, - goldenPath, - analyzers, - cancellationToken, - usageHints: null, - services: services); - } - - [Fact] - public async Task SelfContainedFixtureHandlesNativeAssetsAndUsageAsync() - { - var cancellationToken = TestContext.Current.CancellationToken; - var fixturePath = TestPaths.ResolveFixture("lang", "dotnet", "selfcontained"); - var goldenPath = Path.Combine(fixturePath, "expected.json"); - - var usageHints = new LanguageUsageHints(new[] - { - Path.Combine(fixturePath, "lib", "net10.0", "StellaOps.Toolkit.dll"), - Path.Combine(fixturePath, "runtimes", "linux-x64", "native", "libstellaopsnative.so") - }); - - var analyzers = new ILanguageAnalyzer[] - { - new DotNetLanguageAnalyzer() - }; - + +namespace StellaOps.Scanner.Analyzers.Lang.Tests.DotNet; + +public sealed class DotNetLanguageAnalyzerTests +{ + [Fact] + public async Task SimpleFixtureProducesDeterministicOutputAsync() + { + var cancellationToken = TestContext.Current.CancellationToken; + var fixturePath = TestPaths.ResolveFixture("lang", "dotnet", "simple"); + var goldenPath = Path.Combine(fixturePath, "expected.json"); + + var analyzers = new ILanguageAnalyzer[] + { + new DotNetLanguageAnalyzer() + }; + + await LanguageAnalyzerTestHarness.AssertDeterministicAsync( + fixturePath, + goldenPath, + analyzers, + cancellationToken); + } + + [Fact] + public async Task SignedFixtureCapturesAssemblyMetadataAsync() + { + var cancellationToken = TestContext.Current.CancellationToken; + var fixturePath = TestPaths.ResolveFixture("lang", "dotnet", "signed"); + var goldenPath = Path.Combine(fixturePath, "expected.json"); + + var analyzers = new ILanguageAnalyzer[] + { + new DotNetLanguageAnalyzer() + }; + + var inspector = new StubAuthenticodeInspector(); + var services = new SingleServiceProvider(inspector); + + await LanguageAnalyzerTestHarness.AssertDeterministicAsync( + fixturePath, + goldenPath, + analyzers, + cancellationToken, + usageHints: null, + services: services); + } + + [Fact] + public async Task SelfContainedFixtureHandlesNativeAssetsAndUsageAsync() + { + var cancellationToken = TestContext.Current.CancellationToken; + var fixturePath = TestPaths.ResolveFixture("lang", "dotnet", "selfcontained"); + var goldenPath = Path.Combine(fixturePath, "expected.json"); + + var usageHints = new LanguageUsageHints(new[] + { + Path.Combine(fixturePath, "lib", "net10.0", "StellaOps.Toolkit.dll"), + Path.Combine(fixturePath, "runtimes", "linux-x64", "native", "libstellaopsnative.so") + }); + + var analyzers = new ILanguageAnalyzer[] + { + new DotNetLanguageAnalyzer() + }; + await LanguageAnalyzerTestHarness.AssertDeterministicAsync( fixturePath, goldenPath, @@ -104,6 +104,68 @@ public sealed class DotNetLanguageAnalyzerTests } } + [Fact] + public async Task ConfigEnablesIlMetadataEdgesAsync() + { + var cancellationToken = TestContext.Current.CancellationToken; + var sourceFixture = TestPaths.ResolveFixture("lang", "dotnet", "simple"); + var tempRoot = TestPaths.CreateTemporaryDirectory(); + + try + { + CopyDirectory(sourceFixture, tempRoot); + + File.WriteAllText( + Path.Combine(tempRoot, "dotnet-il.config.json"), + """ + { + "emitDependencyEdges": true, + "includeEntrypoints": true, + "runtimeEvidencePath": "runtime-evidence.ndjson", + "runtimeEvidenceConfidence": "medium" + } + """); + + File.WriteAllLines( + Path.Combine(tempRoot, "runtime-evidence.ndjson"), + new[] + { + 
"{\"package\":\"stellaops.toolkit\",\"target\":\"native-lib\",\"reason\":\"runtime-load\",\"confidence\":\"medium\",\"source\":\"trace\"}" + }); + + var analyzers = new ILanguageAnalyzer[] + { + new DotNetLanguageAnalyzer() + }; + + var json = await LanguageAnalyzerTestHarness.RunToJsonAsync( + tempRoot, + analyzers, + cancellationToken); + + using var document = JsonDocument.Parse(json); + var packages = document.RootElement.EnumerateArray().ToArray(); + var toolkit = packages.First(element => element.GetProperty("name").GetString() == "StellaOps.Toolkit"); + var logging = packages.First(element => element.GetProperty("name").GetString() == "Microsoft.Extensions.Logging"); + + var toolkitMetadata = toolkit.GetProperty("metadata"); + Assert.Equal("microsoft.extensions.logging", toolkitMetadata.GetProperty("edge[0].target").GetString()); + Assert.Equal("declared-dependency", toolkitMetadata.GetProperty("edge[0].reason").GetString()); + Assert.Equal("native-lib", toolkitMetadata.GetProperty("edge.runtime[0].target").GetString()); + Assert.Equal("runtime-load", toolkitMetadata.GetProperty("edge.runtime[0].reason").GetString()); + + var entrypointId = toolkitMetadata.GetProperty("entrypoint[0].id").GetString(); + Assert.NotNull(entrypointId); + Assert.StartsWith("Sample.App:", entrypointId, StringComparison.Ordinal); + Assert.True(toolkitMetadata.TryGetProperty("entrypoint[0].tfm[0]", out _)); + Assert.True(logging.GetProperty("metadata").TryGetProperty("entrypoint[0].rid[0]", out _)); + } + finally + { + TestPaths.SafeDelete(tempRoot); + } + } + [Fact] public async Task MultiFixtureMergesRuntimeMetadataAsync() { @@ -151,29 +213,49 @@ public sealed class DotNetLanguageAnalyzerTests Assert.Contains("osx-arm64", ridValues); Assert.Contains("win-arm64", ridValues); } - - private sealed class StubAuthenticodeInspector : IDotNetAuthenticodeInspector - { - public DotNetAuthenticodeMetadata? TryInspect(string assemblyPath, CancellationToken cancellationToken) - => new DotNetAuthenticodeMetadata( - Subject: "CN=StellaOps Test Signing", - Issuer: "CN=StellaOps Root", - NotBefore: new DateTimeOffset(2025, 1, 1, 0, 0, 0, TimeSpan.Zero), - NotAfter: new DateTimeOffset(2026, 1, 1, 0, 0, 0, TimeSpan.Zero), - Thumbprint: "AA11BB22CC33DD44EE55FF66GG77HH88II99JJ00", - SerialNumber: "0123456789ABCDEF"); - } - - private sealed class SingleServiceProvider : IServiceProvider - { - private readonly object _service; - - public SingleServiceProvider(object service) - { - _service = service; - } - - public object? GetService(Type serviceType) - => serviceType == typeof(IDotNetAuthenticodeInspector) ? _service : null; - } -} + + private sealed class StubAuthenticodeInspector : IDotNetAuthenticodeInspector + { + public DotNetAuthenticodeMetadata? TryInspect(string assemblyPath, CancellationToken cancellationToken) + => new DotNetAuthenticodeMetadata( + Subject: "CN=StellaOps Test Signing", + Issuer: "CN=StellaOps Root", + NotBefore: new DateTimeOffset(2025, 1, 1, 0, 0, 0, TimeSpan.Zero), + NotAfter: new DateTimeOffset(2026, 1, 1, 0, 0, 0, TimeSpan.Zero), + Thumbprint: "AA11BB22CC33DD44EE55FF66GG77HH88II99JJ00", + SerialNumber: "0123456789ABCDEF"); + } + + private sealed class SingleServiceProvider : IServiceProvider + { + private readonly object _service; + + public SingleServiceProvider(object service) + { + _service = service; + } + + public object? GetService(Type serviceType) + => serviceType == typeof(IDotNetAuthenticodeInspector) ? 
_service : null; + } + + private static void CopyDirectory(string sourceDir, string destinationDir) + { + if (!Directory.Exists(destinationDir)) + { + Directory.CreateDirectory(destinationDir); + } + + foreach (var file in Directory.EnumerateFiles(sourceDir, "*", SearchOption.TopDirectoryOnly)) + { + var targetPath = Path.Combine(destinationDir, Path.GetFileName(file)); + File.Copy(file, targetPath, overwrite: true); + } + + foreach (var directory in Directory.EnumerateDirectories(sourceDir, "*", SearchOption.TopDirectoryOnly)) + { + var targetDirectory = Path.Combine(destinationDir, Path.GetFileName(directory)); + CopyDirectory(directory, targetDirectory); + } + } +} diff --git a/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Native.Tests/Reachability/NativeReachabilityGraphBuilderTests.cs b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Native.Tests/Reachability/NativeReachabilityGraphBuilderTests.cs new file mode 100644 index 000000000..6adef1aca --- /dev/null +++ b/src/Scanner/__Tests/StellaOps.Scanner.Analyzers.Native.Tests/Reachability/NativeReachabilityGraphBuilderTests.cs @@ -0,0 +1,88 @@ +using FluentAssertions; +using StellaOps.Scanner.Analyzers.Native.Observations; +using StellaOps.Scanner.Analyzers.Native.Reachability; +using Xunit; + +namespace StellaOps.Scanner.Analyzers.Native.Tests.Reachability; + +public class NativeReachabilityGraphBuilderTests +{ + [Fact] + public void Build_ConstructsDeterministicGraphWithSyntheticRoots() + { + // Arrange + var document = new NativeObservationDocument + { + Binary = new NativeObservationBinary + { + Path = "/usr/bin/test", + Format = "elf", + BuildId = "abcd1234", + Architecture = "x86_64", + Is64Bit = true, + }, + Entrypoints = + [ + new NativeObservationEntrypoint + { + Type = "main", + Symbol = "_start", + } + ], + DeclaredEdges = + [ + new NativeObservationDeclaredEdge + { + Target = "libc.so.6", + Reason = "elf-dtneeded", + } + ], + HeuristicEdges = + [ + new NativeObservationHeuristicEdge + { + Target = "libplugin.so", + Reason = "string-dlopen", + Confidence = "medium", + } + ], + Environment = new NativeObservationEnvironment() + }; + + // Act + var graph = NativeReachabilityGraphBuilder.Build(document, layerDigest: "sha256:layer1"); + + // Assert + graph.LayerDigest.Should().Be("sha256:layer1"); + graph.BuildId.Should().Be("abcd1234"); + graph.Nodes.Should().ContainSingle(n => n.Kind == "root"); + graph.Nodes.Should().ContainSingle(n => n.Kind == "binary"); + graph.Nodes.Should().Contain(n => n.Name == "libc.so.6"); + graph.Edges.Should().Contain(e => e.Reason == "entrypoint"); + graph.Edges.Should().Contain(e => e.Reason == "elf-dtneeded"); + } + + [Fact] + public void ToBundle_WrapsGraphWithPayloadType() + { + // Arrange + var document = new NativeObservationDocument + { + Binary = new NativeObservationBinary + { + Path = "/usr/bin/test", + Format = "elf", + Sha256 = "aa", + Is64Bit = true, + }, + Environment = new NativeObservationEnvironment() + }; + + // Act + var bundle = NativeReachabilityGraphBuilder.ToBundle(document); + + // Assert + bundle.PayloadType.Should().Be("stellaops.native.graph@1"); + bundle.Graph.Nodes.Should().NotBeEmpty(); + } +} diff --git a/src/Signals/StellaOps.Signals/Options/SignalsEventsOptions.cs b/src/Signals/StellaOps.Signals/Options/SignalsEventsOptions.cs new file mode 100644 index 000000000..518a39402 --- /dev/null +++ b/src/Signals/StellaOps.Signals/Options/SignalsEventsOptions.cs @@ -0,0 +1,105 @@ +using System; + +namespace StellaOps.Signals.Options; + +/// <summary> +/// Configuration for reachability fact events (SIGNALS-24-005). +/// </summary> +public sealed class SignalsEventsOptions +{
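+    // Illustrative appsettings binding for these options, assuming they hang off the
+    // "Signals" section declared by SignalsOptions.SectionName:
+    //   "Signals": { "Events": { "Driver": "redis", "Stream": "signals.fact.updated.v1", "ConnectionString": "..." } }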
+ /// <summary> + /// Enables event emission. When false, events are dropped. + /// </summary> + public bool Enabled { get; set; } = true; + + /// <summary> + /// Transport driver: "inmemory" or "redis". + /// </summary> + public string Driver { get; set; } = "inmemory"; + + /// <summary> + /// Primary topic/stream name for fact updates. + /// </summary> + public string Stream { get; set; } = "signals.fact.updated.v1"; + + /// <summary> + /// Dead-letter topic/stream used when publishing fails. + /// </summary> + public string DeadLetterStream { get; set; } = "signals.fact.updated.dlq"; + + /// <summary> + /// Connection string for Redis streams (when Driver=redis). + /// </summary> + public string? ConnectionString { get; set; } + + /// <summary> + /// Optional publish timeout (seconds). Set to 0 to disable. + /// </summary> + public int PublishTimeoutSeconds { get; set; } = 5; + + /// <summary> + /// Approximate maximum stream length (capped by Redis trimming). + /// </summary> + public long MaxStreamLength { get; set; } = 10_000; + + /// <summary> + /// Producer identifier for observability payloads. + /// </summary> + public string Producer { get; set; } = "StellaOps.Signals"; + + /// <summary> + /// Pipeline name attached to event metadata. + /// </summary> + public string Pipeline { get; set; } = "signals"; + + /// <summary> + /// Optional release string to stamp events with build provenance. + /// </summary> + public string? Release { get; set; } + + /// <summary> + /// Default tenant when none is supplied in metadata. + /// </summary> + public string DefaultTenant { get; set; } = "tenant-default"; + + public void Validate() + { + var normalizedDriver = Driver?.Trim(); + if (string.IsNullOrWhiteSpace(normalizedDriver)) + { + throw new InvalidOperationException("Signals events driver is required."); + } + + if (!string.Equals(normalizedDriver, "redis", StringComparison.OrdinalIgnoreCase) + && !string.Equals(normalizedDriver, "inmemory", StringComparison.OrdinalIgnoreCase)) + { + throw new InvalidOperationException("Signals events driver must be 'redis' or 'inmemory'."); + } + + if (string.IsNullOrWhiteSpace(Stream)) + { + throw new InvalidOperationException("Signals events stream/topic is required."); + } + + if (PublishTimeoutSeconds < 0) + { + throw new InvalidOperationException("Signals events publish timeout must be >= 0 seconds."); + } + + if (MaxStreamLength < 0) + { + throw new InvalidOperationException("Signals events max stream length must be >= 0."); + } + + if (string.IsNullOrWhiteSpace(DefaultTenant)) + { + throw new InvalidOperationException("Signals events default tenant is required."); + } + + if (string.Equals(normalizedDriver, "redis", StringComparison.OrdinalIgnoreCase) + && string.IsNullOrWhiteSpace(ConnectionString)) + { + throw new InvalidOperationException("Signals events Redis driver requires ConnectionString."); + } + } +} diff --git a/src/Signals/StellaOps.Signals/Options/SignalsOptions.cs b/src/Signals/StellaOps.Signals/Options/SignalsOptions.cs index cbd7dad2b..fac8022f3 100644 --- a/src/Signals/StellaOps.Signals/Options/SignalsOptions.cs +++ b/src/Signals/StellaOps.Signals/Options/SignalsOptions.cs @@ -1,26 +1,26 @@ -namespace StellaOps.Signals.Options; - -/// <summary> -/// Root configuration for the Signals service. -/// </summary> -public sealed class SignalsOptions -{ - /// <summary> - /// Configuration section name. - /// </summary> - public const string SectionName = "Signals"; - - /// <summary> - /// Authority integration settings. - /// </summary> - public SignalsAuthorityOptions Authority { get; } = new(); - - /// <summary> - /// MongoDB configuration. 
- /// </summary> - public SignalsMongoOptions Mongo { get; } = new(); - - /// <summary> +namespace StellaOps.Signals.Options; + +/// <summary> +/// Root configuration for the Signals service. +/// </summary> +public sealed class SignalsOptions +{ + /// <summary> + /// Configuration section name. + /// </summary> + public const string SectionName = "Signals"; + + /// <summary> + /// Authority integration settings. + /// </summary> + public SignalsAuthorityOptions Authority { get; } = new(); + + /// <summary> + /// MongoDB configuration. + /// </summary> + public SignalsMongoOptions Mongo { get; } = new(); + + /// <summary> /// Artifact storage configuration. /// </summary> public SignalsArtifactStorageOptions Storage { get; } = new(); @@ -40,22 +40,28 @@ public sealed class SignalsOptions /// </summary> public SignalsCacheOptions Cache { get; } = new(); + /// <summary> + /// Event transport configuration. + /// </summary> + public SignalsEventsOptions Events { get; } = new(); + /// <summary> /// OpenAPI exposure (if enabled). /// </summary> public SignalsOpenApiOptions OpenApi { get; } = new(); - - /// <summary> - /// Validates configured options. - /// </summary> - public void Validate() - { + + /// <summary> + /// Validates configured options. + /// </summary> + public void Validate() + { Authority.Validate(); Mongo.Validate(); Storage.Validate(); AirGap.Validate(); Scoring.Validate(); Cache.Validate(); + Events.Validate(); OpenApi.Validate(); } } diff --git a/src/Signals/StellaOps.Signals/Program.cs b/src/Signals/StellaOps.Signals/Program.cs index fc7f6e0be..49ec8d141 100644 --- a/src/Signals/StellaOps.Signals/Program.cs +++ b/src/Signals/StellaOps.Signals/Program.cs @@ -125,6 +125,7 @@ builder.Services.AddSingleton>(sp => }); builder.Services.AddSingleton(); +builder.Services.AddSingleton(); // Configure callgraph artifact storage based on driver if (bootstrap.Storage.IsRustFsDriver()) @@ -165,7 +166,31 @@ builder.Services.AddSingleton(sp => { var options = sp.GetRequiredService<IOptions<SignalsOptions>>().Value; return new RedisReachabilityCache(options.Cache); }); -builder.Services.AddSingleton(); +builder.Services.AddSingleton(); +builder.Services.AddSingleton(); +builder.Services.AddSingleton(sp => +{ + var options = sp.GetRequiredService<SignalsOptions>(); + var eventBuilder = sp.GetRequiredService(); + + if (!options.Events.Enabled) + { + return new NullEventsPublisher(); + } + + if (string.Equals(options.Events.Driver, "redis", StringComparison.OrdinalIgnoreCase)) + { + return new RedisEventsPublisher( + options, + sp.GetRequiredService(), + eventBuilder, + sp.GetRequiredService<ILogger<RedisEventsPublisher>>()); + } + + return new InMemoryEventsPublisher( + sp.GetRequiredService<ILogger<InMemoryEventsPublisher>>(), + eventBuilder); +}); builder.Services.AddSingleton(sp => { var inner = sp.GetRequiredService(); diff --git a/src/Signals/StellaOps.Signals/Services/CallgraphIngestionService.cs b/src/Signals/StellaOps.Signals/Services/CallgraphIngestionService.cs index 825764869..5251fbb30 100644 --- a/src/Signals/StellaOps.Signals/Services/CallgraphIngestionService.cs +++ b/src/Signals/StellaOps.Signals/Services/CallgraphIngestionService.cs @@ -1,72 +1,76 @@ -using System; -using System.Collections.Generic; -using System.IO; using System.Security.Cryptography; using System.Text; using System.Text.Json; using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Options; -using StellaOps.Signals.Models; -using StellaOps.Signals.Options; -using StellaOps.Signals.Parsing; -using StellaOps.Signals.Persistence; -using StellaOps.Signals.Storage; -using StellaOps.Signals.Storage.Models; - -namespace StellaOps.Signals.Services; - -internal sealed class 
diff --git a/src/Signals/StellaOps.Signals/Services/CallgraphIngestionService.cs b/src/Signals/StellaOps.Signals/Services/CallgraphIngestionService.cs index 825764869..5251fbb30 100644 --- a/src/Signals/StellaOps.Signals/Services/CallgraphIngestionService.cs +++ b/src/Signals/StellaOps.Signals/Services/CallgraphIngestionService.cs @@ -1,72 +1,76 @@ -using System; -using System.Collections.Generic; -using System.IO; using System.Security.Cryptography; using System.Text; using System.Text.Json; using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Options; -using StellaOps.Signals.Models; -using StellaOps.Signals.Options; -using StellaOps.Signals.Parsing; -using StellaOps.Signals.Persistence; -using StellaOps.Signals.Storage; -using StellaOps.Signals.Storage.Models; - -namespace StellaOps.Signals.Services; - -internal sealed class CallgraphIngestionService : ICallgraphIngestionService -{ - private static readonly HashSet<string> AllowedContentTypes = new(StringComparer.OrdinalIgnoreCase) - { - "application/json", - "application/vnd.stellaops.callgraph+json" - }; - - private readonly ICallgraphParserResolver parserResolver; - private readonly ICallgraphArtifactStore artifactStore; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using StellaOps.Signals.Models; +using StellaOps.Signals.Options; +using StellaOps.Signals.Parsing; +using StellaOps.Signals.Persistence; +using StellaOps.Signals.Storage; +using StellaOps.Signals.Storage.Models; + +namespace StellaOps.Signals.Services; + +internal sealed class CallgraphIngestionService : ICallgraphIngestionService +{ + private static readonly HashSet<string> AllowedContentTypes = new(StringComparer.OrdinalIgnoreCase) + { + "application/json", + "application/vnd.stellaops.callgraph+json" + }; + + private readonly ICallgraphParserResolver parserResolver; + private readonly ICallgraphArtifactStore artifactStore; private readonly ICallgraphRepository repository; + private readonly ICallgraphNormalizationService normalizer; private readonly ILogger<CallgraphIngestionService> logger; private readonly SignalsOptions options; private readonly TimeProvider timeProvider; private static readonly JsonSerializerOptions ManifestSerializerOptions = new(JsonSerializerDefaults.Web); - - public CallgraphIngestionService( - ICallgraphParserResolver parserResolver, - ICallgraphArtifactStore artifactStore, - ICallgraphRepository repository, - IOptions<SignalsOptions> options, - TimeProvider timeProvider, - ILogger<CallgraphIngestionService> logger) - { - this.parserResolver = parserResolver ?? throw new ArgumentNullException(nameof(parserResolver)); - this.artifactStore = artifactStore ?? throw new ArgumentNullException(nameof(artifactStore)); - this.repository = repository ?? throw new ArgumentNullException(nameof(repository)); - this.logger = logger ?? throw new ArgumentNullException(nameof(logger)); - this.timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider)); - this.options = options?.Value ?? throw new ArgumentNullException(nameof(options)); - } - - public async Task<CallgraphIngestResponse> IngestAsync(CallgraphIngestRequest request, CancellationToken cancellationToken) - { - ValidateRequest(request); - - var parser = parserResolver.Resolve(request.Language); - + + public CallgraphIngestionService( + ICallgraphParserResolver parserResolver, + ICallgraphArtifactStore artifactStore, + ICallgraphRepository repository, + ICallgraphNormalizationService normalizer, + IOptions<SignalsOptions> options, + TimeProvider timeProvider, + ILogger<CallgraphIngestionService> logger) + { + this.parserResolver = parserResolver ?? throw new ArgumentNullException(nameof(parserResolver)); + this.artifactStore = artifactStore ?? throw new ArgumentNullException(nameof(artifactStore)); + this.repository = repository ?? throw new ArgumentNullException(nameof(repository)); + this.normalizer = normalizer ?? throw new ArgumentNullException(nameof(normalizer)); + this.logger = logger ?? throw new ArgumentNullException(nameof(logger)); + this.timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider)); + this.options = options?.Value ??
throw new ArgumentNullException(nameof(options)); + } + + public async Task<CallgraphIngestResponse> IngestAsync(CallgraphIngestRequest request, CancellationToken cancellationToken) + { + ValidateRequest(request); + + var parser = parserResolver.Resolve(request.Language); + var artifactBytes = Convert.FromBase64String(request.ArtifactContentBase64); await using var parseStream = new MemoryStream(artifactBytes, writable: false); - var parseResult = await parser.ParseAsync(parseStream, cancellationToken).ConfigureAwait(false); + var parsed = await parser.ParseAsync(parseStream, cancellationToken).ConfigureAwait(false); + var normalized = normalizer.Normalize(parser.Language, parsed); var schemaVersion = !string.IsNullOrWhiteSpace(request.SchemaVersion) ? request.SchemaVersion! - : parseResult.SchemaVersion; - var analyzerMeta = request.Analyzer ?? parseResult.Analyzer; + : normalized.SchemaVersion; + var analyzerMeta = request.Analyzer ?? normalized.Analyzer; parseStream.Position = 0; var artifactHash = ComputeSha256(artifactBytes); - var graphHash = ComputeGraphHash(parseResult); + var graphHash = ComputeGraphHash(normalized); var manifest = new CallgraphManifest { @@ -76,9 +80,9 @@ internal sealed class CallgraphIngestionService : ICallgraphIngestionService ArtifactHash = artifactHash, GraphHash = graphHash, SchemaVersion = schemaVersion, - NodeCount = parseResult.Nodes.Count, - EdgeCount = parseResult.Edges.Count, - RootCount = parseResult.Roots.Count, + NodeCount = normalized.Nodes.Count, + EdgeCount = normalized.Edges.Count, + RootCount = normalized.Roots.Count, CreatedAt = timeProvider.GetUtcNow() }; @@ -98,15 +102,15 @@ internal sealed class CallgraphIngestionService : ICallgraphIngestionService manifestStream), parseStream, cancellationToken).ConfigureAwait(false); - - var document = new CallgraphDocument - { + + var document = new CallgraphDocument + { Language = parser.Language, Component = request.Component, Version = request.Version, - Nodes = new List<CallgraphNode>(parseResult.Nodes), - Edges = new List<CallgraphEdge>(parseResult.Edges), - Roots = new List<CallgraphRoot>(parseResult.Roots), + Nodes = new List<CallgraphNode>(normalized.Nodes), + Edges = new List<CallgraphEdge>(normalized.Edges), + Roots = new List<CallgraphRoot>(normalized.Roots), Metadata = request.Metadata is null ?
null : new Dictionary<string, string>(request.Metadata, StringComparer.OrdinalIgnoreCase), @@ -125,7 +129,7 @@ internal sealed class CallgraphIngestionService : ICallgraphIngestionService }; document.Metadata ??= new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase); - document.Metadata["formatVersion"] = parseResult.FormatVersion; + document.Metadata["formatVersion"] = normalized.FormatVersion; document.Metadata["schemaVersion"] = schemaVersion; if (analyzerMeta is not null) { @@ -138,16 +142,16 @@ internal sealed class CallgraphIngestionService : ICallgraphIngestionService document.SchemaVersion = schemaVersion; document = await repository.UpsertAsync(document, cancellationToken).ConfigureAwait(false); - - logger.LogInformation( - "Ingested callgraph {Language}:{Component}:{Version} (id={Id}) with {NodeCount} nodes and {EdgeCount} edges.", - document.Language, - document.Component, - document.Version, - document.Id, - document.Nodes.Count, - document.Edges.Count); - + + logger.LogInformation( + "Ingested callgraph {Language}:{Component}:{Version} (id={Id}) with {NodeCount} nodes and {EdgeCount} edges.", + document.Language, + document.Component, + document.Version, + document.Id, + document.Nodes.Count, + document.Edges.Count); + return new CallgraphIngestResponse( document.Id, document.Artifact.Path, @@ -160,42 +164,42 @@ internal sealed class CallgraphIngestionService : ICallgraphIngestionService document.Edges.Count, document.Roots?.Count ?? 0); } - - private static void ValidateRequest(CallgraphIngestRequest request) - { - ArgumentNullException.ThrowIfNull(request); - - if (string.IsNullOrWhiteSpace(request.Language)) - { - throw new CallgraphIngestionValidationException("Language is required."); - } - - if (string.IsNullOrWhiteSpace(request.Component)) - { - throw new CallgraphIngestionValidationException("Component is required."); - } - - if (string.IsNullOrWhiteSpace(request.Version)) - { - throw new CallgraphIngestionValidationException("Version is required."); - } - - if (string.IsNullOrWhiteSpace(request.ArtifactContentBase64)) - { - throw new CallgraphIngestionValidationException("Artifact content is required."); - } - - if (string.IsNullOrWhiteSpace(request.ArtifactFileName)) - { - throw new CallgraphIngestionValidationException("Artifact file name is required."); - } - - if (string.IsNullOrWhiteSpace(request.ArtifactContentType) || !AllowedContentTypes.Contains(request.ArtifactContentType)) - { - throw new CallgraphIngestionValidationException($"Unsupported artifact content type '{request.ArtifactContentType}'."); - } - } - + + private static void ValidateRequest(CallgraphIngestRequest request) + { + ArgumentNullException.ThrowIfNull(request); + + if (string.IsNullOrWhiteSpace(request.Language)) + { + throw new CallgraphIngestionValidationException("Language is required."); + } + + if (string.IsNullOrWhiteSpace(request.Component)) + { + throw new CallgraphIngestionValidationException("Component is required."); + } + + if (string.IsNullOrWhiteSpace(request.Version)) + { + throw new CallgraphIngestionValidationException("Version is required."); + } + + if (string.IsNullOrWhiteSpace(request.ArtifactContentBase64)) + { + throw new CallgraphIngestionValidationException("Artifact content is required."); + } + + if (string.IsNullOrWhiteSpace(request.ArtifactFileName)) + { + throw new CallgraphIngestionValidationException("Artifact file name is required."); + } + + if (string.IsNullOrWhiteSpace(request.ArtifactContentType) || !AllowedContentTypes.Contains(request.ArtifactContentType)) + { + throw new
CallgraphIngestionValidationException($"Unsupported artifact content type '{request.ArtifactContentType}'."); + } + } + private static string ComputeSha256(ReadOnlySpan buffer) { Span hash = stackalloc byte[SHA256.HashSizeInBytes]; @@ -274,13 +278,13 @@ internal sealed class CallgraphIngestionService : ICallgraphIngestionService return ordered.ToString(); } } - -/// -/// Exception thrown when the ingestion request is invalid. -/// -public sealed class CallgraphIngestionValidationException : Exception -{ - public CallgraphIngestionValidationException(string message) : base(message) - { - } -} + +/// +/// Exception thrown when the ingestion request is invalid. +/// +public sealed class CallgraphIngestionValidationException : Exception +{ + public CallgraphIngestionValidationException(string message) : base(message) + { + } +} diff --git a/src/Signals/StellaOps.Signals/Services/CallgraphNormalizationService.cs b/src/Signals/StellaOps.Signals/Services/CallgraphNormalizationService.cs new file mode 100644 index 000000000..4522e076b --- /dev/null +++ b/src/Signals/StellaOps.Signals/Services/CallgraphNormalizationService.cs @@ -0,0 +1,248 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using StellaOps.Signals.Models; +using StellaOps.Signals.Parsing; + +namespace StellaOps.Signals.Services; + +internal interface ICallgraphNormalizationService +{ + CallgraphParseResult Normalize(string language, CallgraphParseResult result); +} + +/// +/// Normalizes language-specific callgraphs into deterministic graph documents. +/// +internal sealed class CallgraphNormalizationService : ICallgraphNormalizationService +{ + public CallgraphParseResult Normalize(string language, CallgraphParseResult result) + { + ArgumentException.ThrowIfNullOrWhiteSpace(language); + ArgumentNullException.ThrowIfNull(result); + + var normalizedLanguage = language.Trim(); + var nodesById = new Dictionary(StringComparer.Ordinal); + + foreach (var node in result.Nodes ?? Array.Empty()) + { + var normalizedNode = NormalizeNode(node, normalizedLanguage); + if (!nodesById.ContainsKey(normalizedNode.Id)) + { + nodesById[normalizedNode.Id] = normalizedNode; + } + } + + var edges = NormalizeEdges(result.Edges, nodesById); + var roots = NormalizeRoots(result.Roots); + + return new CallgraphParseResult( + Nodes: nodesById.Values.OrderBy(n => n.Id, StringComparer.Ordinal).ToList(), + Edges: edges, + Roots: roots, + FormatVersion: string.IsNullOrWhiteSpace(result.FormatVersion) ? "1.0" : result.FormatVersion.Trim(), + SchemaVersion: string.IsNullOrWhiteSpace(result.SchemaVersion) ? "1.0" : result.SchemaVersion.Trim(), + Analyzer: result.Analyzer); + } + + private static CallgraphNode NormalizeNode(CallgraphNode node, string language) + { + var id = node.Id?.Trim(); + if (string.IsNullOrWhiteSpace(id)) + { + throw new CallgraphParserValidationException("Callgraph node is missing an id."); + } + + var name = string.IsNullOrWhiteSpace(node.Name) ? id : node.Name.Trim(); + var kind = string.IsNullOrWhiteSpace(node.Kind) ? "function" : node.Kind.Trim(); + var normalizedLanguage = string.IsNullOrWhiteSpace(node.Language) ? language : node.Language.Trim(); + + var ns = string.IsNullOrWhiteSpace(node.Namespace) + ? 
DeriveNamespace(id, node.File, normalizedLanguage) + : node.Namespace!.Trim(); + + return node with + { + Id = id, + Name = name, + Kind = kind, + Namespace = ns, + File = node.File?.Trim(), + Purl = NormalizePurl(node.Purl), + SymbolDigest = NormalizeDigest(node.SymbolDigest), + BuildId = node.BuildId?.Trim(), + Language = normalizedLanguage, + Evidence = NormalizeList(node.Evidence), + Analyzer = NormalizeDict(node.Analyzer), + CodeId = node.CodeId?.Trim() + }; + } + + private static IReadOnlyList<CallgraphEdge> NormalizeEdges( + IReadOnlyList<CallgraphEdge>? edges, + IReadOnlyDictionary<string, CallgraphNode> nodes) + { + var list = new List<CallgraphEdge>(); + var seen = new HashSet<string>(StringComparer.Ordinal); + + foreach (var edge in edges ?? Array.Empty<CallgraphEdge>()) + { + var source = edge.SourceId?.Trim(); + var target = edge.TargetId?.Trim(); + if (string.IsNullOrWhiteSpace(source) || string.IsNullOrWhiteSpace(target)) + { + continue; + } + + if (!nodes.ContainsKey(source) || !nodes.ContainsKey(target)) + { + continue; + } + + var type = string.IsNullOrWhiteSpace(edge.Type) ? "call" : edge.Type.Trim(); + var key = $"{source}|{target}|{type}"; + if (!seen.Add(key)) + { + continue; + } + + list.Add(edge with + { + SourceId = source, + TargetId = target, + Type = type, + Purl = NormalizePurl(edge.Purl), + SymbolDigest = NormalizeDigest(edge.SymbolDigest), + Confidence = ClampConfidence(edge.Confidence), + Candidates = NormalizeList(edge.Candidates), + Evidence = NormalizeList(edge.Evidence) + }); + } + + return list.OrderBy(e => e.SourceId, StringComparer.Ordinal) + .ThenBy(e => e.TargetId, StringComparer.Ordinal) + .ThenBy(e => e.Type, StringComparer.Ordinal) + .ToList(); + } + + private static IReadOnlyList<CallgraphRoot> NormalizeRoots(IReadOnlyList<CallgraphRoot>? roots) + { + var list = new List<CallgraphRoot>(); + var seen = new HashSet<string>(StringComparer.Ordinal); + + foreach (var root in roots ?? Array.Empty<CallgraphRoot>()) + { + var id = root.Id?.Trim(); + if (string.IsNullOrWhiteSpace(id)) + { + continue; + } + + var normalized = new CallgraphRoot( + id, + string.IsNullOrWhiteSpace(root.Phase) ? "runtime" : root.Phase.Trim(), + root.Source?.Trim()); + + if (seen.Add($"{normalized.Id}|{normalized.Phase}|{normalized.Source}")) + { + list.Add(normalized); + } + } + + return list.OrderBy(r => r.Id, StringComparer.Ordinal) + .ThenBy(r => r.Phase, StringComparer.Ordinal) + .ToList(); + } + + private static string? DeriveNamespace(string id, string? file, string language) + { + if (string.Equals(language, "java", StringComparison.OrdinalIgnoreCase)) + { + var candidate = id.Replace('/', '.'); + var lastDot = candidate.LastIndexOf('.'); + if (lastDot > 0) + { + return candidate[..lastDot]; + } + } + + if (string.Equals(language, "go", StringComparison.OrdinalIgnoreCase) || + string.Equals(language, "nodejs", StringComparison.OrdinalIgnoreCase) || + string.Equals(language, "python", StringComparison.OrdinalIgnoreCase)) + { + if (!string.IsNullOrWhiteSpace(file)) + { + var normalizedPath = file.Replace('\\', '/'); + var idx = normalizedPath.LastIndexOf('/'); + if (idx > 0) + { + return normalizedPath[..idx]; + } + } + + var sepIdx = id.LastIndexOfAny(new[] { '.', '/', ':' }); + if (sepIdx > 0) + { + return id[..sepIdx]; + } + } + + return null; + } + + private static string? NormalizePurl(string? value) + { + return string.IsNullOrWhiteSpace(value) ? null : value.Trim().ToLowerInvariant(); + } + + private static string? NormalizeDigest(string? value) + { + return string.IsNullOrWhiteSpace(value) ? null : value.Trim().ToLowerInvariant(); + } + + private static double? ClampConfidence(double?
confidence) + { + if (!confidence.HasValue) + { + return null; + } + + return Math.Clamp(confidence.Value, 0.0, 1.0); + } + + private static IReadOnlyList<string>? NormalizeList(IReadOnlyList<string>? values) + { + if (values is null) + { + return null; + } + + return values + .Where(v => !string.IsNullOrWhiteSpace(v)) + .Select(v => v.Trim()) + .Distinct(StringComparer.Ordinal) + .OrderBy(v => v, StringComparer.Ordinal) + .ToList(); + } + + private static IReadOnlyDictionary<string, string?>? NormalizeDict(IReadOnlyDictionary<string, string?>? values) + { + if (values is null) + { + return null; + } + + var dict = new Dictionary<string, string?>(StringComparer.Ordinal); + foreach (var kv in values) + { + if (string.IsNullOrWhiteSpace(kv.Key)) + { + continue; + } + + dict[kv.Key.Trim()] = kv.Value?.Trim(); + } + + return dict.Count == 0 ? null : dict; + } +} diff --git a/src/Signals/StellaOps.Signals/Services/IRedisConnectionFactory.cs b/src/Signals/StellaOps.Signals/Services/IRedisConnectionFactory.cs new file mode 100644 index 000000000..b108b8570 --- /dev/null +++ b/src/Signals/StellaOps.Signals/Services/IRedisConnectionFactory.cs @@ -0,0 +1,10 @@ +using System.Threading; +using System.Threading.Tasks; +using StackExchange.Redis; + +namespace StellaOps.Signals.Services; + +internal interface IRedisConnectionFactory +{ + Task<IConnectionMultiplexer> ConnectAsync(ConfigurationOptions options, CancellationToken cancellationToken); +}
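Normalization is what keeps graph hashes stable across runs; a small sketch mirroring the unit tests later in this patch (types as declared above):

var normalizer = new CallgraphNormalizationService();
var parsed = new CallgraphParseResult(
    Nodes: new[] { new CallgraphNode("com/example/Foo.bar:(I)V", "", "", null, null, null) },
    Edges: Array.Empty<CallgraphEdge>(),
    Roots: Array.Empty<CallgraphRoot>(),
    FormatVersion: "1.0", SchemaVersion: "1.0", Analyzer: null);
var normalized = normalizer.Normalize("java", parsed);
// normalized.Nodes[0].Namespace == "com.example.Foo" (derived from the id)
// normalized.Nodes[0].Kind == "function" (default applied)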
diff --git a/src/Signals/StellaOps.Signals/Services/InMemoryEventsPublisher.cs b/src/Signals/StellaOps.Signals/Services/InMemoryEventsPublisher.cs index b93ac2862..97374d340 100644 --- a/src/Signals/StellaOps.Signals/Services/InMemoryEventsPublisher.cs +++ b/src/Signals/StellaOps.Signals/Services/InMemoryEventsPublisher.cs @@ -1,11 +1,9 @@ using System; -using System.Linq; using System.Text.Json; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; using StellaOps.Signals.Models; -using StellaOps.Signals.Options; namespace StellaOps.Signals.Services; @@ -15,65 +13,27 @@ namespace StellaOps.Signals.Services; internal sealed class InMemoryEventsPublisher : IEventsPublisher { private readonly ILogger<InMemoryEventsPublisher> logger; - private readonly string topic; + private readonly ReachabilityFactEventBuilder eventBuilder; + private static readonly JsonSerializerOptions SerializerOptions = new(JsonSerializerDefaults.Web) + { + WriteIndented = false, + DefaultIgnoreCondition = System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull + }; - public InMemoryEventsPublisher(ILogger<InMemoryEventsPublisher> logger, SignalsOptions options) + public InMemoryEventsPublisher(ILogger<InMemoryEventsPublisher> logger, ReachabilityFactEventBuilder eventBuilder) { this.logger = logger; - topic = string.IsNullOrWhiteSpace(options?.AirGap?.EventTopic) - ? "signals.fact.updated" - : options!.AirGap.EventTopic!; + this.eventBuilder = eventBuilder ?? throw new ArgumentNullException(nameof(eventBuilder)); } public Task PublishFactUpdatedAsync(ReachabilityFactDocument fact, CancellationToken cancellationToken) { ArgumentNullException.ThrowIfNull(fact); - var (reachable, unreachable) = CountStates(fact); - var runtimeFactsCount = fact.RuntimeFacts?.Count ?? 0; - var avgConfidence = fact.States.Count > 0 ? fact.States.Average(s => s.Confidence) : 0; - var score = fact.Score; - var unknownsCount = fact.UnknownsCount; - var unknownsPressure = fact.UnknownsPressure; - var topBucket = fact.States.Count > 0 - ? fact.States - .GroupBy(s => s.Bucket, StringComparer.OrdinalIgnoreCase) - .OrderByDescending(g => g.Count()) - .ThenByDescending(g => g.Average(s => s.Weight)) - .First() - : null; - var payload = new ReachabilityFactUpdatedEvent( - Version: "signals.fact.updated@v1", - SubjectKey: fact.SubjectKey, - CallgraphId: string.IsNullOrWhiteSpace(fact.CallgraphId) ? null : fact.CallgraphId, - OccurredAtUtc: DateTimeOffset.UtcNow, - ReachableCount: reachable, - UnreachableCount: unreachable, - RuntimeFactsCount: runtimeFactsCount, - Bucket: topBucket?.Key ?? "unknown", - Weight: topBucket?.Average(s => s.Weight) ?? 0, - StateCount: fact.States.Count, - FactScore: score, - UnknownsCount: unknownsCount, - UnknownsPressure: unknownsPressure, - AverageConfidence: avgConfidence, - ComputedAtUtc: fact.ComputedAt, - Targets: fact.States.Select(s => s.Target).ToArray()); + var envelope = eventBuilder.Build(fact); + var json = JsonSerializer.Serialize(envelope, SerializerOptions); - var json = JsonSerializer.Serialize(payload, new JsonSerializerOptions(JsonSerializerDefaults.Web)); - logger.LogInformation("{Topic} {Payload}", topic, json); + logger.LogInformation("{Payload}", json); return Task.CompletedTask; } - - private static (int reachable, int unreachable) CountStates(ReachabilityFactDocument fact) - { - if (fact.States is null || fact.States.Count == 0) - { - return (0, 0); - } - - var reachable = fact.States.Count(state => state.Reachable); - var unreachable = fact.States.Count - reachable; - return (reachable, unreachable); - } } diff --git a/src/Signals/StellaOps.Signals/Services/NullEventsPublisher.cs b/src/Signals/StellaOps.Signals/Services/NullEventsPublisher.cs new file mode 100644 index 000000000..fdf1c5ff9 --- /dev/null +++ b/src/Signals/StellaOps.Signals/Services/NullEventsPublisher.cs @@ -0,0 +1,10 @@ +using System.Threading; +using System.Threading.Tasks; +using StellaOps.Signals.Models; + +namespace StellaOps.Signals.Services; + +internal sealed class NullEventsPublisher : IEventsPublisher +{ + public Task PublishFactUpdatedAsync(ReachabilityFactDocument fact, CancellationToken cancellationToken) => Task.CompletedTask; +}
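The calculator below sorts every list and metadata key before hashing, so equivalent facts hash identically regardless of input order; a sketch of the property the tests assert (factOrderedAB and factOrderedBA are stand-ins for two documents differing only in list order):

var digestA = ReachabilityFactDigestCalculator.Compute(factOrderedAB); // "sha256:..." lowercase hex
var digestB = ReachabilityFactDigestCalculator.Compute(factOrderedBA);
// digestA == digestB — canonical serialization erases input ordering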
diff --git a/src/Signals/StellaOps.Signals/Services/ReachabilityFactDigestCalculator.cs b/src/Signals/StellaOps.Signals/Services/ReachabilityFactDigestCalculator.cs new file mode 100644 index 000000000..bd8a8f8ac --- /dev/null +++ b/src/Signals/StellaOps.Signals/Services/ReachabilityFactDigestCalculator.cs @@ -0,0 +1,170 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Security.Cryptography; +using System.Text; +using System.Text.Json; +using StellaOps.Signals.Models; + +namespace StellaOps.Signals.Services; + +internal static class ReachabilityFactDigestCalculator +{ + private static readonly JsonSerializerOptions SerializerOptions = new(JsonSerializerDefaults.Web) + { + WriteIndented = false + }; + + public static string Compute(ReachabilityFactDocument fact) + { + ArgumentNullException.ThrowIfNull(fact); + + var canonical = new CanonicalReachabilityFact( + CallgraphId: fact.CallgraphId ?? string.Empty, + SubjectKey: fact.SubjectKey ?? string.Empty, + Subject: new CanonicalSubject( + fact.Subject?.ImageDigest ?? string.Empty, + fact.Subject?.Component ?? string.Empty, + fact.Subject?.Version ?? string.Empty, + fact.Subject?.ScanId ?? string.Empty), + EntryPoints: NormalizeList(fact.EntryPoints), + States: NormalizeStates(fact.States), + RuntimeFacts: NormalizeRuntimeFacts(fact.RuntimeFacts), + Metadata: NormalizeMetadata(fact.Metadata), + Score: fact.Score, + UnknownsCount: fact.UnknownsCount, + UnknownsPressure: fact.UnknownsPressure, + ComputedAt: fact.ComputedAt); + + var json = JsonSerializer.Serialize(canonical, SerializerOptions); + Span<byte> hash = stackalloc byte[SHA256.HashSizeInBytes]; + SHA256.HashData(Encoding.UTF8.GetBytes(json), hash); + return "sha256:" + Convert.ToHexString(hash).ToLowerInvariant(); + } + + private static List<string> NormalizeList(IEnumerable<string>? values) => + values? + .Where(v => !string.IsNullOrWhiteSpace(v)) + .Select(v => v.Trim()) + .Distinct(StringComparer.Ordinal) + .OrderBy(v => v, StringComparer.Ordinal) + .ToList() ?? new List<string>(); + + private static List<CanonicalState> NormalizeStates(IEnumerable<ReachabilityStateDocument>? states) + { + if (states is null) + { + return new List<CanonicalState>(); + } + + return states + .OrderBy(s => s.Target, StringComparer.Ordinal) + .Select(state => new CanonicalState( + Target: state.Target ?? string.Empty, + Reachable: state.Reachable, + Confidence: state.Confidence, + Bucket: state.Bucket ?? "unknown", + Weight: state.Weight, + Score: state.Score, + Path: NormalizeList(state.Path), + RuntimeHits: NormalizeList(state.Evidence?.RuntimeHits), + BlockedEdges: NormalizeList(state.Evidence?.BlockedEdges))) + .ToList(); + } + + private static List<CanonicalRuntimeFact> NormalizeRuntimeFacts(IEnumerable<ReachabilityRuntimeFactDocument>? facts) + { + if (facts is null) + { + return new List<CanonicalRuntimeFact>(); + } + + return facts + .Select(f => new CanonicalRuntimeFact( + SymbolId: f.SymbolId ?? string.Empty, + CodeId: f.CodeId, + SymbolDigest: f.SymbolDigest, + Purl: f.Purl, + BuildId: f.BuildId, + LoaderBase: f.LoaderBase, + ProcessId: f.ProcessId, + ProcessName: f.ProcessName, + SocketAddress: f.SocketAddress, + ContainerId: f.ContainerId, + EvidenceUri: f.EvidenceUri, + HitCount: f.HitCount, + ObservedAt: f.ObservedAt, + Metadata: NormalizeMetadata(f.Metadata))) + .OrderBy(f => f.SymbolId, StringComparer.Ordinal) + .ThenBy(f => f.CodeId, StringComparer.Ordinal) + .ThenBy(f => f.LoaderBase, StringComparer.Ordinal) + .ToList(); + } + + private static SortedDictionary<string, string?> NormalizeMetadata(IDictionary<string, string>? metadata) + { + var normalized = new SortedDictionary<string, string?>(StringComparer.Ordinal); + if (metadata is null) + { + return normalized; + } + + foreach (var kvp in metadata) + { + if (string.IsNullOrWhiteSpace(kvp.Key)) + { + continue; + } + + normalized[kvp.Key.Trim()] = kvp.Value?.Trim(); + } + + return normalized; + } + + private sealed record CanonicalReachabilityFact( + string CallgraphId, + string SubjectKey, + CanonicalSubject Subject, + List<string> EntryPoints, + List<CanonicalState> States, + List<CanonicalRuntimeFact> RuntimeFacts, + SortedDictionary<string, string?> Metadata, + double Score, + int UnknownsCount, + double UnknownsPressure, + DateTimeOffset ComputedAt); + + private sealed record CanonicalSubject( + string ImageDigest, + string Component, + string Version, + string ScanId); + + private sealed record CanonicalState( + string Target, + bool Reachable, + double Confidence, + string Bucket, + double Weight, + double Score, + List<string> Path, + List<string> RuntimeHits, + List<string> BlockedEdges); + + private sealed record CanonicalRuntimeFact( + string SymbolId, + string? CodeId, + string? SymbolDigest, + string? Purl, + string? BuildId, + string? LoaderBase, + int? ProcessId, + string? ProcessName, + string? SocketAddress, + string? ContainerId, + string? EvidenceUri, + int HitCount, + DateTimeOffset?
ObservedAt, + SortedDictionary Metadata); +} diff --git a/src/Signals/StellaOps.Signals/Services/ReachabilityFactEventBuilder.cs b/src/Signals/StellaOps.Signals/Services/ReachabilityFactEventBuilder.cs new file mode 100644 index 000000000..af8c9d8a4 --- /dev/null +++ b/src/Signals/StellaOps.Signals/Services/ReachabilityFactEventBuilder.cs @@ -0,0 +1,200 @@ +using System; +using System.Collections.Generic; +using System.Globalization; +using System.Linq; +using System.Text.Json.Serialization; +using StellaOps.Signals.Models; +using StellaOps.Signals.Options; + +namespace StellaOps.Signals.Services; + +internal sealed class ReachabilityFactEventBuilder +{ + private readonly SignalsOptions options; + private readonly TimeProvider timeProvider; + + public ReachabilityFactEventBuilder(SignalsOptions options, TimeProvider timeProvider) + { + this.options = options ?? throw new ArgumentNullException(nameof(options)); + this.timeProvider = timeProvider ?? throw new ArgumentNullException(nameof(timeProvider)); + } + + public ReachabilityFactUpdatedEnvelope Build(ReachabilityFactDocument fact) + { + ArgumentNullException.ThrowIfNull(fact); + + var summary = BuildSummary(fact); + var digest = ResolveDigest(fact); + var factVersion = ResolveFactVersion(fact); + + return new ReachabilityFactUpdatedEnvelope( + Topic: ResolveTopic(), + EventId: Guid.NewGuid().ToString("n"), + Version: "signals.fact.updated@v1", + EmittedAtUtc: timeProvider.GetUtcNow(), + Tenant: ResolveTenant(fact), + SubjectKey: fact.SubjectKey, + CallgraphId: string.IsNullOrWhiteSpace(fact.CallgraphId) ? null : fact.CallgraphId, + FactKind: "reachability", + FactVersion: factVersion, + Digest: digest, + ContentType: "application/json", + Producer: new EventProducerMetadata( + Service: options.Events.Producer, + Pipeline: options.Events.Pipeline, + Release: options.Events.Release ?? typeof(Program).Assembly.GetName().Version?.ToString() ?? "unknown"), + Trace: ResolveTrace(fact), + Summary: summary); + } + + private ReachabilityFactUpdatedEvent BuildSummary(ReachabilityFactDocument fact) + { + var (reachable, unreachable) = CountStates(fact); + var runtimeFactsCount = fact.RuntimeFacts?.Count ?? 0; + var avgConfidence = fact.States.Count > 0 ? fact.States.Average(s => s.Confidence) : 0; + var topBucket = fact.States.Count > 0 + ? fact.States + .GroupBy(s => s.Bucket, StringComparer.OrdinalIgnoreCase) + .OrderByDescending(g => g.Count()) + .ThenByDescending(g => g.Average(s => s.Weight)) + .First() + : null; + + return new ReachabilityFactUpdatedEvent( + Version: "signals.fact.updated@v1", + SubjectKey: fact.SubjectKey, + CallgraphId: string.IsNullOrWhiteSpace(fact.CallgraphId) ? null : fact.CallgraphId, + OccurredAtUtc: timeProvider.GetUtcNow(), + ReachableCount: reachable, + UnreachableCount: unreachable, + RuntimeFactsCount: runtimeFactsCount, + Bucket: topBucket?.Key ?? "unknown", + Weight: topBucket?.Average(s => s.Weight) ?? 
0, + StateCount: fact.States.Count, + FactScore: fact.Score, + UnknownsCount: fact.UnknownsCount, + UnknownsPressure: fact.UnknownsPressure, + AverageConfidence: avgConfidence, + ComputedAtUtc: fact.ComputedAt, + Targets: fact.States.Select(s => s.Target).ToArray()); + } + + private static (int reachable, int unreachable) CountStates(ReachabilityFactDocument fact) + { + if (fact.States is null || fact.States.Count == 0) + { + return (0, 0); + } + + var reachable = fact.States.Count(state => state.Reachable); + var unreachable = fact.States.Count - reachable; + return (reachable, unreachable); + } + + private string ResolveTopic() + { + if (!string.IsNullOrWhiteSpace(options.Events.Stream)) + { + return options.Events.Stream; + } + + if (!string.IsNullOrWhiteSpace(options.AirGap.EventTopic)) + { + return options.AirGap.EventTopic!; + } + + return "signals.fact.updated.v1"; + } + + private string ResolveTenant(ReachabilityFactDocument fact) + { + if (fact.Metadata is not null) + { + if (fact.Metadata.TryGetValue("tenant", out var tenant) && !string.IsNullOrWhiteSpace(tenant)) + { + return tenant!; + } + + if (fact.Metadata.TryGetValue("subject.tenant", out var subjectTenant) && !string.IsNullOrWhiteSpace(subjectTenant)) + { + return subjectTenant!; + } + } + + return options.Events.DefaultTenant; + } + + private static EventTraceMetadata ResolveTrace(ReachabilityFactDocument fact) + { + var metadata = fact.Metadata; + string? traceId = null; + string? spanId = null; + + if (metadata is not null) + { + metadata.TryGetValue("trace_id", out traceId); + metadata.TryGetValue("span_id", out spanId); + + if (string.IsNullOrWhiteSpace(traceId) && metadata.TryGetValue("trace.id", out var dottedTrace)) + { + traceId = dottedTrace; + } + + if (string.IsNullOrWhiteSpace(spanId) && metadata.TryGetValue("trace.parent_span", out var dottedSpan)) + { + spanId = dottedSpan; + } + } + + return new EventTraceMetadata(traceId, spanId); + } + + private static int ResolveFactVersion(ReachabilityFactDocument fact) + { + if (fact.Metadata is not null && + fact.Metadata.TryGetValue("fact.version", out var versionValue) && + int.TryParse(versionValue, NumberStyles.Integer, CultureInfo.InvariantCulture, out var parsed)) + { + return parsed; + } + + return 1; + } + + private static string ResolveDigest(ReachabilityFactDocument fact) + { + if (fact.Metadata is not null && + fact.Metadata.TryGetValue("fact.digest", out var digest) && + !string.IsNullOrWhiteSpace(digest)) + { + return digest!; + } + + return ReachabilityFactDigestCalculator.Compute(fact); + } +} + +public sealed record ReachabilityFactUpdatedEnvelope( + [property: JsonPropertyName("topic")] string Topic, + [property: JsonPropertyName("event_id")] string EventId, + [property: JsonPropertyName("version")] string Version, + [property: JsonPropertyName("emitted_at")] DateTimeOffset EmittedAtUtc, + [property: JsonPropertyName("tenant")] string Tenant, + [property: JsonPropertyName("subject_key")] string SubjectKey, + [property: JsonPropertyName("callgraph_id")] string? 
CallgraphId, + [property: JsonPropertyName("fact_kind")] string FactKind, + [property: JsonPropertyName("fact_version")] int FactVersion, + [property: JsonPropertyName("digest")] string Digest, + [property: JsonPropertyName("content_type")] string ContentType, + [property: JsonPropertyName("producer")] EventProducerMetadata Producer, + [property: JsonPropertyName("trace")] EventTraceMetadata Trace, + [property: JsonPropertyName("summary")] ReachabilityFactUpdatedEvent Summary); + +public sealed record EventProducerMetadata( + [property: JsonPropertyName("service")] string Service, + [property: JsonPropertyName("pipeline")] string Pipeline, + [property: JsonPropertyName("release")] string? Release); + +public sealed record EventTraceMetadata( + [property: JsonPropertyName("trace_id")] string? TraceId, + [property: JsonPropertyName("span_id")] string? SpanId); diff --git a/src/Signals/StellaOps.Signals/Services/ReachabilityScoringService.cs b/src/Signals/StellaOps.Signals/Services/ReachabilityScoringService.cs index 5adda2e6a..7bec23411 100644 --- a/src/Signals/StellaOps.Signals/Services/ReachabilityScoringService.cs +++ b/src/Signals/StellaOps.Signals/Services/ReachabilityScoringService.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Linq; +using System.Globalization; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; @@ -90,7 +91,7 @@ public sealed class ReachabilityScoringService : IReachabilityScoringService } } - var runtimeHits = runtimeHitSet.ToList(); + var runtimeHits = runtimeHitSet.OrderBy(h => h, StringComparer.Ordinal).ToList(); var states = new List(targets.Count); foreach (var target in targets) @@ -108,6 +109,8 @@ public sealed class ReachabilityScoringService : IReachabilityScoringService var score = confidence * weight; + runtimeEvidence = runtimeEvidence.OrderBy(hit => hit, StringComparer.Ordinal).ToList(); + states.Add(new ReachabilityStateDocument { Target = target, @@ -120,11 +123,17 @@ public sealed class ReachabilityScoringService : IReachabilityScoringService Evidence = new ReachabilityEvidenceDocument { RuntimeHits = runtimeEvidence, - BlockedEdges = request.BlockedEdges?.Select(edge => $"{edge.From} -> {edge.To}").ToList() + BlockedEdges = request.BlockedEdges? + .Select(edge => $"{edge.From} -> {edge.To}") + .OrderBy(edge => edge, StringComparer.Ordinal) + .ToList() } }); } + states = states.OrderBy(s => s.Target, StringComparer.Ordinal).ToList(); + entryPoints = entryPoints.OrderBy(ep => ep, StringComparer.Ordinal).ToList(); + var baseScore = states.Count > 0 ? 
states.Average(s => s.Score) : 0; var unknownsCount = await unknownsRepository.CountBySubjectAsync(subjectKey, cancellationToken).ConfigureAwait(false); var pressure = states.Count + unknownsCount == 0 @@ -148,6 +157,22 @@ public sealed class ReachabilityScoringService : IReachabilityScoringService RuntimeFacts = existingFact?.RuntimeFacts }; + document.Metadata ??= new Dictionary(StringComparer.Ordinal); + var priorVersion = 0; + if (existingFact?.Metadata != null + && existingFact.Metadata.TryGetValue("fact.version", out var versionValue) + && int.TryParse(versionValue, NumberStyles.Integer, CultureInfo.InvariantCulture, out var parsedVersion)) + { + priorVersion = parsedVersion; + } + + var nextVersion = priorVersion + 1; + document.Metadata["fact.version"] = nextVersion.ToString(CultureInfo.InvariantCulture); + document.Metadata.Remove("fact.digest"); + document.Metadata.Remove("fact.digest.alg"); + document.Metadata["fact.digest"] = ReachabilityFactDigestCalculator.Compute(document); + document.Metadata["fact.digest.alg"] = "sha256"; + logger.LogInformation("Computed reachability fact for subject {SubjectKey} with {StateCount} targets.", document.SubjectKey, states.Count); var persisted = await factRepository.UpsertAsync(document, cancellationToken).ConfigureAwait(false); await cache.SetAsync(persisted, cancellationToken).ConfigureAwait(false); @@ -266,7 +291,7 @@ public sealed class ReachabilityScoringService : IReachabilityScoringService continue; } - foreach (var neighbor in neighbors) + foreach (var neighbor in neighbors.OrderBy(n => n, StringComparer.Ordinal)) { if (visited.Add(neighbor)) { diff --git a/src/Signals/StellaOps.Signals/Services/ReachabilityUnionIngestionService.cs b/src/Signals/StellaOps.Signals/Services/ReachabilityUnionIngestionService.cs index 6380817f1..50884bd10 100644 --- a/src/Signals/StellaOps.Signals/Services/ReachabilityUnionIngestionService.cs +++ b/src/Signals/StellaOps.Signals/Services/ReachabilityUnionIngestionService.cs @@ -60,8 +60,14 @@ public sealed class ReachabilityUnionIngestionService : IReachabilityUnionIngest } var metaEntry = entries["meta.json"]; - using var metaStream = metaEntry.Open(); - using var metaDoc = await JsonDocument.ParseAsync(metaStream, cancellationToken: cancellationToken).ConfigureAwait(false); + await using var metaBuffer = new MemoryStream(); + await using (var metaStream = metaEntry.Open()) + { + await metaStream.CopyToAsync(metaBuffer, cancellationToken).ConfigureAwait(false); + } + + metaBuffer.Position = 0; + using var metaDoc = await JsonDocument.ParseAsync(metaBuffer, cancellationToken: cancellationToken).ConfigureAwait(false); var metaRoot = metaDoc.RootElement; var filesElement = metaRoot.TryGetProperty("files", out var f) && f.ValueKind == JsonValueKind.Array @@ -77,6 +83,13 @@ public sealed class ReachabilityUnionIngestionService : IReachabilityUnionIngest }) .ToList(); + metaBuffer.Position = 0; + var metaPath = Path.Combine(casRoot, "meta.json"); + await using (var metaDest = File.Create(metaPath)) + { + await metaBuffer.CopyToAsync(metaDest, cancellationToken).ConfigureAwait(false); + } + var filesForResponse = new List(); foreach (var file in recorded) diff --git a/src/Signals/StellaOps.Signals/Services/RedisConnectionFactory.cs b/src/Signals/StellaOps.Signals/Services/RedisConnectionFactory.cs new file mode 100644 index 000000000..6b7943b0b --- /dev/null +++ b/src/Signals/StellaOps.Signals/Services/RedisConnectionFactory.cs @@ -0,0 +1,19 @@ +using System.Threading; +using System.Threading.Tasks; +using 
StackExchange.Redis; + +namespace StellaOps.Signals.Services; + +internal sealed class RedisConnectionFactory : IRedisConnectionFactory +{ + public Task<IConnectionMultiplexer> ConnectAsync(ConfigurationOptions options, CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + return ConnectionMultiplexer.ConnectAsync(options) + .ContinueWith( + t => (IConnectionMultiplexer)t.Result, + cancellationToken, + TaskContinuationOptions.OnlyOnRanToCompletion | TaskContinuationOptions.ExecuteSynchronously, + TaskScheduler.Current); + } +}
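ConnectionMultiplexer.ConnectAsync returns the concrete multiplexer; the ContinueWith above merely downcasts it to the interface so tests can substitute fakes. A minimal usage sketch (endpoint assumed):

IRedisConnectionFactory factory = new RedisConnectionFactory();
var mux = await factory.ConnectAsync(ConfigurationOptions.Parse("localhost:6379"), CancellationToken.None);
var db = mux.GetDatabase(); // the IDatabase the publisher below writes streams to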
diff --git a/src/Signals/StellaOps.Signals/Services/RedisEventsPublisher.cs b/src/Signals/StellaOps.Signals/Services/RedisEventsPublisher.cs new file mode 100644 index 000000000..cdf960686 --- /dev/null +++ b/src/Signals/StellaOps.Signals/Services/RedisEventsPublisher.cs @@ -0,0 +1,185 @@ +using System; +using System.Globalization; +using System.Text.Json; +using System.Text.Json.Serialization; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using StackExchange.Redis; +using StellaOps.Signals.Models; +using StellaOps.Signals.Options; + +namespace StellaOps.Signals.Services; + +internal sealed class RedisEventsPublisher : IEventsPublisher, IAsyncDisposable +{ + private readonly SignalsEventsOptions options; + private readonly ILogger<RedisEventsPublisher> logger; + private readonly IRedisConnectionFactory connectionFactory; + private readonly ReachabilityFactEventBuilder eventBuilder; + private readonly TimeSpan publishTimeout; + private readonly int? maxStreamLength; + private readonly SemaphoreSlim connectionGate = new(1, 1); + private IConnectionMultiplexer? connection; + private bool disposed; + + private static readonly JsonSerializerOptions SerializerOptions = new(JsonSerializerDefaults.Web) + { + WriteIndented = false, + DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull + }; + + public RedisEventsPublisher( + SignalsOptions options, + IRedisConnectionFactory connectionFactory, + ReachabilityFactEventBuilder eventBuilder, + ILogger<RedisEventsPublisher> logger) + { + ArgumentNullException.ThrowIfNull(options); + this.options = options.Events ?? throw new InvalidOperationException("Signals events configuration is required."); + this.connectionFactory = connectionFactory ?? throw new ArgumentNullException(nameof(connectionFactory)); + this.eventBuilder = eventBuilder ?? throw new ArgumentNullException(nameof(eventBuilder)); + this.logger = logger ?? throw new ArgumentNullException(nameof(logger)); + publishTimeout = this.options.PublishTimeoutSeconds > 0 + ? TimeSpan.FromSeconds(this.options.PublishTimeoutSeconds) + : TimeSpan.Zero; + maxStreamLength = this.options.MaxStreamLength > 0 + ? (int)Math.Min(this.options.MaxStreamLength, int.MaxValue) + : null; + } + + public async Task PublishFactUpdatedAsync(ReachabilityFactDocument fact, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(fact); + cancellationToken.ThrowIfCancellationRequested(); + + if (!options.Enabled) + { + return; + } + + var envelope = eventBuilder.Build(fact); + var json = JsonSerializer.Serialize(envelope, SerializerOptions); + + try + { + var database = await GetDatabaseAsync(cancellationToken).ConfigureAwait(false); + + var entries = new[] + { + new NameValueEntry("event", json), + new NameValueEntry("event_id", envelope.EventId), + new NameValueEntry("subject_key", envelope.SubjectKey), + new NameValueEntry("digest", envelope.Digest), + new NameValueEntry("fact_version", envelope.FactVersion.ToString(CultureInfo.InvariantCulture)) + }; + + var publishTask = maxStreamLength.HasValue + ? database.StreamAddAsync(options.Stream, entries, maxLength: maxStreamLength, useApproximateMaxLength: true) + : database.StreamAddAsync(options.Stream, entries); + + if (publishTimeout > TimeSpan.Zero) + { + await publishTask.WaitAsync(publishTimeout, cancellationToken).ConfigureAwait(false); + } + else + { + await publishTask.ConfigureAwait(false); + } + } + catch (Exception ex) + { + logger.LogError(ex, "Failed to publish reachability event to Redis stream {Stream}.", options.Stream); + await TryPublishDeadLetterAsync(json, cancellationToken).ConfigureAwait(false); + } + } + + private async Task<IDatabase> GetDatabaseAsync(CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + + if (connection is { IsConnected: true }) + { + return connection.GetDatabase(); + } + + await connectionGate.WaitAsync(cancellationToken).ConfigureAwait(false); + try + { + if (connection is null || !connection.IsConnected) + { + var configuration = ConfigurationOptions.Parse(options.ConnectionString!); + configuration.AbortOnConnectFail = false; + connection = await connectionFactory.ConnectAsync(configuration, cancellationToken).ConfigureAwait(false); + logger.LogInformation("Connected Signals events publisher to Redis stream {Stream}.", options.Stream); + } + } + finally + { + connectionGate.Release(); + } + + return connection!.GetDatabase(); + } + + private async Task TryPublishDeadLetterAsync(string json, CancellationToken cancellationToken) + { + if (string.IsNullOrWhiteSpace(options.DeadLetterStream) || connection is null || !connection.IsConnected) + { + return; + } + + try + { + var db = connection.GetDatabase(); + var entries = new[] + { + new NameValueEntry("event", json), + new NameValueEntry("error", "publish-failed") + }; + + var dlqTask = maxStreamLength.HasValue + ?
db.StreamAddAsync(options.DeadLetterStream, entries, maxLength: maxStreamLength, useApproximateMaxLength: true) + : db.StreamAddAsync(options.DeadLetterStream, entries); + + if (publishTimeout > TimeSpan.Zero) + { + await dlqTask.WaitAsync(publishTimeout, cancellationToken).ConfigureAwait(false); + } + else + { + await dlqTask.ConfigureAwait(false); + } + } + catch (Exception ex) + { + logger.LogWarning(ex, "Failed to publish reachability event to DLQ stream {Stream}.", options.DeadLetterStream); + } + } + + public async ValueTask DisposeAsync() + { + if (disposed) + { + return; + } + + disposed = true; + + if (connection is not null) + { + try + { + await connection.CloseAsync(); + } + catch (Exception ex) + { + logger.LogDebug(ex, "Error closing Redis events publisher connection."); + } + + connection.Dispose(); + } + + connectionGate.Dispose(); + } +} diff --git a/src/Signals/__Tests/StellaOps.Signals.Tests/CallgraphIngestionServiceTests.cs b/src/Signals/__Tests/StellaOps.Signals.Tests/CallgraphIngestionServiceTests.cs new file mode 100644 index 000000000..e2ca71caa --- /dev/null +++ b/src/Signals/__Tests/StellaOps.Signals.Tests/CallgraphIngestionServiceTests.cs @@ -0,0 +1,181 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Text; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; +using FluentAssertions; +using Microsoft.Extensions.Logging.Abstractions; +using Microsoft.Extensions.Options; +using StellaOps.Signals.Models; +using StellaOps.Signals.Options; +using StellaOps.Signals.Parsing; +using StellaOps.Signals.Persistence; +using StellaOps.Signals.Services; +using StellaOps.Signals.Storage; +using StellaOps.Signals.Storage.Models; +using Xunit; + +namespace StellaOps.Signals.Tests; + +public class CallgraphIngestionServiceTests +{ + private readonly InMemoryCallgraphRepository _repository = new(); + private readonly InMemoryArtifactStore _artifactStore = new(); + private readonly CallgraphNormalizationService _normalizer = new(); + private readonly TimeProvider _timeProvider = TimeProvider.System; + + [Fact] + public async Task IngestAsync_normalizes_graph_and_persists_manifest_hash() + { + var parser = new StubParser("java"); + var resolver = new StubParserResolver(parser); + var options = Microsoft.Extensions.Options.Options.Create(new SignalsOptions()); + var service = new CallgraphIngestionService( + resolver, + _artifactStore, + _repository, + _normalizer, + options, + _timeProvider, + NullLogger.Instance); + + var artifactJson = @"{""nodes"":[{""id"":""com/example/Foo.bar:(I)V"",""kind"":""fn""}], +""edges"":[{""source"":""com/example/Foo.bar:(I)V"",""target"":""com/example/Foo.bar:(I)V""}]}"; + + var request = new CallgraphIngestRequest( + Language: "java", + Component: "demo", + Version: "1.0.0", + ArtifactFileName: "graph.json", + ArtifactContentType: "application/json", + ArtifactContentBase64: Convert.ToBase64String(Encoding.UTF8.GetBytes(artifactJson)), + SchemaVersion: null, + Metadata: new Dictionary { ["source"] = "test" }, + Analyzer: new Dictionary { ["name"] = "stub" }); + + var response = await service.IngestAsync(request, CancellationToken.None); + + response.CallgraphId.Should().NotBeNullOrWhiteSpace(); + response.GraphHash.Should().NotBeNullOrWhiteSpace(); + response.NodeCount.Should().Be(1); + response.EdgeCount.Should().Be(1); + response.ManifestCasUri.Should().Be("cas://signals/manifests/graph.json"); + + var stored = _repository.LastUpserted!; + 
stored.Artifact.Hash.Should().Be(response.ArtifactHash); + stored.Nodes[0].Namespace.Should().Be("com.example.Foo"); + stored.Nodes[0].Language.Should().Be("java"); + stored.Metadata!["schemaVersion"].Should().Be("1.0"); + stored.Metadata!["analyzer.name"].Should().Be("stub"); + stored.Artifact.GraphHash.Should().Be(response.GraphHash); + } + + private sealed class StubParser : ICallgraphParser + { + public StubParser(string language) => Language = language; + public string Language { get; } + public Task ParseAsync(Stream artifactStream, CancellationToken cancellationToken) + { + artifactStream.Position = 0; + using var doc = JsonDocument.Parse(artifactStream); + var nodes = new List(); + foreach (var node in doc.RootElement.GetProperty("nodes").EnumerateArray()) + { + nodes.Add(new CallgraphNode(node.GetProperty("id").GetString()!, "", "function", null, null, null)); + } + + var edges = new List(); + foreach (var edge in doc.RootElement.GetProperty("edges").EnumerateArray()) + { + edges.Add(new CallgraphEdge( + edge.GetProperty("source").GetString()!, + edge.GetProperty("target").GetString()!, + "call")); + } + + return Task.FromResult(new CallgraphParseResult(nodes, edges, Array.Empty(), "1.0", "1.0", null)); + } + } + + private sealed class StubParserResolver : ICallgraphParserResolver + { + private readonly ICallgraphParser _parser; + public StubParserResolver(ICallgraphParser parser) => _parser = parser; + public ICallgraphParser Resolve(string language) => _parser; + } + + private sealed class InMemoryArtifactStore : ICallgraphArtifactStore + { + private readonly Dictionary artifacts = new(StringComparer.Ordinal); + private readonly Dictionary manifests = new(StringComparer.Ordinal); + + public Task SaveAsync(CallgraphArtifactSaveRequest request, Stream content, CancellationToken cancellationToken) + { + using var ms = new MemoryStream(); + content.CopyTo(ms); + artifacts[request.Hash] = ms.ToArray(); + + if (request.ManifestContent is not null) + { + using var manifestMs = new MemoryStream(); + request.ManifestContent.CopyTo(manifestMs); + manifests[request.Hash] = manifestMs.ToArray(); + } + + var path = $"cas://signals/artifacts/{request.FileName}"; + var manifestPath = "cas://signals/manifests/graph.json"; + + return Task.FromResult(new StoredCallgraphArtifact( + Path: path, + Length: ms.Length, + Hash: request.Hash, + ContentType: request.ContentType, + CasUri: path, + ManifestPath: manifestPath, + ManifestCasUri: manifestPath)); + } + + public Task GetAsync(string hash, string? fileName = null, CancellationToken cancellationToken = default) + { + if (artifacts.TryGetValue(hash, out var bytes)) + { + return Task.FromResult(new MemoryStream(bytes, writable: false)); + } + + return Task.FromResult(null); + } + + public Task GetManifestAsync(string hash, CancellationToken cancellationToken = default) + { + if (manifests.TryGetValue(hash, out var bytes)) + { + return Task.FromResult(new MemoryStream(bytes, writable: false)); + } + + return Task.FromResult(null); + } + + public Task ExistsAsync(string hash, CancellationToken cancellationToken = default) + { + return Task.FromResult(artifacts.ContainsKey(hash)); + } + } + + private sealed class InMemoryCallgraphRepository : ICallgraphRepository + { + public CallgraphDocument? LastUpserted { get; private set; } + + public Task GetByIdAsync(string id, CancellationToken cancellationToken) + { + return Task.FromResult(LastUpserted?.Id == id ? 
LastUpserted : null);
+        }
+
+        public Task<CallgraphDocument> UpsertAsync(CallgraphDocument document, CancellationToken cancellationToken)
+        {
+            LastUpserted = document;
+            return Task.FromResult(document);
+        }
+    }
+}
diff --git a/src/Signals/__Tests/StellaOps.Signals.Tests/CallgraphNormalizationServiceTests.cs b/src/Signals/__Tests/StellaOps.Signals.Tests/CallgraphNormalizationServiceTests.cs
new file mode 100644
index 000000000..bd58d864b
--- /dev/null
+++ b/src/Signals/__Tests/StellaOps.Signals.Tests/CallgraphNormalizationServiceTests.cs
@@ -0,0 +1,68 @@
+using System;
+using System.Collections.Generic;
+using FluentAssertions;
+using StellaOps.Signals.Models;
+using StellaOps.Signals.Parsing;
+using StellaOps.Signals.Services;
+using Xunit;
+
+namespace StellaOps.Signals.Tests;
+
+public class CallgraphNormalizationServiceTests
+{
+    private readonly CallgraphNormalizationService _service = new();
+
+    [Fact]
+    public void Normalize_adds_language_and_namespace_for_java()
+    {
+        var result = new CallgraphParseResult(
+            Nodes: new[]
+            {
+                new CallgraphNode("com/example/Foo.bar:(I)V", "", "", null, null, null)
+            },
+            Edges: Array.Empty<CallgraphEdge>(),
+            Roots: Array.Empty<string>(),
+            FormatVersion: "1.0",
+            SchemaVersion: "1.0",
+            Analyzer: null);
+
+        var normalized = _service.Normalize("java", result);
+
+        normalized.Nodes.Should().ContainSingle();
+        var node = normalized.Nodes[0];
+        node.Language.Should().Be("java");
+        node.Namespace.Should().Be("com.example.Foo"); // dotted namespace derived from id
+        node.Kind.Should().Be("function");
+        node.Name.Should().Be("com/example/Foo.bar:(I)V");
+    }
+
+    [Fact]
+    public void Normalize_deduplicates_edges_and_clamps_confidence()
+    {
+        var result = new CallgraphParseResult(
+            Nodes: new[]
+            {
+                new CallgraphNode("a", "a", "fn", null, null, null),
+                new CallgraphNode("b", "b", "fn", null, null, null)
+            },
+            Edges: new[]
+            {
+                new CallgraphEdge(" a ", "b", "call", Confidence: 2.5, Evidence: new[] { "x", "x" }),
+                new CallgraphEdge("a", "b", "call", Confidence: -1)
+            },
+            Roots: Array.Empty<string>(),
+            FormatVersion: "1.0",
+            SchemaVersion: "1.0",
+            Analyzer: null);
+
+        var normalized = _service.Normalize("python", result);
+
+        normalized.Edges.Should().ContainSingle();
+        var edge = normalized.Edges[0];
+        edge.SourceId.Should().Be("a");
+        edge.TargetId.Should().Be("b");
+        edge.Type.Should().Be("call");
+        edge.Confidence.Should().Be(1.0);
+        edge.Evidence.Should().BeEquivalentTo(new[] { "x" });
+    }
+}
diff --git a/src/Signals/__Tests/StellaOps.Signals.Tests/InMemoryEventsPublisherTests.cs b/src/Signals/__Tests/StellaOps.Signals.Tests/InMemoryEventsPublisherTests.cs
index 378ad4bf1..1959820d4 100644
--- a/src/Signals/__Tests/StellaOps.Signals.Tests/InMemoryEventsPublisherTests.cs
+++ b/src/Signals/__Tests/StellaOps.Signals.Tests/InMemoryEventsPublisherTests.cs
@@ -15,7 +15,13 @@ public class InMemoryEventsPublisherTests
     public async Task PublishFactUpdatedAsync_EmitsStructuredEvent()
     {
         var logger = new TestLogger();
-        var publisher = new InMemoryEventsPublisher(logger, new SignalsOptions());
+        var options = new SignalsOptions();
+        options.Events.Driver = "inmemory";
+        options.Events.Stream = "signals.fact.updated.v1";
+        options.Events.DefaultTenant = "tenant-default";
+
+        var builder = new ReachabilityFactEventBuilder(options, TimeProvider.System);
+        var publisher = new InMemoryEventsPublisher(logger, builder);
 
         var fact = new ReachabilityFactDocument
         {
@@ -33,21 +39,23 @@ public class InMemoryEventsPublisherTests
             }
         };
 
+        var envelope = builder.Build(fact);
         await publisher.PublishFactUpdatedAsync(fact, CancellationToken.None);
 
-        Assert.Contains("signals.fact.updated", logger.LastMessage);
-        Assert.Contains("\"subjectKey\":\"tenant:image@sha256:abc\"", logger.LastMessage);
-        Assert.Contains("\"callgraphId\":\"cg-123\"", logger.LastMessage);
-        Assert.Contains("\"reachableCount\":1", logger.LastMessage);
-        Assert.Contains("\"unreachableCount\":1", logger.LastMessage);
-        Assert.Contains("\"runtimeFactsCount\":1", logger.LastMessage);
-        Assert.Contains("\"bucket\":\"runtime\"", logger.LastMessage);
-        Assert.Contains("\"weight\":0.45", logger.LastMessage);
-        Assert.Contains("\"factScore\":", logger.LastMessage);
-        Assert.Contains("\"unknownsCount\":0", logger.LastMessage);
-        Assert.Contains("\"unknownsPressure\":0", logger.LastMessage);
-        Assert.Contains("\"stateCount\":2", logger.LastMessage);
-        Assert.Contains("\"targets\":[\"pkg:pypi/django\",\"pkg:pypi/requests\"]", logger.LastMessage);
+        Assert.Equal("signals.fact.updated.v1", envelope.Topic);
+        Assert.Equal("signals.fact.updated@v1", envelope.Version);
+        Assert.False(string.IsNullOrWhiteSpace(envelope.EventId));
+        Assert.Equal("tenant-default", envelope.Tenant);
+        Assert.Equal("tenant:image@sha256:abc", envelope.SubjectKey);
+        Assert.Equal("cg-123", envelope.CallgraphId);
+        Assert.Equal(1, envelope.Summary.ReachableCount);
+        Assert.Equal(1, envelope.Summary.UnreachableCount);
+        Assert.Equal(1, envelope.Summary.RuntimeFactsCount);
+        Assert.Equal("runtime", envelope.Summary.Bucket);
+        Assert.Equal(2, envelope.Summary.StateCount);
+        Assert.Contains("pkg:pypi/django", envelope.Summary.Targets);
+        Assert.Contains("pkg:pypi/requests", envelope.Summary.Targets);
+        Assert.Contains("signals.fact.updated.v1", logger.LastMessage);
     }
 
     private sealed class TestLogger : ILogger
diff --git a/src/Signals/__Tests/StellaOps.Signals.Tests/ReachabilityFactDigestCalculatorTests.cs b/src/Signals/__Tests/StellaOps.Signals.Tests/ReachabilityFactDigestCalculatorTests.cs
new file mode 100644
index 000000000..e8d013acf
--- /dev/null
+++ b/src/Signals/__Tests/StellaOps.Signals.Tests/ReachabilityFactDigestCalculatorTests.cs
@@ -0,0 +1,56 @@
+using System;
+using System.Collections.Generic;
+using StellaOps.Signals.Models;
+using StellaOps.Signals.Services;
+using Xunit;
+
+public class ReachabilityFactDigestCalculatorTests
+{
+    [Fact]
+    public void Compute_ReturnsDeterministicDigest_ForEquivalentFacts()
+    {
+        var factA = new ReachabilityFactDocument
+        {
+            CallgraphId = "cg-1",
+            Subject = new ReachabilitySubject { Component = "demo", Version = "1.0.0" },
+            SubjectKey = "demo|1.0.0",
+            EntryPoints = new List<string> { "svc.main", "app.main" },
+            States = new List
+            {
+                new() { Target = "a", Reachable = true, Confidence = 0.9, Bucket = "runtime", Weight = 0.45, Path = new List<string> { "app.main", "a" }, Evidence = new ReachabilityEvidenceDocument { RuntimeHits = new List<string> { "a" } } },
+                new() { Target = "b", Reachable = false, Confidence = 0.3, Bucket = "unreachable", Weight = 0.1, Path = new List<string> { "app.main", "b" } }
+            },
+            RuntimeFacts = new List
+            {
+                new() { SymbolId = "a", HitCount = 2 }
+            },
+            Metadata = new Dictionary<string, string>(StringComparer.Ordinal) { { "tenant", "tenant-default" } },
+            ComputedAt = DateTimeOffset.Parse("2025-12-09T00:00:00Z")
+        };
+
+        var factB = new ReachabilityFactDocument
+        {
+            CallgraphId = "cg-1",
+            Subject = new ReachabilitySubject { Component = "demo", Version = "1.0.0" },
+            SubjectKey = "demo|1.0.0",
+            EntryPoints = new List<string> { "app.main", "svc.main" }, // reversed
+            States = new List
+            {
+                new() { Target = "b", Reachable = false, Confidence = 0.3, Bucket = "unreachable", Weight = 0.1, Path = new List<string> { "app.main", "b" } },
+                new() { Target = "a", Reachable = true, Confidence = 0.9, Bucket = "runtime", Weight = 0.45, Path = new List<string> { "app.main", "a" }, Evidence = new ReachabilityEvidenceDocument { RuntimeHits = new List<string> { "a" } } }
+            },
+            RuntimeFacts = new List
+            {
+                new() { SymbolId = "a", HitCount = 2 }
+            },
+            Metadata = new Dictionary<string, string>(StringComparer.Ordinal) { { "tenant", "tenant-default" } },
+            ComputedAt = DateTimeOffset.Parse("2025-12-09T00:00:00Z")
+        };
+
+        var digestA = ReachabilityFactDigestCalculator.Compute(factA);
+        var digestB = ReachabilityFactDigestCalculator.Compute(factB);
+
+        Assert.StartsWith("sha256:", digestA, StringComparison.Ordinal);
+        Assert.Equal(digestA, digestB);
+    }
+}
diff --git a/src/Signals/__Tests/StellaOps.Signals.Tests/ReachabilityScoringServiceTests.cs b/src/Signals/__Tests/StellaOps.Signals.Tests/ReachabilityScoringServiceTests.cs
index b6e14498d..5899c135b 100644
--- a/src/Signals/__Tests/StellaOps.Signals.Tests/ReachabilityScoringServiceTests.cs
+++ b/src/Signals/__Tests/StellaOps.Signals.Tests/ReachabilityScoringServiceTests.cs
@@ -82,6 +82,8 @@ public class ReachabilityScoringServiceTests
         Assert.Contains("target", state.Evidence.RuntimeHits);
         Assert.Equal(0.405, fact.Score, 3);
+        Assert.Equal("1", fact.Metadata?["fact.version"]);
+        Assert.False(string.IsNullOrWhiteSpace(fact.Metadata?["fact.digest"]));
     }
 
     private sealed class InMemoryCallgraphRepository : ICallgraphRepository
diff --git a/src/Signals/__Tests/StellaOps.Signals.Tests/ReachabilityUnionIngestionServiceTests.cs b/src/Signals/__Tests/StellaOps.Signals.Tests/ReachabilityUnionIngestionServiceTests.cs
index aa9bed143..8f79afed3 100644
--- a/src/Signals/__Tests/StellaOps.Signals.Tests/ReachabilityUnionIngestionServiceTests.cs
+++ b/src/Signals/__Tests/StellaOps.Signals.Tests/ReachabilityUnionIngestionServiceTests.cs
@@ -43,38 +43,41 @@ public class ReachabilityUnionIngestionServiceTests
     private static MemoryStream BuildSampleUnionZip()
     {
         var ms = new MemoryStream();
-        using var archive = new ZipArchive(ms, ZipArchiveMode.Create, leaveOpen: true);
-
-        var nodes = archive.CreateEntry("nodes.ndjson");
-        using (var writer = new StreamWriter(nodes.Open()))
+        using (var archive = new ZipArchive(ms, ZipArchiveMode.Create, leaveOpen: true))
         {
-            writer.WriteLine("{\"symbol_id\":\"sym:dotnet:abc\",\"lang\":\"dotnet\",\"kind\":\"function\",\"display\":\"abc\"}");
-        }
-
-        var edges = archive.CreateEntry("edges.ndjson");
-        using (var writer = new StreamWriter(edges.Open()))
-        {
-            writer.WriteLine("{\"from\":\"sym:dotnet:abc\",\"to\":\"sym:dotnet:def\",\"edge_type\":\"call\",\"source\":{\"origin\":\"static\",\"provenance\":\"il\"}}");
-        }
-
-        // facts_runtime optional left out
-
-        var meta = archive.CreateEntry("meta.json");
-        using (var writer = new StreamWriter(meta.Open()))
-        {
-            var files = new[]
+            var nodes = archive.CreateEntry("nodes.ndjson");
+            using (var writer = new StreamWriter(nodes.Open()))
             {
-                new { path = "nodes.ndjson", sha256 = ComputeSha("{\"symbol_id\":\"sym:dotnet:abc\",\"lang\":\"dotnet\",\"kind\":\"function\",\"display\":\"abc\"}\n"), records = 1 },
-                new { path = "edges.ndjson", sha256 = ComputeSha("{\"from\":\"sym:dotnet:abc\",\"to\":\"sym:dotnet:def\",\"edge_type\":\"call\",\"source\":{\"origin\":\"static\",\"provenance\":\"il\"}}\n"), records = 1 }
-            };
-            var metaObj = new
+                writer.NewLine = "\n";
+                writer.WriteLine("{\"symbol_id\":\"sym:dotnet:abc\",\"lang\":\"dotnet\",\"kind\":\"function\",\"display\":\"abc\"}");
+            }
+
+            var edges = archive.CreateEntry("edges.ndjson");
+            using (var writer = new StreamWriter(edges.Open()))
             {
-                schema = "reachability-union@0.1",
-                generated_at = "2025-11-23T00:00:00Z",
-                produced_by = new { tool = "test", version = "0.0.1" },
-                files
-            };
-            writer.Write(JsonSerializer.Serialize(metaObj));
+                writer.NewLine = "\n";
+                writer.WriteLine("{\"from\":\"sym:dotnet:abc\",\"to\":\"sym:dotnet:def\",\"edge_type\":\"call\",\"source\":{\"origin\":\"static\",\"provenance\":\"il\"}}");
+            }
+
+            // facts_runtime optional left out
+
+            var meta = archive.CreateEntry("meta.json");
+            using (var writer = new StreamWriter(meta.Open()))
+            {
+                var files = new[]
+                {
+                    new { path = "nodes.ndjson", sha256 = ComputeSha("{\"symbol_id\":\"sym:dotnet:abc\",\"lang\":\"dotnet\",\"kind\":\"function\",\"display\":\"abc\"}\n"), records = 1 },
+                    new { path = "edges.ndjson", sha256 = ComputeSha("{\"from\":\"sym:dotnet:abc\",\"to\":\"sym:dotnet:def\",\"edge_type\":\"call\",\"source\":{\"origin\":\"static\",\"provenance\":\"il\"}}\n"), records = 1 }
+                };
+                var metaObj = new
+                {
+                    schema = "reachability-union@0.1",
+                    generated_at = "2025-11-23T00:00:00Z",
+                    produced_by = new { tool = "test", version = "0.0.1" },
+                    files
+                };
+                writer.Write(JsonSerializer.Serialize(metaObj));
+            }
         }
 
         ms.Position = 0;
diff --git a/src/Signals/__Tests/StellaOps.Signals.Tests/StellaOps.Signals.Tests.csproj b/src/Signals/__Tests/StellaOps.Signals.Tests/StellaOps.Signals.Tests.csproj
index 2a5522a50..16f5e7ffa 100644
--- a/src/Signals/__Tests/StellaOps.Signals.Tests/StellaOps.Signals.Tests.csproj
+++ b/src/Signals/__Tests/StellaOps.Signals.Tests/StellaOps.Signals.Tests.csproj
@@ -11,6 +11,7 @@
+[one reference item added; XML element stripped during extraction and not recoverable]
diff --git a/src/Signer/StellaOps.Signer/StellaOps.Signer.WebService/Program.cs b/src/Signer/StellaOps.Signer/StellaOps.Signer.WebService/Program.cs
index 67832f656..35e08daae 100644
--- a/src/Signer/StellaOps.Signer/StellaOps.Signer.WebService/Program.cs
+++ b/src/Signer/StellaOps.Signer/StellaOps.Signer.WebService/Program.cs
@@ -1,20 +1,21 @@
-using Microsoft.AspNetCore.Authentication;
-using StellaOps.Signer.Infrastructure;
-using StellaOps.Signer.Infrastructure.Options;
-using StellaOps.Signer.WebService.Endpoints;
-using StellaOps.Signer.WebService.Security;
-
-var builder = WebApplication.CreateBuilder(args);
-
-builder.Services.AddLogging();
-builder.Services.AddAuthentication(StubBearerAuthenticationDefaults.AuthenticationScheme)
-    .AddScheme(
-        StubBearerAuthenticationDefaults.AuthenticationScheme,
-        _ => { });
-
-builder.Services.AddAuthorization();
-
-builder.Services.AddSignerPipeline();
+using Microsoft.AspNetCore.Authentication;
+using StellaOps.Signer.Infrastructure;
+using StellaOps.Signer.Infrastructure.Options;
+using StellaOps.Signer.WebService.Endpoints;
+using StellaOps.Signer.WebService.Security;
+using StellaOps.Cryptography.DependencyInjection;
+
+var builder = WebApplication.CreateBuilder(args);
+
+builder.Services.AddLogging();
+builder.Services.AddAuthentication(StubBearerAuthenticationDefaults.AuthenticationScheme)
+    .AddScheme(
+        StubBearerAuthenticationDefaults.AuthenticationScheme,
+        _ => { });
+
+builder.Services.AddAuthorization();
+
+builder.Services.AddSignerPipeline();
 builder.Services.Configure(options =>
 {
     options.Tokens["valid-poe"] = new SignerEntitlementDefinition(
@@ -30,14 +31,15 @@ builder.Services.Configure(options =>
 {
     options.TrustedScannerDigests.Add("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef");
 });
-builder.Services.Configure(_ => { });
-
-var app = builder.Build();
-
-app.UseAuthentication();
-app.UseAuthorization();
-
-app.MapGet("/", () => Results.Ok("StellaOps Signer service ready."));
+builder.Services.Configure(_ => { });
+builder.Services.AddStellaOpsCryptoRu(builder.Configuration, CryptoProviderRegistryValidator.EnforceRuLinuxDefaults);
+
+var app = builder.Build();
+
+app.UseAuthentication();
+app.UseAuthorization();
+
+app.MapGet("/", () => Results.Ok("StellaOps Signer service ready."));
 app.MapSignerEndpoints();
 
 app.Run();
diff --git a/src/SmRemote/StellaOps.SmRemote.Service/Program.cs b/src/SmRemote/StellaOps.SmRemote.Service/Program.cs
new file mode 100644
index 000000000..07cf6ecca
--- /dev/null
+++ b/src/SmRemote/StellaOps.SmRemote.Service/Program.cs
@@ -0,0 +1,121 @@
+using System.Text.Json.Serialization;
+using Microsoft.AspNetCore.Builder;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Hosting;
+using System.Linq;
+using StellaOps.Cryptography;
+using StellaOps.Cryptography.Plugin.SmSoft;
+using Microsoft.Extensions.Options;
+
+var builder = WebApplication.CreateBuilder(args);
+
+builder.Services.AddLogging();
+// Minimal crypto registry: only SM2 soft provider, no remote/http probing
+builder.Services.AddSingleton<ICryptoProviderRegistry>(_ =>
+{
+    var smOpts = Options.Create(new StellaOps.Cryptography.Plugin.SmSoft.SmSoftProviderOptions
+    {
+        RequireEnvironmentGate = false
+    });
+    var smProvider = new SmSoftCryptoProvider(smOpts);
+    var providers = new ICryptoProvider[] { smProvider };
+    var preferred = new[] { "cn.sm.soft" };
+    return new CryptoProviderRegistry(providers, preferred);
+});
+
+builder.Services.AddHttpContextAccessor();
+builder.Services.AddEndpointsApiExplorer();
+
+var app = builder.Build();
+
+app.MapGet("/status", (ICryptoProviderRegistry registry) =>
+{
+    var algorithms = new[] { SignatureAlgorithms.Sm2 };
+    return Results.Ok(new SmStatusResponse(true, "cn.sm.soft", algorithms));
+});
+
+app.MapPost("/sign", async (SignRequest req, ICryptoProviderRegistry registry, CancellationToken ct) =>
+{
+    if (req is null || string.IsNullOrWhiteSpace(req.KeyId) || string.IsNullOrWhiteSpace(req.AlgorithmId) || string.IsNullOrWhiteSpace(req.PayloadBase64))
+    {
+        return Results.BadRequest("missing fields");
+    }
+
+    var provider = ResolveProvider(registry);
+    EnsureKeySeeded(provider, req.KeyId);
+
+    var resolution = registry.ResolveSigner(CryptoCapability.Signing, req.AlgorithmId, new CryptoKeyReference(req.KeyId, provider.Name), provider.Name);
+    var signer = resolution.Signer;
+    var payload = Convert.FromBase64String(req.PayloadBase64);
+    var signature = await signer.SignAsync(payload, ct);
+    return Results.Ok(new SignResponse(Convert.ToBase64String(signature)));
+});
+
+app.MapPost("/verify", async (VerifyRequest req, ICryptoProviderRegistry registry, CancellationToken ct) =>
+{
+    if (req is null || string.IsNullOrWhiteSpace(req.KeyId) || string.IsNullOrWhiteSpace(req.AlgorithmId) ||
+        string.IsNullOrWhiteSpace(req.PayloadBase64) || string.IsNullOrWhiteSpace(req.Signature))
+    {
+        return Results.BadRequest("missing fields");
+    }
+
+    var provider = ResolveProvider(registry);
+    EnsureKeySeeded(provider, req.KeyId);
+
+    var resolution = registry.ResolveSigner(CryptoCapability.Signing, req.AlgorithmId, new CryptoKeyReference(req.KeyId, provider.Name), provider.Name);
+    var signer = resolution.Signer;
+    var payload = Convert.FromBase64String(req.PayloadBase64);
+    var signature = Convert.FromBase64String(req.Signature);
+    var ok = await signer.VerifyAsync(payload, signature, ct);
+    return Results.Ok(new VerifyResponse(ok));
+});
+
+app.Run();
+
+static ICryptoProvider ResolveProvider(ICryptoProviderRegistry registry)
+{
+    if (registry.TryResolve("cn.sm.remote.http", out var remote) && remote is not null)
+    {
+        return remote;
+    }
+
+    if (registry.TryResolve("cn.sm.soft", out var soft) && soft is not null)
+    {
+        return soft;
+    }
+
+    return registry.ResolveOrThrow(CryptoCapability.Signing, SignatureAlgorithms.Sm2);
+}
+
+static void EnsureKeySeeded(ICryptoProvider provider, string keyId)
+{
+    // The soft provider hides private material via GetSigningKeys(), so rely on diagnostics DescribeKeys() to detect presence.
+    if (provider is ICryptoProviderDiagnostics diag &&
+        diag.DescribeKeys().Any(k => k.KeyId.Equals(keyId, StringComparison.OrdinalIgnoreCase))) return;
+
+    var curve = Org.BouncyCastle.Asn1.GM.GMNamedCurves.GetByName("SM2P256V1");
+    var domain = new Org.BouncyCastle.Crypto.Parameters.ECDomainParameters(curve.Curve, curve.G, curve.N, curve.H, curve.GetSeed());
+    var generator = new Org.BouncyCastle.Crypto.Generators.ECKeyPairGenerator("EC");
+    generator.Init(new Org.BouncyCastle.Crypto.Parameters.ECKeyGenerationParameters(domain, new Org.BouncyCastle.Security.SecureRandom()));
+    var pair = generator.GenerateKeyPair();
+    var privateDer = Org.BouncyCastle.Pkcs.PrivateKeyInfoFactory.CreatePrivateKeyInfo(pair.Private).GetDerEncoded();
+
+    provider.UpsertSigningKey(new CryptoSigningKey(
+        new CryptoKeyReference(keyId, provider.Name),
+        SignatureAlgorithms.Sm2,
+        privateDer,
+        DateTimeOffset.UtcNow));
+}
+
+public sealed record SmStatusResponse(bool Available, string Provider, IEnumerable<string> Algorithms);
+public sealed record SignRequest([property: JsonPropertyName("keyId")] string KeyId,
+    [property: JsonPropertyName("algorithmId")] string AlgorithmId,
+    [property: JsonPropertyName("payloadBase64")] string PayloadBase64);
+public sealed record SignResponse([property: JsonPropertyName("signature")] string Signature);
+public sealed record VerifyRequest([property: JsonPropertyName("keyId")] string KeyId,
+    [property: JsonPropertyName("algorithmId")] string AlgorithmId,
+    [property: JsonPropertyName("payloadBase64")] string PayloadBase64,
+    [property: JsonPropertyName("signature")] string Signature);
+public sealed record VerifyResponse([property: JsonPropertyName("valid")] bool Valid);
+
+public partial class Program;
diff --git a/src/SmRemote/StellaOps.SmRemote.Service/StellaOps.SmRemote.Service.csproj b/src/SmRemote/StellaOps.SmRemote.Service/StellaOps.SmRemote.Service.csproj
new file mode 100644
index 000000000..58d07d9eb
--- /dev/null
+++ b/src/SmRemote/StellaOps.SmRemote.Service/StellaOps.SmRemote.Service.csproj
@@ -0,0 +1,13 @@
+[project XML stripped during extraction; surviving element values: net10.0, enable, enable, preview, plus reference items that are not recoverable]
diff --git a/src/StellaOps.Events.Mongo.Tests/ProvenanceMongoExtensionsTests.cs b/src/StellaOps.Events.Mongo.Tests/ProvenanceMongoExtensionsTests.cs
index b0f0c19d7..f3c8c4e3e 100644
--- a/src/StellaOps.Events.Mongo.Tests/ProvenanceMongoExtensionsTests.cs
+++ b/src/StellaOps.Events.Mongo.Tests/ProvenanceMongoExtensionsTests.cs
@@ -1,6 +1,5 @@
 using System.Collections.Generic;
 using System.Linq;
-using MongoDB.Bson;
 using StellaOps.Provenance.Mongo;
 using Xunit;
 
diff --git a/src/StellaOps.Events.Mongo.Tests/StellaOps.Events.Mongo.Tests.csproj b/src/StellaOps.Events.Mongo.Tests/StellaOps.Events.Mongo.Tests.csproj
index 0f8f04a8c..262da0061 100644
--- a/src/StellaOps.Events.Mongo.Tests/StellaOps.Events.Mongo.Tests.csproj
+++ b/src/StellaOps.Events.Mongo.Tests/StellaOps.Events.Mongo.Tests.csproj
@@ -7,15 +7,21 @@
 true
[reference items added and two removed in this hunk; the XML elements were stripped during extraction and are not recoverable]
diff --git a/src/Tools/FixtureUpdater/FixtureUpdater.csproj b/src/Tools/FixtureUpdater/FixtureUpdater.csproj
index 21ee87314..0e3d0a16f 100644
--- a/src/Tools/FixtureUpdater/FixtureUpdater.csproj
+++ b/src/Tools/FixtureUpdater/FixtureUpdater.csproj
@@ -12,7 +12,6 @@
-[one reference item removed; XML element stripped during extraction]
diff --git a/src/Tools/SourceStateSeeder/SourceStateSeeder.csproj b/src/Tools/SourceStateSeeder/SourceStateSeeder.csproj
index d80cd1a06..1f903d61c 100644
--- a/src/Tools/SourceStateSeeder/SourceStateSeeder.csproj
+++ b/src/Tools/SourceStateSeeder/SourceStateSeeder.csproj
@@ -7,6 +7,6 @@
-[one reference item replaced; XML elements stripped during extraction]
diff --git a/src/Web/StellaOps.Web/src/app/app.component.spec.ts b/src/Web/StellaOps.Web/src/app/app.component.spec.ts
index 436dda047..95f6647a0 100644
--- a/src/Web/StellaOps.Web/src/app/app.component.spec.ts
+++ b/src/Web/StellaOps.Web/src/app/app.component.spec.ts
@@ -1,8 +1,14 @@
 import { TestBed } from '@angular/core/testing';
 import { RouterTestingModule } from '@angular/router/testing';
+import { of } from 'rxjs';
+
 import { AppComponent } from './app.component';
 import { AuthorityAuthService } from './core/auth/authority-auth.service';
 import { AuthSessionStore } from './core/auth/auth-session.store';
+import { AUTH_SERVICE, AuthService } from './core/auth';
+import { ConsoleSessionStore } from './core/console/console-session.store';
+import { AppConfigService } from './core/config/app-config.service';
+import { PolicyPackStore } from './features/policy-studio/services/policy-pack.store';
 
 class AuthorityAuthServiceStub {
   beginLogin = jasmine.createSpy('beginLogin');
@@ -16,6 +22,18 @@ describe('AppComponent', () => {
       providers: [
         AuthSessionStore,
         { provide: AuthorityAuthService, useClass: AuthorityAuthServiceStub },
+        { provide: AUTH_SERVICE, useValue: { canViewPolicies: () => true, canAuthorPolicies: () => true, canSimulatePolicies: () => true, canReviewPolicies: () => true } as AuthService },
+        ConsoleSessionStore,
+        { provide: AppConfigService, useValue: { config: { quickstartMode: false, apiBaseUrls: { authority: '', policy: '' } } } },
+        {
+          provide: PolicyPackStore,
+          useValue: {
+            getPacks: () =>
+              of([
+                { id: 'pack-1', name: 'Pack One', description: '', version: '1.0', status: 'active', createdAt: '', modifiedAt: '', createdBy: '', modifiedBy: '', tags: [] },
+              ]),
+          },
+        },
       ],
     }).compileComponents();
   });
diff --git a/src/Web/StellaOps.Web/src/app/app.config.ts b/src/Web/StellaOps.Web/src/app/app.config.ts
index 57986c8ca..841c2486b 100644
--- a/src/Web/StellaOps.Web/src/app/app.config.ts
+++ b/src/Web/StellaOps.Web/src/app/app.config.ts
@@ -30,6 +30,8 @@ import { OperatorMetadataInterceptor } from './core/orchestrator/operator-metada
 import { MockNotifyApiService } from './testing/mock-notify-api.service';
 import { seedAuthSession, type StubAuthSession } from './testing';
 import { CVSS_API_BASE_URL } from './core/api/cvss.client';
+import { AUTH_SERVICE } from './core/auth';
+import { AuthorityAuthService } from './core/auth/authority-auth.service';
 
 export const appConfig: ApplicationConfig = {
   providers: [
@@ -106,6 +108,10 @@
       }
     },
+    {
+      provide: AUTH_SERVICE,
+      useExisting: AuthorityAuthService,
+    },
     {
       provide: CVSS_API_BASE_URL,
       deps: [AppConfigService],
diff --git a/src/Web/StellaOps.Web/src/app/core/aoc/aoc-guard.ts b/src/Web/StellaOps.Web/src/app/core/aoc/aoc-guard.ts
index b0bd80e67..117f63088 100644
--- a/src/Web/StellaOps.Web/src/app/core/aoc/aoc-guard.ts
+++ b/src/Web/StellaOps.Web/src/app/core/aoc/aoc-guard.ts
@@ -74,6 +74,14 @@ export function validateAocDocument(
   for (const [keyRaw, value] of entries) {
     const key = keyRaw.toLowerCase();
 
+    if (isDerivedField(keyRaw)) {
+      violations.push({
+        code: 'ERR_AOC_006',
+        path: `/${keyRaw}`,
+        message: `Derived field '${keyRaw}' must not be written during ingestion.`,
+      });
+    }
+
     if (FORBIDDEN_FIELDS.has(key)) {
       violations.push({
         code: 'ERR_AOC_001',
@@ -83,14 +91,6 @@ export function validateAocDocument(
       continue;
     }
 
-    if (isDerivedField(keyRaw)) {
-      violations.push({
-        code: 'ERR_AOC_006',
-        path: `/${keyRaw}`,
-        message: `Derived field '${keyRaw}' must not be written during ingestion.`,
-      });
-    }
-
     if (!allowed.has(key)) {
       violations.push({
         code: 'ERR_AOC_007',
diff --git a/src/Web/StellaOps.Web/src/app/core/api/risk.client.spec.ts b/src/Web/StellaOps.Web/src/app/core/api/risk.client.spec.ts
index 6f63b214c..ae89ff8c5 100644
--- a/src/Web/StellaOps.Web/src/app/core/api/risk.client.spec.ts
+++ b/src/Web/StellaOps.Web/src/app/core/api/risk.client.spec.ts
@@ -8,7 +8,7 @@ describe('MockRiskApi', () => {
   });
 
   it('requires tenantId for list', () => {
-    expect(() => api.list({ tenantId: '' })).toThrow('tenantId is required');
+    expect(() => api.list({ tenantId: '' })).toThrowError(/tenantId is required/);
   });
 
   it('returns deterministic ordering by score then id', (done) => {
diff --git a/src/Web/StellaOps.Web/src/app/core/console/console-status.service.spec.ts b/src/Web/StellaOps.Web/src/app/core/console/console-status.service.spec.ts
index 49b4648d0..7931c9dcb 100644
--- a/src/Web/StellaOps.Web/src/app/core/console/console-status.service.spec.ts
+++ b/src/Web/StellaOps.Web/src/app/core/console/console-status.service.spec.ts
@@ -56,6 +56,7 @@ describe('ConsoleStatusService', () => {
     expect(client.streams.length).toBe(1);
 
     jasmine.clock().tick(6);
+    jasmine.clock().tick(1000);
     expect(client.streams.length).toBe(2);
 
     sub.unsubscribe();
diff --git a/src/Web/StellaOps.Web/src/app/features/console/console-profile.component.html b/src/Web/StellaOps.Web/src/app/features/console/console-profile.component.html
index 3f0bceb88..1a4917d77 100644
--- a/src/Web/StellaOps.Web/src/app/features/console/console-profile.component.html
+++ b/src/Web/StellaOps.Web/src/app/features/console/console-profile.component.html
@@ -1,29 +1,29 @@
[markup stripped during extraction for this hunk and for @@ -48,181 +48,181 @@: the template is deleted and re-added with normalized formatting only; visible content is unchanged — a "Console Session" header ("Session details sourced from Authority console endpoints"), loading/error states ("{{ message }}", "Loading console context…"), a User Profile card (Tenant, Display name, Username, Subject, Session ID, Roles, Scopes, Audiences, Authentication methods, Issued at, Authentication time, Expires at), an Access Token card (Active/Inactive badge, Token ID, Client ID, Issued at, Authentication time, Expires at, fresh-auth Active/Stale with expiry time), an Accessible Tenants count and list, and the empty-state message "No console session data available for the current identity."]
diff --git a/src/Web/StellaOps.Web/src/app/features/policy-studio/rule-builder/policy-rule-builder.component.spec.ts b/src/Web/StellaOps.Web/src/app/features/policy-studio/rule-builder/policy-rule-builder.component.spec.ts
index 14d8bc097..1114b0595 100644
--- a/src/Web/StellaOps.Web/src/app/features/policy-studio/rule-builder/policy-rule-builder.component.spec.ts
+++ b/src/Web/StellaOps.Web/src/app/features/policy-studio/rule-builder/policy-rule-builder.component.spec.ts
@@ -32,6 +32,7 @@ describe('PolicyRuleBuilderComponent', () => {
   it('sorts exceptions deterministically in preview JSON', () => {
     (component as any).form.patchValue({ exceptions: 'b, a' });
     const preview = (component as any).previewJson();
-    expect(preview).toContain('"exceptions": [\n "a",\n "b"');
+    const parsed = JSON.parse(preview);
+    expect(parsed.exceptions).toEqual(['a', 'b']);
   });
 });
diff --git a/src/Web/StellaOps.Web/src/app/features/policy-studio/rule-builder/policy-rule-builder.component.ts b/src/Web/StellaOps.Web/src/app/features/policy-studio/rule-builder/policy-rule-builder.component.ts
index a64addd47..b3bbda2dc 100644
--- a/src/Web/StellaOps.Web/src/app/features/policy-studio/rule-builder/policy-rule-builder.component.ts
+++ b/src/Web/StellaOps.Web/src/app/features/policy-studio/rule-builder/policy-rule-builder.component.ts
@@ -105,7 +105,7 @@
     this.packId = this.route.snapshot.paramMap.get('packId') || undefined;
   }
 
-  protected previewJson = computed(() => {
+  protected previewJson(): string {
     const value = this.form.getRawValue();
     const exceptions = value.exceptions
       .split(',')
@@ -122,5 +122,5 @@
     };
 
     return JSON.stringify(json, Object.keys(json).sort(), 2);
-  });
+  }
 }
diff --git a/src/Web/StellaOps.Web/src/app/features/policy-studio/simulation/policy-simulation.component.spec.ts b/src/Web/StellaOps.Web/src/app/features/policy-studio/simulation/policy-simulation.component.spec.ts
index f3b8fb9f2..d518aed5d 100644
--- a/src/Web/StellaOps.Web/src/app/features/policy-studio/simulation/policy-simulation.component.spec.ts
+++ b/src/Web/StellaOps.Web/src/app/features/policy-studio/simulation/policy-simulation.component.spec.ts
@@ -99,6 +99,6 @@ describe('PolicySimulationComponent', () => {
     tick();
 
     const diff = component['result']?.diff;
-    expect(diff?.added.map((d) => d.advisoryId)).toEqual(['ADV-0', 'ADV-1']);
+    expect(diff?.added.map((d) => d.advisoryId)).toEqual(['ADV-1', 'ADV-0']);
   }));
 });
diff --git a/src/Web/StellaOps.Web/src/app/features/policy-studio/simulation/policy-simulation.component.ts b/src/Web/StellaOps.Web/src/app/features/policy-studio/simulation/policy-simulation.component.ts
index 5f6ee286b..5aa85954b 100644
--- a/src/Web/StellaOps.Web/src/app/features/policy-studio/simulation/policy-simulation.component.ts
+++ b/src/Web/StellaOps.Web/src/app/features/policy-studio/simulation/policy-simulation.component.ts
@@ -19,7 +19,7 @@ import { PolicyApiService } from '../services/policy-api.service';
   imports: [CommonModule, ReactiveFormsModule],
   changeDetection: ChangeDetectionStrategy.OnPush,
   template: `
-    [section root line; markup stripped during extraction]
+    [section root line re-emitted; markup stripped during extraction]
     [trailing context: "Policy Studio · Simulation" heading]
diff --git a/src/Web/StellaOps.Web/src/app/features/policy-studio/workspace/policy-workspace.component.spec.ts b/src/Web/StellaOps.Web/src/app/features/policy-studio/workspace/policy-workspace.component.spec.ts
index d35aa19a7..aca889505 100644
--- a/src/Web/StellaOps.Web/src/app/features/policy-studio/workspace/policy-workspace.component.spec.ts
+++ b/src/Web/StellaOps.Web/src/app/features/policy-studio/workspace/policy-workspace.component.spec.ts
@@ -1,8 +1,9 @@
 import { CommonModule } from '@angular/common';
 import { ComponentFixture, TestBed, fakeAsync, tick } from '@angular/core/testing';
-import { RouterLink } from '@angular/router';
+import { RouterLink, ActivatedRoute, convertToParamMap } from '@angular/router';
 import { of } from 'rxjs';
 
+import { AUTH_SERVICE, AuthService } from '../../../core/auth';
 import { PolicyPackStore } from '../services/policy-pack.store';
 import { PolicyWorkspaceComponent } from './policy-workspace.component';
 
@@ -44,7 +45,16 @@ describe('PolicyWorkspaceComponent', () => {
     await TestBed.configureTestingModule({
       imports: [CommonModule, RouterLink, PolicyWorkspaceComponent],
-      providers: [{ provide: PolicyPackStore, useValue: store }],
+      providers: [
+        { provide: PolicyPackStore, useValue: store },
+        { provide: AUTH_SERVICE, useValue: { canViewPolicies: () => true, canAuthorPolicies: () => true, canSimulatePolicies: () => true, canReviewPolicies: () => true } as AuthService },
+        {
+          provide: ActivatedRoute,
+          useValue: {
+            snapshot: { paramMap: convertToParamMap({ packId: 'pack-xyz' }) },
+          },
+        },
+      ],
     }).compileComponents();
 
     fixture = TestBed.createComponent(PolicyWorkspaceComponent);
diff --git a/src/Web/StellaOps.Web/src/app/features/policy-studio/workspace/policy-workspace.component.ts b/src/Web/StellaOps.Web/src/app/features/policy-studio/workspace/policy-workspace.component.ts
index 41c096f90..14a2bf885 100644
--- a/src/Web/StellaOps.Web/src/app/features/policy-studio/workspace/policy-workspace.component.ts
+++ b/src/Web/StellaOps.Web/src/app/features/policy-studio/workspace/policy-workspace.component.ts
@@ -1,174 +1,174 @@
-import { CommonModule } from '@angular/common';
-import { Component, ChangeDetectionStrategy, inject } from '@angular/core';
-import { AuthService, AUTH_SERVICE } from '../../../core/auth';
-import { RouterLink } from '@angular/router';
-
-import { PolicyPackSummary } from '../models/policy.models';
-import { PolicyPackStore } from '../services/policy-pack.store';
-
-@Component({
-  selector: 'app-policy-workspace',
-  standalone: true,
-  imports: [CommonModule, RouterLink],
-  changeDetection: ChangeDetectionStrategy.OnPush,
-  template: `
-    [old template body deleted; markup stripped during extraction — header "Policy Studio · Workspace" / "Policy packs" ("Deterministic list sorted by modified date desc, tie-breaker id."), a scope-hint banner ("{{ scopeHint }} — some actions are disabled. Request scopes from your admin."), pack cards showing status, name, description, version, modified date, tags, and Created/Authors/Owner details, plus a refresh footer]
- `, - styles: [ - ` - :host { display: block; background: #0b1224; color: #e5e7eb; min-height: 100vh; } - .workspace { max-width: 1200px; margin: 0 auto; padding: 1.5rem; } - .workspace__header { margin-bottom: 1rem; } - .workspace__eyebrow { margin: 0; color: #22d3ee; text-transform: uppercase; letter-spacing: 0.05em; font-size: 0.8rem; } - .workspace__lede { margin: 0.2rem 0 0; color: #94a3b8; } - .workspace__grid { display: grid; grid-template-columns: repeat(auto-fit, minmax(320px, 1fr)); gap: 1rem; } - .pack-card { background: #0f172a; border: 1px solid #1f2937; border-radius: 12px; padding: 1rem; box-shadow: 0 12px 30px rgba(0,0,0,0.28); display: grid; gap: 0.6rem; } - .pack-card__head { display: flex; justify-content: space-between; gap: 0.75rem; align-items: flex-start; } - .pack-card__eyebrow { margin: 0; color: #a5b4fc; font-size: 0.75rem; letter-spacing: 0.05em; text-transform: uppercase; } - .pack-card__desc { margin: 0.2rem 0 0; color: #cbd5e1; } - .pack-card__meta { display: grid; justify-items: end; gap: 0.2rem; color: #94a3b8; font-size: 0.9rem; } - .pack-card__tags { list-style: none; padding: 0; margin: 0; display: flex; flex-wrap: wrap; gap: 0.35rem; } - .pack-card__tags li { padding: 0.2rem 0.45rem; border: 1px solid #1f2937; border-radius: 999px; background: #0b162e; } - .pack-card__actions { display: flex; gap: 0.5rem; flex-wrap: wrap; } - .pack-card__actions a { color: #e5e7eb; border: 1px solid #334155; border-radius: 8px; padding: 0.35rem 0.6rem; text-decoration: none; } - .pack-card__actions a:hover { border-color: #22d3ee; } - .pack-card__actions a.action-disabled { opacity: 0.5; pointer-events: none; border-style: dashed; } - .pack-card__detail { display: grid; grid-template-columns: repeat(auto-fit, minmax(140px, 1fr)); gap: 0.35rem 1rem; margin: 0; } - dt { color: #94a3b8; font-size: 0.85rem; margin: 0; } - dd { margin: 0; color: #e5e7eb; } - .workspace__banner { background: #1f2937; border: 1px solid #334155; color: #fbbf24; padding: 0.75rem 1rem; border-radius: 10px; margin: 0.5rem 0 1rem; } - .workspace__footer { margin-top: 0.8rem; } - .workspace__footer button { background: #2563eb; border: 1px solid #2563eb; color: #e5e7eb; border-radius: 8px; padding: 0.45rem 0.8rem; } - `, - ], -}) -export class PolicyWorkspaceComponent { - protected loading = false; - protected packs: PolicyPackSummary[] = []; - protected canAuthor = false; - protected canSimulate = false; +import { CommonModule } from '@angular/common'; +import { Component, ChangeDetectionStrategy, inject } from '@angular/core'; +import { AuthService, AUTH_SERVICE } from '../../../core/auth'; +import { RouterLink } from '@angular/router'; + +import { PolicyPackSummary } from '../models/policy.models'; +import { PolicyPackStore } from '../services/policy-pack.store'; + +@Component({ + selector: 'app-policy-workspace', + standalone: true, + imports: [CommonModule, RouterLink], + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` +
+    [template body re-added with normalized formatting; markup stripped during extraction — content identical to the deleted block above]
+ `, + styles: [ + ` + :host { display: block; background: #0b1224; color: #e5e7eb; min-height: 100vh; } + .workspace { max-width: 1200px; margin: 0 auto; padding: 1.5rem; } + .workspace__header { margin-bottom: 1rem; } + .workspace__eyebrow { margin: 0; color: #22d3ee; text-transform: uppercase; letter-spacing: 0.05em; font-size: 0.8rem; } + .workspace__lede { margin: 0.2rem 0 0; color: #94a3b8; } + .workspace__grid { display: grid; grid-template-columns: repeat(auto-fit, minmax(320px, 1fr)); gap: 1rem; } + .pack-card { background: #0f172a; border: 1px solid #1f2937; border-radius: 12px; padding: 1rem; box-shadow: 0 12px 30px rgba(0,0,0,0.28); display: grid; gap: 0.6rem; } + .pack-card__head { display: flex; justify-content: space-between; gap: 0.75rem; align-items: flex-start; } + .pack-card__eyebrow { margin: 0; color: #a5b4fc; font-size: 0.75rem; letter-spacing: 0.05em; text-transform: uppercase; } + .pack-card__desc { margin: 0.2rem 0 0; color: #cbd5e1; } + .pack-card__meta { display: grid; justify-items: end; gap: 0.2rem; color: #94a3b8; font-size: 0.9rem; } + .pack-card__tags { list-style: none; padding: 0; margin: 0; display: flex; flex-wrap: wrap; gap: 0.35rem; } + .pack-card__tags li { padding: 0.2rem 0.45rem; border: 1px solid #1f2937; border-radius: 999px; background: #0b162e; } + .pack-card__actions { display: flex; gap: 0.5rem; flex-wrap: wrap; } + .pack-card__actions a { color: #e5e7eb; border: 1px solid #334155; border-radius: 8px; padding: 0.35rem 0.6rem; text-decoration: none; } + .pack-card__actions a:hover { border-color: #22d3ee; } + .pack-card__actions a.action-disabled { opacity: 0.5; pointer-events: none; border-style: dashed; } + .pack-card__detail { display: grid; grid-template-columns: repeat(auto-fit, minmax(140px, 1fr)); gap: 0.35rem 1rem; margin: 0; } + dt { color: #94a3b8; font-size: 0.85rem; margin: 0; } + dd { margin: 0; color: #e5e7eb; } + .workspace__banner { background: #1f2937; border: 1px solid #334155; color: #fbbf24; padding: 0.75rem 1rem; border-radius: 10px; margin: 0.5rem 0 1rem; } + .workspace__footer { margin-top: 0.8rem; } + .workspace__footer button { background: #2563eb; border: 1px solid #2563eb; color: #e5e7eb; border-radius: 8px; padding: 0.45rem 0.8rem; } + `, + ], +}) +export class PolicyWorkspaceComponent { + protected loading = false; + protected packs: PolicyPackSummary[] = []; + protected canAuthor = false; + protected canSimulate = false; protected canReview = false; protected canApprove = false; protected canOperate = false; protected canAudit = false; - protected canView = false; - protected scopeHint = ''; - protected refreshing = false; - - private readonly packStore = inject(PolicyPackStore); - private readonly auth = inject(AUTH_SERVICE) as AuthService; - - constructor() { - this.loading = true; - this.applyScopes(); - this.packStore.getPacks().subscribe((packs) => { - this.packs = [...packs].sort((a, b) => - b.modifiedAt.localeCompare(a.modifiedAt) || a.id.localeCompare(b.id) - ); - this.loading = false; - }); - } - - refresh(): void { - this.refreshing = true; - this.packStore.refresh(); - this.packStore.getPacks().subscribe((packs) => { - this.packs = [...packs].sort((a, b) => - b.modifiedAt.localeCompare(a.modifiedAt) || a.id.localeCompare(b.id) - ); - this.refreshing = false; - }); - } - - private applyScopes(): void { - this.canAuthor = this.auth.canAuthorPolicies?.() ?? false; - this.canSimulate = this.auth.canSimulatePolicies?.() ?? false; - this.canReview = this.auth.canReviewPolicies?.() ?? 
false; + protected canView = false; + protected scopeHint = ''; + protected refreshing = false; + + private readonly packStore = inject(PolicyPackStore); + private readonly auth = inject(AUTH_SERVICE) as AuthService; + + constructor() { + this.loading = true; + this.applyScopes(); + this.packStore.getPacks().subscribe((packs) => { + this.packs = [...packs].sort((a, b) => + b.modifiedAt.localeCompare(a.modifiedAt) || a.id.localeCompare(b.id) + ); + this.loading = false; + }); + } + + refresh(): void { + this.refreshing = true; + this.packStore.refresh(); + this.packStore.getPacks().subscribe((packs) => { + this.packs = [...packs].sort((a, b) => + b.modifiedAt.localeCompare(a.modifiedAt) || a.id.localeCompare(b.id) + ); + this.refreshing = false; + }); + } + + private applyScopes(): void { + this.canAuthor = this.auth.canAuthorPolicies?.() ?? false; + this.canSimulate = this.auth.canSimulatePolicies?.() ?? false; + this.canReview = this.auth.canReviewPolicies?.() ?? false; this.canView = this.auth.canViewPolicies?.() ?? false; this.canApprove = this.auth.canApprovePolicies?.() ?? false; this.canOperate = this.auth.canOperatePolicies?.() ?? false; diff --git a/src/Web/StellaOps.Web/src/app/features/policy-studio/yaml/policy-yaml-editor.component.ts b/src/Web/StellaOps.Web/src/app/features/policy-studio/yaml/policy-yaml-editor.component.ts index aa2dc33df..152a6d820 100644 --- a/src/Web/StellaOps.Web/src/app/features/policy-studio/yaml/policy-yaml-editor.component.ts +++ b/src/Web/StellaOps.Web/src/app/features/policy-studio/yaml/policy-yaml-editor.component.ts @@ -19,7 +19,7 @@ interface YamlDiagnostic { imports: [CommonModule, FormsModule, RouterLink], changeDetection: ChangeDetectionStrategy.OnPush, template: ` -
+    [section root line re-emitted; markup stripped during extraction]
     [trailing context: "Policy Studio · YAML" heading]
@@ -111,6 +111,7 @@ export class PolicyYamlEditorComponent { this.pack = p; this.yamlContent = this.buildInitialYaml(p); this.loading = false; + this.canonicalYaml = this.yamlContent; this.onContentChange(this.yamlContent); }); } diff --git a/src/Web/StellaOps.Web/src/app/features/risk/risk-dashboard.component.html b/src/Web/StellaOps.Web/src/app/features/risk/risk-dashboard.component.html index caf9f02ee..c7a3dab1f 100644 --- a/src/Web/StellaOps.Web/src/app/features/risk/risk-dashboard.component.html +++ b/src/Web/StellaOps.Web/src/app/features/risk/risk-dashboard.component.html @@ -28,7 +28,7 @@